-rw-r--r--	0000_README	   4
-rw-r--r--	1004_linux-5.3.5.patch	6035
2 files changed, 6039 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 74f4ffa6..1b2145a4 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-5.3.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.3.4
 
+Patch:  1004_linux-5.3.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.3.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-5.3.5.patch b/1004_linux-5.3.5.patch
new file mode 100644
index 00000000..ab833c15
--- /dev/null
+++ b/1004_linux-5.3.5.patch
@@ -0,0 +1,6035 @@
+diff --git a/Makefile b/Makefile
+index fa11c1d89acf..bf03c110ed9b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 3
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+ 
+@@ -751,6 +751,11 @@ else
+ # These warnings generated too much noise in a regular build.
+ # Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+ KBUILD_CFLAGS += -Wno-unused-but-set-variable
++
++# Warn about unmarked fall-throughs in switch statement.
++# Disabled for clang while comment to attribute conversion happens and
++# https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
++KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
+ endif
+ 
+ KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+@@ -845,9 +850,6 @@ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
+ # warn about C99 declaration after statement
+ KBUILD_CFLAGS += -Wdeclaration-after-statement
+ 
+-# Warn about unmarked fall-throughs in switch statement.
+-KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
+-
+ # Variable Length Arrays (VLAs) should not be used anywhere in the kernel
+ KBUILD_CFLAGS += -Wvla
+ 
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 24360211534a..b587a3b3939a 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -82,7 +82,7 @@ config ARM
+ 	select HAVE_FAST_GUP if ARM_LPAE
+ 	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+ 	select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
+-	select HAVE_FUNCTION_TRACER if !XIP_KERNEL
++	select HAVE_FUNCTION_TRACER if !XIP_KERNEL && (CC_IS_GCC || CLANG_VERSION >= 100000)
+ 	select HAVE_GCC_PLUGINS
+ 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
+ 	select HAVE_IDE if PCI || ISA || PCMCIA
+@@ -1572,8 +1572,9 @@ config ARM_PATCH_IDIV
+ 	  code to do integer division.
+ 
+ config AEABI
+-	bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K
+-	default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K
++	bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \
++		!CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG
++	default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG
+ 	help
+ 	  This option allows for the kernel to be compiled using the latest
+ 	  ARM ABI (aka EABI). This is only useful if you are using a user
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index c3624ca6c0bc..9b3d4deca9e4 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -112,6 +112,10 @@ ifeq ($(CONFIG_ARM_UNWIND),y)
+ CFLAGS_ABI	+=-funwind-tables
+ endif
+ 
++ifeq ($(CONFIG_CC_IS_CLANG),y)
++CFLAGS_ABI	+= -meabi gnu
++endif
++
+ # Accept old syntax despite ".syntax unified"
+ AFLAGS_NOWARN	:=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
+ 
+diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+index bfaa2de63a10..e2030ba16512 100644
+--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
++++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+@@ -72,7 +72,6 @@
+ 			reg = <0>;
+ 			/* 50 ns min period = 20 MHz */
+ 			spi-max-frequency = <20000000>;
+-			spi-cpol; /* Clock active low */
+ 			vcc-supply = <&vdisp>;
+ 			iovcc-supply = <&vdisp>;
+ 			vci-supply = <&vdisp>;
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 890eeaac3cbb..bd0f4821f7e1 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -191,7 +191,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+ {
+ 	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+ 
+-	if (fsr & FSR_WRITE)
++	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
+ 		mask = VM_WRITE;
+ 	if (fsr & FSR_LNX_PF)
+ 		mask = VM_EXEC;
+@@ -262,7 +262,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ 
+ 	if (user_mode(regs))
+ 		flags |= FAULT_FLAG_USER;
+-	if (fsr & FSR_WRITE)
++	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
+ 		flags |= FAULT_FLAG_WRITE;
+ 
+ 	/*
+diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
+index c063708fa503..9ecc2097a87a 100644
+--- a/arch/arm/mm/fault.h
++++ b/arch/arm/mm/fault.h
+@@ -6,6 +6,7 @@
+  * Fault status register encodings.  We steal bit 31 for our own purposes.
+  */
+ #define FSR_LNX_PF		(1 << 31)
++#define FSR_CM			(1 << 13)
+ #define FSR_WRITE		(1 << 11)
+ #define FSR_FS4			(1 << 10)
+ #define FSR_FS3_0		(15)
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index f866870db749..0b94b674aa91 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -18,8 +18,9 @@
+ 	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+ 
+ /* gap between mmap and stack */
+-#define MIN_GAP (128*1024*1024UL)
+-#define MAX_GAP ((TASK_SIZE)/6*5)
++#define MIN_GAP		(128*1024*1024UL)
++#define MAX_GAP		((STACK_TOP)/6*5)
++#define STACK_RND_MASK	(0x7ff >> (PAGE_SHIFT - 12))
+ 
+ static int mmap_is_legacy(struct rlimit *rlim_stack)
+ {
+@@ -35,13 +36,22 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
+ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ {
+ 	unsigned long gap = rlim_stack->rlim_cur;
++	unsigned long pad = stack_guard_gap;
++
++	/* Account for stack randomization if necessary */
++	if (current->flags & PF_RANDOMIZE)
++		pad += (STACK_RND_MASK << PAGE_SHIFT);
++
++	/* Values close to RLIM_INFINITY can overflow. */
++	if (gap + pad > gap)
++		gap += pad;
+ 
+ 	if (gap < MIN_GAP)
+ 		gap = MIN_GAP;
+ 	else if (gap > MAX_GAP)
+ 		gap = MAX_GAP;
+ 
+-	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
++	return PAGE_ALIGN(STACK_TOP - gap - rnd);
+ }
+ 
+ /*
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index d9a0038774a6..d5e0b908f0ba 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1177,6 +1177,22 @@ void __init adjust_lowmem_bounds(void)
+ 	 */
+ 	vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+ 
++	/*
++	 * The first usable region must be PMD aligned. Mark its start
++	 * as MEMBLOCK_NOMAP if it isn't
++	 */
++	for_each_memblock(memory, reg) {
++		if (!memblock_is_nomap(reg)) {
++			if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
++				phys_addr_t len;
++
++				len = round_up(reg->base, PMD_SIZE) - reg->base;
++				memblock_mark_nomap(reg->base, len);
++			}
++			break;
++		}
++	}
++
+ 	for_each_memblock(memory, reg) {
+ 		phys_addr_t block_start = reg->base;
+ 		phys_addr_t block_end = reg->base + reg->size;
+diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
+index 7a299a20f6dc..7a8b8bc69e8d 100644
+--- a/arch/arm64/include/asm/cmpxchg.h
++++ b/arch/arm64/include/asm/cmpxchg.h
+@@ -63,7 +63,7 @@ __XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")
+ #undef __XCHG_CASE
+ 
+ #define __XCHG_GEN(sfx)						\
+-static inline unsigned long __xchg##sfx(unsigned long x,	\
++static __always_inline unsigned long __xchg##sfx(unsigned long x,	\
+ 					volatile void *ptr,	\
+ 					int size)		\
+ {								\
+@@ -105,7 +105,7 @@ __XCHG_GEN(_mb)
+ #define arch_xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
+ 
+ #define __CMPXCHG_GEN(sfx)					\
+-static inline unsigned long __cmpxchg##sfx(volatile void *ptr,	\
++static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr,	\
+ 					   unsigned long old,	\
+ 					   unsigned long new,	\
+ 					   int size)		\
+@@ -212,7 +212,7 @@ __CMPWAIT_CASE( ,  , 64);
+ #undef __CMPWAIT_CASE
+ 
+ #define __CMPWAIT_GEN(sfx)					\
+-static inline void __cmpwait##sfx(volatile void *ptr,		\
++static __always_inline void __cmpwait##sfx(volatile void *ptr,	\
+ 				  unsigned long val,		\
+ 				  int size)			\
+ {								\
+diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
+index b050641b5139..8dac7110f0cb 100644
+--- a/arch/arm64/mm/mmap.c
++++ b/arch/arm64/mm/mmap.c
+@@ -54,7 +54,11 @@ unsigned long arch_mmap_rnd(void)
+ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ {
+ 	unsigned long gap = rlim_stack->rlim_cur;
+-	unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
++	unsigned long pad = stack_guard_gap;
++
++	/* Account for stack randomization if necessary */
++	if (current->flags & PF_RANDOMIZE)
++		pad += (STACK_RND_MASK << PAGE_SHIFT);
+ 
+ 	/* Values close to RLIM_INFINITY can overflow. */
+ 	if (gap + pad > gap)
+diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
+index 9a82dd11c0e9..bb8658cc7f12 100644
+--- a/arch/mips/include/asm/atomic.h
++++ b/arch/mips/include/asm/atomic.h
+@@ -68,7 +68,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v)		\
+ 		"\t" __scbeqz "	%0, 1b				\n"	\
+ 		"	.set	pop					\n"	\
+ 		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	\
+-		: "Ir" (i));						\
++		: "Ir" (i) : __LLSC_CLOBBER);				\
+ 	} else {							\
+ 		unsigned long flags;					\
+ 									\
+@@ -98,7 +98,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)	\
+ 		"	.set	pop					\n"	\
+ 		: "=&r" (result), "=&r" (temp),				\
+ 		  "+" GCC_OFF_SMALL_ASM() (v->counter)			\
+-		: "Ir" (i));						\
++		: "Ir" (i) : __LLSC_CLOBBER);				\
+ 	} else {							\
+ 		unsigned long flags;					\
+ 									\
+@@ -132,7 +132,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)	\
+ 		"	move	%0, %1					\n"	\
+ 		: "=&r" (result), "=&r" (temp),				\
+ 		  "+" GCC_OFF_SMALL_ASM() (v->counter)			\
+-		: "Ir" (i));						\
++		: "Ir" (i) : __LLSC_CLOBBER);				\
+ 	} else {							\
+ 		unsigned long flags;					\
+ 									\
+@@ -193,6 +193,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ 	if (kernel_uses_llsc) {
+ 		int temp;
+ 
++		loongson_llsc_mb();
+ 		__asm__ __volatile__(
+ 		"	.set	push					\n"
+ 		"	.set	"MIPS_ISA_LEVEL"			\n"
+@@ -200,16 +201,16 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ 		"	.set	pop					\n"
+ 		"	subu	%0, %1, %3				\n"
+ 		"	move	%1, %0					\n"
+-		"	bltz	%0, 1f					\n"
++		"	bltz	%0, 2f					\n"
+ 		"	.set	push					\n"
+ 		"	.set	"MIPS_ISA_LEVEL"			\n"
+ 		"	sc	%1, %2					\n"
+ 		"\t" __scbeqz "	%1, 1b					\n"
+-		"1:							\n"
++		"2:							\n"
+ 		"	.set	pop					\n"
+ 		: "=&r" (result), "=&r" (temp),
+ 		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+-		: "Ir" (i));
++		: "Ir" (i) : __LLSC_CLOBBER);
+ 	} else {
+ 		unsigned long flags;
+ 
+@@ -269,7 +270,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		\
+ 		"\t" __scbeqz "	%0, 1b				\n"	\
+ 		"	.set	pop					\n"	\
+ 		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	\
+-		: "Ir" (i));						\
++		: "Ir" (i) : __LLSC_CLOBBER);				\
+ 	} else {							\
+ 		unsigned long flags;					\
+ 									\
+@@ -299,7 +300,7 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)	\
+ 		"	.set	pop					\n"	\
+ 		: "=&r" (result), "=&r" (temp),				\
+ 		  "+" GCC_OFF_SMALL_ASM() (v->counter)			\
+-		: "Ir" (i));						\
++		: "Ir" (i) : __LLSC_CLOBBER);				\
+ 	} else {							\
+ 		unsigned long flags;					\
+ 									\
+@@ -333,7 +334,7 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)	\
+ 		"	.set	pop					\n"	\
+ 		: "=&r" (result), "=&r" (temp),				\
+ 		  "+" GCC_OFF_SMALL_ASM() (v->counter)			\
+-		: "Ir" (i));						\
++		: "Ir" (i) : __LLSC_CLOBBER);				\
+ 	} else {							\
+ 		unsigned long flags;					\
+ 									\
+diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
+index b865e317a14f..9228f7386220 100644
+--- a/arch/mips/include/asm/barrier.h
++++ b/arch/mips/include/asm/barrier.h
+@@ -211,14 +211,22 @@
+ #define __smp_wmb()	barrier()
+ #endif
+ 
++/*
++ * When LL/SC does imply order, it must also be a compiler barrier to avoid the
++ * compiler from reordering where the CPU will not. When it does not imply
++ * order, the compiler is also free to reorder across the LL/SC loop and
++ * ordering will be done by smp_llsc_mb() and friends.
++ */
+ #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
+ #define __WEAK_LLSC_MB		"	sync	\n"
++#define smp_llsc_mb()		__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
++#define __LLSC_CLOBBER
+ #else
+ #define __WEAK_LLSC_MB		"		\n"
++#define smp_llsc_mb()		do { } while (0)
++#define __LLSC_CLOBBER		"memory"
+ #endif
+ 
+-#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+-
+ #ifdef CONFIG_CPU_CAVIUM_OCTEON
+ #define smp_mb__before_llsc() smp_wmb()
+ #define __smp_mb__before_llsc() __smp_wmb()
+@@ -238,36 +246,40 @@
+ 
+ /*
+  * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+- * store or pref) in between an ll & sc can cause the sc instruction to
++ * store or prefetch) in between an LL & SC can cause the SC instruction to
+  * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+  * containing such sequences, this bug bites harder than we might otherwise
+  * expect due to reordering & speculation:
+  *
+- * 1) A memory access appearing prior to the ll in program order may actually
+- *    be executed after the ll - this is the reordering case.
++ * 1) A memory access appearing prior to the LL in program order may actually
++ *    be executed after the LL - this is the reordering case.
+  *
+- *    In order to avoid this we need to place a memory barrier (ie. a sync
+- *    instruction) prior to every ll instruction, in between it & any earlier
+- *    memory access instructions. Many of these cases are already covered by
+- *    smp_mb__before_llsc() but for the remaining cases, typically ones in
+- *    which multiple CPUs may operate on a memory location but ordering is not
+- *    usually guaranteed, we use loongson_llsc_mb() below.
++ *    In order to avoid this we need to place a memory barrier (ie. a SYNC
++ *    instruction) prior to every LL instruction, in between it and any earlier
++ *    memory access instructions.
+  *
+  *    This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+  *
+- * 2) If a conditional branch exists between an ll & sc with a target outside
+- *    of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
++ * 2) If a conditional branch exists between an LL & SC with a target outside
++ *    of the LL-SC loop, for example an exit upon value mismatch in cmpxchg()
+  *    or similar, then misprediction of the branch may allow speculative
+- *    execution of memory accesses from outside of the ll-sc loop.
++ *    execution of memory accesses from outside of the LL-SC loop.
+  *
+- *    In order to avoid this we need a memory barrier (ie. a sync instruction)
++ *    In order to avoid this we need a memory barrier (ie. a SYNC instruction)
+  *    at each affected branch target, for which we also use loongson_llsc_mb()
+  *    defined below.
+  *
+  *    This case affects all current Loongson 3 CPUs.
++ *
++ * The above described cases cause an error in the cache coherence protocol;
++ * such that the Invalidate of a competing LL-SC goes 'missing' and SC
++ * erroneously observes its core still has Exclusive state and lets the SC
++ * proceed.
++ *
++ * Therefore the error only occurs on SMP systems.
+  */
+ #ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
+-#define loongson_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
++#define loongson_llsc_mb()	__asm__ __volatile__("sync" : : :"memory")
+ #else
+ #define loongson_llsc_mb()	do { } while (0)
+ #endif
+diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
+index 9a466dde9b96..985d6a02f9ea 100644
+--- a/arch/mips/include/asm/bitops.h
++++ b/arch/mips/include/asm/bitops.h
+@@ -66,7 +66,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
+ 		"	beqzl	%0, 1b					\n"
+ 		"	.set	pop					\n"
+ 		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
+-		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
++		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)
++		: __LLSC_CLOBBER);
+ #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ 	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+ 		loongson_llsc_mb();
+@@ -76,7 +77,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
+ 			"	" __INS "%0, %3, %2, 1			\n"
+ 			"	" __SC "%0, %1				\n"
+ 			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+-			: "ir" (bit), "r" (~0));
++			: "ir" (bit), "r" (~0)
++			: __LLSC_CLOBBER);
+ 		} while (unlikely(!temp));
+ #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
+ 	} else if (kernel_uses_llsc) {
+@@ -90,7 +92,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
+ 			"	" __SC	"%0, %1				\n"
+ 			"	.set	pop				\n"
+ 			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+-			: "ir" (1UL << bit));
++			: "ir" (1UL << bit)
++			: __LLSC_CLOBBER);
+ 		} while (unlikely(!temp));
+ 	} else
+ 		__mips_set_bit(nr, addr);
+@@ -122,7 +125,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
+ 		"	beqzl	%0, 1b					\n"
+ 		"	.set	pop					\n"
+ 		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+-		: "ir" (~(1UL << bit)));
++		: "ir" (~(1UL << bit))
++		: __LLSC_CLOBBER);
+ #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ 	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+ 		loongson_llsc_mb();
+@@ -132,7 +136,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
+ 			"	" __INS "%0, $0, %2, 1			\n"
+ 			"	" __SC "%0, %1				\n"
+ 			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+-			: "ir" (bit));
++			: "ir" (bit)
++			: __LLSC_CLOBBER);
+ 		} while (unlikely(!temp));
+ #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
+ 	} else if (kernel_uses_llsc) {
+@@ -146,7 +151,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
+ 			"	" __SC "%0, %1				\n"
+ 			"	.set	pop				\n"
+ 			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+-			: "ir" (~(1UL << bit)));
++			: "ir" (~(1UL << bit))
++			: __LLSC_CLOBBER);
+ 		} while (unlikely(!temp));
+ 	} else
+ 		__mips_clear_bit(nr, addr);
+@@ -192,7 +198,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
+ 		"	beqzl	%0, 1b				\n"
+ 		"	.set	pop					\n"
+ 		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+-		: "ir" (1UL << bit));
++		: "ir" (1UL << bit)
++		: __LLSC_CLOBBER);
+ 	} else if (kernel_uses_llsc) {
+ 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ 		unsigned long temp;
+@@ -207,7 +214,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
+ 			"	" __SC	"%0, %1				\n"
+ 			"	.set	pop				\n"
+ 			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+-			: "ir" (1UL << bit));
++			: "ir" (1UL << bit)
++			: __LLSC_CLOBBER);
+ 		} while (unlikely(!temp));
+ 	} else
+ 		__mips_change_bit(nr, addr);
+@@ -244,11 +252,12 @@ static inline int test_and_set_bit(unsigned long nr,
+ 		"	.set	pop					\n"
+ 		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ 		: "r" (1UL << bit)
+-		: "memory");
++		: __LLSC_CLOBBER);
+ 	} else if (kernel_uses_llsc) {
+ 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ 		unsigned long temp;
+ 
++		loongson_llsc_mb();
+ 		do {
+ 			__asm__ __volatile__(
+ 			"	.set	push				\n"
+@@ -259,7 +268,7 @@ static inline int test_and_set_bit(unsigned long nr,
+ 			"	.set	pop				\n"
+ 			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ 			: "r" (1UL << bit)
+-			: "memory");
++			: __LLSC_CLOBBER);
+ 		} while (unlikely(!res));
+ 
+ 		res = temp & (1UL << bit);
+@@ -300,11 +309,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
+ 		"	.set	pop					\n"
+ 		: "=&r" (temp), "+m" (*m), "=&r" (res)
+ 		: "r" (1UL << bit)
+-		: "memory");
++		: __LLSC_CLOBBER);
+ 	} else if (kernel_uses_llsc) {
+ 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ 		unsigned long temp;
+ 
++		loongson_llsc_mb();
+ 		do {
+ 			__asm__ __volatile__(
+ 			"	.set	push				\n"
+@@ -315,7 +325,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
+ 			"	.set	pop				\n"
+ 			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ 			: "r" (1UL << bit)
+-			: "memory");
++			: __LLSC_CLOBBER);
+ 		} while (unlikely(!res));
+ 
+ 		res = temp & (1UL << bit);
+@@ -358,12 +368,13 @@ static inline int test_and_clear_bit(unsigned long nr,
+ 		"	.set	pop					\n"
+ 		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ 		: "r" (1UL << bit)
+-		: "memory");
++		: __LLSC_CLOBBER);
+ #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ 	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
+ 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ 		unsigned long temp;
+ 
++		loongson_llsc_mb();
+ 		do {
+ 			__asm__ __volatile__(
+ 			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
+@@ -372,13 +383,14 @@ static inline int test_and_clear_bit(unsigned long nr,
+ 			"	" __SC	"%0, %1				\n"
+ 			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ 			: "ir" (bit)
+-			: "memory");
++			: __LLSC_CLOBBER);
+ 		} while (unlikely(!temp));
+ #endif
+ 	} else if (kernel_uses_llsc) {
+ 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ 		unsigned long temp;
+ 
++		loongson_llsc_mb();
+ 		do {
+ 			__asm__ __volatile__(
+ 			"	.set	push				\n"
+@@ -390,7 +402,7 @@ static inline int test_and_clear_bit(unsigned long nr,
+ 			"	.set	pop				\n"
+ 			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ 			: "r" (1UL << bit)
+-			: "memory");
++			: __LLSC_CLOBBER);
+ 		} while (unlikely(!res));
+ 
+ 		res = temp & (1UL << bit);
+@@ -433,11 +445,12 @@ static inline int test_and_change_bit(unsigned long nr,
+ 		"	.set	pop					\n"
+ 		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ 		: "r" (1UL << bit)
+-		: "memory");
++		: __LLSC_CLOBBER);
+ 	} else if (kernel_uses_llsc) {
+ 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ 		unsigned long temp;
+ 
++		loongson_llsc_mb();
+ 		do {
+ 			__asm__ __volatile__(
+ 			"	.set	push				\n"
+@@ -448,7 +461,7 @@ static inline int test_and_change_bit(unsigned long nr,
+ 			"	.set	pop				\n"
+ 			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ 			: "r" (1UL << bit)
+-			: "memory");
++			: __LLSC_CLOBBER);
+ 		} while (unlikely(!res));
+ 
+ 		res = temp & (1UL << bit);
+diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
+index f345a873742d..c8a47d18f628 100644
+--- a/arch/mips/include/asm/cmpxchg.h
++++ b/arch/mips/include/asm/cmpxchg.h
+@@ -46,6 +46,7 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
+ 	__typeof(*(m)) __ret;						\
+ 									\
+ 	if (kernel_uses_llsc) {						\
++		loongson_llsc_mb();					\
+ 		__asm__ __volatile__(					\
+ 		"	.set	push				\n"	\
+ 		"	.set	noat				\n"	\
+@@ -60,7 +61,7 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
+ 		"	.set	pop				\n"	\
+ 		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
+ 		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)			\
+-		: "memory");						\
++		: __LLSC_CLOBBER);					\
+ 	} else {							\
+ 		unsigned long __flags;					\
+ 									\
+@@ -117,6 +118,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+ 	__typeof(*(m)) __ret;						\
+ 									\
+ 	if (kernel_uses_llsc) {						\
++		loongson_llsc_mb();					\
+ 		__asm__ __volatile__(					\
+ 		"	.set	push				\n"	\
+ 		"	.set	noat				\n"	\
+@@ -132,8 +134,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+ 		"	.set	pop				\n"	\
+ 		"2:						\n"	\
+ 		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
+-		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)	\
+-		: "memory");						\
++		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)	\
++		: __LLSC_CLOBBER);					\
++		loongson_llsc_mb();					\
+ 	} else {							\
+ 		unsigned long __flags;					\
+ 									\
+@@ -229,6 +232,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
+ 	 */
+ 	local_irq_save(flags);
+ 
++	loongson_llsc_mb();
+ 	asm volatile(
+ 	"	.set	push				\n"
+ 	"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"
+@@ -274,6 +278,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
+ 	  "r" (old),
+ 	  "r" (new)
+ 	: "memory");
++	loongson_llsc_mb();
+ 
+ 	local_irq_restore(flags);
+ 	return ret;
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index 1e6966e8527e..bdbdc19a2b8f 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -689,6 +689,9 @@
+ #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
+ #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
+ 
++/* Ingenic Config7 bits */
++#define MIPS_CONF7_BTB_LOOP_EN	(_ULCAST_(1) << 4)
++
+ /* Config7 Bits specific to MIPS Technologies. */
+ 
+ /* Performance counters implemented Per TC */
+@@ -2813,6 +2816,7 @@ __BUILD_SET_C0(status)
+ __BUILD_SET_C0(cause)
+ __BUILD_SET_C0(config)
+ __BUILD_SET_C0(config5)
++__BUILD_SET_C0(config7)
+ __BUILD_SET_C0(intcontrol)
+ __BUILD_SET_C0(intctl)
+ __BUILD_SET_C0(srsmap)
+diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
+index 1db29957a931..2c38f75d87ff 100644
+--- a/arch/mips/kernel/branch.c
++++ b/arch/mips/kernel/branch.c
+@@ -58,6 +58,7 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ 		       unsigned long *contpc)
+ {
+ 	union mips_instruction insn = (union mips_instruction)dec_insn.insn;
++	int __maybe_unused bc_false = 0;
+ 
+ 	if (!cpu_has_mmips)
+ 		return 0;
+@@ -139,7 +140,6 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ #ifdef CONFIG_MIPS_FP_SUPPORT
+ 		case mm_bc2f_op:
+ 		case mm_bc1f_op: {
+-			int bc_false = 0;
+ 			unsigned int fcr31;
+ 			unsigned int bit;
+ 
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 9635c1db3ae6..e654ffc1c8a0 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1964,6 +1964,13 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+ 		c->cputype = CPU_JZRISC;
+ 		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ 		__cpu_name[cpu] = "Ingenic JZRISC";
++		/*
++		 * The XBurst core by default attempts to avoid branch target
++		 * buffer lookups by detecting & special casing loops. This
++		 * feature will cause BogoMIPS and lpj calculate in error.
++		 * Set cp0 config7 bit 4 to disable this feature.
++		 */
++		set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
+ 		break;
+ 	default:
+ 		panic("Unknown Ingenic Processor ID!");
+diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
+index b6dc78ad5d8c..b0e25e913bdb 100644
+--- a/arch/mips/kernel/syscall.c
++++ b/arch/mips/kernel/syscall.c
+@@ -132,6 +132,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
+ 		  [efault] "i" (-EFAULT)
+ 		: "memory");
+ 	} else if (cpu_has_llsc) {
++		loongson_llsc_mb();
+ 		__asm__ __volatile__ (
+ 		"	.set	push					\n"
+ 		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
+diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
+index d79f2b432318..f5c778113384 100644
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -21,8 +21,9 @@ unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
+ EXPORT_SYMBOL(shm_align_mask);
+ 
+ /* gap between mmap and stack */
+-#define MIN_GAP (128*1024*1024UL)
+-#define MAX_GAP ((TASK_SIZE)/6*5)
++#define MIN_GAP		(128*1024*1024UL)
++#define MAX_GAP		((TASK_SIZE)/6*5)
++#define STACK_RND_MASK	(0x7ff >> (PAGE_SHIFT - 12))
+ 
+ static int mmap_is_legacy(struct rlimit *rlim_stack)
+ {
+@@ -38,6 +39,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
+ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ {
+ 	unsigned long gap = rlim_stack->rlim_cur;
++	unsigned long pad = stack_guard_gap;
++
++	/* Account for stack randomization if necessary */
++	if (current->flags & PF_RANDOMIZE)
++		pad += (STACK_RND_MASK << PAGE_SHIFT);
++
++	/* Values close to RLIM_INFINITY can overflow. */
++	if (gap + pad > gap)
++		gap += pad;
+ 
+ 	if (gap < MIN_GAP)
+ 		gap = MIN_GAP;
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 144ceb0fba88..bece1264d1c5 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -631,7 +631,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+ 		return;
+ 	}
+ 
+-	if (cpu_has_rixi && _PAGE_NO_EXEC) {
++	if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
+ 		if (fill_includes_sw_bits) {
+ 			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+ 		} else {
+diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
+index 3a6aa57b9d90..eea28ca679db 100644
+--- a/arch/powerpc/include/asm/futex.h
++++ b/arch/powerpc/include/asm/futex.h
+@@ -60,8 +60,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ 
+ 	pagefault_enable();
+ 
+-	if (!ret)
+-		*oval = oldval;
++	*oval = oldval;
+ 
+ 	prevent_write_to_user(uaddr, sizeof(*uaddr));
+ 	return ret;
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 89623962c727..fe0c32fb9f96 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -744,6 +744,33 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
+  */
+ #define MAX_WAIT_FOR_RECOVERY 300
+ 
++
++/* Walks the PE tree after processing an event to remove any stale PEs.
++ *
++ * NB: This needs to be recursive to ensure the leaf PEs get removed
++ * before their parents do. Although this is possible to do recursively
++ * we don't since this is easier to read and we need to garantee
++ * the leaf nodes will be handled first.
++ */
++static void eeh_pe_cleanup(struct eeh_pe *pe)
++{
++	struct eeh_pe *child_pe, *tmp;
++
++	list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child)
++		eeh_pe_cleanup(child_pe);
++
++	if (pe->state & EEH_PE_KEEP)
++		return;
++
++	if (!(pe->state & EEH_PE_INVALID))
++		return;
++
++	if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
++		list_del(&pe->child);
++		kfree(pe);
++	}
++}
++
+ /**
+  * eeh_handle_normal_event - Handle EEH events on a specific PE
+  * @pe: EEH PE - which should not be used after we return, as it may
+@@ -782,8 +809,6 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ 		return;
+ 	}
+ 
+-	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+-
+ 	eeh_pe_update_time_stamp(pe);
+ 	pe->freeze_count++;
+ 	if (pe->freeze_count > eeh_max_freezes) {
+@@ -793,6 +818,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ 		result = PCI_ERS_RESULT_DISCONNECT;
+ 	}
+ 
++	eeh_for_each_pe(pe, tmp_pe)
++		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
++			edev->mode &= ~EEH_DEV_NO_HANDLER;
++
+ 	/* Walk the various device drivers attached to this slot through
+ 	 * a reset sequence, giving each an opportunity to do what it needs
+ 	 * to accomplish the reset.  Each child gets a report of the
+@@ -969,6 +998,12 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ 			return;
+ 		}
+ 	}
++
++	/*
++	 * Clean up any PEs without devices. While marked as EEH_PE_RECOVERYING
++	 * we don't want to modify the PE tree structure so we do it here.
++	 */
++	eeh_pe_cleanup(pe);
+ 	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
+ }
+ 
+@@ -981,7 +1016,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+  */
+ void eeh_handle_special_event(void)
+ {
+-	struct eeh_pe *pe, *phb_pe;
++	struct eeh_pe *pe, *phb_pe, *tmp_pe;
++	struct eeh_dev *edev, *tmp_edev;
+ 	struct pci_bus *bus;
+ 	struct pci_controller *hose;
+ 	unsigned long flags;
+@@ -1040,6 +1076,7 @@ void eeh_handle_special_event(void)
+ 		 */
+ 		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
+ 		    rc == EEH_NEXT_ERR_FENCED_PHB) {
++			eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+ 			eeh_handle_normal_event(pe);
+ 		} else {
+ 			pci_lock_rescan_remove();
+@@ -1050,6 +1087,10 @@ void eeh_handle_special_event(void)
+ 				    (phb_pe->state & EEH_PE_RECOVERING))
+ 					continue;
+ 
++				eeh_for_each_pe(pe, tmp_pe)
++					eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
++						edev->mode &= ~EEH_DEV_NO_HANDLER;
++
+ 				/* Notify all devices to be down */
+ 				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
+ 				eeh_set_channel_state(pe, pci_channel_io_perm_failure);
+diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
+index 64cfbe41174b..e36653e5f76b 100644
+--- a/arch/powerpc/kernel/eeh_event.c
++++ b/arch/powerpc/kernel/eeh_event.c
+@@ -121,6 +121,14 @@ int __eeh_send_failure_event(struct eeh_pe *pe)
+ 	}
+ 	event->pe = pe;
+ 
++	/*
++	 * Mark the PE as recovering before inserting it in the queue.
++	 * This prevents the PE from being free()ed by a hotplug driver
++	 * while the PE is sitting in the event queue.
++	 */
++	if (pe)
++		eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
++
+ 	/* We may or may not be called in an interrupt context */
+ 	spin_lock_irqsave(&eeh_eventlist_lock, flags);
+ 	list_add(&event->list, &eeh_eventlist);
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index 854cef7b18f4..f0813d50e0b1 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -491,6 +491,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
+ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
+ {
+ 	struct eeh_pe *pe, *parent, *child;
++	bool keep, recover;
+ 	int cnt;
+ 	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
+ 
+@@ -516,10 +517,21 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
+ 	 */
+ 	while (1) {
+ 		parent = pe->parent;
++
++		/* PHB PEs should never be removed */
+ 		if (pe->type & EEH_PE_PHB)
+ 			break;
+ 
+-		if (!(pe->state & EEH_PE_KEEP)) {
++		/*
++		 * XXX: KEEP is set while resetting a PE. I don't think it's
++		 *	ever set without RECOVERING also being set. I could
++		 *	be wrong though so catch that with a WARN.
++		 */
++		keep = !!(pe->state & EEH_PE_KEEP);
++		recover = !!(pe->state & EEH_PE_RECOVERING);
++		WARN_ON(keep && !recover);
++
++		if (!keep && !recover) {
+ 			if (list_empty(&pe->edevs) &&
+ 			    list_empty(&pe->child_list)) {
+ 				list_del(&pe->child);
+@@ -528,6 +540,15 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
+ 				break;
+ 			}
+ 		} else {
++			/*
++			 * Mark the PE as invalid. At the end of the recovery
++			 * process any invalid PEs will be garbage collected.
++			 *
++			 * We need to delay the free()ing of them since we can
++			 * remove edev's while traversing the PE tree which
++			 * might trigger the removal of a PE and we can't
++			 * deal with that (yet).
++			 */
+ 			if (list_empty(&pe->edevs)) {
+ 				cnt = 0;
+ 				list_for_each_entry(child, &pe->child_list, child) {
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 6ba3cc2ef8ab..36c8a3652cf3 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1211,6 +1211,10 @@ FTR_SECTION_ELSE
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+ 9:
+ 	/* Deliver the machine check to host kernel in V mode. */
++BEGIN_FTR_SECTION
++	ld	r10,ORIG_GPR3(r1)
++	mtspr	SPRN_CFAR,r10
++END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ 	MACHINE_CHECK_HANDLER_WINDUP
+ 	EXCEPTION_PROLOG_0 PACA_EXMC
+ 	b	machine_check_pSeries_0
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 5faf0a64c92b..05824eb4323b 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -871,15 +871,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+ 		return 0;
+ 
+ 	for_each_cpu(cpu, cpus) {
++		struct device *dev = get_cpu_device(cpu);
++
+ 		switch (state) {
+ 		case DOWN:
+-			cpuret = cpu_down(cpu);
++			cpuret = device_offline(dev);
+ 			break;
+ 		case UP:
+-			cpuret = cpu_up(cpu);
++			cpuret = device_online(dev);
+ 			break;
+ 		}
+-		if (cpuret) {
++		if (cpuret < 0) {
+ 			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+ 					__func__,
+ 					((state == UP) ? "up" : "down"),
+@@ -968,6 +970,8 @@ int rtas_ibm_suspend_me(u64 handle)
+ 	data.token = rtas_token("ibm,suspend-me");
+ 	data.complete = &done;
+ 
++	lock_device_hotplug();
++
+ 	/* All present CPUs must be online */
+ 	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+ 	cpuret = rtas_online_cpus_mask(offline_mask);
+@@ -1006,6 +1010,7 @@ out_hotplug_enable:
+ 			__func__);
+ 
+ out:
++	unlock_device_hotplug();
+ 	free_cpumask_var(offline_mask);
+ 	return atomic_read(&data.error);
+ }
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 11caa0291254..82f43535e686 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -472,6 +472,7 @@ void system_reset_exception(struct pt_regs *regs)
+ 	if (debugger(regs))
+ 		goto out;
+ 
++	kmsg_dump(KMSG_DUMP_OOPS);
+ 	/*
+ 	 * A system reset is a request to dump, so we always send
+ 	 * it through the crashdump code (if fadump or kdump are
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index b4ca9e95e678..c5cc16ab1954 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -902,7 +902,7 @@ int __meminit radix__create_section_mapping(unsigned long start, unsigned long e
+ 		return -1;
+ 	}
+ 
+-	return create_physical_mapping(start, end, nid);
++	return create_physical_mapping(__pa(start), __pa(end), nid);
+ }
+ 
+ int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
+diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
+index 6a88a9f585d4..5d6111a9ee0e 100644
+--- a/arch/powerpc/mm/ptdump/ptdump.c
++++ b/arch/powerpc/mm/ptdump/ptdump.c
+@@ -299,17 +299,15 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+ 
+ static void walk_pagetables(struct pg_state *st)
+ {
+-	pgd_t *pgd = pgd_offset_k(0UL);
+ 	unsigned int i;
+-	unsigned long addr;
+-
+-	addr = st->start_address;
++	unsigned long addr = st->start_address & PGDIR_MASK;
++	pgd_t *pgd = pgd_offset_k(addr);
+ 
+ 	/*
+ 	 * Traverse the linux pagetable structure and dump pages that are in
+ 	 * the hash pagetable.
+	 */
+-	for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
++	for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
+ 		if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd))
+ 			/* pgd exists */
+ 			walk_pud(st, pgd, addr);
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index dea243185ea4..cb50a9e1fd2d 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -577,6 +577,7 @@ static int core_imc_mem_init(int cpu, int size)
+ {
+ 	int nid, rc = 0, core_id = (cpu / threads_per_core);
+ 	struct imc_mem_info *mem_info;
++	struct page *page;
+ 
+ 	/*
+ 	 * alloc_pages_node() will allocate memory for core in the
+@@ -587,11 +588,12 @@ static int core_imc_mem_init(int cpu, int size)
+ 	mem_info->id = core_id;
+ 
+ 	/* We need only vbase for core counters */
+-	mem_info->vbase = page_address(alloc_pages_node(nid,
+-					  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+-					  __GFP_NOWARN, get_order(size)));
+-	if (!mem_info->vbase)
++	page = alloc_pages_node(nid,
++			GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
++			__GFP_NOWARN, get_order(size));
++	if (!page)
+ 		return -ENOMEM;
++	mem_info->vbase = page_address(page);
+ 
+ 	/* Init the mutex */
+ 	core_imc_refc[core_id].id = core_id;
+@@ -849,15 +851,17 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
+ 	int nid = cpu_to_node(cpu_id);
+ 
+ 	if (!local_mem) {
++		struct page *page;
+ 		/*
+ 		 * This case could happen only once at start, since we dont
+ 		 * free the memory in cpu offline path.
+ 		 */
+-		local_mem = page_address(alloc_pages_node(nid,
++		page = alloc_pages_node(nid,
+ 				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+-				  __GFP_NOWARN, get_order(size)));
+-		if (!local_mem)
++				  __GFP_NOWARN, get_order(size));
++		if (!page)
+ 			return -ENOMEM;
++		local_mem = page_address(page);
+ 
+ 		per_cpu(thread_imc_mem, cpu_id) = local_mem;
+ 	}
+@@ -1095,11 +1099,14 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
+ 	int core_id = (cpu_id / threads_per_core);
+ 
+ 	if (!local_mem) {
+-		local_mem = page_address(alloc_pages_node(phys_id,
+-					GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+-					__GFP_NOWARN, get_order(size)));
+-		if (!local_mem)
++		struct page *page;
++
++		page = alloc_pages_node(phys_id,
++				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
++				__GFP_NOWARN, get_order(size));
++		if (!page)
+ 			return -ENOMEM;
++		local_mem = page_address(page);
+ 		per_cpu(trace_imc_mem, cpu_id) = local_mem;
+ 
+ 		/* Initialise the counters for trace mode */
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+index e28f03e1eb5e..c75ec37bf0cd 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+@@ -36,7 +36,8 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
+ 	struct page *tce_mem = NULL;
+ 	__be64 *addr;
+ 
+-	tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT);
++	tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN,
++			shift - PAGE_SHIFT);
+ 	if (!tce_mem) {
+ 		pr_err("Failed to allocate a TCE memory, level shift=%d\n",
+ 				shift);
+@@ -161,6 +162,9 @@ void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
+ 
+ 		if (ptce)
+ 			*ptce = cpu_to_be64(0);
++		else
++			/* Skip the rest of the level */
++			i |= tbl->it_level_size - 1;
+ 	}
+ }
+ 
+@@ -260,7 +264,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ 	unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
+ 			PAGE_SHIFT);
+ 	const unsigned long tce_table_size = 1UL << table_shift;
+-	unsigned int tmplevels = levels;
+ 
+ 	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
+ 		return -EINVAL;
+@@ -268,9 +271,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ 	if (!is_power_of_2(window_size))
+ 		return -EINVAL;
+ 
+-	if (alloc_userspace_copy && (window_size > (1ULL << 32)))
+-		tmplevels = 1;
+-
+ 	/* Adjust direct table size from window_size and levels */
+ 	entries_shift = (entries_shift + levels - 1) / levels;
+ 	level_shift = entries_shift + 3;
+@@ -281,7 +281,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ 
+ 	/* Allocate TCE table */
+ 	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
+-			tmplevels, tce_table_size, &offset, &total_allocated);
++			1, tce_table_size, &offset, &total_allocated);
+ 
+ 	/* addr==NULL means that the first level allocation failed */
+ 	if (!addr)
+@@ -292,18 +292,18 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ 	 * we did not allocate as much as we wanted,
+ 	 * release partially allocated table.
+ 	 */
+-	if (tmplevels == levels && offset < tce_table_size)
++	if (levels == 1 && offset < tce_table_size)
+ 		goto free_tces_exit;
+ 
+ 	/* Allocate userspace view of the TCE table */
+ 	if (alloc_userspace_copy) {
+ 		offset = 0;
+ 		uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
+-				tmplevels, tce_table_size, &offset,
++				1, tce_table_size, &offset,
+ 				&total_allocated_uas);
+ 		if (!uas)
+ 			goto free_tces_exit;
+-		if (tmplevels == levels && (offset < tce_table_size ||
++		if (levels == 1 && (offset < tce_table_size ||
+ 				total_allocated_uas != total_allocated))
+ 			goto free_uas_exit;
+ 	}
+@@ -318,7 +318,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ 
+ 	pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
+ 			window_size, tce_table_size, bus_offset, tbl->it_base,
+-			tbl->it_userspace, tmplevels, levels);
++			tbl->it_userspace, 1, levels);
+ 
+ 	return 0;
+ 
+diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
+index 469c24463247..f914f0b14e4e 100644
+--- a/arch/powerpc/platforms/powernv/pci.h
++++ b/arch/powerpc/platforms/powernv/pci.h
+@@ -219,7 +219,7 @@ extern struct iommu_table_group *pnv_npu_compound_attach(
+ 		struct pnv_ioda_pe *pe);
+ 
+ /* pci-ioda-tce.c */
+-#define POWERNV_IOMMU_DEFAULT_LEVELS	1
++#define POWERNV_IOMMU_DEFAULT_LEVELS	2
+ #define POWERNV_IOMMU_MAX_LEVELS	5
+ 
+ extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index fe812bebdf5e..b571285f6c14 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -9,6 +9,7 @@
+ #include <linux/cpu.h>
+ #include <linux/kernel.h>
+ #include <linux/kobject.h>
++#include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/stat.h>
+ #include <linux/completion.h>
+@@ -207,7 +208,11 @@ static int update_dt_node(__be32 phandle, s32 scope)
+ 
+ 				prop_data += vd;
+ 			}
++
++			cond_resched();
+ 		}
++
++		cond_resched();
+ 	} while (rtas_rc == 1);
+ 
+ 	of_node_put(dn);
+@@ -310,8 +315,12 @@ int pseries_devicetree_update(s32 scope)
+ 					add_dt_node(phandle, drc_index);
+ 					break;
+ 				}
++
++				cond_resched();
+ 			}
+ 		}
++
++		cond_resched();
+ 	} while (rc == 1);
+ 
+ 	kfree(rtas_buf);
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index f5940cc71c37..63462e96cf0e 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -316,6 +316,9 @@ static void pseries_lpar_idle(void)
+ 	 * low power mode by ceding processor to hypervisor
+ 	 */
+ 
++	if (!prep_irq_for_idle())
++		return;
++
+ 	/* Indicate to hypervisor that we are idle. */
+ 	get_lppaca()->idle = 1;
+ 
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 14e56c25879f..25d4adccf750 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -2534,13 +2534,16 @@ static void dump_pacas(void)
+ static void dump_one_xive(int cpu)
+ {
+ 	unsigned int hwid = get_hard_smp_processor_id(cpu);
+-
+-	opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
+-	opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
+-	opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
+-	opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
+-	opal_xive_dump(XIVE_DUMP_VP, hwid);
+-	opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
++	bool hv = cpu_has_feature(CPU_FTR_HVMODE);
++
++	if (hv) {
++		opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
++		opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
++		opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
++		opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
++		opal_xive_dump(XIVE_DUMP_VP, hwid);
++		opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
++	}
+ 
+ 	if (setjmp(bus_error_jmp) != 0) {
+ 		catch_memory_errors = 0;
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index ccad1398abd4..b5cfcad953c2 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -269,7 +269,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
+ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
+ {
+ 	struct inode *root_inode;
+-	struct dentry *root_dentry;
++	struct dentry *root_dentry, *update_file;
+ 	int rc = 0;
+ 	struct hypfs_sb_info *sbi;
+ 
+@@ -300,9 +300,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
+ 		rc = hypfs_diag_create_files(root_dentry);
+ 	if (rc)
+ 		return rc;
+-	sbi->update_file = hypfs_create_update_file(root_dentry);
+-	if (IS_ERR(sbi->update_file))
+-		return PTR_ERR(sbi->update_file);
++	update_file = hypfs_create_update_file(root_dentry);
++	if (IS_ERR(update_file))
++		return PTR_ERR(update_file);
++	sbi->update_file = update_file;
+ 	hypfs_update_update(sb);
+ 	pr_info("Hypervisor filesystem mounted\n");
+ 	return 0;
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index fff790a3f4ee..c0867b0aae3e 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -645,7 +645,9 @@ static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
+ 		.vector = stimer->config.apic_vector
+ 	};
+ 
+-	return !kvm_apic_set_irq(vcpu, &irq, NULL);
++	if (lapic_in_kernel(vcpu))
++		return !kvm_apic_set_irq(vcpu, &irq, NULL);
++	return 0;
+ }
+ 
+ static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
+@@ -1852,7 +1854,13 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
+ 
+ 			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
+ 			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+-			ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
++
++			/*
++			 * Direct Synthetic timers only make sense with in-kernel
++			 * LAPIC
++			 */
++			if (lapic_in_kernel(vcpu))
++				ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
+ 
+ 			break;
+ 
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index b33be928d164..70bcbd02edcb 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -5809,12 +5809,14 @@ static void bfq_update_inject_limit(struct bfq_data *bfqd,
+ 	 */
+ 	if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
+ 	    tot_time_ns < bfqq->last_serv_time_ns) {
++		if (bfqq->last_serv_time_ns == 0) {
++			/*
++			 * Now we certainly have a base value: make sure we
++			 * start trying injection.
++			 */
++			bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
++		}
+ 		bfqq->last_serv_time_ns = tot_time_ns;
+-		/*
+-		 * Now we certainly have a base value: make sure we
+-		 * start trying injection.
+-		 */
+-		bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
+ 	} else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1)
+ 		/*
+ 		 * No I/O injected and no request still in service in
+diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
+index 024060165afa..76457003f140 100644
+--- a/drivers/block/pktcdvd.c
++++ b/drivers/block/pktcdvd.c
+@@ -2594,7 +2594,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
+ 	if (ret)
+ 		return ret;
+ 	if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
+-		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
+ 		blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index da5b6723329a..28693dbcb0c3 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -221,6 +221,9 @@ struct smi_info {
+ 	 */
+ 	bool irq_enable_broken;
+ 
++	/* Is the driver in maintenance mode? */
++	bool in_maintenance_mode;
++
+ 	/*
+ 	 * Did we get an attention that we did not handle?
+ 	 */
+@@ -1007,11 +1010,20 @@ static int ipmi_thread(void *data)
+ 		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ 		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+ 						  &busy_until);
+-		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
++		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ 			; /* do nothing */
+-		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
+-			schedule();
+-		else if (smi_result == SI_SM_IDLE) {
++		} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
++			/*
++			 * In maintenance mode we run as fast as
++			 * possible to allow firmware updates to
++			 * complete as fast as possible, but normally
++			 * don't bang on the scheduler.
++			 */
++			if (smi_info->in_maintenance_mode)
++				schedule();
++			else
++				usleep_range(100, 200);
++		} else if (smi_result == SI_SM_IDLE) {
+ 			if (atomic_read(&smi_info->need_watch)) {
+ 				schedule_timeout_interruptible(100);
+ 			} else {
+@@ -1019,8 +1031,9 @@ static int ipmi_thread(void *data)
+ 				__set_current_state(TASK_INTERRUPTIBLE);
+ 				schedule();
+ 			}
+-		} else
++		} else {
+ 			schedule_timeout_interruptible(1);
++		}
+ 	}
+ 	return 0;
+ }
+@@ -1198,6 +1211,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
+ 
+ 	if (!enable)
+ 		atomic_set(&smi_info->req_events, 0);
++	smi_info->in_maintenance_mode = enable;
+ }
+ 
+ static void shutdown_smi(void *send_info);
+diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
+index 32dd29e0a37e..4de97cc7cb54 100644
+--- a/drivers/clk/actions/owl-common.c
++++ b/drivers/clk/actions/owl-common.c
+@@ -68,16 +68,17 @@ int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks)
+ 	struct clk_hw *hw;
+ 
+ 	for (i = 0; i < hw_clks->num; i++) {
++		const char *name;
+ 
+ 		hw = hw_clks->hws[i];
+-
+ 		if (IS_ERR_OR_NULL(hw))
+ 			continue;
+ 
++		name = hw->init->name;
+ 		ret = devm_clk_hw_register(dev, hw);
+ 		if (ret) {
+ 			dev_err(dev, "Couldn't register clock %d - %s\n",
+-				i, hw->init->name);
++				i, name);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
+index f607ee702c83..311cea0c3ae2 100644
+--- a/drivers/clk/at91/clk-main.c
++++ b/drivers/clk/at91/clk-main.c
+@@ -21,6 +21,10 @@
+ 
+ #define MOR_KEY_MASK		(0xff << 16)
+ 
++#define clk_main_parent_select(s)	(((s) & \
++					(AT91_PMC_MOSCEN | \
++					AT91_PMC_OSCBYPASS)) ? 1 : 0)
++
+ struct clk_main_osc {
+ 	struct clk_hw hw;
+ 	struct regmap *regmap;
+@@ -113,7 +117,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
+ 
+ 	regmap_read(regmap, AT91_PMC_SR, &status);
+ 
+-	return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
++	return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
+ }
+ 
+ static const struct clk_ops main_osc_ops = {
+@@ -450,7 +454,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
+ 
+ 	regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
+ 
+-	return status & AT91_PMC_MOSCEN ? 1 : 0;
++	return clk_main_parent_select(status);
+ }
+ 
+ static const struct clk_ops sam9x5_main_ops = {
+@@ -492,7 +496,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
+ 	clkmain->hw.init = &init;
+ 	clkmain->regmap = regmap;
+ 	regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
+-	clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
++	clkmain->parent = clk_main_parent_select(status);
+ 
+ 	hw = &clkmain->hw;
+ 	ret = clk_hw_register(NULL, &clkmain->hw);
+diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
+index 524bf9a53098..e9e16425c739 100644
+--- a/drivers/clk/clk-bulk.c
++++ b/drivers/clk/clk-bulk.c
+@@ -18,10 +18,13 @@ static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
+ 	int ret;
+ 	int i;
+ 
+-	for (i = 0; i < num_clks; i++)
++	for (i = 0; i < num_clks; i++) {
++		clks[i].id = NULL;
+ 		clks[i].clk = NULL;
++	}
+ 
+ 	for (i = 0; i < num_clks; i++) {
++		of_property_read_string_index(np, "clock-names", i, &clks[i].id);
+ 		clks[i].clk = of_clk_get(np, i);
+ 		if (IS_ERR(clks[i].clk)) {
+ 			ret = PTR_ERR(clks[i].clk);
+diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
+index 07f3b252f3e0..bed140f7375f 100644
+--- a/drivers/clk/clk-qoriq.c
++++ b/drivers/clk/clk-qoriq.c
+@@ -686,7 +686,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
+ 		.guts_compat = "fsl,qoriq-device-config-1.0",
+ 		.init_periph = p5020_init_periph,
+ 		.cmux_groups = {
+-			&p2041_cmux_grp1, &p2041_cmux_grp2
++			&p5020_cmux_grp1, &p5020_cmux_grp2
+ 		},
+ 		.cmux_to_group = {
+ 			0, 1, -1
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index d407a07e7e6d..e07c69afc359 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -406,7 +406,8 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ 	clks[IMX8MQ_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
+ 
+ 	/* AHB */
+-	clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite("ahb", imx8mq_ahb_sels, base + 0x9000);
++	/* AHB clock is used by the AHB bus therefore marked as critical */
++	clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
+ 	clks[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
+ 
+ 	/* IPG */
+diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
+index b7213023b238..7a815ec76aa5 100644
+--- a/drivers/clk/imx/clk-pll14xx.c
++++ b/drivers/clk/imx/clk-pll14xx.c
+@@ -191,6 +191,10 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
+ 	tmp &= ~RST_MASK;
+ 	writel_relaxed(tmp, pll->base);
+ 
++	/* Enable BYPASS */
++	tmp |= BYPASS_MASK;
++	writel(tmp, pll->base);
++
+ 	div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
+ 		(rate->sdiv << SDIV_SHIFT);
+ 	writel_relaxed(div_val, pll->base + 0x4);
+@@ -250,6 +254,10 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
+ 	tmp &= ~RST_MASK;
+ 	writel_relaxed(tmp, pll->base);
+ 
++	/* Enable BYPASS */
++	tmp |= BYPASS_MASK;
++	writel_relaxed(tmp, pll->base);
++
+ 	div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
+ 		(rate->sdiv << SDIV_SHIFT);
+ 	writel_relaxed(div_val, pll->base + 0x4);
+@@ -283,16 +291,28 @@ static int clk_pll14xx_prepare(struct clk_hw *hw)
+ {
+ 	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ 	u32 val;
++	int ret;
+ 
+ 	/*
+ 	 * RESETB = 1 from 0, PLL starts its normal
+ 	 * operation after lock time
+ 	 */
+ 	val = readl_relaxed(pll->base + GNRL_CTL);
++	if (val & RST_MASK)
++		return 0;
++	val |= BYPASS_MASK;
++	writel_relaxed(val, pll->base + GNRL_CTL);
+ 	val |= RST_MASK;
+ 	writel_relaxed(val, pll->base + GNRL_CTL);
+ 
+-	return clk_pll14xx_wait_lock(pll);
++	ret = clk_pll14xx_wait_lock(pll);
++	if (ret)
++		return ret;
++
++	val &= ~BYPASS_MASK;
++	writel_relaxed(val, pll->base + GNRL_CTL);
++
++	return 0;
+ }
+ 
+ static int clk_pll14xx_is_prepared(struct clk_hw *hw)
+@@ -348,6 +368,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
+ 	struct clk_pll14xx *pll;
+ 	struct clk *clk;
+ 	struct clk_init_data init;
++	u32 val;
+ 
+ 	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ 	if (!pll)
+@@ -379,6 +400,10 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
+ 	pll->rate_table = pll_clk->rate_table;
+ 	pll->rate_count = pll_clk->rate_count;
+ 
++	val = readl_relaxed(pll->base + GNRL_CTL);
++	val &= ~BYPASS_MASK;
++	writel_relaxed(val, pll->base + GNRL_CTL);
++
+ 	clk = clk_register(NULL, &pll->hw);
+ 	if (IS_ERR(clk)) {
+ 		pr_err("%s: failed to register pll %s %lu\n",
+diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
+index 4c0a20949c2c..9b27d75d9485 100644
+--- a/drivers/clk/ingenic/jz4740-cgu.c
++++ b/drivers/clk/ingenic/jz4740-cgu.c
+@@ -53,6 +53,10 @@ static const u8 jz4740_cgu_cpccr_div_table[] = {
+ 	1, 2, 3, 4, 6, 8, 12, 16, 24, 32,
+ };
+ 
++static const u8 jz4740_cgu_pll_half_div_table[] = {
++	2, 1,
++};
++
+ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
+ 
+ 	/* External clocks */
+@@ -86,7 +90,10 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
+ 	[JZ4740_CLK_PLL_HALF] = {
+ 		"pll half", CGU_CLK_DIV,
+ 		.parents = { JZ4740_CLK_PLL, -1, -1, -1 },
+-		.div = { CGU_REG_CPCCR, 21, 1, 1, -1, -1, -1 },
++		.div = {
++			CGU_REG_CPCCR, 21, 1, 1, -1, -1, -1,
++			jz4740_cgu_pll_half_div_table,
++		},
+ 	},
+ 
+ 	[JZ4740_CLK_CCLK] = {
+diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
+index 8028ff6f6610..db0b73d53551 100644
+--- a/drivers/clk/meson/axg-audio.c
++++ b/drivers/clk/meson/axg-audio.c
+@@ -992,15 +992,18 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
+ 
+ 	/* Take care to skip the registered input clocks */
+ 	for (i = AUD_CLKID_DDR_ARB; i < data->hw_onecell_data->num; i++) {
++		const char *name;
++
+ 		hw = data->hw_onecell_data->hws[i];
+ 		/* array might be sparse */
+ 		if (!hw)
+ 			continue;
+ 
++		name = hw->init->name;
++
+ 		ret = devm_clk_hw_register(dev, hw);
+ 		if (ret) {
+-			dev_err(dev, "failed to register clock %s\n",
+-				hw->init->name);
++			dev_err(dev, "failed to register clock %s\n", name);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
+index 7131dcf9b060..95be125c3bdd 100644
+--- a/drivers/clk/qcom/gcc-sdm845.c
++++ b/drivers/clk/qcom/gcc-sdm845.c
+@@ -685,7 +685,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.name = "gcc_sdcc2_apps_clk_src",
+ 		.parent_names = gcc_parent_names_10,
+ 		.num_parents = 5,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+ 
+@@ -709,7 +709,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ 		.name = "gcc_sdcc4_apps_clk_src",
+ 		.parent_names = gcc_parent_names_0,
+ 		.num_parents = 4,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
+index 2db9093546c6..e326e6dc09fc 100644
+--- a/drivers/clk/renesas/clk-mstp.c
++++ b/drivers/clk/renesas/clk-mstp.c
+@@ -334,7 +334,8 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
+ 		return;
+ 
+ 	pd->name = np->name;
+-	pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
++	pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
++		    GENPD_FLAG_ACTIVE_WAKEUP;
+ 	pd->attach_dev = cpg_mstp_attach_dev;
+ 	pd->detach_dev = cpg_mstp_detach_dev;
+ 	pm_genpd_init(pd, &pm_domain_always_on_gov, false);
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
+index d4075b130674..132cc96895e3 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.c
++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
+@@ -551,7 +551,8 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
+ 
+ 	genpd = &pd->genpd;
+ 	genpd->name = np->name;
+-	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
++	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
++		       GENPD_FLAG_ACTIVE_WAKEUP;
+ 	genpd->attach_dev = cpg_mssr_attach_dev;
+ 	genpd->detach_dev = cpg_mssr_detach_dev;
+ 	pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
+index ad7951b6b285..dcf4e25a0216 100644
+--- a/drivers/clk/sirf/clk-common.c
++++ b/drivers/clk/sirf/clk-common.c
+@@ -297,9 +297,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
+ {
+ 	struct clk_dmn *clk = to_dmnclk(hw);
+ 	u32 cfg = clkc_readl(clk->regofs);
++	const char *name = clk_hw_get_name(hw);
+ 
+ 	/* parent of io domain can only be pll3 */
+-	if (strcmp(hw->init->name, "io") == 0)
++	if (strcmp(name, "io") == 0)
+ 		return 4;
+ 
+ 	WARN_ON((cfg & (BIT(3) - 1)) > 4);
+@@ -311,9 +312,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
+ {
+ 	struct clk_dmn *clk = to_dmnclk(hw);
+ 	u32 cfg = clkc_readl(clk->regofs);
++	const char *name = clk_hw_get_name(hw);
+ 
+ 	/* parent of io domain can only be pll3 */
+-	if (strcmp(hw->init->name, "io") == 0)
++	if (strcmp(name, "io") == 0)
+ 		return -EINVAL;
+ 
+ 	cfg &= ~(BIT(3) - 1);
+@@ -353,7 +355,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ {
+ 	unsigned long fin;
+ 	unsigned ratio, wait, hold;
+-	unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
++	const char *name = clk_hw_get_name(hw);
++	unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
+ 
+ 	fin = *parent_rate;
+ 	ratio = fin / rate;
+@@ -375,7 +378,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	struct clk_dmn *clk = to_dmnclk(hw);
+ 	unsigned long fin;
+ 	unsigned ratio, wait, hold, reg;
+-	unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
++	const char *name = clk_hw_get_name(hw);
++	unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
+ 
+ 	fin = parent_rate;
+ 	ratio = fin / rate;
+diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
+index a5bdca1de5d0..9d56eac43832 100644
+--- a/drivers/clk/sprd/common.c
++++ b/drivers/clk/sprd/common.c
+@@ -76,16 +76,17 @@ int sprd_clk_probe(struct device *dev, struct clk_hw_onecell_data *clkhw)
+ 	struct clk_hw *hw;
+ 
+ 	for (i = 0; i < clkhw->num; i++) {
++		const char *name;
+ 
+ 		hw = clkhw->hws[i];
+-
+ 		if (!hw)
+ 			continue;
+ 
++		name = hw->init->name;
+ 		ret = devm_clk_hw_register(dev, hw);
+ 		if (ret) {
+ 			dev_err(dev, "Couldn't register clock %d - %s\n",
+-				i, hw->init->name);
++				i, name);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
+index 36b4402bf09e..640270f51aa5 100644
+--- a/drivers/clk/sprd/pll.c
++++ b/drivers/clk/sprd/pll.c
+@@ -136,6 +136,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
+ 			k2 + refin * nint * CLK_PLL_1M;
+ 	}
+ 
++	kfree(cfg);
+ 	return rate;
+ }
+ 
+@@ -222,6 +223,7 @@ static int _sprd_pll_set_rate(const struct sprd_pll *pll,
+ 	if (!ret)
+ 		udelay(pll->udelay);
+ 
++	kfree(cfg);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+index 9b3939fc7faa..5ca4d34b4094 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
++++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+@@ -502,6 +502,9 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
+ 		[CLK_MMC1]		= &mmc1_clk.common.hw,
+ 		[CLK_MMC1_SAMPLE]	= &mmc1_sample_clk.common.hw,
+ 		[CLK_MMC1_OUTPUT]	= &mmc1_output_clk.common.hw,
++		[CLK_MMC2]		= &mmc2_clk.common.hw,
++		[CLK_MMC2_SAMPLE]	= &mmc2_sample_clk.common.hw,
++		[CLK_MMC2_OUTPUT]	= &mmc2_output_clk.common.hw,
+ 		[CLK_CE]		= &ce_clk.common.hw,
+ 		[CLK_SPI0]		= &spi0_clk.common.hw,
+ 		[CLK_USB_PHY0]		= &usb_phy0_clk.common.hw,
+diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
+index 7fe3ac980e5f..2e20e650b6c0 100644
+--- a/drivers/clk/sunxi-ng/ccu_common.c
++++ b/drivers/clk/sunxi-ng/ccu_common.c
+@@ -97,14 +97,15 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
+ 
+ 	for (i = 0; i < desc->hw_clks->num ; i++) {
+ 		struct clk_hw *hw = desc->hw_clks->hws[i];
++		const char *name;
+ 
+ 		if (!hw)
+ 			continue;
+ 
++		name = hw->init->name;
+ 		ret = of_clk_hw_register(node, hw);
+ 		if (ret) {
+-			pr_err("Couldn't register clock %d - %s\n",
+-			       i, clk_hw_get_name(hw));
++			pr_err("Couldn't register clock %d - %s\n", i, name);
+ 			goto err_clk_unreg;
+ 		}
+ 	}
+diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c
+index fd6c347bec6a..dd7045bc48c1 100644
+--- a/drivers/clk/zte/clk-zx296718.c
++++ b/drivers/clk/zte/clk-zx296718.c
+@@ -564,6 +564,7 @@ static int __init top_clocks_init(struct device_node *np)
+ {
+ 	void __iomem *reg_base;
+ 	int i, ret;
++	const char *name;
+ 
+ 	reg_base = of_iomap(np, 0);
+ 	if (!reg_base) {
+@@ -573,11 +574,10 @@ static int __init top_clocks_init(struct device_node *np)
+ 
+ 	for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
+ 		zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
++		name = zx296718_pll_clk[i].hw.init->name;
+ 		ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
+-		if (ret) {
+-			pr_warn("top clk %s init error!\n",
+-				zx296718_pll_clk[i].hw.init->name);
+-		}
++		if (ret)
++			pr_warn("top clk %s init error!\n", name);
+ 	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
+@@ -585,11 +585,10 @@ static int __init top_clocks_init(struct device_node *np)
+ 			top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
+ 					&top_ffactor_clk[i].factor.hw;
+ 
++		name
= top_ffactor_clk[i].factor.hw.init->name; + ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw); +- if (ret) { +- pr_warn("top clk %s init error!\n", +- top_ffactor_clk[i].factor.hw.init->name); +- } ++ if (ret) ++ pr_warn("top clk %s init error!\n", name); + } + + for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) { +@@ -598,11 +597,10 @@ static int __init top_clocks_init(struct device_node *np) + &top_mux_clk[i].mux.hw; + + top_mux_clk[i].mux.reg += (uintptr_t)reg_base; ++ name = top_mux_clk[i].mux.hw.init->name; + ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw); +- if (ret) { +- pr_warn("top clk %s init error!\n", +- top_mux_clk[i].mux.hw.init->name); +- } ++ if (ret) ++ pr_warn("top clk %s init error!\n", name); + } + + for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) { +@@ -611,11 +609,10 @@ static int __init top_clocks_init(struct device_node *np) + &top_gate_clk[i].gate.hw; + + top_gate_clk[i].gate.reg += (uintptr_t)reg_base; ++ name = top_gate_clk[i].gate.hw.init->name; + ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw); +- if (ret) { +- pr_warn("top clk %s init error!\n", +- top_gate_clk[i].gate.hw.init->name); +- } ++ if (ret) ++ pr_warn("top clk %s init error!\n", name); + } + + for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) { +@@ -624,11 +621,10 @@ static int __init top_clocks_init(struct device_node *np) + &top_div_clk[i].div.hw; + + top_div_clk[i].div.reg += (uintptr_t)reg_base; ++ name = top_div_clk[i].div.hw.init->name; + ret = clk_hw_register(NULL, &top_div_clk[i].div.hw); +- if (ret) { +- pr_warn("top clk %s init error!\n", +- top_div_clk[i].div.hw.init->name); +- } ++ if (ret) ++ pr_warn("top clk %s init error!\n", name); + } + + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, +@@ -754,6 +750,7 @@ static int __init lsp0_clocks_init(struct device_node *np) + { + void __iomem *reg_base; + int i, ret; ++ const char *name; + + reg_base = of_iomap(np, 0); + if (!reg_base) { +@@ -767,11 +764,10 @@ static int __init lsp0_clocks_init(struct device_node *np) + &lsp0_mux_clk[i].mux.hw; + + lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base; ++ name = lsp0_mux_clk[i].mux.hw.init->name; + ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw); +- if (ret) { +- pr_warn("lsp0 clk %s init error!\n", +- lsp0_mux_clk[i].mux.hw.init->name); +- } ++ if (ret) ++ pr_warn("lsp0 clk %s init error!\n", name); + } + + for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) { +@@ -780,11 +776,10 @@ static int __init lsp0_clocks_init(struct device_node *np) + &lsp0_gate_clk[i].gate.hw; + + lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base; ++ name = lsp0_gate_clk[i].gate.hw.init->name; + ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw); +- if (ret) { +- pr_warn("lsp0 clk %s init error!\n", +- lsp0_gate_clk[i].gate.hw.init->name); +- } ++ if (ret) ++ pr_warn("lsp0 clk %s init error!\n", name); + } + + for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) { +@@ -793,11 +788,10 @@ static int __init lsp0_clocks_init(struct device_node *np) + &lsp0_div_clk[i].div.hw; + + lsp0_div_clk[i].div.reg += (uintptr_t)reg_base; ++ name = lsp0_div_clk[i].div.hw.init->name; + ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw); +- if (ret) { +- pr_warn("lsp0 clk %s init error!\n", +- lsp0_div_clk[i].div.hw.init->name); +- } ++ if (ret) ++ pr_warn("lsp0 clk %s init error!\n", name); + } + + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, +@@ -862,6 +856,7 @@ static int __init lsp1_clocks_init(struct device_node *np) + { + void __iomem *reg_base; + int i, ret; ++ const char *name; + + reg_base = 
of_iomap(np, 0); + if (!reg_base) { +@@ -875,11 +870,10 @@ static int __init lsp1_clocks_init(struct device_node *np) + &lsp0_mux_clk[i].mux.hw; + + lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base; ++ name = lsp1_mux_clk[i].mux.hw.init->name; + ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw); +- if (ret) { +- pr_warn("lsp1 clk %s init error!\n", +- lsp1_mux_clk[i].mux.hw.init->name); +- } ++ if (ret) ++ pr_warn("lsp1 clk %s init error!\n", name); + } + + for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) { +@@ -888,11 +882,10 @@ static int __init lsp1_clocks_init(struct device_node *np) + &lsp1_gate_clk[i].gate.hw; + + lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base; ++ name = lsp1_gate_clk[i].gate.hw.init->name; + ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw); +- if (ret) { +- pr_warn("lsp1 clk %s init error!\n", +- lsp1_gate_clk[i].gate.hw.init->name); +- } ++ if (ret) ++ pr_warn("lsp1 clk %s init error!\n", name); + } + + for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) { +@@ -901,11 +894,10 @@ static int __init lsp1_clocks_init(struct device_node *np) + &lsp1_div_clk[i].div.hw; + + lsp1_div_clk[i].div.reg += (uintptr_t)reg_base; ++ name = lsp1_div_clk[i].div.hw.init->name; + ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw); +- if (ret) { +- pr_warn("lsp1 clk %s init error!\n", +- lsp1_div_clk[i].div.hw.init->name); +- } ++ if (ret) ++ pr_warn("lsp1 clk %s init error!\n", name); + } + + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, +@@ -979,6 +971,7 @@ static int __init audio_clocks_init(struct device_node *np) + { + void __iomem *reg_base; + int i, ret; ++ const char *name; + + reg_base = of_iomap(np, 0); + if (!reg_base) { +@@ -992,11 +985,10 @@ static int __init audio_clocks_init(struct device_node *np) + &audio_mux_clk[i].mux.hw; + + audio_mux_clk[i].mux.reg += (uintptr_t)reg_base; ++ name = audio_mux_clk[i].mux.hw.init->name; + ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw); +- if (ret) { +- pr_warn("audio clk %s init error!\n", +- audio_mux_clk[i].mux.hw.init->name); +- } ++ if (ret) ++ pr_warn("audio clk %s init error!\n", name); + } + + for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) { +@@ -1005,11 +997,10 @@ static int __init audio_clocks_init(struct device_node *np) + &audio_adiv_clk[i].hw; + + audio_adiv_clk[i].reg_base += (uintptr_t)reg_base; ++ name = audio_adiv_clk[i].hw.init->name; + ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw); +- if (ret) { +- pr_warn("audio clk %s init error!\n", +- audio_adiv_clk[i].hw.init->name); +- } ++ if (ret) ++ pr_warn("audio clk %s init error!\n", name); + } + + for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) { +@@ -1018,11 +1009,10 @@ static int __init audio_clocks_init(struct device_node *np) + &audio_div_clk[i].div.hw; + + audio_div_clk[i].div.reg += (uintptr_t)reg_base; ++ name = audio_div_clk[i].div.hw.init->name; + ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw); +- if (ret) { +- pr_warn("audio clk %s init error!\n", +- audio_div_clk[i].div.hw.init->name); +- } ++ if (ret) ++ pr_warn("audio clk %s init error!\n", name); + } + + for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) { +@@ -1031,11 +1021,10 @@ static int __init audio_clocks_init(struct device_node *np) + &audio_gate_clk[i].gate.hw; + + audio_gate_clk[i].gate.reg += (uintptr_t)reg_base; ++ name = audio_gate_clk[i].gate.hw.init->name; + ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw); +- if (ret) { +- pr_warn("audio clk %s init error!\n", +- audio_gate_clk[i].gate.hw.init->name); +- } ++ if (ret) ++ pr_warn("audio clk %s init 
error!\n", name); + } + + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, +diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c +index 02768af0dccd..8c789b8671fc 100644 +--- a/drivers/crypto/hisilicon/sec/sec_algs.c ++++ b/drivers/crypto/hisilicon/sec/sec_algs.c +@@ -215,17 +215,18 @@ static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl, + dma_addr_t psec_sgl, struct sec_dev_info *info) + { + struct sec_hw_sgl *sgl_current, *sgl_next; ++ dma_addr_t sgl_next_dma; + +- if (!hw_sgl) +- return; + sgl_current = hw_sgl; +- while (sgl_current->next) { ++ while (sgl_current) { + sgl_next = sgl_current->next; +- dma_pool_free(info->hw_sgl_pool, sgl_current, +- sgl_current->next_sgl); ++ sgl_next_dma = sgl_current->next_sgl; ++ ++ dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl); ++ + sgl_current = sgl_next; ++ psec_sgl = sgl_next_dma; + } +- dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl); + } + + static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm, +diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c +index 051f6c2873c7..6713cfb1995c 100644 +--- a/drivers/dma-buf/sw_sync.c ++++ b/drivers/dma-buf/sw_sync.c +@@ -132,17 +132,14 @@ static void timeline_fence_release(struct dma_fence *fence) + { + struct sync_pt *pt = dma_fence_to_sync_pt(fence); + struct sync_timeline *parent = dma_fence_parent(fence); ++ unsigned long flags; + ++ spin_lock_irqsave(fence->lock, flags); + if (!list_empty(&pt->link)) { +- unsigned long flags; +- +- spin_lock_irqsave(fence->lock, flags); +- if (!list_empty(&pt->link)) { +- list_del(&pt->link); +- rb_erase(&pt->node, &parent->pt_tree); +- } +- spin_unlock_irqrestore(fence->lock, flags); ++ list_del(&pt->link); ++ rb_erase(&pt->node, &parent->pt_tree); + } ++ spin_unlock_irqrestore(fence->lock, flags); + + sync_timeline_put(parent); + dma_fence_free(fence); +@@ -265,7 +262,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, + p = &parent->rb_left; + } else { + if (dma_fence_get_rcu(&other->base)) { +- dma_fence_put(&pt->base); ++ sync_timeline_put(obj); ++ kfree(pt); + pt = other; + goto unlock; + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +index eb3569b46c1e..430c56f9544a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +@@ -139,14 +139,14 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, + mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp, + fb_tiled); + domain = amdgpu_display_supported_domains(adev); +- + height = ALIGN(mode_cmd->height, 8); + size = mode_cmd->pitches[0] * height; + aligned_size = ALIGN(size, PAGE_SIZE); + ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | +- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | +- AMDGPU_GEM_CREATE_VRAM_CLEARED, ++ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | ++ AMDGPU_GEM_CREATE_VRAM_CLEARED | ++ AMDGPU_GEM_CREATE_CPU_GTT_USWC, + ttm_bo_type_kernel, NULL, &gobj); + if (ret) { + pr_err("failed to allocate framebuffer (%d)\n", aligned_size); +@@ -168,7 +168,6 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, + dev_err(adev->dev, "FB failed to set tiling flags\n"); + } + +- + ret = amdgpu_bo_pin(abo, domain); + if (ret) { + amdgpu_bo_unreserve(abo); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +index 939f8305511b..fb291366d5ad 100644 +--- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +@@ -747,7 +747,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, + struct amdgpu_device *adev = dev->dev_private; + struct drm_gem_object *gobj; + uint32_t handle; +- u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; ++ u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | ++ AMDGPU_GEM_CREATE_CPU_GTT_USWC; + u32 domain; + int r; + +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +index 3747c3f1f0cc..15c371fac469 100644 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +@@ -1583,7 +1583,8 @@ static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = { + + static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev) + { +- adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; ++ adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 + ++ adev->sdma.num_instances; + adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs; + adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs; + } +diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c +index 4d74453f3cfb..602397016b64 100644 +--- a/drivers/gpu/drm/amd/amdgpu/si.c ++++ b/drivers/gpu/drm/amd/amdgpu/si.c +@@ -1881,7 +1881,7 @@ static void si_program_aspm(struct amdgpu_device *adev) + if (orig != data) + si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data); + +- if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) { ++ if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) { + orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0); + data &= ~PLL_RAMP_UP_TIME_0_MASK; + if (orig != data) +@@ -1930,14 +1930,14 @@ static void si_program_aspm(struct amdgpu_device *adev) + + orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL); + data &= ~LS2_EXIT_TIME_MASK; +- if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN)) ++ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN)) + data |= LS2_EXIT_TIME(5); + if (orig != data) + si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data); + + orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL); + data &= ~LS2_EXIT_TIME_MASK; +- if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN)) ++ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN)) + data |= LS2_EXIT_TIME(5); + if (orig != data) + si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data); +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +index 592fa499c9f8..9594c154664f 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +@@ -334,7 +334,7 @@ bool dm_pp_get_clock_levels_by_type( + } + } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) { + if (smu_get_clock_by_type(&adev->smu, +- dc_to_smu_clock_type(clk_type), ++ dc_to_pp_clock_type(clk_type), + &pp_clks)) { + get_default_clock_levels(clk_type, dc_clks); + return true; +@@ -419,7 +419,7 @@ bool dm_pp_get_clock_levels_by_type_with_latency( + return false; + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) { + if (smu_get_clock_by_type_with_latency(&adev->smu, +- dc_to_pp_clock_type(clk_type), ++ dc_to_smu_clock_type(clk_type), + &pp_clks)) + return false; + } +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c 
+index 50bfb5921de0..2ab0f97719b5 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +@@ -348,6 +348,8 @@ void dcn20_clk_mgr_construct( + + clk_mgr->base.dprefclk_khz = 700000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved + ++ clk_mgr->pp_smu = pp_smu; ++ + if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { + dcn2_funcs.update_clocks = dcn2_update_clocks_fpga; + clk_mgr->dentist_vco_freq_khz = 3850000; +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c +index cbc480a33376..730f97ba8dbb 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c +@@ -2187,6 +2187,14 @@ void dc_set_power_state( + dc_resource_state_construct(dc, dc->current_state); + + dc->hwss.init_hw(dc); ++ ++#ifdef CONFIG_DRM_AMD_DC_DCN2_0 ++ if (dc->hwss.init_sys_ctx != NULL && ++ dc->vm_pa_config.valid) { ++ dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); ++ } ++#endif ++ + break; + default: + ASSERT(dc->current_state->stream_count == 0); +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +index 2c7aaed907b9..0bf85a7a2cd3 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +@@ -3033,6 +3033,8 @@ void dp_set_fec_ready(struct dc_link *link, bool ready) + link_enc->funcs->fec_set_ready(link_enc, true); + link->fec_state = dc_link_fec_ready; + } else { ++ link->link_enc->funcs->fec_set_ready(link->link_enc, false); ++ link->fec_state = dc_link_fec_not_ready; + dm_error("dpcd write failed to set fec_ready"); + } + } else if (link->fec_state == dc_link_fec_ready && !ready) { +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +index 2d019e1f6135..a9135764e580 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +@@ -160,6 +160,10 @@ bool edp_receiver_ready_T7(struct dc_link *link) + break; + udelay(25); //MAx T7 is 50ms + } while (++tries < 300); ++ ++ if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0) ++ udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000); ++ + return result; + } + +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +index 2ceaab4fb5de..68db60e4caf3 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +@@ -265,12 +265,10 @@ bool resource_construct( + DC_ERR("DC: failed to create audio!\n"); + return false; + } +- + if (!aud->funcs->endpoint_valid(aud)) { + aud->funcs->destroy(&aud); + break; + } +- + pool->audios[i] = aud; + pool->audio_count++; + } +@@ -1659,24 +1657,25 @@ static struct audio *find_first_free_audio( + const struct resource_pool *pool, + enum engine_id id) + { +- int i; +- for (i = 0; i < pool->audio_count; i++) { ++ int i, available_audio_count; ++ ++ available_audio_count = pool->audio_count; ++ ++ for (i = 0; i < available_audio_count; i++) { + if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) { + /*we have enough audio endpoint, find the matching inst*/ + if (id != i) + continue; +- + return pool->audios[i]; + } + } + +- /* use engine id to find free audio */ +- if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == 
false)) { ++ /* use engine id to find free audio */ ++ if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) { + return pool->audios[id]; + } +- + /*not found the matching one, first come first serve*/ +- for (i = 0; i < pool->audio_count; i++) { ++ for (i = 0; i < available_audio_count; i++) { + if (res_ctx->is_audio_acquired[i] == false) { + return pool->audios[i]; + } +diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h +index 6eabb6491a3d..ce6d73d21cca 100644 +--- a/drivers/gpu/drm/amd/display/dc/dc_types.h ++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h +@@ -202,6 +202,7 @@ struct dc_panel_patch { + unsigned int dppowerup_delay; + unsigned int extra_t12_ms; + unsigned int extra_delay_backlight_off; ++ unsigned int extra_t7_ms; + }; + + struct dc_edid_caps { +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +index 4a10a5d22c90..5de9623bdf66 100644 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +@@ -613,6 +613,8 @@ void dce_aud_az_configure( + + AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1, + value); ++ DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u data, 0x%x, displayName %s: \n", ++ audio->inst, value, audio_info->display_name); + + /* + *write the port ID: +@@ -922,7 +924,6 @@ static const struct audio_funcs funcs = { + .az_configure = dce_aud_az_configure, + .destroy = dce_aud_destroy, + }; +- + void dce_aud_destroy(struct audio **audio) + { + struct dce_audio *aud = DCE_AUD(*audio); +@@ -953,7 +954,6 @@ struct audio *dce_audio_create( + audio->regs = reg; + audio->shifts = shifts; + audio->masks = masks; +- + return &audio->base; + } + +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +index 7469333a2c8a..8166fdbacd73 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +@@ -357,9 +357,10 @@ bool cm_helper_translate_curve_to_hw_format( + seg_distr[7] = 4; + seg_distr[8] = 4; + seg_distr[9] = 4; ++ seg_distr[10] = 1; + + region_start = -10; +- region_end = 0; ++ region_end = 1; + } + + for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++) +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +index a546c2bc9129..e365f2dd7f9a 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +@@ -824,6 +824,9 @@ void optc1_program_manual_trigger(struct timing_generator *optc) + + REG_SET(OTG_MANUAL_FLOW_CONTROL, 0, + MANUAL_FLOW_CONTROL, 1); ++ ++ REG_SET(OTG_MANUAL_FLOW_CONTROL, 0, ++ MANUAL_FLOW_CONTROL, 0); + } + + +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +index d810c8940129..8fdb53a44bfb 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +@@ -585,6 +585,10 @@ static void dcn20_init_hw(struct dc *dc) + } + } + ++ /* Power gate DSCs */ ++ for (i = 0; i < res_pool->res_cap->num_dsc; i++) ++ dcn20_dsc_pg_control(hws, res_pool->dscs[i]->inst, false); ++ + /* Blank pixel data with OPP DPG */ + for (i = 0; i < dc->res_pool->timing_generator_count; i++) { + struct timing_generator *tg = dc->res_pool->timing_generators[i]; +@@ -1106,6 +1110,9 @@ void 
dcn20_enable_plane( + /* enable DCFCLK current DCHUB */ + pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true); + ++ /* initialize HUBP on power up */ ++ pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp); ++ + /* make sure OPP_PIPE_CLOCK_EN = 1 */ + pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control( + pipe_ctx->stream_res.opp, +@@ -1315,6 +1322,18 @@ static void dcn20_apply_ctx_for_surface( + if (!top_pipe_to_program) + return; + ++ /* Carry over GSL groups in case the context is changing. */ ++ for (i = 0; i < dc->res_pool->pipe_count; i++) { ++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; ++ struct pipe_ctx *old_pipe_ctx = ++ &dc->current_state->res_ctx.pipe_ctx[i]; ++ ++ if (pipe_ctx->stream == stream && ++ pipe_ctx->stream == old_pipe_ctx->stream) ++ pipe_ctx->stream_res.gsl_group = ++ old_pipe_ctx->stream_res.gsl_group; ++ } ++ + tg = top_pipe_to_program->stream_res.tg; + + interdependent_update = top_pipe_to_program->plane_state && +diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +index 3cc0f2a1f77c..5db29bf582d3 100644 +--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c ++++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +@@ -167,6 +167,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { + .ack = NULL + }; + ++static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { ++ .set = NULL, ++ .ack = NULL ++}; ++ + #undef BASE_INNER + #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +@@ -221,12 +226,15 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { + .funcs = &pflip_irq_info_funcs\ + } + +-#define vupdate_int_entry(reg_num)\ ++/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic ++ * of DCE's DC_IRQ_SOURCE_VUPDATEx. 
++ */ ++#define vupdate_no_lock_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ +- OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\ +- OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\ +- .funcs = &vblank_irq_info_funcs\ ++ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\ ++ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\ ++ .funcs = &vupdate_no_lock_irq_info_funcs\ + } + + #define vblank_int_entry(reg_num)\ +@@ -333,12 +341,12 @@ irq_source_info_dcn20[DAL_IRQ_SOURCES_NUMBER] = { + dc_underflow_int_entry(6), + [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), + [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), +- vupdate_int_entry(0), +- vupdate_int_entry(1), +- vupdate_int_entry(2), +- vupdate_int_entry(3), +- vupdate_int_entry(4), +- vupdate_int_entry(5), ++ vupdate_no_lock_int_entry(0), ++ vupdate_no_lock_int_entry(1), ++ vupdate_no_lock_int_entry(2), ++ vupdate_no_lock_int_entry(3), ++ vupdate_no_lock_int_entry(4), ++ vupdate_no_lock_int_entry(5), + vblank_int_entry(0), + vblank_int_entry(1), + vblank_int_entry(2), +diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +index 7c20171a3b6d..a53666ff6cf8 100644 +--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c ++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +@@ -435,6 +435,12 @@ static void apply_below_the_range(struct core_freesync *core_freesync, + /* Either we've calculated the number of frames to insert, + * or we need to insert min duration frames + */ ++ if (last_render_time_in_us / frames_to_insert < ++ in_out_vrr->min_duration_in_us){ ++ frames_to_insert -= (frames_to_insert > 1) ? ++ 1 : 0; ++ } ++ + if (frames_to_insert > 0) + inserted_frame_duration_in_us = last_render_time_in_us / + frames_to_insert; +@@ -887,8 +893,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, + struct core_freesync *core_freesync = NULL; + unsigned long long nominal_field_rate_in_uhz = 0; + unsigned int refresh_range = 0; +- unsigned int min_refresh_in_uhz = 0; +- unsigned int max_refresh_in_uhz = 0; ++ unsigned long long min_refresh_in_uhz = 0; ++ unsigned long long max_refresh_in_uhz = 0; + + if (mod_freesync == NULL) + return; +@@ -915,7 +921,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, + min_refresh_in_uhz = nominal_field_rate_in_uhz; + + if (!vrr_settings_require_update(core_freesync, +- in_config, min_refresh_in_uhz, max_refresh_in_uhz, ++ in_config, (unsigned int)min_refresh_in_uhz, (unsigned int)max_refresh_in_uhz, + in_out_vrr)) + return; + +@@ -931,15 +937,15 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, + return; + + } else { +- in_out_vrr->min_refresh_in_uhz = min_refresh_in_uhz; ++ in_out_vrr->min_refresh_in_uhz = (unsigned int)min_refresh_in_uhz; + in_out_vrr->max_duration_in_us = + calc_duration_in_us_from_refresh_in_uhz( +- min_refresh_in_uhz); ++ (unsigned int)min_refresh_in_uhz); + +- in_out_vrr->max_refresh_in_uhz = max_refresh_in_uhz; ++ in_out_vrr->max_refresh_in_uhz = (unsigned int)max_refresh_in_uhz; + in_out_vrr->min_duration_in_us = + calc_duration_in_us_from_refresh_in_uhz( +- max_refresh_in_uhz); ++ (unsigned int)max_refresh_in_uhz); + + refresh_range = in_out_vrr->max_refresh_in_uhz - + in_out_vrr->min_refresh_in_uhz; +@@ -950,17 +956,18 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, + in_out_vrr->fixed.ramping_active = in_config->ramping; + + in_out_vrr->btr.btr_enabled = in_config->btr; 
++ + if (in_out_vrr->max_refresh_in_uhz < + 2 * in_out_vrr->min_refresh_in_uhz) + in_out_vrr->btr.btr_enabled = false; ++ + in_out_vrr->btr.btr_active = false; + in_out_vrr->btr.inserted_duration_in_us = 0; + in_out_vrr->btr.frames_to_insert = 0; + in_out_vrr->btr.frame_counter = 0; + in_out_vrr->btr.mid_point_in_us = +- in_out_vrr->min_duration_in_us + +- (in_out_vrr->max_duration_in_us - +- in_out_vrr->min_duration_in_us) / 2; ++ (in_out_vrr->min_duration_in_us + ++ in_out_vrr->max_duration_in_us) / 2; + + if (in_out_vrr->state == VRR_STATE_UNSUPPORTED) { + in_out_vrr->adjust.v_total_min = stream->timing.v_total; +diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +index b81c7e715dc9..9aaf2deff6e9 100644 +--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c ++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +@@ -1627,6 +1627,10 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu) + static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) + { + int ret = 0; ++ struct amdgpu_device *adev = smu->adev; ++ ++ if (adev->asic_type != CHIP_NAVI10) ++ return -EINVAL; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: +diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +index 3f7f4880be09..37bd541166a5 100644 +--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c ++++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +@@ -1035,16 +1035,17 @@ static int analogix_dp_commit(struct analogix_dp_device *dp) + if (ret) + return ret; + ++ /* Check whether panel supports fast training */ ++ ret = analogix_dp_fast_link_train_detection(dp); ++ if (ret) ++ dp->psr_enable = false; ++ + if (dp->psr_enable) { + ret = analogix_dp_enable_sink_psr(dp); + if (ret) + return ret; + } + +- /* Check whether panel supports fast training */ +- ret = analogix_dp_fast_link_train_detection(dp); +- if (ret) +- dp->psr_enable = false; + + return ret; + } +diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c +index dd7aa466b280..36acc256e67e 100644 +--- a/drivers/gpu/drm/bridge/sii902x.c ++++ b/drivers/gpu/drm/bridge/sii902x.c +@@ -750,6 +750,7 @@ static int sii902x_audio_codec_init(struct sii902x *sii902x, + sii902x->audio.i2s_fifo_sequence[i] |= audio_fifo_id[i] | + i2s_lane_id[lanes[i]] | SII902X_TPI_I2S_FIFO_ENABLE; + ++ sii902x->audio.mclk = devm_clk_get(dev, "mclk"); + if (IS_ERR(sii902x->audio.mclk)) { + dev_err(dev, "%s: No clock (audio mclk) found: %ld\n", + __func__, PTR_ERR(sii902x->audio.mclk)); +diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c +index 13ade28a36a8..b3a7d5f1250c 100644 +--- a/drivers/gpu/drm/bridge/tc358767.c ++++ b/drivers/gpu/drm/bridge/tc358767.c +@@ -313,7 +313,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) + { + struct tc_data *tc = aux_to_tc(aux); +- size_t size = min_t(size_t, 8, msg->size); ++ size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size); + u8 request = msg->request & ~DP_AUX_I2C_MOT; + u8 *buf = msg->buffer; + u32 tmp = 0; +diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c +index baf63fb6850a..a810568c76df 100644 +--- a/drivers/gpu/drm/mcde/mcde_drv.c ++++ b/drivers/gpu/drm/mcde/mcde_drv.c +@@ -319,7 +319,7 @@ static int mcde_probe(struct platform_device *pdev) + struct device *dev = &pdev->dev; + struct drm_device *drm; + struct mcde *mcde; +- struct 
component_match *match; ++ struct component_match *match = NULL; + struct resource *res; + u32 pid; + u32 val; +@@ -485,6 +485,10 @@ static int mcde_probe(struct platform_device *pdev) + } + put_device(p); + } ++ if (!match) { ++ dev_err(dev, "no matching components\n"); ++ return -ENODEV; ++ } + if (IS_ERR(match)) { + dev_err(dev, "could not create component match\n"); + ret = PTR_ERR(match); +diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c +index 283ff690350e..50303ec194bb 100644 +--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c ++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c +@@ -320,7 +320,9 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, + asyh->wndw.olut &= ~BIT(wndw->id); + } + +- if (!ilut && wndw->func->ilut_identity) { ++ if (!ilut && wndw->func->ilut_identity && ++ asyw->state.fb->format->format != DRM_FORMAT_XBGR16161616F && ++ asyw->state.fb->format->format != DRM_FORMAT_ABGR16161616F) { + static struct drm_property_blob dummy = {}; + ilut = &dummy; + } +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c +index 7143ea4611aa..33a9fb5ac558 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c +@@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + info->min = min(info->base, + info->base + info->step * info->vidmask); + info->max = nvbios_rd32(bios, volt + 0x0e); ++ if (!info->max) ++ info->max = max(info->base, info->base + info->step * info->vidmask); + break; + case 0x50: + info->min = nvbios_rd32(bios, volt + 0x0a); +diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +index 28c0620dfe0f..b5b14aa059ea 100644 +--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c ++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +@@ -399,7 +399,13 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, + + /* Look up the DSI host. It needs to probe before we do. 
*/ + endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); ++ if (!endpoint) ++ return -ENODEV; ++ + dsi_host_node = of_graph_get_remote_port_parent(endpoint); ++ if (!dsi_host_node) ++ goto error; ++ + host = of_find_mipi_dsi_host_by_node(dsi_host_node); + of_node_put(dsi_host_node); + if (!host) { +@@ -408,6 +414,9 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, + } + + info.node = of_graph_get_remote_port(endpoint); ++ if (!info.node) ++ goto error; ++ + of_node_put(endpoint); + + ts->dsi = mipi_dsi_device_register_full(host, &info); +@@ -428,6 +437,10 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, + return ret; + + return 0; ++ ++error: ++ of_node_put(endpoint); ++ return -ENODEV; + } + + static int rpi_touchscreen_remove(struct i2c_client *i2c) +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c +index 5a93c4edf1e4..ee6900eb3906 100644 +--- a/drivers/gpu/drm/panel/panel-simple.c ++++ b/drivers/gpu/drm/panel/panel-simple.c +@@ -724,9 +724,9 @@ static const struct panel_desc auo_g133han01 = { + static const struct display_timing auo_g185han01_timings = { + .pixelclock = { 120000000, 144000000, 175000000 }, + .hactive = { 1920, 1920, 1920 }, +- .hfront_porch = { 18, 60, 74 }, +- .hback_porch = { 12, 44, 54 }, +- .hsync_len = { 10, 24, 32 }, ++ .hfront_porch = { 36, 120, 148 }, ++ .hback_porch = { 24, 88, 108 }, ++ .hsync_len = { 20, 48, 64 }, + .vactive = { 1080, 1080, 1080 }, + .vfront_porch = { 6, 10, 40 }, + .vback_porch = { 2, 5, 20 }, +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +index c60d1a44d22a..b684cd719612 100644 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -752,7 +752,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct + + radeon_encoder->output_csc = val; + +- if (connector->encoder->crtc) { ++ if (connector->encoder && connector->encoder->crtc) { + struct drm_crtc *crtc = connector->encoder->crtc; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + +diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c +index a6cbe11f79c6..15d7bebe1729 100644 +--- a/drivers/gpu/drm/radeon/radeon_drv.c ++++ b/drivers/gpu/drm/radeon/radeon_drv.c +@@ -349,11 +349,19 @@ radeon_pci_remove(struct pci_dev *pdev) + static void + radeon_pci_shutdown(struct pci_dev *pdev) + { ++ struct drm_device *ddev = pci_get_drvdata(pdev); ++ + /* if we are running in a VM, make sure the device + * torn down properly on reboot/shutdown + */ + if (radeon_device_is_virtual()) + radeon_pci_remove(pdev); ++ ++ /* Some adapters need to be suspended before a ++ * shutdown occurs in order to prevent an error ++ * during kexec. 
++ */ ++ radeon_suspend_kms(ddev, true, true, false); + } + + static int radeon_pmops_suspend(struct device *dev) +diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c +index 2fe6c4a8d915..3ab4fbf8eb0d 100644 +--- a/drivers/gpu/drm/stm/ltdc.c ++++ b/drivers/gpu/drm/stm/ltdc.c +@@ -26,6 +26,7 @@ + #include <drm/drm_fb_cma_helper.h> + #include <drm/drm_fourcc.h> + #include <drm/drm_gem_cma_helper.h> ++#include <drm/drm_gem_framebuffer_helper.h> + #include <drm/drm_of.h> + #include <drm/drm_plane_helper.h> + #include <drm/drm_probe_helper.h> +@@ -922,6 +923,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = { + }; + + static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = { ++ .prepare_fb = drm_gem_fb_prepare_fb, + .atomic_check = ltdc_plane_atomic_check, + .atomic_update = ltdc_plane_atomic_update, + .atomic_disable = ltdc_plane_atomic_disable, +diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig +index 87819c82bcce..f2f0739d1035 100644 +--- a/drivers/gpu/drm/tinydrm/Kconfig ++++ b/drivers/gpu/drm/tinydrm/Kconfig +@@ -14,8 +14,8 @@ config TINYDRM_MIPI_DBI + config TINYDRM_HX8357D + tristate "DRM support for HX8357D display panels" + depends on DRM_TINYDRM && SPI +- depends on BACKLIGHT_CLASS_DEVICE + select TINYDRM_MIPI_DBI ++ select BACKLIGHT_CLASS_DEVICE + help + DRM driver for the following HX8357D panels: + * YX350HV15-T 3.5" 340x350 TFT (Adafruit 3.5") +@@ -35,8 +35,8 @@ config TINYDRM_ILI9225 + config TINYDRM_ILI9341 + tristate "DRM support for ILI9341 display panels" + depends on DRM_TINYDRM && SPI +- depends on BACKLIGHT_CLASS_DEVICE + select TINYDRM_MIPI_DBI ++ select BACKLIGHT_CLASS_DEVICE + help + DRM driver for the following Ilitek ILI9341 panels: + * YX240QV29-T 2.4" 240x320 TFT (Adafruit 2.4") +@@ -46,8 +46,8 @@ config TINYDRM_ILI9341 + config TINYDRM_MI0283QT + tristate "DRM support for MI0283QT" + depends on DRM_TINYDRM && SPI +- depends on BACKLIGHT_CLASS_DEVICE + select TINYDRM_MIPI_DBI ++ select BACKLIGHT_CLASS_DEVICE + help + DRM driver for the Multi-Inno MI0283QT display panel + If M is selected the module will be called mi0283qt. +@@ -78,8 +78,8 @@ config TINYDRM_ST7586 + config TINYDRM_ST7735R + tristate "DRM support for Sitronix ST7735R display panels" + depends on DRM_TINYDRM && SPI +- depends on BACKLIGHT_CLASS_DEVICE + select TINYDRM_MIPI_DBI ++ select BACKLIGHT_CLASS_DEVICE + help + DRM driver Sitronix ST7735R with one of the following LCDs: + * JD-T18003-T01 1.8" 128x160 TFT +diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c +index e66ff25c008e..e9fb4ebb789f 100644 +--- a/drivers/gpu/drm/vkms/vkms_crc.c ++++ b/drivers/gpu/drm/vkms/vkms_crc.c +@@ -166,16 +166,24 @@ void vkms_crc_work_handle(struct work_struct *work) + struct drm_plane *plane; + u32 crc32 = 0; + u64 frame_start, frame_end; ++ bool crc_pending; + unsigned long flags; + + spin_lock_irqsave(&out->state_lock, flags); + frame_start = crtc_state->frame_start; + frame_end = crtc_state->frame_end; ++ crc_pending = crtc_state->crc_pending; ++ crtc_state->frame_start = 0; ++ crtc_state->frame_end = 0; ++ crtc_state->crc_pending = false; + spin_unlock_irqrestore(&out->state_lock, flags); + +- /* _vblank_handle() hasn't updated frame_start yet */ +- if (!frame_start || frame_start == frame_end) +- goto out; ++ /* ++ * We raced with the vblank hrtimer and previous work already computed ++ * the crc, nothing to do. 
++ */ ++ if (!crc_pending) ++ return; + + drm_for_each_plane(plane, &vdev->drm) { + struct vkms_plane_state *vplane_state; +@@ -196,20 +204,11 @@ void vkms_crc_work_handle(struct work_struct *work) + if (primary_crc) + crc32 = _vkms_get_crc(primary_crc, cursor_crc); + +- frame_end = drm_crtc_accurate_vblank_count(crtc); +- +- /* queue_work can fail to schedule crc_work; add crc for +- * missing frames ++ /* ++ * The worker can fall behind the vblank hrtimer, make sure we catch up. + */ + while (frame_start <= frame_end) + drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32); +- +-out: +- /* to avoid using the same value for frame number again */ +- spin_lock_irqsave(&out->state_lock, flags); +- crtc_state->frame_end = frame_end; +- crtc_state->frame_start = 0; +- spin_unlock_irqrestore(&out->state_lock, flags); + } + + static const char * const pipe_crc_sources[] = {"auto"}; +diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c +index 4d11292bc6f3..f392fa13015b 100644 +--- a/drivers/gpu/drm/vkms/vkms_crtc.c ++++ b/drivers/gpu/drm/vkms/vkms_crtc.c +@@ -30,13 +30,18 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) + * has read the data + */ + spin_lock(&output->state_lock); +- if (!state->frame_start) ++ if (!state->crc_pending) + state->frame_start = frame; ++ else ++ DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n", ++ state->frame_start, frame); ++ state->frame_end = frame; ++ state->crc_pending = true; + spin_unlock(&output->state_lock); + + ret = queue_work(output->crc_workq, &state->crc_work); + if (!ret) +- DRM_WARN("failed to queue vkms_crc_work_handle"); ++ DRM_DEBUG_DRIVER("vkms_crc_work_handle already queued\n"); + } + + spin_unlock(&output->lock); +diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c +index 738dd6206d85..92296bd8f623 100644 +--- a/drivers/gpu/drm/vkms/vkms_drv.c ++++ b/drivers/gpu/drm/vkms/vkms_drv.c +@@ -92,7 +92,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev) + dev->mode_config.max_height = YRES_MAX; + dev->mode_config.preferred_depth = 24; + +- return vkms_output_init(vkmsdev); ++ return vkms_output_init(vkmsdev, 0); + } + + static int __init vkms_init(void) +diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h +index b92c30c66a6f..2fee10a00051 100644 +--- a/drivers/gpu/drm/vkms/vkms_drv.h ++++ b/drivers/gpu/drm/vkms/vkms_drv.h +@@ -48,6 +48,8 @@ struct vkms_plane_state { + struct vkms_crtc_state { + struct drm_crtc_state base; + struct work_struct crc_work; ++ ++ bool crc_pending; + u64 frame_start; + u64 frame_end; + }; +@@ -105,10 +107,10 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, + int *max_error, ktime_t *vblank_time, + bool in_vblank_irq); + +-int vkms_output_init(struct vkms_device *vkmsdev); ++int vkms_output_init(struct vkms_device *vkmsdev, int index); + + struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev, +- enum drm_plane_type type); ++ enum drm_plane_type type, int index); + + /* Gem stuff */ + struct drm_gem_object *vkms_gem_create(struct drm_device *dev, +diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c +index 56fb5c2a2315..fb1941a6522c 100644 +--- a/drivers/gpu/drm/vkms/vkms_output.c ++++ b/drivers/gpu/drm/vkms/vkms_output.c +@@ -35,7 +35,7 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = { + .get_modes = vkms_conn_get_modes, + }; + +-int vkms_output_init(struct vkms_device 
*vkmsdev) ++int vkms_output_init(struct vkms_device *vkmsdev, int index) + { + struct vkms_output *output = &vkmsdev->output; + struct drm_device *dev = &vkmsdev->drm; +@@ -45,12 +45,12 @@ int vkms_output_init(struct vkms_device *vkmsdev) + struct drm_plane *primary, *cursor = NULL; + int ret; + +- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY); ++ primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index); + if (IS_ERR(primary)) + return PTR_ERR(primary); + + if (enable_cursor) { +- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR); ++ cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index); + if (IS_ERR(cursor)) { + ret = PTR_ERR(cursor); + goto err_cursor; +diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c +index 0fceb6258422..18c630cfc485 100644 +--- a/drivers/gpu/drm/vkms/vkms_plane.c ++++ b/drivers/gpu/drm/vkms/vkms_plane.c +@@ -176,7 +176,7 @@ static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = { + }; + + struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev, +- enum drm_plane_type type) ++ enum drm_plane_type type, int index) + { + struct drm_device *dev = &vkmsdev->drm; + const struct drm_plane_helper_funcs *funcs; +@@ -198,7 +198,7 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev, + funcs = &vkms_primary_helper_funcs; + } + +- ret = drm_universal_plane_init(dev, plane, 0, ++ ret = drm_universal_plane_init(dev, plane, 1 << index, + &vkms_plane_funcs, + formats, nformats, + NULL, type, NULL); +diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c +index 81df62f48c4c..6ac8becc2372 100644 +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -54,7 +54,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") + struct apple_sc { + unsigned long quirks; + unsigned int fn_on; +- DECLARE_BITMAP(pressed_fn, KEY_CNT); + DECLARE_BITMAP(pressed_numlock, KEY_CNT); + }; + +@@ -181,6 +180,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, + { + struct apple_sc *asc = hid_get_drvdata(hid); + const struct apple_key_translation *trans, *table; ++ bool do_translate; ++ u16 code = 0; + + if (usage->code == KEY_FN) { + asc->fn_on = !!value; +@@ -189,8 +190,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, + } + + if (fnmode) { +- int do_translate; +- + if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI && + hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) + table = macbookair_fn_keys; +@@ -202,25 +201,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, + trans = apple_find_translation (table, usage->code); + + if (trans) { +- if (test_bit(usage->code, asc->pressed_fn)) +- do_translate = 1; +- else if (trans->flags & APPLE_FLAG_FKEY) +- do_translate = (fnmode == 2 && asc->fn_on) || +- (fnmode == 1 && !asc->fn_on); +- else +- do_translate = asc->fn_on; +- +- if (do_translate) { +- if (value) +- set_bit(usage->code, asc->pressed_fn); +- else +- clear_bit(usage->code, asc->pressed_fn); +- +- input_event(input, usage->type, trans->to, +- value); +- +- return 1; ++ if (test_bit(trans->from, input->key)) ++ code = trans->from; ++ else if (test_bit(trans->to, input->key)) ++ code = trans->to; ++ ++ if (!code) { ++ if (trans->flags & APPLE_FLAG_FKEY) { ++ switch (fnmode) { ++ case 1: ++ do_translate = !asc->fn_on; ++ break; ++ case 2: ++ do_translate = asc->fn_on; ++ break; ++ default: ++ /* should never happen */ ++ do_translate = false; 
++ } ++ } else { ++ do_translate = asc->fn_on; ++ } ++ ++ code = do_translate ? trans->to : trans->from; + } ++ ++ input_event(input, usage->type, code, value); ++ return 1; + } + + if (asc->quirks & APPLE_NUMLOCK_EMULATION && +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c +index 53bddb50aeba..602219a8710d 100644 +--- a/drivers/hid/wacom_sys.c ++++ b/drivers/hid/wacom_sys.c +@@ -88,7 +88,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev, + } + + static int wacom_wac_pen_serial_enforce(struct hid_device *hdev, +- struct hid_report *report, u8 *raw_data, int size) ++ struct hid_report *report, u8 *raw_data, int report_size) + { + struct wacom *wacom = hid_get_drvdata(hdev); + struct wacom_wac *wacom_wac = &wacom->wacom_wac; +@@ -149,7 +149,8 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev, + if (flush) + wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo); + else if (insert) +- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size); ++ wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, ++ raw_data, report_size); + + return insert && !flush; + } +@@ -2176,7 +2177,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix) + { + struct wacom_wac *wacom_wac = &wacom->wacom_wac; + struct wacom_features *features = &wacom_wac->features; +- char name[WACOM_NAME_MAX]; ++ char name[WACOM_NAME_MAX - 20]; /* Leave some room for suffixes */ + + /* Generic devices name unspecified */ + if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) { +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index 1713235d28cb..2b4640397375 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -251,7 +251,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom) + + static int wacom_dtus_irq(struct wacom_wac *wacom) + { +- char *data = wacom->data; ++ unsigned char *data = wacom->data; + struct input_dev *input = wacom->pen_input; + unsigned short prox, pressure = 0; + +@@ -572,7 +572,7 @@ static int wacom_intuos_pad(struct wacom_wac *wacom) + strip2 = ((data[3] & 0x1f) << 8) | data[4]; + } + +- prox = (buttons & ~(~0 << nbuttons)) | (keys & ~(~0 << nkeys)) | ++ prox = (buttons & ~(~0U << nbuttons)) | (keys & ~(~0U << nkeys)) | + (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2; + + wacom_report_numbered_buttons(input, nbuttons, buttons); +diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c +index 66af44bfa67d..f6546de66fbc 100644 +--- a/drivers/i2c/busses/i2c-cht-wc.c ++++ b/drivers/i2c/busses/i2c-cht-wc.c +@@ -178,6 +178,51 @@ static const struct i2c_algorithm cht_wc_i2c_adap_algo = { + .smbus_xfer = cht_wc_i2c_adap_smbus_xfer, + }; + ++/* ++ * We are an i2c-adapter which itself is part of an i2c-client. This means that ++ * transfers done through us take adapter->bus_lock twice, once for our parent ++ * i2c-adapter and once to take our own bus_lock. Lockdep does not like this ++ * nested locking, to make lockdep happy in the case of busses with muxes, the ++ * i2c-core's i2c_adapter_lock_bus function calls: ++ * rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter)); ++ * ++ * But i2c_adapter_depth only works when the direct parent of the adapter is ++ * another adapter, as it is only meant for muxes. In our case there is an ++ * i2c-client and MFD instantiated platform_device in the parent->child chain ++ * between the 2 devices. 
++ * ++ * So we override the default i2c_lock_operations and pass a hardcoded ++ * depth of 1 to rt_mutex_lock_nested, to make lockdep happy. ++ * ++ * Note that if there were to be a mux attached to our adapter, this would ++ * break things again since the i2c-mux code expects the root-adapter to have ++ * a locking depth of 0. But we always have only 1 client directly attached ++ * in the form of the Charger IC paired with the CHT Whiskey Cove PMIC. ++ */ ++static void cht_wc_i2c_adap_lock_bus(struct i2c_adapter *adapter, ++ unsigned int flags) ++{ ++ rt_mutex_lock_nested(&adapter->bus_lock, 1); ++} ++ ++static int cht_wc_i2c_adap_trylock_bus(struct i2c_adapter *adapter, ++ unsigned int flags) ++{ ++ return rt_mutex_trylock(&adapter->bus_lock); ++} ++ ++static void cht_wc_i2c_adap_unlock_bus(struct i2c_adapter *adapter, ++ unsigned int flags) ++{ ++ rt_mutex_unlock(&adapter->bus_lock); ++} ++ ++static const struct i2c_lock_operations cht_wc_i2c_adap_lock_ops = { ++ .lock_bus = cht_wc_i2c_adap_lock_bus, ++ .trylock_bus = cht_wc_i2c_adap_trylock_bus, ++ .unlock_bus = cht_wc_i2c_adap_unlock_bus, ++}; ++ + /**** irqchip for the client connected to the extchgr i2c adapter ****/ + static void cht_wc_i2c_irq_lock(struct irq_data *data) + { +@@ -286,6 +331,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev) + adap->adapter.owner = THIS_MODULE; + adap->adapter.class = I2C_CLASS_HWMON; + adap->adapter.algo = &cht_wc_i2c_adap_algo; ++ adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops; + strlcpy(adap->adapter.name, "PMIC I2C Adapter", + sizeof(adap->adapter.name)); + adap->adapter.dev.parent = &pdev->dev; +diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c +index 9fcb13beeb8f..7a3291d91a5e 100644 +--- a/drivers/i2c/busses/i2c-tegra.c ++++ b/drivers/i2c/busses/i2c-tegra.c +@@ -713,12 +713,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) + u32 tsu_thd; + u8 tlow, thigh; + +- err = pm_runtime_get_sync(i2c_dev->dev); +- if (err < 0) { +- dev_err(i2c_dev->dev, "runtime resume failed %d\n", err); +- return err; +- } +- + reset_control_assert(i2c_dev->rst); + udelay(2); + reset_control_deassert(i2c_dev->rst); +@@ -772,7 +766,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) + if (err) { + dev_err(i2c_dev->dev, + "failed changing clock rate: %d\n", err); +- goto err; ++ return err; + } + } + +@@ -787,23 +781,21 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) + + err = tegra_i2c_flush_fifos(i2c_dev); + if (err) +- goto err; ++ return err; + + if (i2c_dev->is_multimaster_mode && i2c_dev->hw->has_slcg_override_reg) + i2c_writel(i2c_dev, I2C_MST_CORE_CLKEN_OVR, I2C_CLKEN_OVERRIDE); + + err = tegra_i2c_wait_for_config_load(i2c_dev); + if (err) +- goto err; ++ return err; + + if (i2c_dev->irq_disabled) { + i2c_dev->irq_disabled = false; + enable_irq(i2c_dev->irq); + } + +-err: +- pm_runtime_put(i2c_dev->dev); +- return err; ++ return 0; + } + + static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev) +@@ -1616,12 +1608,14 @@ static int tegra_i2c_probe(struct platform_device *pdev) + } + + pm_runtime_enable(&pdev->dev); +- if (!pm_runtime_enabled(&pdev->dev)) { ++ if (!pm_runtime_enabled(&pdev->dev)) + ret = tegra_i2c_runtime_resume(&pdev->dev); +- if (ret < 0) { +- dev_err(&pdev->dev, "runtime resume failed\n"); +- goto unprepare_div_clk; +- } ++ else ++ ret = pm_runtime_get_sync(i2c_dev->dev); ++ ++ if (ret < 0) { ++ dev_err(&pdev->dev, "runtime resume failed\n"); 
++ goto unprepare_div_clk; + } + + if (i2c_dev->is_multimaster_mode) { +@@ -1666,6 +1660,8 @@ static int tegra_i2c_probe(struct platform_device *pdev) + if (ret) + goto release_dma; + ++ pm_runtime_put(&pdev->dev); ++ + return 0; + + release_dma: +@@ -1726,17 +1722,25 @@ static int tegra_i2c_resume(struct device *dev) + struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); + int err; + ++ err = tegra_i2c_runtime_resume(dev); ++ if (err) ++ return err; ++ + err = tegra_i2c_init(i2c_dev, false); + if (err) + return err; + ++ err = tegra_i2c_runtime_suspend(dev); ++ if (err) ++ return err; ++ + i2c_mark_adapter_resumed(&i2c_dev->adapter); + + return 0; + } + + static const struct dev_pm_ops tegra_i2c_pm = { +- SET_SYSTEM_SLEEP_PM_OPS(tegra_i2c_suspend, tegra_i2c_resume) ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_i2c_suspend, tegra_i2c_resume) + SET_RUNTIME_PM_OPS(tegra_i2c_runtime_suspend, tegra_i2c_runtime_resume, + NULL) + }; +diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c +index 00d5219094e5..48bba4913952 100644 +--- a/drivers/mailbox/mtk-cmdq-mailbox.c ++++ b/drivers/mailbox/mtk-cmdq-mailbox.c +@@ -22,6 +22,7 @@ + #define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE) + + #define CMDQ_CURR_IRQ_STATUS 0x10 ++#define CMDQ_SYNC_TOKEN_UPDATE 0x68 + #define CMDQ_THR_SLOT_CYCLES 0x30 + #define CMDQ_THR_BASE 0x100 + #define CMDQ_THR_SIZE 0x80 +@@ -104,8 +105,12 @@ static void cmdq_thread_resume(struct cmdq_thread *thread) + + static void cmdq_init(struct cmdq *cmdq) + { ++ int i; ++ + WARN_ON(clk_enable(cmdq->clock) < 0); + writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES); ++ for (i = 0; i <= CMDQ_MAX_EVENT; i++) ++ writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE); + clk_disable(cmdq->clock); + } + +diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c +index 705e17a5479c..d3676fd3cf94 100644 +--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c ++++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c +@@ -47,7 +47,6 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = { + + static int qcom_apcs_ipc_probe(struct platform_device *pdev) + { +- struct device_node *np = pdev->dev.of_node; + struct qcom_apcs_ipc *apcs; + struct regmap *regmap; + struct resource *res; +@@ -55,6 +54,11 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev) + void __iomem *base; + unsigned long i; + int ret; ++ const struct of_device_id apcs_clk_match_table[] = { ++ { .compatible = "qcom,msm8916-apcs-kpss-global", }, ++ { .compatible = "qcom,qcs404-apcs-apps-global", }, ++ {} ++ }; + + apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL); + if (!apcs) +@@ -89,7 +93,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev) + return ret; + } + +- if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) { ++ if (of_match_device(apcs_clk_match_table, &pdev->dev)) { + apcs->clk = platform_device_register_data(&pdev->dev, + "qcom-apcs-msm8916-clk", + -1, NULL, 0); +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c +index 1f933dd197cd..b0aa595e4375 100644 +--- a/drivers/md/dm-raid.c ++++ b/drivers/md/dm-raid.c +@@ -3738,18 +3738,18 @@ static int raid_iterate_devices(struct dm_target *ti, + static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) + { + struct raid_set *rs = ti->private; +- unsigned int chunk_size = to_bytes(rs->md.chunk_sectors); ++ unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors); + +- blk_limits_io_min(limits, chunk_size); +- blk_limits_io_opt(limits, 
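The Tegra rework above changes the contract of tegra_i2c_init(): it no longer grabs a runtime-PM reference itself, so each caller (probe, system resume) must hold one across the hardware re-initialisation, and resume moves to the noirq phase. A condensed sketch of that calling convention for a hypothetical foo driver (all foo_* names are illustrative, not the actual Tegra code):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev) { /* gate clocks */ return 0; }
static int foo_runtime_resume(struct device *dev)  { /* ungate clocks */ return 0; }
static int foo_suspend_noirq(struct device *dev)   { return 0; }
static int foo_hw_init(struct device *dev)         { /* needs power */ return 0; }

static int foo_resume_noirq(struct device *dev)
{
        int err;

        err = foo_runtime_resume(dev);          /* power on */
        if (err)
                return err;

        err = foo_hw_init(dev);                 /* reprogram registers */
        if (err)
                return err;

        return foo_runtime_suspend(dev);        /* power off again */
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};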
chunk_size * mddev_data_stripes(rs)); ++ blk_limits_io_min(limits, chunk_size_bytes); ++ blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs)); + + /* + * RAID1 and RAID10 personalities require bio splitting, + * RAID0/4/5/6 don't and process large discard bios properly. + */ + if (rs_is_raid1(rs) || rs_is_raid10(rs)) { +- limits->discard_granularity = chunk_size; +- limits->max_discard_sectors = chunk_size; ++ limits->discard_granularity = chunk_size_bytes; ++ limits->max_discard_sectors = rs->md.chunk_sectors; + } + } + +diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c +index 31478fef6032..d3bcc4197f5d 100644 +--- a/drivers/md/dm-zoned-target.c ++++ b/drivers/md/dm-zoned-target.c +@@ -134,8 +134,6 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, + + refcount_inc(&bioctx->ref); + generic_make_request(clone); +- if (clone->bi_status == BLK_STS_IOERR) +- return -EIO; + + if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) + zone->wp_block += nr_blocks; +diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c +index ade6e1ce5a98..e3a04929aaa3 100644 +--- a/drivers/mfd/intel-lpss-pci.c ++++ b/drivers/mfd/intel-lpss-pci.c +@@ -35,6 +35,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev, + info->mem = &pdev->resource[0]; + info->irq = pdev->irq; + ++ pdev->d3cold_delay = 0; ++ + /* Probably it is enough to set this for iDMA capable devices only */ + pci_set_master(pdev); + pci_try_set_mwi(pdev); +diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c +index ca3d17e43ed8..ac88caca5ad4 100644 +--- a/drivers/net/dsa/rtl8366.c ++++ b/drivers/net/dsa/rtl8366.c +@@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) + { + struct realtek_smi *smi = ds->priv; ++ u16 vid; + int ret; + +- if (!smi->ops->is_vlan_valid(smi, port)) +- return -EINVAL; ++ for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++) ++ if (!smi->ops->is_vlan_valid(smi, vid)) ++ return -EINVAL; + + dev_info(smi->dev, "prepare VLANs %04x..%04x\n", + vlan->vid_begin, vlan->vid_end); +@@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, + u16 vid; + int ret; + +- if (!smi->ops->is_vlan_valid(smi, port)) +- return; ++ for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++) ++ if (!smi->ops->is_vlan_valid(smi, vid)) ++ return; + + dev_info(smi->dev, "add VLAN on port %d, %s, %s\n", + port, +diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c +index df976b259e43..296286f4fb39 100644 +--- a/drivers/net/dsa/sja1105/sja1105_main.c ++++ b/drivers/net/dsa/sja1105/sja1105_main.c +@@ -1875,7 +1875,9 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds, + return sja1105_static_config_reload(priv); + } + +-/* Caller must hold priv->tagger_data.meta_lock */ ++/* Must be called only with priv->tagger_data.state bit ++ * SJA1105_HWTS_RX_EN cleared ++ */ + static int sja1105_change_rxtstamping(struct sja1105_private *priv, + bool on) + { +@@ -1932,16 +1934,17 @@ static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, + break; + } + +- if (rx_on != priv->tagger_data.hwts_rx_en) { +- spin_lock(&priv->tagger_data.meta_lock); ++ if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) { ++ clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state); ++ + rc = sja1105_change_rxtstamping(priv, rx_on); +- spin_unlock(&priv->tagger_data.meta_lock); + if (rc < 0) { + dev_err(ds->dev, + "Failed to change 
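The sja1105 changes starting above swap a bool guarded by meta_lock for a flag bit accessed through the atomic bitops, which lets the tagger's receive hot path test the flag without taking a lock. The pattern in isolation (hypothetical foo_* names):

#include <linux/bitops.h>

#define FOO_HWTS_RX_EN  0               /* bit number within state */

struct foo_priv {
        unsigned long state;
};

static void foo_set_rx_tstamp(struct foo_priv *priv, bool on)
{
        /* set_bit()/clear_bit() are atomic; no lock is needed. */
        if (on)
                set_bit(FOO_HWTS_RX_EN, &priv->state);
        else
                clear_bit(FOO_HWTS_RX_EN, &priv->state);
}

static bool foo_rx_tstamp_enabled(struct foo_priv *priv)
{
        /* Safe from any context, including packet receive. */
        return test_bit(FOO_HWTS_RX_EN, &priv->state);
}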
RX timestamping: %d\n", rc); +- return -EFAULT; ++ return rc; + } +- priv->tagger_data.hwts_rx_en = rx_on; ++ if (rx_on) ++ set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state); + } + + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) +@@ -1960,7 +1963,7 @@ static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, + config.tx_type = HWTSTAMP_TX_ON; + else + config.tx_type = HWTSTAMP_TX_OFF; +- if (priv->tagger_data.hwts_rx_en) ++ if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + else + config.rx_filter = HWTSTAMP_FILTER_NONE; +@@ -1983,12 +1986,12 @@ static void sja1105_rxtstamp_work(struct work_struct *work) + + mutex_lock(&priv->ptp_lock); + +- now = priv->tstamp_cc.read(&priv->tstamp_cc); +- + while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) { + struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb); + u64 ts; + ++ now = priv->tstamp_cc.read(&priv->tstamp_cc); ++ + *shwt = (struct skb_shared_hwtstamps) {0}; + + ts = SJA1105_SKB_CB(skb)->meta_tstamp; +@@ -2009,7 +2012,7 @@ static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port, + struct sja1105_private *priv = ds->priv; + struct sja1105_tagger_data *data = &priv->tagger_data; + +- if (!data->hwts_rx_en) ++ if (!test_bit(SJA1105_HWTS_RX_EN, &data->state)) + return false; + + /* We need to read the full PTP clock to reconstruct the Rx +@@ -2165,6 +2168,7 @@ static int sja1105_probe(struct spi_device *spi) + tagger_data = &priv->tagger_data; + skb_queue_head_init(&tagger_data->skb_rxtstamp_queue); + INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work); ++ spin_lock_init(&tagger_data->meta_lock); + + /* Connections between dsa_port and sja1105_port */ + for (i = 0; i < SJA1105_NUM_PORTS; i++) { +diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c +index 84dc603138cf..58dd37ecde17 100644 +--- a/drivers/net/dsa/sja1105/sja1105_spi.c ++++ b/drivers/net/dsa/sja1105/sja1105_spi.c +@@ -409,7 +409,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv) + rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len); + if (rc < 0) { + dev_err(dev, "Invalid config, cannot upload\n"); +- return -EINVAL; ++ rc = -EINVAL; ++ goto out; + } + /* Prevent PHY jabbering during switch reset by inhibiting + * Tx on all ports and waiting for current packet to drain. +@@ -418,7 +419,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv) + rc = sja1105_inhibit_tx(priv, port_bitmap, true); + if (rc < 0) { + dev_err(dev, "Failed to inhibit Tx on ports\n"); +- return -ENXIO; ++ rc = -ENXIO; ++ goto out; + } + /* Wait for an eventual egress packet to finish transmission + * (reach IFG). 
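The sja1105_spi hunk above replaces bare returns with goto out so the config buffer allocated at function entry is freed on every exit path. Reduced to a skeleton (foo_* names and FOO_BUF_LEN invented for illustration; the helpers stand in for real work):

#include <linux/slab.h>

#define FOO_BUF_LEN     4096

struct foo_priv;
int foo_config_prepare(struct foo_priv *priv, void *buf);
int foo_config_send(struct foo_priv *priv, void *buf);

static int foo_upload_config(struct foo_priv *priv)
{
        void *buf;
        int rc;

        buf = kzalloc(FOO_BUF_LEN, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        rc = foo_config_prepare(priv, buf);
        if (rc < 0) {
                rc = -EINVAL;
                goto out;               /* buf is still owned here */
        }

        rc = foo_config_send(priv, buf);
out:
        kfree(buf);
        return rc;
}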
It is guaranteed that a second one will not +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +index 5b602243d573..a4dead4ab0ed 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +@@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, + static int alloc_uld_rxqs(struct adapter *adap, + struct sge_uld_rxq_info *rxq_info, bool lro) + { +- struct sge *s = &adap->sge; + unsigned int nq = rxq_info->nrxq + rxq_info->nciq; ++ int i, err, msi_idx, que_idx = 0, bmap_idx = 0; + struct sge_ofld_rxq *q = rxq_info->uldrxq; + unsigned short *ids = rxq_info->rspq_id; +- unsigned int bmap_idx = 0; ++ struct sge *s = &adap->sge; + unsigned int per_chan; +- int i, err, msi_idx, que_idx = 0; + + per_chan = rxq_info->nrxq / adap->params.nports; + +@@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap, + + if (msi_idx >= 0) { + bmap_idx = get_msix_idx_from_bmap(adap); ++ if (bmap_idx < 0) { ++ err = -ENOSPC; ++ goto freeout; ++ } + msi_idx = adap->msix_info_ulds[bmap_idx].idx; + } + err = t4_sge_alloc_rxq(adap, &q->rspq, false, +diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c +index 457444894d80..b4b8ba00ee01 100644 +--- a/drivers/net/ethernet/qlogic/qla3xxx.c ++++ b/drivers/net/ethernet/qlogic/qla3xxx.c +@@ -2787,6 +2787,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); ++ dev_kfree_skb_irq(skb); + ql_free_large_buffers(qdev); + return -ENOMEM; + } +diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c +index 1502fe8b0456..b9ac45d9dee8 100644 +--- a/drivers/net/ethernet/socionext/netsec.c ++++ b/drivers/net/ethernet/socionext/netsec.c +@@ -282,7 +282,6 @@ struct netsec_desc_ring { + void *vaddr; + u16 head, tail; + u16 xdp_xmit; /* netsec_xdp_xmit packets */ +- bool is_xdp; + struct page_pool *page_pool; + struct xdp_rxq_info xdp_rxq; + spinlock_t lock; /* XDP tx queue locking */ +@@ -634,8 +633,7 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv) + unsigned int bytes; + int cnt = 0; + +- if (dring->is_xdp) +- spin_lock(&dring->lock); ++ spin_lock(&dring->lock); + + bytes = 0; + entry = dring->vaddr + DESC_SZ * tail; +@@ -682,8 +680,8 @@ next: + entry = dring->vaddr + DESC_SZ * tail; + cnt++; + } +- if (dring->is_xdp) +- spin_unlock(&dring->lock); ++ ++ spin_unlock(&dring->lock); + + if (!cnt) + return false; +@@ -799,9 +797,6 @@ static void netsec_set_tx_de(struct netsec_priv *priv, + de->data_buf_addr_lw = lower_32_bits(desc->dma_addr); + de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len; + de->attr = attr; +- /* under spin_lock if using XDP */ +- if (!dring->is_xdp) +- dma_wmb(); + + dring->desc[idx] = *desc; + if (desc->buf_type == TYPE_NETSEC_SKB) +@@ -1123,12 +1118,10 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb, + u16 tso_seg_len = 0; + int filled; + +- if (dring->is_xdp) +- spin_lock_bh(&dring->lock); ++ spin_lock_bh(&dring->lock); + filled = netsec_desc_used(dring); + if (netsec_check_stop_tx(priv, filled)) { +- if (dring->is_xdp) +- spin_unlock_bh(&dring->lock); ++ spin_unlock_bh(&dring->lock); + net_warn_ratelimited("%s %s Tx queue full\n", + dev_name(priv->dev), ndev->name); + return NETDEV_TX_BUSY; +@@ -1161,8 +1154,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb, + tx_desc.dma_addr = 
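The netsec hunks remove the dring->is_xdp conditional and always serialise the Tx ring: taking a lock conditionally, based on state that can change at runtime when an XDP program is attached, is a recipe for races. Note the two lock flavours, mirrored in this sketch (hypothetical foo_ring):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_ring {
        spinlock_t lock;                /* serialises descriptor writers */
};

/* Transmit path uses the _bh variant, as in the hunk above;
 * local_bh_disable() nests, so this is safe even if BHs are already off. */
static netdev_tx_t foo_start_xmit(struct foo_ring *ring)
{
        spin_lock_bh(&ring->lock);
        /* ... write a Tx descriptor ... */
        spin_unlock_bh(&ring->lock);
        return NETDEV_TX_OK;
}

/* Completion runs from NAPI poll (softirq context): plain spin_lock. */
static void foo_clean_tx(struct foo_ring *ring)
{
        spin_lock(&ring->lock);
        /* ... reap completed descriptors ... */
        spin_unlock(&ring->lock);
}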
dma_map_single(priv->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) { +- if (dring->is_xdp) +- spin_unlock_bh(&dring->lock); ++ spin_unlock_bh(&dring->lock); + netif_err(priv, drv, priv->ndev, + "%s: DMA mapping failed\n", __func__); + ndev->stats.tx_dropped++; +@@ -1177,8 +1169,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb, + netdev_sent_queue(priv->ndev, skb->len); + + netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb); +- if (dring->is_xdp) +- spin_unlock_bh(&dring->lock); ++ spin_unlock_bh(&dring->lock); + netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */ + + return NETDEV_TX_OK; +@@ -1262,7 +1253,6 @@ err: + static void netsec_setup_tx_dring(struct netsec_priv *priv) + { + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; +- struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog); + int i; + + for (i = 0; i < DESC_NUM; i++) { +@@ -1275,12 +1265,6 @@ static void netsec_setup_tx_dring(struct netsec_priv *priv) + */ + de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD; + } +- +- if (xdp_prog) +- dring->is_xdp = true; +- else +- dring->is_xdp = false; +- + } + + static int netsec_setup_rx_dring(struct netsec_priv *priv) +diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c +index ce78714f536f..a505b2ab88b8 100644 +--- a/drivers/net/usb/hso.c ++++ b/drivers/net/usb/hso.c +@@ -2620,14 +2620,18 @@ static struct hso_device *hso_create_bulk_serial_device( + */ + if (serial->tiocmget) { + tiocmget = serial->tiocmget; ++ tiocmget->endp = hso_get_ep(interface, ++ USB_ENDPOINT_XFER_INT, ++ USB_DIR_IN); ++ if (!tiocmget->endp) { ++ dev_err(&interface->dev, "Failed to find INT IN ep\n"); ++ goto exit; ++ } ++ + tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL); + if (tiocmget->urb) { + mutex_init(&tiocmget->mutex); + init_waitqueue_head(&tiocmget->waitq); +- tiocmget->endp = hso_get_ep( +- interface, +- USB_ENDPOINT_XFER_INT, +- USB_DIR_IN); + } else + hso_free_tiomget(serial); + } +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index b6dc5d714b5e..3d77cd402ba9 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1350,6 +1350,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ + {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ + {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/ ++ {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */ + {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index 5f5722bf6762..7370e06a0e4b 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -887,9 +887,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb, + return 0; + } + +-static RING_IDX xennet_fill_frags(struct netfront_queue *queue, +- struct sk_buff *skb, +- struct sk_buff_head *list) ++static int xennet_fill_frags(struct netfront_queue *queue, ++ struct sk_buff *skb, ++ struct sk_buff_head *list) + { + RING_IDX cons = queue->rx.rsp_cons; + struct sk_buff *nskb; +@@ -908,7 +908,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, + if 
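The hso fix above resolves the interrupt-IN endpoint before allocating the URB and aborts probe when it is missing, rather than crashing later on a NULL endpoint. A generic probe-time lookup of the same kind (the foo_* name is hypothetical; the helper scans the current altsetting):

#include <linux/usb.h>

static struct usb_endpoint_descriptor *
foo_find_int_in_ep(struct usb_interface *intf)
{
        struct usb_host_interface *alt = intf->cur_altsetting;
        int i;

        for (i = 0; i < alt->desc.bNumEndpoints; i++) {
                struct usb_endpoint_descriptor *d = &alt->endpoint[i].desc;

                if (usb_endpoint_is_int_in(d))
                        return d;
        }
        return NULL;            /* caller should fail probe with -ENODEV */
}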
(unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { + queue->rx.rsp_cons = ++cons + skb_queue_len(list); + kfree_skb(nskb); +- return ~0U; ++ return -ENOENT; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, +@@ -919,7 +919,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, + kfree_skb(nskb); + } + +- return cons; ++ queue->rx.rsp_cons = cons; ++ ++ return 0; + } + + static int checksum_setup(struct net_device *dev, struct sk_buff *skb) +@@ -1045,8 +1047,7 @@ err: + skb->data_len = rx->status; + skb->len += rx->status; + +- i = xennet_fill_frags(queue, skb, &tmpq); +- if (unlikely(i == ~0U)) ++ if (unlikely(xennet_fill_frags(queue, skb, &tmpq))) + goto err; + + if (rx->flags & XEN_NETRXF_csum_blank) +@@ -1056,7 +1057,7 @@ err: + + __skb_queue_tail(&rxq, skb); + +- queue->rx.rsp_cons = ++i; ++ i = ++queue->rx.rsp_cons; + work_done++; + } + +diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig +index 2ab92409210a..297bf928d652 100644 +--- a/drivers/pci/Kconfig ++++ b/drivers/pci/Kconfig +@@ -181,7 +181,7 @@ config PCI_LABEL + + config PCI_HYPERV + tristate "Hyper-V PCI Frontend" +- depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 ++ depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS + help + The PCI device frontend driver allows the kernel to import arbitrary + PCI devices from a PCI backend to support PCI driver domains. +diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c +index cee5f2f590e2..14a6ba4067fb 100644 +--- a/drivers/pci/controller/dwc/pci-exynos.c ++++ b/drivers/pci/controller/dwc/pci-exynos.c +@@ -465,7 +465,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev) + + ep->phy = devm_of_phy_get(dev, np, NULL); + if (IS_ERR(ep->phy)) { +- if (PTR_ERR(ep->phy) == -EPROBE_DEFER) ++ if (PTR_ERR(ep->phy) != -ENODEV) + return PTR_ERR(ep->phy); + + ep->phy = NULL; +diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c +index 9b5cb5b70389..aabf22eaa6b9 100644 +--- a/drivers/pci/controller/dwc/pci-imx6.c ++++ b/drivers/pci/controller/dwc/pci-imx6.c +@@ -1173,8 +1173,8 @@ static int imx6_pcie_probe(struct platform_device *pdev) + + imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); + if (IS_ERR(imx6_pcie->vpcie)) { +- if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER) +- return -EPROBE_DEFER; ++ if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV) ++ return PTR_ERR(imx6_pcie->vpcie); + imx6_pcie->vpcie = NULL; + } + +diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c +index be61d96cc95e..ca9aa4501e7e 100644 +--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c ++++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c +@@ -44,6 +44,7 @@ static const struct pci_epc_features ls_pcie_epc_features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, ++ .bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4), + }; + + static const struct pci_epc_features* +diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c +index 954bc2b74bbc..811b5c6d62ea 100644 +--- a/drivers/pci/controller/dwc/pcie-histb.c ++++ b/drivers/pci/controller/dwc/pcie-histb.c +@@ -340,8 +340,8 @@ static int histb_pcie_probe(struct platform_device *pdev) + + hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie"); + if (IS_ERR(hipcie->vpcie)) { +- if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER) +- return -EPROBE_DEFER; ++ if (PTR_ERR(hipcie->vpcie) != 
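The pci-exynos, imx6 and histb hunks (and the rockchip one further down) all fix the same misuse of devm_regulator_get_optional(): -ENODEV means the supply is simply not described and may be ignored, while every other error, including -EPROBE_DEFER, must be propagated as-is. The canonical shape, sketched with hypothetical names:

#include <linux/regulator/consumer.h>

static int foo_get_optional_supply(struct device *dev, struct regulator **out)
{
        struct regulator *reg;

        reg = devm_regulator_get_optional(dev, "vpcie");
        if (IS_ERR(reg)) {
                /* Only "no such supply" is acceptable to swallow. */
                if (PTR_ERR(reg) != -ENODEV)
                        return PTR_ERR(reg);
                reg = NULL;
        }
        *out = reg;
        return 0;
}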
-ENODEV) ++ return PTR_ERR(hipcie->vpcie); + hipcie->vpcie = NULL; + } + +diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c +index 9a917b2456f6..673a1725ef38 100644 +--- a/drivers/pci/controller/pci-tegra.c ++++ b/drivers/pci/controller/pci-tegra.c +@@ -2237,14 +2237,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) + err = of_pci_get_devfn(port); + if (err < 0) { + dev_err(dev, "failed to parse address: %d\n", err); +- return err; ++ goto err_node_put; + } + + index = PCI_SLOT(err); + + if (index < 1 || index > soc->num_ports) { + dev_err(dev, "invalid port number: %d\n", index); +- return -EINVAL; ++ err = -EINVAL; ++ goto err_node_put; + } + + index--; +@@ -2253,12 +2254,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) + if (err < 0) { + dev_err(dev, "failed to parse # of lanes: %d\n", + err); +- return err; ++ goto err_node_put; + } + + if (value > 16) { + dev_err(dev, "invalid # of lanes: %u\n", value); +- return -EINVAL; ++ err = -EINVAL; ++ goto err_node_put; + } + + lanes |= value << (index << 3); +@@ -2272,13 +2274,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) + lane += value; + + rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL); +- if (!rp) +- return -ENOMEM; ++ if (!rp) { ++ err = -ENOMEM; ++ goto err_node_put; ++ } + + err = of_address_to_resource(port, 0, &rp->regs); + if (err < 0) { + dev_err(dev, "failed to parse address: %d\n", err); +- return err; ++ goto err_node_put; + } + + INIT_LIST_HEAD(&rp->list); +@@ -2330,6 +2334,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) + return err; + + return 0; ++ ++err_node_put: ++ of_node_put(port); ++ return err; + } + + /* +diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c +index 672e633601c7..a45a6447b01d 100644 +--- a/drivers/pci/controller/pcie-mobiveil.c ++++ b/drivers/pci/controller/pcie-mobiveil.c +@@ -88,6 +88,7 @@ + #define AMAP_CTRL_TYPE_MASK 3 + + #define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win) ++#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win) + #define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win) + #define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win) + #define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win) +@@ -462,7 +463,7 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) + } + + static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, +- u64 pci_addr, u32 type, u64 size) ++ u64 cpu_addr, u64 pci_addr, u32 type, u64 size) + { + u32 value; + u64 size64 = ~(size - 1); +@@ -482,7 +483,10 @@ static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, + csr_writel(pcie, upper_32_bits(size64), + PAB_EXT_PEX_AMAP_SIZEN(win_num)); + +- csr_writel(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num)); ++ csr_writel(pcie, lower_32_bits(cpu_addr), ++ PAB_PEX_AMAP_AXI_WIN(win_num)); ++ csr_writel(pcie, upper_32_bits(cpu_addr), ++ PAB_EXT_PEX_AMAP_AXI_WIN(win_num)); + + csr_writel(pcie, lower_32_bits(pci_addr), + PAB_PEX_AMAP_PEX_WIN_L(win_num)); +@@ -624,7 +628,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie) + CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res)); + + /* memory inbound translation window */ +- program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); ++ program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); + + /* Get the I/O and memory ranges from DT */ + resource_list_for_each_entry(win, &pcie->resources) { +diff --git 
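The pci-tegra hunk funnels every early loop exit through err_node_put because for_each_child_of_node() takes a reference on each node it yields; breaking out without of_node_put() leaks that reference. In miniature (foo_parse_one() is a stand-in):

#include <linux/of.h>

int foo_parse_one(struct device_node *np);

static int foo_parse_children(struct device_node *parent)
{
        struct device_node *child;
        int err;

        for_each_child_of_node(parent, child) {
                err = foo_parse_one(child);
                if (err) {
                        /* Drop the iterator's reference on early exit. */
                        of_node_put(child);
                        return err;
                }
        }
        return 0;
}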
a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c +index 8d20f1793a61..ef8e677ce9d1 100644 +--- a/drivers/pci/controller/pcie-rockchip-host.c ++++ b/drivers/pci/controller/pcie-rockchip-host.c +@@ -608,29 +608,29 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip) + + rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); + if (IS_ERR(rockchip->vpcie12v)) { +- if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER) +- return -EPROBE_DEFER; ++ if (PTR_ERR(rockchip->vpcie12v) != -ENODEV) ++ return PTR_ERR(rockchip->vpcie12v); + dev_info(dev, "no vpcie12v regulator found\n"); + } + + rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); + if (IS_ERR(rockchip->vpcie3v3)) { +- if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER) +- return -EPROBE_DEFER; ++ if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV) ++ return PTR_ERR(rockchip->vpcie3v3); + dev_info(dev, "no vpcie3v3 regulator found\n"); + } + + rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8"); + if (IS_ERR(rockchip->vpcie1v8)) { +- if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER) +- return -EPROBE_DEFER; ++ if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV) ++ return PTR_ERR(rockchip->vpcie1v8); + dev_info(dev, "no vpcie1v8 regulator found\n"); + } + + rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9"); + if (IS_ERR(rockchip->vpcie0v9)) { +- if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER) +- return -EPROBE_DEFER; ++ if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV) ++ return PTR_ERR(rockchip->vpcie0v9); + dev_info(dev, "no vpcie0v9 regulator found\n"); + } + +diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c +index bcd5d357ca23..c3899ee1db99 100644 +--- a/drivers/pci/hotplug/rpaphp_core.c ++++ b/drivers/pci/hotplug/rpaphp_core.c +@@ -230,7 +230,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, + struct of_drc_info drc; + const __be32 *value; + char cell_drc_name[MAX_DRC_NAME_LEN]; +- int j, fndit; ++ int j; + + info = of_find_property(dn->parent, "ibm,drc-info", NULL); + if (info == NULL) +@@ -245,17 +245,13 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, + + /* Should now know end of current entry */ + +- if (my_index > drc.last_drc_index) +- continue; +- +- fndit = 1; +- break; ++ /* Found it */ ++ if (my_index <= drc.last_drc_index) { ++ sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix, ++ my_index); ++ break; ++ } + } +- /* Found it */ +- +- if (fndit) +- sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix, +- my_index); + + if (((drc_name == NULL) || + (drc_name && !strcmp(drc_name, cell_drc_name))) && +diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c +index 06083b86d4f4..5fd90105510d 100644 +--- a/drivers/pci/pci-bridge-emul.c ++++ b/drivers/pci/pci-bridge-emul.c +@@ -38,7 +38,7 @@ struct pci_bridge_reg_behavior { + u32 rsvd; + }; + +-const static struct pci_bridge_reg_behavior pci_regs_behavior[] = { ++static const struct pci_bridge_reg_behavior pci_regs_behavior[] = { + [PCI_VENDOR_ID / 4] = { .ro = ~0 }, + [PCI_COMMAND / 4] = { + .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | +@@ -173,7 +173,7 @@ const static struct pci_bridge_reg_behavior pci_regs_behavior[] = { + }, + }; + +-const static struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { ++static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { + [PCI_CAP_LIST_ID / 4] = { + /* + * Capability ID, Next Capability Pointer and +diff --git 
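The rpaphp hunk above eliminates an uninitialised fndit flag by doing the work at the match site and breaking out of the loop. The same refactor in generic form (made-up range table; snprintf() shown as the safer choice when the buffer size is known):

#include <linux/errno.h>
#include <linux/kernel.h>

struct foo_range {
        u32 lo, hi;
        const char *prefix;
};

static int foo_name_for(u32 key, const struct foo_range *tbl, size_t n,
                        char *name, size_t len)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (key >= tbl[i].lo && key <= tbl[i].hi) {
                        /* Found it: act right here, no flag needed. */
                        snprintf(name, len, "%s%u", tbl[i].prefix, key);
                        return 0;
                }
        }
        return -ENOENT;
}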
a/drivers/pci/pci.c b/drivers/pci/pci.c +index 1b27b5af3d55..1f17da3dfeac 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -890,8 +890,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) + + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); +- if (dev->current_state != state && printk_ratelimit()) +- pci_info(dev, "Refused to change power state, currently in D%d\n", ++ if (dev->current_state != state) ++ pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n", + dev->current_state); + + /* +diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +index 6c640837073e..5bfa56f3847e 100644 +--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c ++++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +@@ -192,8 +192,8 @@ static const unsigned int uart_rts_b_pins[] = { GPIODV_27 }; + + static const unsigned int uart_tx_c_pins[] = { GPIOY_13 }; + static const unsigned int uart_rx_c_pins[] = { GPIOY_14 }; +-static const unsigned int uart_cts_c_pins[] = { GPIOX_11 }; +-static const unsigned int uart_rts_c_pins[] = { GPIOX_12 }; ++static const unsigned int uart_cts_c_pins[] = { GPIOY_11 }; ++static const unsigned int uart_rts_c_pins[] = { GPIOY_12 }; + + static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 }; + static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 }; +@@ -439,10 +439,10 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = { + GROUP(pwm_f_x, 3, 18), + + /* Bank Y */ +- GROUP(uart_cts_c, 1, 19), +- GROUP(uart_rts_c, 1, 18), +- GROUP(uart_tx_c, 1, 17), +- GROUP(uart_rx_c, 1, 16), ++ GROUP(uart_cts_c, 1, 17), ++ GROUP(uart_rts_c, 1, 16), ++ GROUP(uart_tx_c, 1, 19), ++ GROUP(uart_rx_c, 1, 18), + GROUP(pwm_a_y, 1, 21), + GROUP(pwm_f_y, 1, 20), + GROUP(i2s_out_ch23_y, 1, 5), +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c +index 9b9c61e3f065..977792654e01 100644 +--- a/drivers/pinctrl/pinctrl-amd.c ++++ b/drivers/pinctrl/pinctrl-amd.c +@@ -565,15 +565,25 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) + !(regval & BIT(INTERRUPT_MASK_OFF))) + continue; + irq = irq_find_mapping(gc->irq.domain, irqnr + i); +- generic_handle_irq(irq); ++ if (irq != 0) ++ generic_handle_irq(irq); + + /* Clear interrupt. + * We must read the pin register again, in case the + * value was changed while executing + * generic_handle_irq() above. ++ * If we didn't find a mapping for the interrupt, ++ * disable it in order to avoid a system hang caused ++ * by an interrupt storm. 
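The pinctrl-amd hunk above is defensive interrupt demultiplexing: irq_find_mapping() returns 0 when no Linux interrupt is mapped to a hardware line, and the fix masks such lines so a stuck source cannot generate an interrupt storm. A stripped-down handler of the same shape (register offsets and the struct are hypothetical):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#define FOO_IRQ_STATUS  0x00
#define FOO_IRQ_MASK    0x04

struct foo_gpio {
        void __iomem *base;
        struct irq_domain *domain;
};

static irqreturn_t foo_demux_irq(int irq, void *dev_id)
{
        struct foo_gpio *gpio = dev_id;
        unsigned long pending = readl(gpio->base + FOO_IRQ_STATUS);
        irqreturn_t ret = IRQ_NONE;
        unsigned int bit, virq;

        for_each_set_bit(bit, &pending, 32) {
                virq = irq_find_mapping(gpio->domain, bit);
                if (virq) {
                        generic_handle_irq(virq);
                        ret = IRQ_HANDLED;
                } else {
                        /* Unmapped but firing: mask it off. */
                        writel(BIT(bit), gpio->base + FOO_IRQ_MASK);
                }
        }
        return ret;
}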
+ */ + raw_spin_lock_irqsave(&gpio_dev->lock, flags); + regval = readl(regs + i); ++ if (irq == 0) { ++ regval &= ~BIT(INTERRUPT_ENABLE_OFF); ++ dev_dbg(&gpio_dev->pdev->dev, ++ "Disabling spurious GPIO IRQ %d\n", ++ irqnr + i); ++ } + writel(regval, regs + i); + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); + ret = IRQ_HANDLED; +diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c +index d3332da35637..31b6e511670f 100644 +--- a/drivers/pinctrl/pinctrl-stmfx.c ++++ b/drivers/pinctrl/pinctrl-stmfx.c +@@ -296,29 +296,29 @@ static int stmfx_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, + switch (param) { + case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT: + case PIN_CONFIG_BIAS_DISABLE: ++ case PIN_CONFIG_DRIVE_PUSH_PULL: ++ ret = stmfx_pinconf_set_type(pctl, pin, 0); ++ if (ret) ++ return ret; ++ break; + case PIN_CONFIG_BIAS_PULL_DOWN: ++ ret = stmfx_pinconf_set_type(pctl, pin, 1); ++ if (ret) ++ return ret; + ret = stmfx_pinconf_set_pupd(pctl, pin, 0); + if (ret) + return ret; + break; + case PIN_CONFIG_BIAS_PULL_UP: +- ret = stmfx_pinconf_set_pupd(pctl, pin, 1); ++ ret = stmfx_pinconf_set_type(pctl, pin, 1); + if (ret) + return ret; +- break; +- case PIN_CONFIG_DRIVE_OPEN_DRAIN: +- if (!dir) +- ret = stmfx_pinconf_set_type(pctl, pin, 1); +- else +- ret = stmfx_pinconf_set_type(pctl, pin, 0); ++ ret = stmfx_pinconf_set_pupd(pctl, pin, 1); + if (ret) + return ret; + break; +- case PIN_CONFIG_DRIVE_PUSH_PULL: +- if (!dir) +- ret = stmfx_pinconf_set_type(pctl, pin, 0); +- else +- ret = stmfx_pinconf_set_type(pctl, pin, 1); ++ case PIN_CONFIG_DRIVE_OPEN_DRAIN: ++ ret = stmfx_pinconf_set_type(pctl, pin, 1); + if (ret) + return ret; + break; +diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c +index 186ef98e7b2b..f1b523beec5b 100644 +--- a/drivers/pinctrl/tegra/pinctrl-tegra.c ++++ b/drivers/pinctrl/tegra/pinctrl-tegra.c +@@ -32,7 +32,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg) + + static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg) + { +- writel(val, pmx->regs[bank] + reg); ++ writel_relaxed(val, pmx->regs[bank] + reg); ++ /* make sure pinmux register write completed */ ++ pmx_readl(pmx, bank, reg); + } + + static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) +diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c +index 51fe60440d12..75cf861ba492 100644 +--- a/drivers/power/supply/power_supply_hwmon.c ++++ b/drivers/power/supply/power_supply_hwmon.c +@@ -284,6 +284,7 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy) + struct device *dev = &psy->dev; + struct device *hwmon; + int ret, i; ++ const char *name; + + if (!devres_open_group(dev, power_supply_add_hwmon_sysfs, + GFP_KERNEL)) +@@ -334,7 +335,19 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy) + } + } + +- hwmon = devm_hwmon_device_register_with_info(dev, psy->desc->name, ++ name = psy->desc->name; ++ if (strchr(name, '-')) { ++ char *new_name; ++ ++ new_name = devm_kstrdup(dev, name, GFP_KERNEL); ++ if (!new_name) { ++ ret = -ENOMEM; ++ goto error; ++ } ++ strreplace(new_name, '-', '_'); ++ name = new_name; ++ } ++ hwmon = devm_hwmon_device_register_with_info(dev, name, + psyhw, + &power_supply_hwmon_chip_info, + NULL); +diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c +index c61f00b72e15..a577218d1ab7 100644 +--- a/drivers/ptp/ptp_qoriq.c ++++ b/drivers/ptp/ptp_qoriq.c +@@ -507,6 +507,8 @@ int 
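The Tegra pinctrl hunk converts pmx_writel() into write-then-read-back: MMIO writes are posted, and the dummy read forces the write to reach the device before the caller proceeds (important right before, say, asserting a reset). As a standalone helper (sketch):

#include <linux/io.h>

static void foo_writel_flushed(void __iomem *base, u32 val, u32 reg)
{
        writel_relaxed(val, base + reg);
        /* Dummy read-back: guarantees the posted write has completed. */
        readl_relaxed(base + reg);
}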
ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base, + ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET; + } + ++ spin_lock_init(&ptp_qoriq->lock); ++ + ktime_get_real_ts64(&now); + ptp_qoriq_settime(&ptp_qoriq->caps, &now); + +@@ -514,7 +516,6 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base, + (ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | + (ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT; + +- spin_lock_init(&ptp_qoriq->lock); + spin_lock_irqsave(&ptp_qoriq->lock, flags); + + regs = &ptp_qoriq->regs; +diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig +index e72f65b61176..add43c337489 100644 +--- a/drivers/rtc/Kconfig ++++ b/drivers/rtc/Kconfig +@@ -500,6 +500,7 @@ config RTC_DRV_M41T80_WDT + watchdog timer in the ST M41T60 and M41T80 RTC chips series. + config RTC_DRV_BD70528 + tristate "ROHM BD70528 PMIC RTC" ++ depends on MFD_ROHM_BD70528 && (BD70528_WATCHDOG || !BD70528_WATCHDOG) + help + If you say Y here you will get support for the RTC + on ROHM BD70528 Power Management IC. +diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c +index a075e77617dc..3450d615974d 100644 +--- a/drivers/rtc/rtc-pcf85363.c ++++ b/drivers/rtc/rtc-pcf85363.c +@@ -166,7 +166,12 @@ static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm) + buf[DT_YEARS] = bin2bcd(tm->tm_year % 100); + + ret = regmap_bulk_write(pcf85363->regmap, CTRL_STOP_EN, +- tmp, sizeof(tmp)); ++ tmp, 2); ++ if (ret) ++ return ret; ++ ++ ret = regmap_bulk_write(pcf85363->regmap, DT_100THS, ++ buf, sizeof(tmp) - 2); + if (ret) + return ret; + +diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c +index 7ee673a25fd0..4f9a107a0427 100644 +--- a/drivers/rtc/rtc-snvs.c ++++ b/drivers/rtc/rtc-snvs.c +@@ -279,6 +279,10 @@ static int snvs_rtc_probe(struct platform_device *pdev) + if (!data) + return -ENOMEM; + ++ data->rtc = devm_rtc_allocate_device(&pdev->dev); ++ if (IS_ERR(data->rtc)) ++ return PTR_ERR(data->rtc); ++ + data->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap"); + + if (IS_ERR(data->regmap)) { +@@ -343,10 +347,9 @@ static int snvs_rtc_probe(struct platform_device *pdev) + goto error_rtc_device_register; + } + +- data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, +- &snvs_rtc_ops, THIS_MODULE); +- if (IS_ERR(data->rtc)) { +- ret = PTR_ERR(data->rtc); ++ data->rtc->ops = &snvs_rtc_ops; ++ ret = rtc_register_device(data->rtc); ++ if (ret) { + dev_err(&pdev->dev, "failed to register rtc: %d\n", ret); + goto error_rtc_device_register; + } +diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c +index 39b8cc4574b4..c6ed0b12e807 100644 +--- a/drivers/scsi/scsi_logging.c ++++ b/drivers/scsi/scsi_logging.c +@@ -15,57 +15,15 @@ + #include <scsi/scsi_eh.h> + #include <scsi/scsi_dbg.h> + +-#define SCSI_LOG_SPOOLSIZE 4096 +- +-#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG +-#warning SCSI logging bitmask too large +-#endif +- +-struct scsi_log_buf { +- char buffer[SCSI_LOG_SPOOLSIZE]; +- unsigned long map; +-}; +- +-static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log); +- + static char *scsi_log_reserve_buffer(size_t *len) + { +- struct scsi_log_buf *buf; +- unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE; +- unsigned long idx = 0; +- +- preempt_disable(); +- buf = this_cpu_ptr(&scsi_format_log); +- idx = find_first_zero_bit(&buf->map, map_bits); +- if (likely(idx < map_bits)) { +- while (test_and_set_bit(idx, &buf->map)) { +- idx = find_next_zero_bit(&buf->map, 
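The ptp_qoriq fix above is pure ordering: spin_lock_init() must run before ptp_qoriq_settime(), the first caller that takes the lock. Generalised, a lock is initialised before any code path, including an early interrupt, can reach it (hypothetical foo driver):

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

struct foo_priv {
        spinlock_t lock;
        int irq;
};

static irqreturn_t foo_isr(int irq, void *dev_id) { return IRQ_HANDLED; }
void foo_hw_settime(struct foo_priv *priv);     /* takes priv->lock */

static int foo_probe(struct platform_device *pdev)
{
        struct foo_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* Before foo_hw_settime() and before the IRQ is requested. */
        spin_lock_init(&priv->lock);

        foo_hw_settime(priv);
        return devm_request_irq(&pdev->dev, priv->irq, foo_isr, 0,
                                "foo", priv);
}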
map_bits, idx); +- if (idx >= map_bits) +- break; +- } +- } +- if (WARN_ON(idx >= map_bits)) { +- preempt_enable(); +- return NULL; +- } +- *len = SCSI_LOG_BUFSIZE; +- return buf->buffer + idx * SCSI_LOG_BUFSIZE; ++ *len = 128; ++ return kmalloc(*len, GFP_ATOMIC); + } + + static void scsi_log_release_buffer(char *bufptr) + { +- struct scsi_log_buf *buf; +- unsigned long idx; +- int ret; +- +- buf = this_cpu_ptr(&scsi_format_log); +- if (bufptr >= buf->buffer && +- bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) { +- idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE; +- ret = test_and_clear_bit(idx, &buf->map); +- WARN_ON(!ret); +- } +- preempt_enable(); ++ kfree(bufptr); + } + + static inline const char *scmd_name(const struct scsi_cmnd *scmd) +diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c +index 317873bc0555..ec25a71d0887 100644 +--- a/drivers/soundwire/intel.c ++++ b/drivers/soundwire/intel.c +@@ -289,6 +289,16 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm) + + if (pcm) { + count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num)); ++ ++ /* ++ * WORKAROUND: on all existing Intel controllers, pdi ++ * number 2 reports channel count as 1 even though it ++ * supports 8 channels. Performing hardcoding for pdi ++ * number 2. ++ */ ++ if (pdi_num == 2) ++ count = 7; ++ + } else { + count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id)); + count = ((count & SDW_SHIM_PDMSCAP_CPSS) >> +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c +index 703948c9fbe1..02206162eaa9 100644 +--- a/drivers/vfio/pci/vfio_pci.c ++++ b/drivers/vfio/pci/vfio_pci.c +@@ -438,11 +438,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev) + pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); + + /* +- * Try to reset the device. The success of this is dependent on +- * being able to lock the device, which is not always possible. ++ * Try to get the locks ourselves to prevent a deadlock. The ++ * success of this is dependent on being able to lock the device, ++ * which is not always possible. ++ * We can not use the "try" reset interface here, which will ++ * overwrite the previously restored configuration information. 
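The scsi_logging rewrite above trades a per-CPU, bitmap-managed buffer pool for plain heap allocation. What makes that valid is the GFP_ATOMIC flag: these helpers can run in atomic context, where a sleeping GFP_KERNEL allocation is forbidden. In miniature:

#include <linux/slab.h>

static char *foo_log_buf_get(size_t *len)
{
        *len = 128;
        /* May be called from atomic context: must not sleep. */
        return kmalloc(*len, GFP_ATOMIC);
}

static void foo_log_buf_put(char *buf)
{
        kfree(buf);             /* kfree(NULL) is a harmless no-op */
}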
+ */ +- if (vdev->reset_works && !pci_try_reset_function(pdev)) +- vdev->needs_reset = false; ++ if (vdev->reset_works && pci_cfg_access_trylock(pdev)) { ++ if (device_trylock(&pdev->dev)) { ++ if (!__pci_reset_function_locked(pdev)) ++ vdev->needs_reset = false; ++ device_unlock(&pdev->dev); ++ } ++ pci_cfg_access_unlock(pdev); ++ } + + pci_restore_state(pdev); + out: +diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c +index b674948e3bb8..3f28e1b5d422 100644 +--- a/drivers/video/fbdev/ssd1307fb.c ++++ b/drivers/video/fbdev/ssd1307fb.c +@@ -432,7 +432,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par) + if (ret < 0) + return ret; + +- ret = ssd1307fb_write_cmd(par->client, 0x0); ++ ret = ssd1307fb_write_cmd(par->client, par->page_offset); + if (ret < 0) + return ret; + +diff --git a/fs/9p/cache.c b/fs/9p/cache.c +index 995e332eee5c..eb2151fb6049 100644 +--- a/fs/9p/cache.c ++++ b/fs/9p/cache.c +@@ -51,6 +51,8 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses) + if (!v9ses->cachetag) { + if (v9fs_random_cachetag(v9ses) < 0) { + v9ses->fscache = NULL; ++ kfree(v9ses->cachetag); ++ v9ses->cachetag = NULL; + return; + } + } +diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c +index 8e83741b02e0..d4d4fdfac1a6 100644 +--- a/fs/ext4/block_validity.c ++++ b/fs/ext4/block_validity.c +@@ -38,6 +38,7 @@ int __init ext4_init_system_zone(void) + + void ext4_exit_system_zone(void) + { ++ rcu_barrier(); + kmem_cache_destroy(ext4_system_zone_cachep); + } + +@@ -49,17 +50,26 @@ static inline int can_merge(struct ext4_system_zone *entry1, + return 0; + } + ++static void release_system_zone(struct ext4_system_blocks *system_blks) ++{ ++ struct ext4_system_zone *entry, *n; ++ ++ rbtree_postorder_for_each_entry_safe(entry, n, ++ &system_blks->root, node) ++ kmem_cache_free(ext4_system_zone_cachep, entry); ++} ++ + /* + * Mark a range of blocks as belonging to the "system zone" --- that + * is, filesystem metadata blocks which should never be used by + * inodes. + */ +-static int add_system_zone(struct ext4_sb_info *sbi, ++static int add_system_zone(struct ext4_system_blocks *system_blks, + ext4_fsblk_t start_blk, + unsigned int count) + { + struct ext4_system_zone *new_entry = NULL, *entry; +- struct rb_node **n = &sbi->system_blks.rb_node, *node; ++ struct rb_node **n = &system_blks->root.rb_node, *node; + struct rb_node *parent = NULL, *new_node = NULL; + + while (*n) { +@@ -91,7 +101,7 @@ static int add_system_zone(struct ext4_sb_info *sbi, + new_node = &new_entry->node; + + rb_link_node(new_node, parent, n); +- rb_insert_color(new_node, &sbi->system_blks); ++ rb_insert_color(new_node, &system_blks->root); + } + + /* Can we merge to the left? 
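The vfio change just above deliberately avoids pci_try_reset_function(), which saves and restores config space and would clobber the state pci_restore_state() is about to apply; instead it takes both required locks with try-variants and only then issues the bare reset. Condensed into a helper (same kernel APIs as the hunk, foo_* name invented):

#include <linux/device.h>
#include <linux/pci.h>

static void foo_try_bare_reset(struct pci_dev *pdev, bool *needs_reset)
{
        /* Trylocks: on contention, skip the reset instead of deadlocking. */
        if (!pci_cfg_access_trylock(pdev))
                return;
        if (device_trylock(&pdev->dev)) {
                if (!__pci_reset_function_locked(pdev))
                        *needs_reset = false;
                device_unlock(&pdev->dev);
        }
        pci_cfg_access_unlock(pdev);
}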
*/ +@@ -101,7 +111,7 @@ static int add_system_zone(struct ext4_sb_info *sbi, + if (can_merge(entry, new_entry)) { + new_entry->start_blk = entry->start_blk; + new_entry->count += entry->count; +- rb_erase(node, &sbi->system_blks); ++ rb_erase(node, &system_blks->root); + kmem_cache_free(ext4_system_zone_cachep, entry); + } + } +@@ -112,7 +122,7 @@ static int add_system_zone(struct ext4_sb_info *sbi, + entry = rb_entry(node, struct ext4_system_zone, node); + if (can_merge(new_entry, entry)) { + new_entry->count += entry->count; +- rb_erase(node, &sbi->system_blks); ++ rb_erase(node, &system_blks->root); + kmem_cache_free(ext4_system_zone_cachep, entry); + } + } +@@ -126,7 +136,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi) + int first = 1; + + printk(KERN_INFO "System zones: "); +- node = rb_first(&sbi->system_blks); ++ node = rb_first(&sbi->system_blks->root); + while (node) { + entry = rb_entry(node, struct ext4_system_zone, node); + printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ", +@@ -137,7 +147,47 @@ static void debug_print_tree(struct ext4_sb_info *sbi) + printk(KERN_CONT "\n"); + } + +-static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino) ++/* ++ * Returns 1 if the passed-in block region (start_blk, ++ * start_blk+count) is valid; 0 if some part of the block region ++ * overlaps with filesystem metadata blocks. ++ */ ++static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi, ++ struct ext4_system_blocks *system_blks, ++ ext4_fsblk_t start_blk, ++ unsigned int count) ++{ ++ struct ext4_system_zone *entry; ++ struct rb_node *n; ++ ++ if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || ++ (start_blk + count < start_blk) || ++ (start_blk + count > ext4_blocks_count(sbi->s_es))) { ++ sbi->s_es->s_last_error_block = cpu_to_le64(start_blk); ++ return 0; ++ } ++ ++ if (system_blks == NULL) ++ return 1; ++ ++ n = system_blks->root.rb_node; ++ while (n) { ++ entry = rb_entry(n, struct ext4_system_zone, node); ++ if (start_blk + count - 1 < entry->start_blk) ++ n = n->rb_left; ++ else if (start_blk >= (entry->start_blk + entry->count)) ++ n = n->rb_right; ++ else { ++ sbi->s_es->s_last_error_block = cpu_to_le64(start_blk); ++ return 0; ++ } ++ } ++ return 1; ++} ++ ++static int ext4_protect_reserved_inode(struct super_block *sb, ++ struct ext4_system_blocks *system_blks, ++ u32 ino) + { + struct inode *inode; + struct ext4_sb_info *sbi = EXT4_SB(sb); +@@ -163,14 +213,15 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino) + if (n == 0) { + i++; + } else { +- if (!ext4_data_block_valid(sbi, map.m_pblk, n)) { ++ if (!ext4_data_block_valid_rcu(sbi, system_blks, ++ map.m_pblk, n)) { + ext4_error(sb, "blocks %llu-%llu from inode %u " + "overlap system zone", map.m_pblk, + map.m_pblk + map.m_len - 1, ino); + err = -EFSCORRUPTED; + break; + } +- err = add_system_zone(sbi, map.m_pblk, n); ++ err = add_system_zone(system_blks, map.m_pblk, n); + if (err < 0) + break; + i += n; +@@ -180,94 +231,130 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino) + return err; + } + ++static void ext4_destroy_system_zone(struct rcu_head *rcu) ++{ ++ struct ext4_system_blocks *system_blks; ++ ++ system_blks = container_of(rcu, struct ext4_system_blocks, rcu); ++ release_system_zone(system_blks); ++ kfree(system_blks); ++} ++ ++/* ++ * Build system zone rbtree which is used for block validity checking. ++ * ++ * The update of system_blks pointer in this function is protected by ++ * sb->s_umount semaphore. 
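ext4_data_block_valid_rcu() above walks the zone tree with the standard interval test: descend left when the queried range ends before the node's start, right when it begins after the node's end, and report an overlap otherwise. The bare loop (hypothetical foo_zone):

#include <linux/rbtree.h>

struct foo_zone {
        struct rb_node node;
        u64 start, count;
};

static struct foo_zone *foo_zone_overlapping(struct rb_root *root,
                                             u64 blk, u64 count)
{
        struct rb_node *n = root->rb_node;

        while (n) {
                struct foo_zone *z = rb_entry(n, struct foo_zone, node);

                if (blk + count - 1 < z->start)
                        n = n->rb_left;
                else if (blk >= z->start + z->count)
                        n = n->rb_right;
                else
                        return z;       /* ranges overlap */
        }
        return NULL;
}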
However we have to be careful as we can be ++ * racing with ext4_data_block_valid() calls reading system_blks rbtree ++ * protected only by RCU. That's why we first build the rbtree and then ++ * swap it in place. ++ */ + int ext4_setup_system_zone(struct super_block *sb) + { + ext4_group_t ngroups = ext4_get_groups_count(sb); + struct ext4_sb_info *sbi = EXT4_SB(sb); ++ struct ext4_system_blocks *system_blks; + struct ext4_group_desc *gdp; + ext4_group_t i; + int flex_size = ext4_flex_bg_size(sbi); + int ret; + + if (!test_opt(sb, BLOCK_VALIDITY)) { +- if (sbi->system_blks.rb_node) ++ if (sbi->system_blks) + ext4_release_system_zone(sb); + return 0; + } +- if (sbi->system_blks.rb_node) ++ if (sbi->system_blks) + return 0; + ++ system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL); ++ if (!system_blks) ++ return -ENOMEM; ++ + for (i=0; i < ngroups; i++) { + cond_resched(); + if (ext4_bg_has_super(sb, i) && + ((i < 5) || ((i % flex_size) == 0))) +- add_system_zone(sbi, ext4_group_first_block_no(sb, i), ++ add_system_zone(system_blks, ++ ext4_group_first_block_no(sb, i), + ext4_bg_num_gdb(sb, i) + 1); + gdp = ext4_get_group_desc(sb, i, NULL); +- ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1); ++ ret = add_system_zone(system_blks, ++ ext4_block_bitmap(sb, gdp), 1); + if (ret) +- return ret; +- ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1); ++ goto err; ++ ret = add_system_zone(system_blks, ++ ext4_inode_bitmap(sb, gdp), 1); + if (ret) +- return ret; +- ret = add_system_zone(sbi, ext4_inode_table(sb, gdp), ++ goto err; ++ ret = add_system_zone(system_blks, ++ ext4_inode_table(sb, gdp), + sbi->s_itb_per_group); + if (ret) +- return ret; ++ goto err; + } + if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) { +- ret = ext4_protect_reserved_inode(sb, ++ ret = ext4_protect_reserved_inode(sb, system_blks, + le32_to_cpu(sbi->s_es->s_journal_inum)); + if (ret) +- return ret; ++ goto err; + } + ++ /* ++ * System blks rbtree complete, announce it once to prevent racing ++ * with ext4_data_block_valid() accessing the rbtree at the same ++ * time. ++ */ ++ rcu_assign_pointer(sbi->system_blks, system_blks); ++ + if (test_opt(sb, DEBUG)) + debug_print_tree(sbi); + return 0; ++err: ++ release_system_zone(system_blks); ++ kfree(system_blks); ++ return ret; + } + +-/* Called when the filesystem is unmounted */ ++/* ++ * Called when the filesystem is unmounted or when remounting it with ++ * noblock_validity specified. ++ * ++ * The update of system_blks pointer in this function is protected by ++ * sb->s_umount semaphore. However we have to be careful as we can be ++ * racing with ext4_data_block_valid() calls reading system_blks rbtree ++ * protected only by RCU. So we first clear the system_blks pointer and ++ * then free the rbtree only after RCU grace period expires. 
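The update-side discipline this comment describes (build the replacement structure completely off to the side, publish it with a single rcu_assign_pointer(), free the old copy only after a grace period) reduces to the following skeleton; foo_cfg and the update mutex are illustrative, not ext4's actual names:

#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo_cfg {
        struct rb_root root;
        struct rcu_head rcu;
};

static struct foo_cfg __rcu *foo_active;
static DEFINE_MUTEX(foo_update_lock);

static void foo_cfg_free_rcu(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct foo_cfg, rcu));
}

static void foo_publish(struct foo_cfg *newc)
{
        struct foo_cfg *old;

        mutex_lock(&foo_update_lock);
        old = rcu_dereference_protected(foo_active,
                                        lockdep_is_held(&foo_update_lock));
        rcu_assign_pointer(foo_active, newc);   /* readers now see newc */
        mutex_unlock(&foo_update_lock);
        if (old)
                call_rcu(&old->rcu, foo_cfg_free_rcu);
}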
++ */ + void ext4_release_system_zone(struct super_block *sb) + { +- struct ext4_system_zone *entry, *n; ++ struct ext4_system_blocks *system_blks; + +- rbtree_postorder_for_each_entry_safe(entry, n, +- &EXT4_SB(sb)->system_blks, node) +- kmem_cache_free(ext4_system_zone_cachep, entry); ++ system_blks = rcu_dereference_protected(EXT4_SB(sb)->system_blks, ++ lockdep_is_held(&sb->s_umount)); ++ rcu_assign_pointer(EXT4_SB(sb)->system_blks, NULL); + +- EXT4_SB(sb)->system_blks = RB_ROOT; ++ if (system_blks) ++ call_rcu(&system_blks->rcu, ext4_destroy_system_zone); + } + +-/* +- * Returns 1 if the passed-in block region (start_blk, +- * start_blk+count) is valid; 0 if some part of the block region +- * overlaps with filesystem metadata blocks. +- */ + int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk, + unsigned int count) + { +- struct ext4_system_zone *entry; +- struct rb_node *n = sbi->system_blks.rb_node; ++ struct ext4_system_blocks *system_blks; ++ int ret; + +- if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || +- (start_blk + count < start_blk) || +- (start_blk + count > ext4_blocks_count(sbi->s_es))) { +- sbi->s_es->s_last_error_block = cpu_to_le64(start_blk); +- return 0; +- } +- while (n) { +- entry = rb_entry(n, struct ext4_system_zone, node); +- if (start_blk + count - 1 < entry->start_blk) +- n = n->rb_left; +- else if (start_blk >= (entry->start_blk + entry->count)) +- n = n->rb_right; +- else { +- sbi->s_es->s_last_error_block = cpu_to_le64(start_blk); +- return 0; +- } +- } +- return 1; ++ /* ++ * Lock the system zone to prevent it being released concurrently ++ * when doing a remount which inverse current "[no]block_validity" ++ * mount option. ++ */ ++ rcu_read_lock(); ++ system_blks = rcu_dereference(sbi->system_blks); ++ ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk, ++ count); ++ rcu_read_unlock(); ++ return ret; + } + + int ext4_check_blockref(const char *function, unsigned int line, +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index bf660aa7a9e0..c025efcbcf27 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -184,6 +184,14 @@ struct ext4_map_blocks { + unsigned int m_flags; + }; + ++/* ++ * Block validity checking, system zone rbtree. ++ */ ++struct ext4_system_blocks { ++ struct rb_root root; ++ struct rcu_head rcu; ++}; ++ + /* + * Flags for ext4_io_end->flags + */ +@@ -1421,7 +1429,7 @@ struct ext4_sb_info { + int s_jquota_fmt; /* Format of quota to use */ + #endif + unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */ +- struct rb_root system_blks; ++ struct ext4_system_blocks __rcu *system_blks; + + #ifdef EXTENTS_STATS + /* ext4 extents stats */ +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index 78a1b873e48a..aa3178f1b145 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -873,7 +873,21 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) + + static int f2fs_drop_inode(struct inode *inode) + { ++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + int ret; ++ ++ /* ++ * during filesystem shutdown, if checkpoint is disabled, ++ * drop useless meta/node dirty pages. ++ */ ++ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { ++ if (inode->i_ino == F2FS_NODE_INO(sbi) || ++ inode->i_ino == F2FS_META_INO(sbi)) { ++ trace_f2fs_drop_inode(inode, 1); ++ return 1; ++ } ++ } ++ + /* + * This is to avoid a deadlock condition like below. 
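On the read side, ext4_data_block_valid() above needs nothing more than an RCU read-side critical section around the pointer dereference and the tree walk. Continuing the sketch from the previous block (foo_cfg_lookup() is a hypothetical tree search):

bool foo_cfg_lookup(struct foo_cfg *cfg, u64 key);

static bool foo_check(u64 key)
{
        struct foo_cfg *cfg;
        bool ok = true;

        rcu_read_lock();
        cfg = rcu_dereference(foo_active);
        if (cfg)
                ok = foo_cfg_lookup(cfg, key);  /* walks cfg->root */
        rcu_read_unlock();      /* cfg must not be touched past here */
        return ok;
}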
+ * writeback_single_inode(inode) +diff --git a/fs/fat/dir.c b/fs/fat/dir.c +index 1bda2ab6745b..814ad2c2ba80 100644 +--- a/fs/fat/dir.c ++++ b/fs/fat/dir.c +@@ -1100,8 +1100,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used, + err = -ENOMEM; + goto error; + } ++ /* Avoid race with userspace read via bdev */ ++ lock_buffer(bhs[n]); + memset(bhs[n]->b_data, 0, sb->s_blocksize); + set_buffer_uptodate(bhs[n]); ++ unlock_buffer(bhs[n]); + mark_buffer_dirty_inode(bhs[n], dir); + + n++; +@@ -1158,6 +1161,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts) + fat_time_unix2fat(sbi, ts, &time, &date, &time_cs); + + de = (struct msdos_dir_entry *)bhs[0]->b_data; ++ /* Avoid race with userspace read via bdev */ ++ lock_buffer(bhs[0]); + /* filling the new directory slots ("." and ".." entries) */ + memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME); + memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME); +@@ -1180,6 +1185,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts) + de[0].size = de[1].size = 0; + memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de)); + set_buffer_uptodate(bhs[0]); ++ unlock_buffer(bhs[0]); + mark_buffer_dirty_inode(bhs[0], dir); + + err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE); +@@ -1237,11 +1243,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots, + + /* fill the directory entry */ + copy = min(size, sb->s_blocksize); ++ /* Avoid race with userspace read via bdev */ ++ lock_buffer(bhs[n]); + memcpy(bhs[n]->b_data, slots, copy); +- slots += copy; +- size -= copy; + set_buffer_uptodate(bhs[n]); ++ unlock_buffer(bhs[n]); + mark_buffer_dirty_inode(bhs[n], dir); ++ slots += copy; ++ size -= copy; + if (!size) + break; + n++; +diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c +index 265983635f2b..3647c65a0f48 100644 +--- a/fs/fat/fatent.c ++++ b/fs/fat/fatent.c +@@ -388,8 +388,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs, + err = -ENOMEM; + goto error; + } ++ /* Avoid race with userspace read via bdev */ ++ lock_buffer(c_bh); + memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize); + set_buffer_uptodate(c_bh); ++ unlock_buffer(c_bh); + mark_buffer_dirty_inode(c_bh, sbi->fat_inode); + if (sb->s_flags & SB_SYNCHRONOUS) + err = sync_dirty_buffer(c_bh); +diff --git a/fs/fs_context.c b/fs/fs_context.c +index 103643c68e3f..87c2c9687d90 100644 +--- a/fs/fs_context.c ++++ b/fs/fs_context.c +@@ -279,10 +279,8 @@ static struct fs_context *alloc_fs_context(struct file_system_type *fs_type, + fc->user_ns = get_user_ns(reference->d_sb->s_user_ns); + break; + case FS_CONTEXT_FOR_RECONFIGURE: +- /* We don't pin any namespaces as the superblock's +- * subscriptions cannot be changed at this point. 
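All three fat hunks close the same hole: userspace can read the underlying block device while the filesystem is filling a buffer, so the memset()/memcpy() and the uptodate flag must be covered by lock_buffer(). As a helper (sketch, hypothetical name):

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/string.h>

static void foo_fill_block(struct inode *dir, struct buffer_head *bh,
                           const void *data, size_t len)
{
        /* Keep concurrent bdev readers from seeing a torn block. */
        lock_buffer(bh);
        memcpy(bh->b_data, data, len);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        mark_buffer_dirty_inode(bh, dir);
}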
+- */ + atomic_inc(&reference->d_sb->s_active); ++ fc->user_ns = get_user_ns(reference->d_sb->s_user_ns); + fc->root = dget(reference); + break; + } +diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c +index e78657742bd8..3883633e82eb 100644 +--- a/fs/ocfs2/dlm/dlmunlock.c ++++ b/fs/ocfs2/dlm/dlmunlock.c +@@ -90,7 +90,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, + enum dlm_status status; + int actions = 0; + int in_use; +- u8 owner; ++ u8 owner; ++ int recovery_wait = 0; + + mlog(0, "master_node = %d, valblk = %d\n", master_node, + flags & LKM_VALBLK); +@@ -193,9 +194,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, + } + if (flags & LKM_CANCEL) + lock->cancel_pending = 0; +- else +- lock->unlock_pending = 0; +- ++ else { ++ if (!lock->unlock_pending) ++ recovery_wait = 1; ++ else ++ lock->unlock_pending = 0; ++ } + } + + /* get an extra ref on lock. if we are just switching +@@ -229,6 +233,17 @@ leave: + spin_unlock(&res->spinlock); + wake_up(&res->wq); + ++ if (recovery_wait) { ++ spin_lock(&res->spinlock); ++ /* Unlock request will directly succeed after owner dies, ++ * and the lock is already removed from grant list. We have to ++ * wait for RECOVERING done or we miss the chance to purge it ++ * since the removement is much faster than RECOVERING proc. ++ */ ++ __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING); ++ spin_unlock(&res->spinlock); ++ } ++ + /* let the caller's final dlm_lock_put handle the actual kfree */ + if (actions & DLM_UNLOCK_FREE_LOCK) { + /* this should always be coupled with list removal */ +diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c +index 2bb3468fc93a..8caff834f002 100644 +--- a/fs/pstore/ram.c ++++ b/fs/pstore/ram.c +@@ -144,6 +144,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time, + if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n", + (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type, + &header_length) == 3) { ++ time->tv_nsec *= 1000; + if (data_type == 'C') + *compressed = true; + else +@@ -151,6 +152,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time, + } else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n", + (time64_t *)&time->tv_sec, &time->tv_nsec, + &header_length) == 2) { ++ time->tv_nsec *= 1000; + *compressed = false; + } else { + time->tv_sec = 0; +diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h +index 79435cfc20eb..897e799dbcb9 100644 +--- a/include/linux/dsa/sja1105.h ++++ b/include/linux/dsa/sja1105.h +@@ -31,6 +31,8 @@ + #define SJA1105_META_SMAC 0x222222222222ull + #define SJA1105_META_DMAC 0x0180C200000Eull + ++#define SJA1105_HWTS_RX_EN 0 ++ + /* Global tagger data: each struct sja1105_port has a reference to + * the structure defined in struct sja1105_private. 
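The pstore hunk is a units fix: the ramoops record header stores the timestamp fraction in microseconds, but the parsed value was being stored into tv_nsec unscaled. A hedged sketch of the corrected conversion (format string reduced from the hunk's RAMOOPS_KERNMSG_HDR variants):

#include <linux/kernel.h>
#include <linux/time64.h>

static void foo_parse_stamp(const char *buf, struct timespec64 *ts)
{
        time64_t sec = 0;
        unsigned long frac = 0;

        /* Header carries "<secs>.<usecs>". */
        if (sscanf(buf, "%lld.%lu", &sec, &frac) == 2) {
                ts->tv_sec = sec;
                ts->tv_nsec = frac * 1000;      /* usec -> nsec */
        }
}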
+ */ +@@ -42,7 +44,7 @@ struct sja1105_tagger_data { + * from taggers running on multiple ports on SMP systems + */ + spinlock_t meta_lock; +- bool hwts_rx_en; ++ unsigned long state; + }; + + struct sja1105_skb_cb { +diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h +index ccb73422c2fa..e6f54ef6698b 100644 +--- a/include/linux/mailbox/mtk-cmdq-mailbox.h ++++ b/include/linux/mailbox/mtk-cmdq-mailbox.h +@@ -20,6 +20,9 @@ + #define CMDQ_WFE_WAIT BIT(15) + #define CMDQ_WFE_WAIT_VALUE 0x1 + ++/** cmdq event maximum */ ++#define CMDQ_MAX_EVENT 0x3ff ++ + /* + * CMDQ_CODE_MASK: + * set write mask +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 0334ca97c584..fe4552e1c40b 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -1405,7 +1405,11 @@ extern void pagefault_out_of_memory(void); + + extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); + ++#ifdef CONFIG_MMU + extern bool can_do_mlock(void); ++#else ++static inline bool can_do_mlock(void) { return false; } ++#endif + extern int user_shm_lock(size_t, struct user_struct *); + extern void user_shm_unlock(size_t, struct user_struct *); + +diff --git a/include/linux/pci.h b/include/linux/pci.h +index 82e4cd1b7ac3..ac8a6c4e1792 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -2435,4 +2435,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); + #define pci_notice_ratelimited(pdev, fmt, arg...) \ + dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg) + ++#define pci_info_ratelimited(pdev, fmt, arg...) \ ++ dev_info_ratelimited(&(pdev)->dev, fmt, ##arg) ++ + #endif /* LINUX_PCI_H */ +diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h +index 54ade13a9b15..4e8899972db4 100644 +--- a/include/linux/soc/mediatek/mtk-cmdq.h ++++ b/include/linux/soc/mediatek/mtk-cmdq.h +@@ -13,9 +13,6 @@ + + #define CMDQ_NO_TIMEOUT 0xffffffffu + +-/** cmdq event maximum */ +-#define CMDQ_MAX_EVENT 0x3ff +- + struct cmdq_pkt; + + struct cmdq_client { +diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h +index e03bd9d41fa8..7b196d234626 100644 +--- a/include/scsi/scsi_dbg.h ++++ b/include/scsi/scsi_dbg.h +@@ -6,8 +6,6 @@ struct scsi_cmnd; + struct scsi_device; + struct scsi_sense_hdr; + +-#define SCSI_LOG_BUFSIZE 128 +- + extern void scsi_print_command(struct scsi_cmnd *); + extern size_t __scsi_format_command(char *, size_t, + const unsigned char *, size_t); +diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h +index a13a62db3565..edc5c887a44c 100644 +--- a/include/trace/events/rxrpc.h ++++ b/include/trace/events/rxrpc.h +@@ -1068,7 +1068,7 @@ TRACE_EVENT(rxrpc_recvmsg, + ), + + TP_fast_assign( +- __entry->call = call->debug_id; ++ __entry->call = call ? 
call->debug_id : 0; + __entry->why = why; + __entry->seq = seq; + __entry->offset = offset; +diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c +index d5870723b8ad..15d70a90b50d 100644 +--- a/kernel/kexec_core.c ++++ b/kernel/kexec_core.c +@@ -300,6 +300,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) + { + struct page *pages; + ++ if (fatal_signal_pending(current)) ++ return NULL; + pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order); + if (pages) { + unsigned int count, i; +diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c +index c4ce08f43bd6..ab4a4606d19b 100644 +--- a/kernel/livepatch/core.c ++++ b/kernel/livepatch/core.c +@@ -1175,6 +1175,7 @@ err: + pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n", + patch->mod->name, obj->mod->name, obj->mod->name); + mod->klp_alive = false; ++ obj->mod = NULL; + klp_cleanup_module_patches_limited(mod, patch); + mutex_unlock(&klp_mutex); + +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index 5960e2980a8a..4d39540011e2 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -596,7 +596,7 @@ config DEBUG_KMEMLEAK_EARLY_LOG_SIZE + int "Maximum kmemleak early log entries" + depends on DEBUG_KMEMLEAK + range 200 40000 +- default 400 ++ default 16000 + help + Kmemleak must track all the memory allocations to avoid + reporting false positives. Since memory may be allocated or +diff --git a/net/core/sock.c b/net/core/sock.c +index 545fac19a711..3aa93af51d48 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -1700,8 +1700,6 @@ static void __sk_destruct(struct rcu_head *head) + sk_filter_uncharge(sk, filter); + RCU_INIT_POINTER(sk->sk_filter, NULL); + } +- if (rcu_access_pointer(sk->sk_reuseport_cb)) +- reuseport_detach_sock(sk); + + sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); + +@@ -1728,7 +1726,14 @@ static void __sk_destruct(struct rcu_head *head) + + void sk_destruct(struct sock *sk) + { +- if (sock_flag(sk, SOCK_RCU_FREE)) ++ bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); ++ ++ if (rcu_access_pointer(sk->sk_reuseport_cb)) { ++ reuseport_detach_sock(sk); ++ use_call_rcu = true; ++ } ++ ++ if (use_call_rcu) + call_rcu(&sk->sk_rcu, __sk_destruct); + else + __sk_destruct(&sk->sk_rcu); +diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c +index 47ee88163a9d..27fe80d07460 100644 +--- a/net/dsa/tag_sja1105.c ++++ b/net/dsa/tag_sja1105.c +@@ -155,7 +155,11 @@ static struct sk_buff + /* Step 1: A timestampable frame was received. + * Buffer it until we get its meta frame. + */ +- if (is_link_local && sp->data->hwts_rx_en) { ++ if (is_link_local) { ++ if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state)) ++ /* Do normal processing. */ ++ return skb; ++ + spin_lock(&sp->data->meta_lock); + /* Was this a link-local frame instead of the meta + * that we were expecting? +@@ -186,6 +190,12 @@ static struct sk_buff + } else if (is_meta) { + struct sk_buff *stampable_skb; + ++ /* Drop the meta frame if we're not in the right state ++ * to process it. 
++ */ ++ if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state)) ++ return NULL; ++ + spin_lock(&sp->data->meta_lock); + + stampable_skb = sp->data->stampable_skb; +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c +index a53a543fe055..52690bb3e40f 100644 +--- a/net/ipv4/ip_gre.c ++++ b/net/ipv4/ip_gre.c +@@ -1446,6 +1446,7 @@ static void erspan_setup(struct net_device *dev) + struct ip_tunnel *t = netdev_priv(dev); + + ether_setup(dev); ++ dev->max_mtu = 0; + dev->netdev_ops = &erspan_netdev_ops; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 7dcce724c78b..14654876127e 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -916,16 +916,15 @@ void ip_rt_send_redirect(struct sk_buff *skb) + if (peer->rate_tokens == 0 || + time_after(jiffies, + (peer->rate_last + +- (ip_rt_redirect_load << peer->rate_tokens)))) { ++ (ip_rt_redirect_load << peer->n_redirects)))) { + __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr); + + icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); + peer->rate_last = jiffies; +- ++peer->rate_tokens; + ++peer->n_redirects; + #ifdef CONFIG_IP_ROUTE_VERBOSE + if (log_martians && +- peer->rate_tokens == ip_rt_redirect_number) ++ peer->n_redirects == ip_rt_redirect_number) + net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", + &ip_hdr(skb)->saddr, inet_iif(skb), + &ip_hdr(skb)->daddr, &gw); +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c +index 3e8b38c73d8c..483323332d74 100644 +--- a/net/ipv4/tcp_timer.c ++++ b/net/ipv4/tcp_timer.c +@@ -198,8 +198,13 @@ static bool retransmits_timed_out(struct sock *sk, + return false; + + start_ts = tcp_sk(sk)->retrans_stamp; +- if (likely(timeout == 0)) +- timeout = tcp_model_timeout(sk, boundary, TCP_RTO_MIN); ++ if (likely(timeout == 0)) { ++ unsigned int rto_base = TCP_RTO_MIN; ++ ++ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) ++ rto_base = tcp_timeout_init(sk); ++ timeout = tcp_model_timeout(sk, boundary, rto_base); ++ } + + return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0; + } +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c +index 16486c8b708b..5e5d0575a43c 100644 +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -821,6 +821,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, + int is_udplite = IS_UDPLITE(sk); + int offset = skb_transport_offset(skb); + int len = skb->len - offset; ++ int datalen = len - sizeof(*uh); + __wsum csum = 0; + + /* +@@ -854,10 +855,12 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, + return -EIO; + } + +- skb_shinfo(skb)->gso_size = cork->gso_size; +- skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; +- skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh), +- cork->gso_size); ++ if (datalen > cork->gso_size) { ++ skb_shinfo(skb)->gso_size = cork->gso_size; ++ skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; ++ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen, ++ cork->gso_size); ++ } + goto csum_partial; + } + +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 6a576ff92c39..34ccef18b40e 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -5964,13 +5964,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) + switch (event) { + case RTM_NEWADDR: + /* +- * If the address was optimistic +- * we inserted the route at the start of +- * our DAD process, so we don't need +- * to do it again ++ * If the address was optimistic we inserted the route at the ++ * start of our DAD process, 
so we don't need to do it again. ++ * If the device was taken down in the middle of the DAD ++ * cycle there is a race where we could get here without a ++ * host route, so nothing to insert. That will be fixed when ++ * the device is brought up. + */ +- if (!rcu_access_pointer(ifp->rt->fib6_node)) ++ if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) { + ip6_ins_rt(net, ifp->rt); ++ } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) { ++ pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n", ++ &ifp->addr, ifp->idev->dev->name); ++ } ++ + if (ifp->idev->cnf.forwarding) + addrconf_join_anycast(ifp); + if (!ipv6_addr_any(&ifp->peer_addr)) +diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c +index fa014d5f1732..a593aaf25748 100644 +--- a/net/ipv6/ip6_input.c ++++ b/net/ipv6/ip6_input.c +@@ -221,6 +221,16 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev, + if (ipv6_addr_is_multicast(&hdr->saddr)) + goto err; + ++ /* While RFC4291 is not explicit about v4mapped addresses ++ * in IPv6 headers, it seems clear linux dual-stack ++ * model can not deal properly with these. ++ * Security models could be fooled by ::ffff:127.0.0.1 for example. ++ * ++ * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02 ++ */ ++ if (ipv6_addr_v4mapped(&hdr->saddr)) ++ goto err; ++ + skb->transport_header = skb->network_header + sizeof(*hdr); + IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); + +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c +index 5995fdc99d3f..0454a8a3b39c 100644 +--- a/net/ipv6/udp.c ++++ b/net/ipv6/udp.c +@@ -1109,6 +1109,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, + __wsum csum = 0; + int offset = skb_transport_offset(skb); + int len = skb->len - offset; ++ int datalen = len - sizeof(*uh); + + /* + * Create a UDP header +@@ -1141,8 +1142,12 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, + return -EIO; + } + +- skb_shinfo(skb)->gso_size = cork->gso_size; +- skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; ++ if (datalen > cork->gso_size) { ++ skb_shinfo(skb)->gso_size = cork->gso_size; ++ skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; ++ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen, ++ cork->gso_size); ++ } + goto csum_partial; + } + +diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c +index 8dfea26536c9..ccdd790e163a 100644 +--- a/net/nfc/llcp_sock.c ++++ b/net/nfc/llcp_sock.c +@@ -107,9 +107,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) + llcp_sock->service_name = kmemdup(llcp_addr.service_name, + llcp_sock->service_name_len, + GFP_KERNEL); +- ++ if (!llcp_sock->service_name) { ++ ret = -ENOMEM; ++ goto put_dev; ++ } + llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); + if (llcp_sock->ssap == LLCP_SAP_MAX) { ++ kfree(llcp_sock->service_name); ++ llcp_sock->service_name = NULL; + ret = -EADDRINUSE; + goto put_dev; + } +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c +index ea64c90b14e8..17e6ca62f1be 100644 +--- a/net/nfc/netlink.c ++++ b/net/nfc/netlink.c +@@ -970,7 +970,8 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info) + int rc; + u32 idx; + +- if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) ++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || ++ !info->attrs[NFC_ATTR_TARGET_INDEX]) + return -EINVAL; + + idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); +@@ -1018,7 +1019,8 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info) + struct sk_buff *msg = 
NULL; + u32 idx; + +- if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) ++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || ++ !info->attrs[NFC_ATTR_FIRMWARE_NAME]) + return -EINVAL; + + idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); +diff --git a/net/rds/ib.c b/net/rds/ib.c +index 45acab2de0cf..9de2ae22d583 100644 +--- a/net/rds/ib.c ++++ b/net/rds/ib.c +@@ -143,6 +143,9 @@ static void rds_ib_add_one(struct ib_device *device) + refcount_set(&rds_ibdev->refcount, 1); + INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free); + ++ INIT_LIST_HEAD(&rds_ibdev->ipaddr_list); ++ INIT_LIST_HEAD(&rds_ibdev->conn_list); ++ + rds_ibdev->max_wrs = device->attrs.max_qp_wr; + rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE); + +@@ -203,9 +206,6 @@ static void rds_ib_add_one(struct ib_device *device) + device->name, + rds_ibdev->use_fastreg ? "FRMR" : "FMR"); + +- INIT_LIST_HEAD(&rds_ibdev->ipaddr_list); +- INIT_LIST_HEAD(&rds_ibdev->conn_list); +- + down_write(&rds_ib_devices_lock); + list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices); + up_write(&rds_ib_devices_lock); +diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c +index 06c7a2da21bc..39b427dc7512 100644 +--- a/net/sched/sch_cbq.c ++++ b/net/sched/sch_cbq.c +@@ -1127,6 +1127,33 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = { + [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) }, + }; + ++static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1], ++ struct nlattr *opt, ++ struct netlink_ext_ack *extack) ++{ ++ int err; ++ ++ if (!opt) { ++ NL_SET_ERR_MSG(extack, "CBQ options are required for this operation"); ++ return -EINVAL; ++ } ++ ++ err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, ++ cbq_policy, extack); ++ if (err < 0) ++ return err; ++ ++ if (tb[TCA_CBQ_WRROPT]) { ++ const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]); ++ ++ if (wrr->priority > TC_CBQ_MAXPRIO) { ++ NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO"); ++ err = -EINVAL; ++ } ++ } ++ return err; ++} ++ + static int cbq_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) + { +@@ -1139,13 +1166,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt, + hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + q->delay_timer.function = cbq_undelay; + +- if (!opt) { +- NL_SET_ERR_MSG(extack, "CBQ options are required for this operation"); +- return -EINVAL; +- } +- +- err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy, +- extack); ++ err = cbq_opt_parse(tb, opt, extack); + if (err < 0) + return err; + +@@ -1464,13 +1485,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t + struct cbq_class *parent; + struct qdisc_rate_table *rtab = NULL; + +- if (!opt) { +- NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing"); +- return -EINVAL; +- } +- +- err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy, +- extack); ++ err = cbq_opt_parse(tb, opt, extack); + if (err < 0) + return err; + +diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c +index 4a403d35438f..284ab2dcf47f 100644 +--- a/net/sched/sch_cbs.c ++++ b/net/sched/sch_cbs.c +@@ -306,7 +306,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) + if (err < 0) + goto skip; + +- if (ecmd.base.speed != SPEED_UNKNOWN) ++ if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN) + speed = ecmd.base.speed; + + skip: +diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c +index bad1cbe59a56..05605b30bef3 100644 +--- 
a/net/sched/sch_dsmark.c ++++ b/net/sched/sch_dsmark.c +@@ -361,6 +361,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt, + goto errout; + + err = -EINVAL; ++ if (!tb[TCA_DSMARK_INDICES]) ++ goto errout; + indices = nla_get_u16(tb[TCA_DSMARK_INDICES]); + + if (hweight32(indices) != 1) +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c +index 8d8bc2ec5cd6..76bebe516194 100644 +--- a/net/sched/sch_taprio.c ++++ b/net/sched/sch_taprio.c +@@ -961,12 +961,11 @@ static void taprio_set_picos_per_byte(struct net_device *dev, + if (err < 0) + goto skip; + +- if (ecmd.base.speed != SPEED_UNKNOWN) ++ if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN) + speed = ecmd.base.speed; + + skip: +- picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, +- speed * 1000 * 1000); ++ picos_per_byte = (USEC_PER_SEC * 8) / speed; + + atomic64_set(&q->picos_per_byte, picos_per_byte); + netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", +diff --git a/net/tipc/link.c b/net/tipc/link.c +index c2c5c53cad22..b0063d05599e 100644 +--- a/net/tipc/link.c ++++ b/net/tipc/link.c +@@ -160,6 +160,7 @@ struct tipc_link { + struct { + u16 len; + u16 limit; ++ struct sk_buff *target_bskb; + } backlog[5]; + u16 snd_nxt; + u16 window; +@@ -866,6 +867,7 @@ static void link_prepare_wakeup(struct tipc_link *l) + void tipc_link_reset(struct tipc_link *l) + { + struct sk_buff_head list; ++ u32 imp; + + __skb_queue_head_init(&list); + +@@ -887,11 +889,10 @@ void tipc_link_reset(struct tipc_link *l) + __skb_queue_purge(&l->deferdq); + __skb_queue_purge(&l->backlogq); + __skb_queue_purge(&l->failover_deferdq); +- l->backlog[TIPC_LOW_IMPORTANCE].len = 0; +- l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0; +- l->backlog[TIPC_HIGH_IMPORTANCE].len = 0; +- l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0; +- l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0; ++ for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) { ++ l->backlog[imp].len = 0; ++ l->backlog[imp].target_bskb = NULL; ++ } + kfree_skb(l->reasm_buf); + kfree_skb(l->failover_reasm_skb); + l->reasm_buf = NULL; +@@ -931,7 +932,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, + u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; + struct sk_buff_head *transmq = &l->transmq; + struct sk_buff_head *backlogq = &l->backlogq; +- struct sk_buff *skb, *_skb, *bskb; ++ struct sk_buff *skb, *_skb, **tskb; + int pkt_cnt = skb_queue_len(list); + int rc = 0; + +@@ -980,19 +981,21 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, + seqno++; + continue; + } +- if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) { ++ tskb = &l->backlog[imp].target_bskb; ++ if (tipc_msg_bundle(*tskb, hdr, mtu)) { + kfree_skb(__skb_dequeue(list)); + l->stats.sent_bundled++; + continue; + } +- if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) { ++ if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) { + kfree_skb(__skb_dequeue(list)); +- __skb_queue_tail(backlogq, bskb); +- l->backlog[msg_importance(buf_msg(bskb))].len++; ++ __skb_queue_tail(backlogq, *tskb); ++ l->backlog[imp].len++; + l->stats.sent_bundled++; + l->stats.sent_bundles++; + continue; + } ++ l->backlog[imp].target_bskb = NULL; + l->backlog[imp].len += skb_queue_len(list); + skb_queue_splice_tail_init(list, backlogq); + } +@@ -1008,6 +1011,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l, + u16 seqno = l->snd_nxt; + u16 ack = l->rcv_nxt - 1; + u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; ++ u32 imp; + + while (skb_queue_len(&l->transmq) < l->window) { + skb = 
skb_peek(&l->backlogq); +@@ -1018,7 +1022,10 @@ static void tipc_link_advance_backlog(struct tipc_link *l, + break; + __skb_dequeue(&l->backlogq); + hdr = buf_msg(skb); +- l->backlog[msg_importance(hdr)].len--; ++ imp = msg_importance(hdr); ++ l->backlog[imp].len--; ++ if (unlikely(skb == l->backlog[imp].target_bskb)) ++ l->backlog[imp].target_bskb = NULL; + __skb_queue_tail(&l->transmq, skb); + /* next retransmit attempt */ + if (link_is_bc_sndlink(l)) +diff --git a/net/tipc/msg.c b/net/tipc/msg.c +index f48e5857210f..b956ce4a40ef 100644 +--- a/net/tipc/msg.c ++++ b/net/tipc/msg.c +@@ -484,10 +484,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg, + bmsg = buf_msg(_skb); + tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0, + INT_H_SIZE, dnode); +- if (msg_isdata(msg)) +- msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE); +- else +- msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE); ++ msg_set_importance(bmsg, msg_importance(msg)); + msg_set_seqno(bmsg, msg_seqno(msg)); + msg_set_ack(bmsg, msg_ack(msg)); + msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c +index ab47bf3ab66e..2ab43b2bba31 100644 +--- a/net/vmw_vsock/af_vsock.c ++++ b/net/vmw_vsock/af_vsock.c +@@ -638,7 +638,7 @@ struct sock *__vsock_create(struct net *net, + } + EXPORT_SYMBOL_GPL(__vsock_create); + +-static void __vsock_release(struct sock *sk) ++static void __vsock_release(struct sock *sk, int level) + { + if (sk) { + struct sk_buff *skb; +@@ -648,9 +648,17 @@ static void __vsock_release(struct sock *sk) + vsk = vsock_sk(sk); + pending = NULL; /* Compiler warning. */ + ++ /* The release call is supposed to use lock_sock_nested() ++ * rather than lock_sock(), if a sock lock should be acquired. ++ */ + transport->release(vsk); + +- lock_sock(sk); ++ /* When "level" is SINGLE_DEPTH_NESTING, use the nested ++ * version to avoid the warning "possible recursive locking ++ * detected". When "level" is 0, lock_sock_nested(sk, level) ++ * is the same as lock_sock(sk). ++ */ ++ lock_sock_nested(sk, level); + sock_orphan(sk); + sk->sk_shutdown = SHUTDOWN_MASK; + +@@ -659,7 +667,7 @@ static void __vsock_release(struct sock *sk) + + /* Clean up any sockets that never were accepted. 
*/ + while ((pending = vsock_dequeue_accept(sk)) != NULL) { +- __vsock_release(pending); ++ __vsock_release(pending, SINGLE_DEPTH_NESTING); + sock_put(pending); + } + +@@ -708,7 +716,7 @@ EXPORT_SYMBOL_GPL(vsock_stream_has_space); + + static int vsock_release(struct socket *sock) + { +- __vsock_release(sock->sk); ++ __vsock_release(sock->sk, 0); + sock->sk = NULL; + sock->state = SS_FREE; + +diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c +index 9d864ebeb7b3..4b126b21b453 100644 +--- a/net/vmw_vsock/hyperv_transport.c ++++ b/net/vmw_vsock/hyperv_transport.c +@@ -559,7 +559,7 @@ static void hvs_release(struct vsock_sock *vsk) + struct sock *sk = sk_vsock(vsk); + bool remove_sock; + +- lock_sock(sk); ++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + remove_sock = hvs_close_lock_held(vsk); + release_sock(sk); + if (remove_sock) +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c +index 6f1a8aff65c5..a7adffd062c7 100644 +--- a/net/vmw_vsock/virtio_transport_common.c ++++ b/net/vmw_vsock/virtio_transport_common.c +@@ -790,7 +790,7 @@ void virtio_transport_release(struct vsock_sock *vsk) + struct sock *sk = &vsk->sk; + bool remove_sock = true; + +- lock_sock(sk); ++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + if (sk->sk_type == SOCK_STREAM) + remove_sock = virtio_transport_close(vsk); + +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index 74dd46de01b6..e75517464786 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -3403,7 +3403,7 @@ static int selinux_inode_copy_up_xattr(const char *name) + static int selinux_kernfs_init_security(struct kernfs_node *kn_dir, + struct kernfs_node *kn) + { +- const struct task_security_struct *tsec = current_security(); ++ const struct task_security_struct *tsec = selinux_cred(current_cred()); + u32 parent_sid, newsid, clen; + int rc; + char *context; +diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h +index 91c5395dd20c..586b7abd0aa7 100644 +--- a/security/selinux/include/objsec.h ++++ b/security/selinux/include/objsec.h +@@ -37,16 +37,6 @@ struct task_security_struct { + u32 sockcreate_sid; /* fscreate SID */ + }; + +-/* +- * get the subjective security ID of the current task +- */ +-static inline u32 current_sid(void) +-{ +- const struct task_security_struct *tsec = current_security(); +- +- return tsec->sid; +-} +- + enum label_initialized { + LABEL_INVALID, /* invalid or not initialized */ + LABEL_INITIALIZED, /* initialized */ +@@ -185,4 +175,14 @@ static inline struct ipc_security_struct *selinux_ipc( + return ipc->security + selinux_blob_sizes.lbs_ipc; + } + ++/* ++ * get the subjective security ID of the current task ++ */ ++static inline u32 current_sid(void) ++{ ++ const struct task_security_struct *tsec = selinux_cred(current_cred()); ++ ++ return tsec->sid; ++} ++ + #endif /* _SELINUX_OBJSEC_H_ */ +diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c +index f1c93a7be9ec..38ac3da4e791 100644 +--- a/security/smack/smack_access.c ++++ b/security/smack/smack_access.c +@@ -465,7 +465,7 @@ char *smk_parse_smack(const char *string, int len) + if (i == 0 || i >= SMK_LONGLABEL) + return ERR_PTR(-EINVAL); + +- smack = kzalloc(i + 1, GFP_KERNEL); ++ smack = kzalloc(i + 1, GFP_NOFS); + if (smack == NULL) + return ERR_PTR(-ENOMEM); + +@@ -500,7 +500,7 @@ int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap, + if ((m & *cp) == 0) + continue; + rc = 
netlbl_catmap_setbit(&sap->attr.mls.cat, +- cat, GFP_KERNEL); ++ cat, GFP_NOFS); + if (rc < 0) { + netlbl_catmap_free(sap->attr.mls.cat); + return rc; +@@ -536,7 +536,7 @@ struct smack_known *smk_import_entry(const char *string, int len) + if (skp != NULL) + goto freeout; + +- skp = kzalloc(sizeof(*skp), GFP_KERNEL); ++ skp = kzalloc(sizeof(*skp), GFP_NOFS); + if (skp == NULL) { + skp = ERR_PTR(-ENOMEM); + goto freeout; +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c +index 4c5e5a438f8b..36b6b9d4cbaf 100644 +--- a/security/smack/smack_lsm.c ++++ b/security/smack/smack_lsm.c +@@ -288,7 +288,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip, + if (!(ip->i_opflags & IOP_XATTR)) + return ERR_PTR(-EOPNOTSUPP); + +- buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL); ++ buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS); + if (buffer == NULL) + return ERR_PTR(-ENOMEM); + +@@ -937,7 +937,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm) + + if (rc != 0) + return rc; +- } else if (bprm->unsafe) ++ } ++ if (bprm->unsafe & ~LSM_UNSAFE_PTRACE) + return -EPERM; + + bsp->smk_task = isp->smk_task; +@@ -3925,6 +3926,8 @@ access_check: + skp = smack_ipv6host_label(&sadd); + if (skp == NULL) + skp = smack_net_ambient; ++ if (skb == NULL) ++ break; + #ifdef CONFIG_AUDIT + smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); + ad.a.u.net->family = family; +diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c +index 6a10dea01eef..696586407e83 100644 +--- a/tools/power/x86/intel-speed-select/isst-config.c ++++ b/tools/power/x86/intel-speed-select/isst-config.c +@@ -402,6 +402,9 @@ void set_cpu_mask_from_punit_coremask(int cpu, unsigned long long core_mask, + int j; + + for (j = 0; j < topo_max_cpus; ++j) { ++ if (!CPU_ISSET_S(j, present_cpumask_size, present_cpumask)) ++ continue; ++ + if (cpu_map[j].pkg_id == pkg_id && + cpu_map[j].die_id == die_id && + cpu_map[j].punit_cpu_core == i) { +diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c +index b8265ee9923f..614b31aad168 100644 +--- a/tools/testing/selftests/net/udpgso.c ++++ b/tools/testing/selftests/net/udpgso.c +@@ -89,12 +89,9 @@ struct testcase testcases_v4[] = { + .tfail = true, + }, + { +- /* send a single MSS: will fail with GSO, because the segment +- * logic in udp4_ufo_fragment demands a gso skb to be > MTU +- */ ++ /* send a single MSS: will fall back to no GSO */ + .tlen = CONST_MSS_V4, + .gso_len = CONST_MSS_V4, +- .tfail = true, + .r_num_mss = 1, + }, + { +@@ -139,10 +136,9 @@ struct testcase testcases_v4[] = { + .tfail = true, + }, + { +- /* send a single 1B MSS: will fail, see single MSS above */ ++ /* send a single 1B MSS: will fall back to no GSO */ + .tlen = 1, + .gso_len = 1, +- .tfail = true, + .r_num_mss = 1, + }, + { +@@ -196,12 +192,9 @@ struct testcase testcases_v6[] = { + .tfail = true, + }, + { +- /* send a single MSS: will fail with GSO, because the segment +- * logic in udp4_ufo_fragment demands a gso skb to be > MTU +- */ ++ /* send a single MSS: will fall back to no GSO */ + .tlen = CONST_MSS_V6, + .gso_len = CONST_MSS_V6, +- .tfail = true, + .r_num_mss = 1, + }, + { +@@ -246,10 +239,9 @@ struct testcase testcases_v6[] = { + .tfail = true, + }, + { +- /* send a single 1B MSS: will fail, see single MSS above */ ++ /* send a single 1B MSS: will fall back to no GSO */ + .tlen = 1, + .gso_len = 1, +- .tfail = true, + .r_num_mss = 1, + }, + { +diff --git 
a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
+index 97f9f491c541..c402464b038f 100644
+--- a/tools/testing/selftests/powerpc/tm/tm.h
++++ b/tools/testing/selftests/powerpc/tm/tm.h
+@@ -55,7 +55,8 @@ static inline bool failure_is_unavailable(void)
+ static inline bool failure_is_reschedule(void)
+ {
+ if ((failure_code() & TM_CAUSE_RESCHED) == TM_CAUSE_RESCHED ||
+- (failure_code() & TM_CAUSE_KVM_RESCHED) == TM_CAUSE_KVM_RESCHED)
++ (failure_code() & TM_CAUSE_KVM_RESCHED) == TM_CAUSE_KVM_RESCHED ||
++ (failure_code() & TM_CAUSE_KVM_FAC_UNAV) == TM_CAUSE_KVM_FAC_UNAV)
+ return true;
+
+ return false;
+diff --git a/usr/Makefile b/usr/Makefile
+index 6a89eb019275..e6f7cb2f81db 100644
+--- a/usr/Makefile
++++ b/usr/Makefile
+@@ -11,6 +11,9 @@ datafile_y = initramfs_data.cpio$(suffix_y)
+ datafile_d_y = .$(datafile_y).d
+ AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
+
++# clean rules do not have CONFIG_INITRAMFS_COMPRESSION. So clean up after all
++# possible compression formats.
++clean-files += initramfs_data.cpio*
+
+ # Generate builtin.o based on initramfs_data.o
+ obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
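
For illustration of the udp_send_skb()/udp_v6_send_skb() hunks above: after this change, a send whose payload does not exceed cork->gso_size is no longer marked SKB_GSO_UDP_L4 and goes out as an ordinary datagram, which is why the matching udpgso selftest cases drop their .tfail flags. The following userspace sketch is not part of the patch; it merely exercises that fallback, and the loopback address, port 8000, and 1400-byte segment size are arbitrary example values.

/* Minimal sketch (illustrative only): a UDP_SEGMENT send whose payload
 * fits in a single segment, which patched kernels transmit as one plain
 * datagram instead of marking it for GSO. Build with: cc -o udpseg udpseg.c
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103 /* from linux/udp.h */
#endif

int main(void)
{
	char buf[1400]; /* exactly one segment's worth of payload */
	int gso_size = 1400; /* example segment size */
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(8000), /* example port */
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	memset(buf, 'x', sizeof(buf));

	/* Request UDP GSO in 1400-byte segments (IPPROTO_UDP == SOL_UDP). */
	if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso_size,
		       sizeof(gso_size)) < 0) {
		perror("setsockopt(UDP_SEGMENT)");
		return 1;
	}

	/* datalen == gso_size here: with the fix this is sent as a single
	 * ordinary datagram; before it, such sends were expected to fail in
	 * the GSO segmentation path (see the old selftest comments above). */
	if (sendto(fd, buf, sizeof(buf), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}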
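
Similarly, on the sja1105 hunks above: replacing the tagger's plain bool hwts_rx_en with bit SJA1105_HWTS_RX_EN in an unsigned long state word lets RX timestamping be toggled with the kernel's atomic set_bit()/clear_bit()/test_bit() helpers rather than through an unsynchronized flag. The sketch below is an analogy only, reproducing that pattern with C11 atomics in userspace; all names in it are hypothetical.

/* Userspace analogy (not kernel code) for the bool -> state-bit change. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define HWTS_RX_EN_BIT (1UL << 0) /* stands in for SJA1105_HWTS_RX_EN */

static atomic_ulong tagger_state; /* plays the role of sja1105_tagger_data::state */

/* set_bit()-like: one atomic read-modify-write, safe against racing CPUs. */
static void hwts_rx_enable(void)
{
	atomic_fetch_or(&tagger_state, HWTS_RX_EN_BIT);
}

/* clear_bit()-like counterpart. */
static void hwts_rx_disable(void)
{
	atomic_fetch_and(&tagger_state, ~HWTS_RX_EN_BIT);
}

/* test_bit()-like check, as the tagger now performs for every frame. */
static bool hwts_rx_enabled(void)
{
	return atomic_load(&tagger_state) & HWTS_RX_EN_BIT;
}

int main(void)
{
	hwts_rx_enable();
	printf("rx timestamping: %s\n", hwts_rx_enabled() ? "on" : "off");
	hwts_rx_disable();
	printf("rx timestamping: %s\n", hwts_rx_enabled() ? "on" : "off");
	return 0;
}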