Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.18 commit in: /
Date: Wed, 05 Sep 2018 15:30:35
Message-Id: 1536161420.a830aee1944ccf0a758da9c5f5de62ae4aef091f.mpagano@gentoo
commit: a830aee1944ccf0a758da9c5f5de62ae4aef091f
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 5 15:30:20 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 5 15:30:20 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a830aee1

Linux patch 4.18.6

0000_README | 4 +
1005_linux-4.18.6.patch | 5123 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5127 insertions(+)

diff --git a/0000_README b/0000_README
index 8da0979..8bfc2e4 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch: 1004_linux-4.18.5.patch
From: http://www.kernel.org
Desc: Linux 4.18.5

+Patch: 1005_linux-4.18.6.patch
+From: http://www.kernel.org
+Desc: Linux 4.18.6
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.18.6.patch b/1005_linux-4.18.6.patch
new file mode 100644
index 0000000..99632b3
--- /dev/null
+++ b/1005_linux-4.18.6.patch
@@ -0,0 +1,5123 @@
+diff --git a/Makefile b/Makefile
+index a41692c5827a..62524f4d42ad 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 18
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Merciless Moray
+
+@@ -493,9 +493,13 @@ KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+ endif
+
+ RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
++RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
+ RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
++RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
+ RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
++RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
+ export RETPOLINE_CFLAGS
++export RETPOLINE_VDSO_CFLAGS
+
+ KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+ KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
+diff --git a/arch/Kconfig b/arch/Kconfig
+index d1f2ed462ac8..f03b72644902 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -354,6 +354,9 @@ config HAVE_ARCH_JUMP_LABEL
+ config HAVE_RCU_TABLE_FREE
+ bool
+
++config HAVE_RCU_TABLE_INVALIDATE
++ bool
++
+ config ARCH_HAVE_NMI_SAFE_CMPXCHG
+ bool
+
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index f6a62ae44a65..c864f6b045ba 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -238,7 +238,7 @@ static void jit_fill_hole(void *area, unsigned int size)
+ #define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
+
+ /* Get the offset of eBPF REGISTERs stored on scratch space. */
+-#define STACK_VAR(off) (STACK_SIZE - off)
++#define STACK_VAR(off) (STACK_SIZE - off - 4)
+
+ #if __LINUX_ARM_ARCH__ < 7
+
+diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
+index e90cc8a08186..a8be6fe3946d 100644
+--- a/arch/arm/probes/kprobes/core.c
++++ b/arch/arm/probes/kprobes/core.c
+@@ -289,8 +289,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
+ break;
+ case KPROBE_REENTER:
+ /* A nested probe was hit in FIQ, it is a BUG */
+- pr_warn("Unrecoverable kprobe detected at %p.\n",
+- p->addr);
++ pr_warn("Unrecoverable kprobe detected.\n");
++ dump_kprobe(p);
+ /* fall through */
+ default:
+ /* impossible cases */
+diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
+index 14db14152909..cc237fa9b90f 100644
+--- a/arch/arm/probes/kprobes/test-core.c
++++ b/arch/arm/probes/kprobes/test-core.c
+@@ -1461,7 +1461,6 @@ fail:
+ print_registers(&result_regs);
+
+ if (mem) {
+- pr_err("current_stack=%p\n", current_stack);
+ pr_err("expected_memory:\n");
+ print_memory(expected_memory, mem_size);
+ pr_err("result_memory:\n");
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index b8e9da15e00c..2c1aa84abeea 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -331,7 +331,7 @@
+ reg = <0x0 0xff120000 0x0 0x100>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
+- clock-names = "sclk_uart", "pclk_uart";
++ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac 4>, <&dmac 5>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
+index 5df5cfe1c143..5ee5bca8c24b 100644
+--- a/arch/arm64/include/asm/cache.h
++++ b/arch/arm64/include/asm/cache.h
+@@ -21,12 +21,16 @@
+ #define CTR_L1IP_SHIFT 14
+ #define CTR_L1IP_MASK 3
+ #define CTR_DMINLINE_SHIFT 16
++#define CTR_IMINLINE_SHIFT 0
+ #define CTR_ERG_SHIFT 20
+ #define CTR_CWG_SHIFT 24
+ #define CTR_CWG_MASK 15
+ #define CTR_IDC_SHIFT 28
+ #define CTR_DIC_SHIFT 29
+
++#define CTR_CACHE_MINLINE_MASK \
++ (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
++
+ #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
+
+ #define ICACHE_POLICY_VPIPT 0
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index 8a699c708fc9..be3bf3d08916 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -49,7 +49,8 @@
+ #define ARM64_HAS_CACHE_DIC 28
+ #define ARM64_HW_DBM 29
+ #define ARM64_SSBD 30
++#define ARM64_MISMATCHED_CACHE_TYPE 31
+
+-#define ARM64_NCAPS 31
++#define ARM64_NCAPS 32
+
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 1d2b6d768efe..5d59ff9a8da9 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -65,12 +65,18 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
+ }
+
+ static bool
+-has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
+- int scope)
++has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
++ int scope)
+ {
++ u64 mask = CTR_CACHE_MINLINE_MASK;
++
++ /* Skip matching the min line sizes for cache type check */
++ if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
++ mask ^= arm64_ftr_reg_ctrel0.strict_mask;
++
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+- return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
+- (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
++ return (read_cpuid_cachetype() & mask) !=
++ (arm64_ftr_reg_ctrel0.sys_val & mask);
+ }
+
+ static void
+@@ -613,7 +619,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ {
+ .desc = "Mismatched cache line size",
+ .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
+- .matches = has_mismatched_cache_line_size,
++ .matches = has_mismatched_cache_type,
++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++ .cpu_enable = cpu_enable_trap_ctr_access,
++ },
++ {
++ .desc = "Mismatched cache type",
++ .capability = ARM64_MISMATCHED_CACHE_TYPE,
++ .matches = has_mismatched_cache_type,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .cpu_enable = cpu_enable_trap_ctr_access,
+ },
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index c6d80743f4ed..e4103b718a7c 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -214,7 +214,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
+ * If we have differing I-cache policies, report it as the weakest - VIPT.
+ */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
+ ARM64_FTR_END,
+ };
+
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index d849d9804011..22a5921562c7 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
+ break;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+- pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
++ pr_warn("Unrecoverable kprobe detected.\n");
+ dump_kprobe(p);
+ BUG();
+ break;
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 9abf8a1e7b25..787e27964ab9 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -287,7 +287,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
+ #ifdef CONFIG_HAVE_ARCH_PFN_VALID
+ int pfn_valid(unsigned long pfn)
+ {
+- return memblock_is_map_memory(pfn << PAGE_SHIFT);
++ phys_addr_t addr = pfn << PAGE_SHIFT;
++
++ if ((addr >> PAGE_SHIFT) != pfn)
++ return 0;
++ return memblock_is_map_memory(addr);
+ }
+ EXPORT_SYMBOL(pfn_valid);
+ #endif
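
The pfn_valid() hunk above is a plain overflow guard: pfn << PAGE_SHIFT can silently drop the high bits of an absurdly large pfn, so the fix shifts back and compares. A minimal user-space sketch of the same round-trip check (assuming a 64-bit phys_addr_t and 4 KiB pages; the names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
typedef uint64_t phys_addr_t;

/* A pfn is usable only if shifting it to an address and back is lossless. */
static bool pfn_shift_is_lossless(uint64_t pfn)
{
	phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;

	return (addr >> PAGE_SHIFT) == pfn;
}

int main(void)
{
	printf("%d\n", pfn_shift_is_lossless(1ULL << 40)); /* 1: fits */
	printf("%d\n", pfn_shift_is_lossless(1ULL << 52)); /* 0: top bits lost */
	return 0;
}
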
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index e2122cca4ae2..1e98d22ec119 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -155,15 +155,11 @@ cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap
+ cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap
+ cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
+ cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
+-cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
+- -Wa,-mips32 -Wa,--trap
+-cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
+- -Wa,-mips32r2 -Wa,--trap
++cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
++cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap
+ cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
+-cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+- -Wa,-mips64 -Wa,--trap
+-cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+- -Wa,-mips64r2 -Wa,--trap
++cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap
++cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap
+ cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
+ cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
+ cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \
+diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
+index af34afbc32d9..b2fa62922d88 100644
+--- a/arch/mips/include/asm/processor.h
++++ b/arch/mips/include/asm/processor.h
+@@ -141,7 +141,7 @@ struct mips_fpu_struct {
+
+ #define NUM_DSP_REGS 6
+
+-typedef __u32 dspreg_t;
++typedef unsigned long dspreg_t;
+
+ struct mips_dsp_state {
+ dspreg_t dspr[NUM_DSP_REGS];
+@@ -386,7 +386,20 @@ unsigned long get_wchan(struct task_struct *p);
+ #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
+ #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
+
++#ifdef CONFIG_CPU_LOONGSON3
++/*
++ * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
++ * tight read loop is executed, because reads take priority over writes & the
++ * hardware (incorrectly) doesn't ensure that writes will eventually occur.
++ *
++ * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
++ * flush from cpu_relax() such that any pending writes will become visible as
++ * expected.
++ */
++#define cpu_relax() smp_mb()
++#else
+ #define cpu_relax() barrier()
++#endif
+
+ /*
+ * Return_address is a replacement for __builtin_return_address(count)
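
The comment in the hunk above describes the failure mode this cpu_relax() change addresses: a CPU that spins reading while its own prior store sits in the Store-Fill-Buffer can stall a peer indefinitely. A hedged C11 sketch of the publish-then-spin shape being protected (compilable as a translation unit; atomic_thread_fence merely stands in for the smp_mb() the patched cpu_relax() now issues, and the hardware behavior itself is outside what the C memory model expresses):

#include <stdatomic.h>

static atomic_int request, answer;

/* Publish a request, then spin until the other side answers. On hardware
 * where reads starve buffered writes, a full barrier in the relax slot is
 * what forces the 'request' store to drain and become visible. */
static void rpc_call(void)
{
	atomic_store_explicit(&request, 1, memory_order_release);
	while (!atomic_load_explicit(&answer, memory_order_acquire))
		atomic_thread_fence(memory_order_seq_cst); /* the cpu_relax() slot */
}
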
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 9f6c3f2aa2e2..8c8d42823bda 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -856,7 +856,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ goto out;
+ }
+ dregs = __get_dsp_regs(child);
+- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
++ tmp = dregs[addr - DSP_BASE];
+ break;
+ }
+ case DSP_CONTROL:
+diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
+index 7edc629304c8..bc348d44d151 100644
+--- a/arch/mips/kernel/ptrace32.c
++++ b/arch/mips/kernel/ptrace32.c
+@@ -142,7 +142,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ goto out;
+ }
+ dregs = __get_dsp_regs(child);
+- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
++ tmp = dregs[addr - DSP_BASE];
+ break;
+ }
+ case DSP_CONTROL:
+diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
+index 1cc306520a55..fac26ce64b2f 100644
+--- a/arch/mips/lib/memset.S
++++ b/arch/mips/lib/memset.S
+@@ -195,6 +195,7 @@
+ #endif
+ #else
+ PTR_SUBU t0, $0, a2
++ move a2, zero /* No remaining longs */
+ PTR_ADDIU t0, 1
+ STORE_BYTE(0)
+ STORE_BYTE(1)
+@@ -231,7 +232,7 @@
+
+ #ifdef CONFIG_CPU_MIPSR6
+ .Lbyte_fixup\@:
+- PTR_SUBU a2, $0, t0
++ PTR_SUBU a2, t0
+ jr ra
+ PTR_ADDIU a2, 1
+ #endif /* CONFIG_CPU_MIPSR6 */
+diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
+index 111ad475aa0c..4c2483f410c2 100644
+--- a/arch/mips/lib/multi3.c
++++ b/arch/mips/lib/multi3.c
+@@ -4,12 +4,12 @@
+ #include "libgcc.h"
+
+ /*
+- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
+- * specific case only we'll implement it here.
++ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
++ * that specific case only we implement that intrinsic here.
+ *
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
+ */
+-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
++#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
+
+ /* multiply 64-bit values, low 64-bits returned */
+ static inline long long notrace dmulu(long long a, long long b)
+diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
+index de11ecc99c7c..9c9970a5dfb1 100644
+--- a/arch/s390/include/asm/qdio.h
++++ b/arch/s390/include/asm/qdio.h
+@@ -262,7 +262,6 @@ struct qdio_outbuf_state {
+ void *user;
+ };
+
+-#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
+ #define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
+
+ #define CHSC_AC1_INITIATE_INPUTQ 0x80
+diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
+index 2311f15be9cf..40c4d59c926e 100644
+--- a/arch/s390/lib/mem.S
++++ b/arch/s390/lib/mem.S
+@@ -17,7 +17,7 @@
+ ENTRY(memmove)
+ ltgr %r4,%r4
+ lgr %r1,%r2
+- bzr %r14
++ jz .Lmemmove_exit
+ aghi %r4,-1
+ clgr %r2,%r3
+ jnh .Lmemmove_forward
+@@ -36,6 +36,7 @@ ENTRY(memmove)
+ .Lmemmove_forward_remainder:
+ larl %r5,.Lmemmove_mvc
+ ex %r4,0(%r5)
++.Lmemmove_exit:
+ BR_EX %r14
+ .Lmemmove_reverse:
+ ic %r0,0(%r4,%r3)
+@@ -65,7 +66,7 @@ EXPORT_SYMBOL(memmove)
+ */
+ ENTRY(memset)
+ ltgr %r4,%r4
+- bzr %r14
++ jz .Lmemset_exit
+ ltgr %r3,%r3
+ jnz .Lmemset_fill
+ aghi %r4,-1
+@@ -80,6 +81,7 @@ ENTRY(memset)
+ .Lmemset_clear_remainder:
+ larl %r3,.Lmemset_xc
+ ex %r4,0(%r3)
++.Lmemset_exit:
+ BR_EX %r14
+ .Lmemset_fill:
+ cghi %r4,1
+@@ -115,7 +117,7 @@ EXPORT_SYMBOL(memset)
+ */
+ ENTRY(memcpy)
+ ltgr %r4,%r4
+- bzr %r14
++ jz .Lmemcpy_exit
+ aghi %r4,-1
+ srlg %r5,%r4,8
+ ltgr %r5,%r5
+@@ -124,6 +126,7 @@ ENTRY(memcpy)
+ .Lmemcpy_remainder:
+ larl %r5,.Lmemcpy_mvc
+ ex %r4,0(%r5)
++.Lmemcpy_exit:
+ BR_EX %r14
+ .Lmemcpy_loop:
+ mvc 0(256,%r1),0(%r3)
+@@ -145,9 +148,9 @@ EXPORT_SYMBOL(memcpy)
+ .macro __MEMSET bits,bytes,insn
+ ENTRY(__memset\bits)
+ ltgr %r4,%r4
+- bzr %r14
++ jz .L__memset_exit\bits
+ cghi %r4,\bytes
+- je .L__memset_exit\bits
++ je .L__memset_store\bits
+ aghi %r4,-(\bytes+1)
+ srlg %r5,%r4,8
+ ltgr %r5,%r5
+@@ -163,8 +166,9 @@ ENTRY(__memset\bits)
+ larl %r5,.L__memset_mvc\bits
+ ex %r4,0(%r5)
+ BR_EX %r14
+-.L__memset_exit\bits:
++.L__memset_store\bits:
+ \insn %r3,0(%r2)
++.L__memset_exit\bits:
+ BR_EX %r14
+ .L__memset_mvc\bits:
+ mvc \bytes(1,%r1),0(%r1)
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index e074480d3598..4cc3f06b0ab3 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -502,6 +502,8 @@ retry:
+ /* No reason to continue if interrupted by SIGKILL. */
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ fault = VM_FAULT_SIGNAL;
++ if (flags & FAULT_FLAG_RETRY_NOWAIT)
++ goto out_up;
+ goto out;
+ }
+ if (unlikely(fault & VM_FAULT_ERROR))
+diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
+index 382153ff17e3..dc3cede7f2ec 100644
+--- a/arch/s390/mm/page-states.c
++++ b/arch/s390/mm/page-states.c
+@@ -271,7 +271,7 @@ void arch_set_page_states(int make_stable)
+ list_for_each(l, &zone->free_area[order].free_list[t]) {
+ page = list_entry(l, struct page, lru);
+ if (make_stable)
+- set_page_stable_dat(page, 0);
++ set_page_stable_dat(page, order);
+ else
+ set_page_unused(page, order);
+ }
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 5f0234ec8038..d7052cbe984f 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -485,8 +485,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
+ /* br %r1 */
+ _EMIT2(0x07f1);
+ } else {
+- /* larl %r1,.+14 */
+- EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+ /* ex 0,S390_lowcore.br_r1_tampoline */
+ EMIT4_DISP(0x44000000, REG_0, REG_0,
+ offsetof(struct lowcore, br_r1_trampoline));
+diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
+index 06a80434cfe6..5bd374491f94 100644
+--- a/arch/s390/numa/numa.c
++++ b/arch/s390/numa/numa.c
+@@ -134,26 +134,14 @@ void __init numa_setup(void)
+ {
+ pr_info("NUMA mode: %s\n", mode->name);
+ nodes_clear(node_possible_map);
++ /* Initially attach all possible CPUs to node 0. */
++ cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
+ if (mode->setup)
+ mode->setup();
+ numa_setup_memory();
+ memblock_dump_all();
+ }
+
+-/*
+- * numa_init_early() - Initialization initcall
+- *
+- * This runs when only one CPU is online and before the first
+- * topology update is called for by the scheduler.
+- */
+-static int __init numa_init_early(void)
+-{
+- /* Attach all possible CPUs to node 0 for now. */
+- cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
+- return 0;
+-}
+-early_initcall(numa_init_early);
+-
+ /*
+ * numa_init_late() - Initialization initcall
+ *
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 4902fed221c0..8a505cfdd9b9 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -421,6 +421,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ hwirq = 0;
+ for_each_pci_msi_entry(msi, pdev) {
+ rc = -EIO;
++ if (hwirq >= msi_vecs)
++ break;
+ irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
+ if (irq < 0)
+ return -ENOMEM;
+diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
+index 1ace023cbdce..abfa8c7a6d9a 100644
+--- a/arch/s390/purgatory/Makefile
++++ b/arch/s390/purgatory/Makefile
+@@ -7,13 +7,13 @@ purgatory-y := head.o purgatory.o string.o sha256.o mem.o
+ targets += $(purgatory-y) purgatory.ro kexec-purgatory.c
+ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
+
+-$(obj)/sha256.o: $(srctree)/lib/sha256.c
++$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
+ $(call if_changed_rule,cc_o_c)
+
+-$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S
++$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
+ $(call if_changed_rule,as_o_S)
+
+-$(obj)/string.o: $(srctree)/arch/s390/lib/string.c
++$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE
+ $(call if_changed_rule,cc_o_c)
+
+ LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib
+@@ -23,6 +23,7 @@ KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
+ KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
+ KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float
+ KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
++KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
+
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ $(call if_changed,ld)
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 6b8065d718bd..1aa4dd3b5687 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -179,6 +179,7 @@ config X86
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RCU_TABLE_FREE
++ select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
+ select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index a08e82856563..d944b52649a4 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -180,10 +180,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ endif
+ endif
+
+-ifndef CC_HAVE_ASM_GOTO
+- $(error Compiler lacks asm-goto support.)
+-endif
+-
+ #
+ # Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
+ # GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way
+@@ -317,6 +313,13 @@ PHONY += vdso_install
+ vdso_install:
+ $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
+
++archprepare: checkbin
++checkbin:
++ifndef CC_HAVE_ASM_GOTO
++ @echo Compiler lacks asm-goto support.
++ @exit 1
++endif
++
+ archclean:
+ $(Q)rm -rf $(objtree)/arch/i386
+ $(Q)rm -rf $(objtree)/arch/x86_64
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index 261802b1cc50..9589878faf46 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -72,9 +72,9 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
+ CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
+ $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+ -fno-omit-frame-pointer -foptimize-sibling-calls \
+- -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
++ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+
+-$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
++$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+
+ #
+ # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
+@@ -138,11 +138,13 @@ KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
++KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
+ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
+ KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
+ KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+ KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
++KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+ $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+
+ $(obj)/vdso32.so.dbg: FORCE \
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 5f4829f10129..dfb2f7c0d019 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
+
+ perf_callchain_store(entry, regs->ip);
+
+- if (!current->mm)
++ if (!nmi_uaccess_okay())
+ return;
+
+ if (perf_callchain_user32(regs, entry))
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index c14f2a74b2be..15450a675031 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void)
+ return flags;
+ }
+
+-static inline void native_restore_fl(unsigned long flags)
++extern inline void native_restore_fl(unsigned long flags);
++extern inline void native_restore_fl(unsigned long flags)
+ {
+ asm volatile("push %0 ; popf"
+ : /* no output */
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 682286aca881..d53c54b842da 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
+ /* Index into per_cpu list: */
+ u16 cpu_index;
+ u32 microcode;
++ /* Address space bits used by the cache internally */
++ u8 x86_cache_bits;
+ unsigned initialized : 1;
+ } __randomize_layout;
+
+@@ -181,9 +183,9 @@ extern const struct seq_operations cpuinfo_op;
+
+ extern void cpu_detect(struct cpuinfo_x86 *c);
+
+-static inline unsigned long l1tf_pfn_limit(void)
++static inline unsigned long long l1tf_pfn_limit(void)
+ {
+- return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
++ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
+ }
+
+ extern void early_cpu_init(void);
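
A quick sanity check of the new arithmetic: l1tf_pfn_limit() now returns BIT_ULL(x86_cache_bits - 1 - PAGE_SHIFT), the first PFN the L1TF PTE inversion can no longer protect. With the 44 cache bits used for the affected CPUs and 4 KiB pages that is 2^(44-1-12) = 2^31 PFNs, i.e. exactly MAX_PA/2 = 2^43 bytes. A standalone sketch of the computation (constants assumed for illustration, not taken from the kernel):

#include <stdio.h>

#define PAGE_SHIFT 12
#define BIT_ULL(n) (1ULL << (n))

/* First unprotectable PFN for a given number of cache address bits. */
static unsigned long long l1tf_pfn_limit(unsigned int cache_bits)
{
	return BIT_ULL(cache_bits - 1 - PAGE_SHIFT);
}

int main(void)
{
	/* 44 cache bits -> 2^31 PFNs -> 2^43 bytes (8 TiB), i.e. MAX_PA/2. */
	printf("%llu\n", l1tf_pfn_limit(44) << PAGE_SHIFT);
	return 0;
}

Returning the limit itself, rather than limit - 1 as the old BIT(...) - 1 form did, is also why the arch/x86/mm/mmap.c hunk further down can switch its comparison from pfn > limit to pfn >= limit.
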
+diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
+index b6dc698f992a..f335aad404a4 100644
+--- a/arch/x86/include/asm/stacktrace.h
++++ b/arch/x86/include/asm/stacktrace.h
+@@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void)
+ return (unsigned long)frame;
+ }
+
+-void show_opcodes(u8 *rip, const char *loglvl);
++void show_opcodes(struct pt_regs *regs, const char *loglvl);
+ void show_ip(struct pt_regs *regs, const char *loglvl);
+ #endif /* _ASM_X86_STACKTRACE_H */
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 6690cd3fc8b1..0af97e51e609 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -175,8 +175,16 @@ struct tlb_state {
+ * are on. This means that it may not match current->active_mm,
+ * which will contain the previous user mm when we're in lazy TLB
+ * mode even if we've already switched back to swapper_pg_dir.
++ *
++ * During switch_mm_irqs_off(), loaded_mm will be set to
++ * LOADED_MM_SWITCHING during the brief interrupts-off window
++ * when CR3 and loaded_mm would otherwise be inconsistent. This
++ * is for nmi_uaccess_okay()'s benefit.
+ */
+ struct mm_struct *loaded_mm;
++
++#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
++
+ u16 loaded_mm_asid;
+ u16 next_asid;
+ /* last user mm's ctx id */
+@@ -246,6 +254,38 @@ struct tlb_state {
+ };
+ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+
++/*
++ * Blindly accessing user memory from NMI context can be dangerous
++ * if we're in the middle of switching the current user task or
++ * switching the loaded mm. It can also be dangerous if we
++ * interrupted some kernel code that was temporarily using a
++ * different mm.
++ */
++static inline bool nmi_uaccess_okay(void)
++{
++ struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
++ struct mm_struct *current_mm = current->mm;
++
++ VM_WARN_ON_ONCE(!loaded_mm);
++
++ /*
++ * The condition we want to check is
++ * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
++ * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
++ * is supposed to be reasonably fast.
++ *
++ * Instead, we check the almost equivalent but somewhat conservative
++ * condition below, and we rely on the fact that switch_mm_irqs_off()
++ * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
++ */
++ if (loaded_mm != current_mm)
++ return false;
++
++ VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
++
++ return true;
++}
++
+ /* Initialize cr4 shadow for this CPU. */
+ static inline void cr4_init_shadow(void)
+ {
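
The trick in nmi_uaccess_okay() above is that the sentinel published by switch_mm_irqs_off() makes a single pointer comparison conservative: while CR3 is in flux, loaded_mm equals LOADED_MM_SWITCHING, which can never compare equal to a real mm pointer, so the check fails closed. A compilable stand-alone sketch of that shape (a plain global stands in for the per-CPU variable; all names here are illustrative):

#include <stdbool.h>
#include <stddef.h>

struct mm;				/* opaque in this sketch */
#define LOADED_MM_SWITCHING ((struct mm *)1)

static struct mm *loaded_mm;		/* what the TLB code last installed */

/* 'current_mm' is what the scheduler says this task runs in. While a
 * switch is in progress, loaded_mm holds the sentinel, so the equality
 * test rejects access during the window as well as after a mismatch. */
static bool nmi_uaccess_okay_sketch(const struct mm *current_mm)
{
	return loaded_mm == current_mm;
}

The later hunks in this patch (perf's user callchain walker and copy_from_user_nmi() in arch/x86/lib/usercopy.c) simply bail out and report nothing copied when this predicate is false.
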
+diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
+index fb856c9f0449..53748541c487 100644
+--- a/arch/x86/include/asm/vgtod.h
++++ b/arch/x86/include/asm/vgtod.h
+@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void)
+ *
+ * If RDPID is available, use it.
+ */
+- alternative_io ("lsl %[p],%[seg]",
++ alternative_io ("lsl %[seg],%[p]",
+ ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+ X86_FEATURE_RDPID,
+ [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 664f161f96ff..4891a621a752 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -652,6 +652,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
+ enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+
++/*
++ * These CPUs all support 44bits physical address space internally in the
++ * cache but CPUID can report a smaller number of physical address bits.
++ *
++ * The L1TF mitigation uses the top most address bit for the inversion of
++ * non present PTEs. When the installed memory reaches into the top most
++ * address bit due to memory holes, which has been observed on machines
++ * which report 36bits physical address bits and have 32G RAM installed,
++ * then the mitigation range check in l1tf_select_mitigation() triggers.
++ * This is a false positive because the mitigation is still possible due to
++ * the fact that the cache uses 44bit internally. Use the cache bits
++ * instead of the reported physical bits and adjust them on the affected
++ * machines to 44bit if the reported bits are less than 44.
++ */
++static void override_cache_bits(struct cpuinfo_x86 *c)
++{
++ if (c->x86 != 6)
++ return;
++
++ switch (c->x86_model) {
++ case INTEL_FAM6_NEHALEM:
++ case INTEL_FAM6_WESTMERE:
++ case INTEL_FAM6_SANDYBRIDGE:
++ case INTEL_FAM6_IVYBRIDGE:
++ case INTEL_FAM6_HASWELL_CORE:
++ case INTEL_FAM6_HASWELL_ULT:
++ case INTEL_FAM6_HASWELL_GT3E:
++ case INTEL_FAM6_BROADWELL_CORE:
++ case INTEL_FAM6_BROADWELL_GT3E:
++ case INTEL_FAM6_SKYLAKE_MOBILE:
++ case INTEL_FAM6_SKYLAKE_DESKTOP:
++ case INTEL_FAM6_KABYLAKE_MOBILE:
++ case INTEL_FAM6_KABYLAKE_DESKTOP:
++ if (c->x86_cache_bits < 44)
++ c->x86_cache_bits = 44;
++ break;
++ }
++}
++
+ static void __init l1tf_select_mitigation(void)
+ {
+ u64 half_pa;
+@@ -659,6 +698,8 @@ static void __init l1tf_select_mitigation(void)
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return;
+
++ override_cache_bits(&boot_cpu_data);
++
+ switch (l1tf_mitigation) {
+ case L1TF_MITIGATION_OFF:
+ case L1TF_MITIGATION_FLUSH_NOWARN:
+@@ -678,14 +719,13 @@ static void __init l1tf_select_mitigation(void)
+ return;
+ #endif
+
+- /*
+- * This is extremely unlikely to happen because almost all
+- * systems have far more MAX_PA/2 than RAM can be fit into
+- * DIMM slots.
+- */
+ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+ if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
++ pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
++ half_pa);
++ pr_info("However, doing so will make a part of your RAM unusable.\n");
++ pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
+ return;
+ }
+
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index b41b72bd8bb8..1ee8ea36af30 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
+ else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
+ c->x86_phys_bits = 36;
+ #endif
++ c->x86_cache_bits = c->x86_phys_bits;
+ }
+
+ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 6602941cfebf..3f0abb62161b 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+ return false;
+
++ if (c->x86 != 6)
++ return false;
++
+ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+ if (c->x86_model == spectre_bad_microcodes[i].model &&
+ c->x86_stepping == spectre_bad_microcodes[i].stepping)
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index 666a284116ac..17b02adc79aa 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -17,6 +17,7 @@
+ #include <linux/bug.h>
+ #include <linux/nmi.h>
+ #include <linux/sysfs.h>
++#include <linux/kasan.h>
+
+ #include <asm/cpu_entry_area.h>
+ #include <asm/stacktrace.h>
+@@ -91,23 +92,32 @@ static void printk_stack_address(unsigned long address, int reliable,
+ * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random
+ * guesstimate in attempt to achieve all of the above.
+ */
+-void show_opcodes(u8 *rip, const char *loglvl)
++void show_opcodes(struct pt_regs *regs, const char *loglvl)
+ {
+ unsigned int code_prologue = OPCODE_BUFSIZE * 2 / 3;
+ u8 opcodes[OPCODE_BUFSIZE];
+- u8 *ip;
++ unsigned long ip;
+ int i;
++ bool bad_ip;
+
+ printk("%sCode: ", loglvl);
+
+- ip = (u8 *)rip - code_prologue;
+- if (probe_kernel_read(opcodes, ip, OPCODE_BUFSIZE)) {
++ ip = regs->ip - code_prologue;
++
++ /*
++ * Make sure userspace isn't trying to trick us into dumping kernel
++ * memory by pointing the userspace instruction pointer at it.
++ */
++ bad_ip = user_mode(regs) &&
++ __chk_range_not_ok(ip, OPCODE_BUFSIZE, TASK_SIZE_MAX);
++
++ if (bad_ip || probe_kernel_read(opcodes, (u8 *)ip, OPCODE_BUFSIZE)) {
+ pr_cont("Bad RIP value.\n");
+ return;
+ }
+
+ for (i = 0; i < OPCODE_BUFSIZE; i++, ip++) {
+- if (ip == rip)
++ if (ip == regs->ip)
+ pr_cont("<%02x> ", opcodes[i]);
+ else
+ pr_cont("%02x ", opcodes[i]);
+@@ -122,7 +132,7 @@ void show_ip(struct pt_regs *regs, const char *loglvl)
+ #else
+ printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
+ #endif
+- show_opcodes((u8 *)regs->ip, loglvl);
++ show_opcodes(regs, loglvl);
+ }
+
+ void show_iret_regs(struct pt_regs *regs)
+@@ -356,7 +366,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ * We're not going to return, but we might be on an IST stack or
+ * have very little stack space left. Rewind the stack and kill
+ * the task.
++ * Before we rewind the stack, we have to tell KASAN that we're going to
++ * reuse the task stack and that existing poisons are invalid.
+ */
++ kasan_unpoison_task_stack(current);
+ rewind_stack_do_exit(signr);
+ }
+ NOKPROBE_SYMBOL(oops_end);
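
The show_opcodes() rework above hinges on one range check: if the register frame came from user mode, the instruction pointer must lie entirely inside the user address range before the kernel dereferences it. A small sketch of that predicate (the TASK_SIZE_MAX value is assumed for illustration, and this only approximates the kernel's __chk_range_not_ok helper):

#include <stdbool.h>
#include <stdint.h>

#define TASK_SIZE_MAX (1ULL << 47)	/* illustrative x86-64 user VA limit */
#define OPCODE_BUFSIZE 64

/* True if [ip, ip + len) wraps around or escapes the user range. */
static bool range_not_ok(uint64_t ip, uint64_t len)
{
	return ip + len < ip || ip + len > TASK_SIZE_MAX;
}

static bool bad_user_ip(bool from_user_mode, uint64_t ip)
{
	return from_user_mode && range_not_ok(ip, OPCODE_BUFSIZE);
}

Without such a guard, a user process could point RIP at a kernel address and have the signal/oops path print kernel bytes into the log.
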
962 +diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
963 +index da5d8ac60062..50d5848bf22e 100644
964 +--- a/arch/x86/kernel/early-quirks.c
965 ++++ b/arch/x86/kernel/early-quirks.c
966 +@@ -338,6 +338,18 @@ static resource_size_t __init gen3_stolen_base(int num, int slot, int func,
967 + return bsm & INTEL_BSM_MASK;
968 + }
969 +
970 ++static resource_size_t __init gen11_stolen_base(int num, int slot, int func,
971 ++ resource_size_t stolen_size)
972 ++{
973 ++ u64 bsm;
974 ++
975 ++ bsm = read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW0);
976 ++ bsm &= INTEL_BSM_MASK;
977 ++ bsm |= (u64)read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW1) << 32;
978 ++
979 ++ return bsm;
980 ++}
981 ++
982 + static resource_size_t __init i830_stolen_size(int num, int slot, int func)
983 + {
984 + u16 gmch_ctrl;
985 +@@ -498,6 +510,11 @@ static const struct intel_early_ops chv_early_ops __initconst = {
986 + .stolen_size = chv_stolen_size,
987 + };
988 +
989 ++static const struct intel_early_ops gen11_early_ops __initconst = {
990 ++ .stolen_base = gen11_stolen_base,
991 ++ .stolen_size = gen9_stolen_size,
992 ++};
993 ++
994 + static const struct pci_device_id intel_early_ids[] __initconst = {
995 + INTEL_I830_IDS(&i830_early_ops),
996 + INTEL_I845G_IDS(&i845_early_ops),
997 +@@ -529,6 +546,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
998 + INTEL_CFL_IDS(&gen9_early_ops),
999 + INTEL_GLK_IDS(&gen9_early_ops),
1000 + INTEL_CNL_IDS(&gen9_early_ops),
1001 ++ INTEL_ICL_11_IDS(&gen11_early_ops),
1002 + };
1003 +
1004 + struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
1005 +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
1006 +index 12bb445fb98d..4344a032ebe6 100644
1007 +--- a/arch/x86/kernel/process_64.c
1008 ++++ b/arch/x86/kernel/process_64.c
1009 +@@ -384,6 +384,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
1010 + start_thread_common(regs, new_ip, new_sp,
1011 + __USER_CS, __USER_DS, 0);
1012 + }
1013 ++EXPORT_SYMBOL_GPL(start_thread);
1014 +
1015 + #ifdef CONFIG_COMPAT
1016 + void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
1017 +diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
1018 +index af8caf965baa..01d209ab5481 100644
1019 +--- a/arch/x86/kvm/hyperv.c
1020 ++++ b/arch/x86/kvm/hyperv.c
1021 +@@ -235,7 +235,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
1022 + struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
1023 + int ret;
1024 +
1025 +- if (!synic->active)
1026 ++ if (!synic->active && !host)
1027 + return 1;
1028 +
1029 + trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
1030 +@@ -295,11 +295,12 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
1031 + return ret;
1032 + }
1033 +
1034 +-static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
1035 ++static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
1036 ++ bool host)
1037 + {
1038 + int ret;
1039 +
1040 +- if (!synic->active)
1041 ++ if (!synic->active && !host)
1042 + return 1;
1043 +
1044 + ret = 0;
1045 +@@ -1014,6 +1015,11 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
1046 + case HV_X64_MSR_TSC_EMULATION_STATUS:
1047 + hv->hv_tsc_emulation_status = data;
1048 + break;
1049 ++ case HV_X64_MSR_TIME_REF_COUNT:
1050 ++ /* read-only, but still ignore it if host-initiated */
1051 ++ if (!host)
1052 ++ return 1;
1053 ++ break;
1054 + default:
1055 + vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
1056 + msr, data);
1057 +@@ -1101,6 +1107,12 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1058 + return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
1059 + data, host);
1060 + }
1061 ++ case HV_X64_MSR_TSC_FREQUENCY:
1062 ++ case HV_X64_MSR_APIC_FREQUENCY:
1063 ++ /* read-only, but still ignore it if host-initiated */
1064 ++ if (!host)
1065 ++ return 1;
1066 ++ break;
1067 + default:
1068 + vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
1069 + msr, data);
1070 +@@ -1156,7 +1168,8 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1071 + return 0;
1072 + }
1073 +
1074 +-static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1075 ++static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1076 ++ bool host)
1077 + {
1078 + u64 data = 0;
1079 + struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
1080 +@@ -1183,7 +1196,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1081 + case HV_X64_MSR_SIMP:
1082 + case HV_X64_MSR_EOM:
1083 + case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1084 +- return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
1085 ++ return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
1086 + case HV_X64_MSR_STIMER0_CONFIG:
1087 + case HV_X64_MSR_STIMER1_CONFIG:
1088 + case HV_X64_MSR_STIMER2_CONFIG:
1089 +@@ -1229,7 +1242,7 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1090 + return kvm_hv_set_msr(vcpu, msr, data, host);
1091 + }
1092 +
1093 +-int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1094 ++int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1095 + {
1096 + if (kvm_hv_msr_partition_wide(msr)) {
1097 + int r;
1098 +@@ -1239,7 +1252,7 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1099 + mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
1100 + return r;
1101 + } else
1102 +- return kvm_hv_get_msr(vcpu, msr, pdata);
1103 ++ return kvm_hv_get_msr(vcpu, msr, pdata, host);
1104 + }
1105 +
1106 + static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)
1107 +diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
1108 +index 837465d69c6d..d6aa969e20f1 100644
1109 +--- a/arch/x86/kvm/hyperv.h
1110 ++++ b/arch/x86/kvm/hyperv.h
1111 +@@ -48,7 +48,7 @@ static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
1112 + }
1113 +
1114 + int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
1115 +-int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
1116 ++int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);
1117 +
1118 + bool kvm_hv_hypercall_enabled(struct kvm *kvm);
1119 + int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
1120 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1121 +index f059a73f0fd0..9799f86388e7 100644
1122 +--- a/arch/x86/kvm/svm.c
1123 ++++ b/arch/x86/kvm/svm.c
1124 +@@ -5580,8 +5580,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1125 +
1126 + clgi();
1127 +
1128 +- local_irq_enable();
1129 +-
1130 + /*
1131 + * If this vCPU has touched SPEC_CTRL, restore the guest's value if
1132 + * it's non-zero. Since vmentry is serialising on affected CPUs, there
1133 +@@ -5590,6 +5588,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1134 + */
1135 + x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
1136 +
1137 ++ local_irq_enable();
1138 ++
1139 + asm volatile (
1140 + "push %%" _ASM_BP "; \n\t"
1141 + "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
1142 +@@ -5712,12 +5712,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1143 + if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
1144 + svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
1145 +
1146 +- x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
1147 +-
1148 + reload_tss(vcpu);
1149 +
1150 + local_irq_disable();
1151 +
1152 ++ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
1153 ++
1154 + vcpu->arch.cr2 = svm->vmcb->save.cr2;
1155 + vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
1156 + vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
1157 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1158 +index a5caa5e5480c..24c84aa87049 100644
1159 +--- a/arch/x86/kvm/x86.c
1160 ++++ b/arch/x86/kvm/x86.c
1161 +@@ -2185,10 +2185,11 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1162 + vcpu->arch.mcg_status = data;
1163 + break;
1164 + case MSR_IA32_MCG_CTL:
1165 +- if (!(mcg_cap & MCG_CTL_P))
1166 ++ if (!(mcg_cap & MCG_CTL_P) &&
1167 ++ (data || !msr_info->host_initiated))
1168 + return 1;
1169 + if (data != 0 && data != ~(u64)0)
1170 +- return -1;
1171 ++ return 1;
1172 + vcpu->arch.mcg_ctl = data;
1173 + break;
1174 + default:
1175 +@@ -2576,7 +2577,7 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1176 + }
1177 + EXPORT_SYMBOL_GPL(kvm_get_msr);
1178 +
1179 +-static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1180 ++static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1181 + {
1182 + u64 data;
1183 + u64 mcg_cap = vcpu->arch.mcg_cap;
1184 +@@ -2591,7 +2592,7 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1185 + data = vcpu->arch.mcg_cap;
1186 + break;
1187 + case MSR_IA32_MCG_CTL:
1188 +- if (!(mcg_cap & MCG_CTL_P))
1189 ++ if (!(mcg_cap & MCG_CTL_P) && !host)
1190 + return 1;
1191 + data = vcpu->arch.mcg_ctl;
1192 + break;
1193 +@@ -2724,7 +2725,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1194 + case MSR_IA32_MCG_CTL:
1195 + case MSR_IA32_MCG_STATUS:
1196 + case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
1197 +- return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
1198 ++ return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
1199 ++ msr_info->host_initiated);
1200 + case MSR_K7_CLK_CTL:
1201 + /*
1202 + * Provide expected ramp-up count for K7. All other
1203 +@@ -2745,7 +2747,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1204 + case HV_X64_MSR_TSC_EMULATION_CONTROL:
1205 + case HV_X64_MSR_TSC_EMULATION_STATUS:
1206 + return kvm_hv_get_msr_common(vcpu,
1207 +- msr_info->index, &msr_info->data);
1208 ++ msr_info->index, &msr_info->data,
1209 ++ msr_info->host_initiated);
1210 + break;
1211 + case MSR_IA32_BBL_CR_CTL3:
1212 + /* This legacy MSR exists but isn't fully documented in current
1213 +diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
1214 +index c8c6ad0d58b8..3f435d7fca5e 100644
1215 +--- a/arch/x86/lib/usercopy.c
1216 ++++ b/arch/x86/lib/usercopy.c
1217 +@@ -7,6 +7,8 @@
1218 + #include <linux/uaccess.h>
1219 + #include <linux/export.h>
1220 +
1221 ++#include <asm/tlbflush.h>
1222 ++
1223 + /*
1224 + * We rely on the nested NMI work to allow atomic faults from the NMI path; the
1225 + * nested NMI paths are careful to preserve CR2.
1226 +@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
1227 + if (__range_not_ok(from, n, TASK_SIZE))
1228 + return n;
1229 +
1230 ++ if (!nmi_uaccess_okay())
1231 ++ return n;
1232 ++
1233 + /*
1234 + * Even though this function is typically called from NMI/IRQ context
1235 + * disable pagefaults so that its behaviour is consistent even when
1236 +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
1237 +index 2aafa6ab6103..d1f1612672c7 100644
1238 +--- a/arch/x86/mm/fault.c
1239 ++++ b/arch/x86/mm/fault.c
1240 +@@ -838,7 +838,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
1241 +
1242 + printk(KERN_CONT "\n");
1243 +
1244 +- show_opcodes((u8 *)regs->ip, loglvl);
1245 ++ show_opcodes(regs, loglvl);
1246 + }
1247 +
1248 + static void
1249 +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
1250 +index acfab322fbe0..63a6f9fcaf20 100644
1251 +--- a/arch/x86/mm/init.c
1252 ++++ b/arch/x86/mm/init.c
1253 +@@ -923,7 +923,7 @@ unsigned long max_swapfile_size(void)
1254 +
1255 + if (boot_cpu_has_bug(X86_BUG_L1TF)) {
1256 + /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
1257 +- unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
1258 ++ unsigned long long l1tf_limit = l1tf_pfn_limit();
1259 + /*
1260 + * We encode swap offsets also with 3 bits below those for pfn
1261 + * which makes the usable limit higher.
1262 +@@ -931,7 +931,7 @@ unsigned long max_swapfile_size(void)
1263 + #if CONFIG_PGTABLE_LEVELS > 2
1264 + l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
1265 + #endif
1266 +- pages = min_t(unsigned long, l1tf_limit, pages);
1267 ++ pages = min_t(unsigned long long, l1tf_limit, pages);
1268 + }
1269 + return pages;
1270 + }
1271 +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
1272 +index f40ab8185d94..1e95d57760cf 100644
1273 +--- a/arch/x86/mm/mmap.c
1274 ++++ b/arch/x86/mm/mmap.c
1275 +@@ -257,7 +257,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
1276 + /* If it's real memory always allow */
1277 + if (pfn_valid(pfn))
1278 + return true;
1279 +- if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
1280 ++ if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
1281 + return false;
1282 + return true;
1283 + }
1284 +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
1285 +index 6eb1f34c3c85..cd2617285e2e 100644
1286 +--- a/arch/x86/mm/tlb.c
1287 ++++ b/arch/x86/mm/tlb.c
1288 +@@ -298,6 +298,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
1289 +
1290 + choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
1291 +
1292 ++ /* Let nmi_uaccess_okay() know that we're changing CR3. */
1293 ++ this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
1294 ++ barrier();
1295 ++
1296 + if (need_flush) {
1297 + this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
1298 + this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
1299 +@@ -328,6 +332,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
1300 + if (next != &init_mm)
1301 + this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
1302 +
1303 ++ /* Make sure we write CR3 before loaded_mm. */
1304 ++ barrier();
1305 ++
1306 + this_cpu_write(cpu_tlbstate.loaded_mm, next);
1307 + this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
1308 + }
1309 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1310 +index cc71c63df381..984b37647b2f 100644
1311 +--- a/drivers/ata/libata-core.c
1312 ++++ b/drivers/ata/libata-core.c
1313 +@@ -6424,6 +6424,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
1314 + host->n_tags = ATA_MAX_QUEUE;
1315 + host->dev = dev;
1316 + host->ops = ops;
1317 ++ kref_init(&host->kref);
1318 + }
1319 +
1320 + void __ata_port_probe(struct ata_port *ap)
1321 +@@ -7391,3 +7392,5 @@ EXPORT_SYMBOL_GPL(ata_cable_80wire);
1322 + EXPORT_SYMBOL_GPL(ata_cable_unknown);
1323 + EXPORT_SYMBOL_GPL(ata_cable_ignore);
1324 + EXPORT_SYMBOL_GPL(ata_cable_sata);
1325 ++EXPORT_SYMBOL_GPL(ata_host_get);
1326 ++EXPORT_SYMBOL_GPL(ata_host_put);
1327 +\ No newline at end of file
1328 +diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
1329 +index 9e21c49cf6be..f953cb4bb1ba 100644
1330 +--- a/drivers/ata/libata.h
1331 ++++ b/drivers/ata/libata.h
1332 +@@ -100,8 +100,6 @@ extern int ata_port_probe(struct ata_port *ap);
1333 + extern void __ata_port_probe(struct ata_port *ap);
1334 + extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1335 + u8 page, void *buf, unsigned int sectors);
1336 +-extern void ata_host_get(struct ata_host *host);
1337 +-extern void ata_host_put(struct ata_host *host);
1338 +
1339 + #define to_ata_port(d) container_of(d, struct ata_port, tdev)
1340 +
1341 +diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
1342 +index 8e2e4757adcb..5a42ae4078c2 100644
1343 +--- a/drivers/base/power/clock_ops.c
1344 ++++ b/drivers/base/power/clock_ops.c
1345 +@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
1346 + int of_pm_clk_add_clks(struct device *dev)
1347 + {
1348 + struct clk **clks;
1349 +- unsigned int i, count;
1350 ++ int i, count;
1351 + int ret;
1352 +
1353 + if (!dev || !dev->of_node)
1354 +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
1355 +index a78b8e7085e9..66acbd063562 100644
1356 +--- a/drivers/cdrom/cdrom.c
1357 ++++ b/drivers/cdrom/cdrom.c
1358 +@@ -2542,7 +2542,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
1359 + if (!CDROM_CAN(CDC_SELECT_DISC) ||
1360 + (arg == CDSL_CURRENT || arg == CDSL_NONE))
1361 + return cdi->ops->drive_status(cdi, CDSL_CURRENT);
1362 +- if (((int)arg >= cdi->capacity))
1363 ++ if (arg >= cdi->capacity)
1364 + return -EINVAL;
1365 + return cdrom_slot_status(cdi, arg);
1366 + }
1367 +diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
1368 +index e32f6e85dc6d..3a3a7a548a85 100644
1369 +--- a/drivers/char/tpm/tpm-interface.c
1370 ++++ b/drivers/char/tpm/tpm-interface.c
1371 +@@ -29,7 +29,6 @@
1372 + #include <linux/mutex.h>
1373 + #include <linux/spinlock.h>
1374 + #include <linux/freezer.h>
1375 +-#include <linux/pm_runtime.h>
1376 + #include <linux/tpm_eventlog.h>
1377 +
1378 + #include "tpm.h"
1379 +@@ -369,10 +368,13 @@ err_len:
1380 + return -EINVAL;
1381 + }
1382 +
1383 +-static int tpm_request_locality(struct tpm_chip *chip)
1384 ++static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags)
1385 + {
1386 + int rc;
1387 +
1388 ++ if (flags & TPM_TRANSMIT_RAW)
1389 ++ return 0;
1390 ++
1391 + if (!chip->ops->request_locality)
1392 + return 0;
1393 +
1394 +@@ -385,10 +387,13 @@ static int tpm_request_locality(struct tpm_chip *chip)
1395 + return 0;
1396 + }
1397 +
1398 +-static void tpm_relinquish_locality(struct tpm_chip *chip)
1399 ++static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags)
1400 + {
1401 + int rc;
1402 +
1403 ++ if (flags & TPM_TRANSMIT_RAW)
1404 ++ return;
1405 ++
1406 + if (!chip->ops->relinquish_locality)
1407 + return;
1408 +
1409 +@@ -399,6 +404,28 @@ static void tpm_relinquish_locality(struct tpm_chip *chip)
1410 + chip->locality = -1;
1411 + }
1412 +
1413 ++static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags)
1414 ++{
1415 ++ if (flags & TPM_TRANSMIT_RAW)
1416 ++ return 0;
1417 ++
1418 ++ if (!chip->ops->cmd_ready)
1419 ++ return 0;
1420 ++
1421 ++ return chip->ops->cmd_ready(chip);
1422 ++}
1423 ++
1424 ++static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags)
1425 ++{
1426 ++ if (flags & TPM_TRANSMIT_RAW)
1427 ++ return 0;
1428 ++
1429 ++ if (!chip->ops->go_idle)
1430 ++ return 0;
1431 ++
1432 ++ return chip->ops->go_idle(chip);
1433 ++}
1434 ++
1435 + static ssize_t tpm_try_transmit(struct tpm_chip *chip,
1436 + struct tpm_space *space,
1437 + u8 *buf, size_t bufsiz,
1438 +@@ -423,7 +450,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
1439 + header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
1440 + header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
1441 + TSS2_RESMGR_TPM_RC_LAYER);
1442 +- return bufsiz;
1443 ++ return sizeof(*header);
1444 + }
1445 +
1446 + if (bufsiz > TPM_BUFSIZE)
1447 +@@ -449,14 +476,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
1448 + /* Store the decision as chip->locality will be changed. */
1449 + need_locality = chip->locality == -1;
1450 +
1451 +- if (!(flags & TPM_TRANSMIT_RAW) && need_locality) {
1452 +- rc = tpm_request_locality(chip);
1453 ++ if (need_locality) {
1454 ++ rc = tpm_request_locality(chip, flags);
1455 + if (rc < 0)
1456 + goto out_no_locality;
1457 + }
1458 +
1459 +- if (chip->dev.parent)
1460 +- pm_runtime_get_sync(chip->dev.parent);
1461 ++ rc = tpm_cmd_ready(chip, flags);
1462 ++ if (rc)
1463 ++ goto out;
1464 +
1465 + rc = tpm2_prepare_space(chip, space, ordinal, buf);
1466 + if (rc)
1467 +@@ -516,13 +544,16 @@ out_recv:
1468 + }
1469 +
1470 + rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
1471 ++ if (rc)
1472 ++ dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
1473 +
1474 + out:
1475 +- if (chip->dev.parent)
1476 +- pm_runtime_put_sync(chip->dev.parent);
1477 ++ rc = tpm_go_idle(chip, flags);
1478 ++ if (rc)
1479 ++ goto out;
1480 +
1481 + if (need_locality)
1482 +- tpm_relinquish_locality(chip);
1483 ++ tpm_relinquish_locality(chip, flags);
1484 +
1485 + out_no_locality:
1486 + if (chip->ops->clk_enable != NULL)
1487 +diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
1488 +index 4426649e431c..5f02dcd3df97 100644
1489 +--- a/drivers/char/tpm/tpm.h
1490 ++++ b/drivers/char/tpm/tpm.h
1491 +@@ -511,9 +511,17 @@ extern const struct file_operations tpm_fops;
1492 + extern const struct file_operations tpmrm_fops;
1493 + extern struct idr dev_nums_idr;
1494 +
1495 ++/**
1496 ++ * enum tpm_transmit_flags
1497 ++ *
1498 ++ * @TPM_TRANSMIT_UNLOCKED: used to lock a sequence of tpm_transmit calls.
1499 ++ * @TPM_TRANSMIT_RAW: prevent recursive calls into the setup steps
1500 ++ * (go idle, locality, ...). Always use together with UNLOCKED,
1501 ++ * as it will otherwise fail on double locking.
1502 ++ */
1503 + enum tpm_transmit_flags {
1504 +- TPM_TRANSMIT_UNLOCKED = BIT(0),
1505 +- TPM_TRANSMIT_RAW = BIT(1),
1506 ++ TPM_TRANSMIT_UNLOCKED = BIT(0),
1507 ++ TPM_TRANSMIT_RAW = BIT(1),
1508 + };
1509 +
1510 + ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
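The wrappers added above (tpm_cmd_ready(), tpm_go_idle(), and the flag-aware locality helpers) all gate on TPM_TRANSMIT_RAW so that a command issued from inside the setup path cannot recurse back into it. A compressed userspace sketch of this gating pattern follows; the flag and function names are illustrative, not the driver's API:

#include <stdio.h>

#define XMIT_UNLOCKED (1U << 0)
#define XMIT_RAW      (1U << 1)

static int cmd_ready(unsigned int flags)
{
	if (flags & XMIT_RAW)
		return 0; /* nested transmit: skip the setup step */
	printf("bringing chip to ready state\n");
	return 0;
}

static int transmit(unsigned int flags)
{
	int rc = cmd_ready(flags);

	if (rc)
		return rc;
	printf("sending command%s\n", (flags & XMIT_RAW) ? " (raw)" : "");
	return 0;
}

int main(void)
{
	transmit(0);                        /* top level: full setup */
	transmit(XMIT_UNLOCKED | XMIT_RAW); /* nested: setup skipped */
	return 0;
}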
1511 +diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
1512 +index 6122d3276f72..11c85ed8c113 100644
1513 +--- a/drivers/char/tpm/tpm2-space.c
1514 ++++ b/drivers/char/tpm/tpm2-space.c
1515 +@@ -39,7 +39,8 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
1516 + for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
1517 + if (space->session_tbl[i])
1518 + tpm2_flush_context_cmd(chip, space->session_tbl[i],
1519 +- TPM_TRANSMIT_UNLOCKED);
1520 ++ TPM_TRANSMIT_UNLOCKED |
1521 ++ TPM_TRANSMIT_RAW);
1522 + }
1523 + }
1524 +
1525 +@@ -84,7 +85,7 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
1526 + tpm_buf_append(&tbuf, &buf[*offset], body_size);
1527 +
1528 + rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4,
1529 +- TPM_TRANSMIT_UNLOCKED, NULL);
1530 ++ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, NULL);
1531 + if (rc < 0) {
1532 + dev_warn(&chip->dev, "%s: failed with a system error %d\n",
1533 + __func__, rc);
1534 +@@ -133,7 +134,7 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
1535 + tpm_buf_append_u32(&tbuf, handle);
1536 +
1537 + rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0,
1538 +- TPM_TRANSMIT_UNLOCKED, NULL);
1539 ++ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, NULL);
1540 + if (rc < 0) {
1541 + dev_warn(&chip->dev, "%s: failed with a system error %d\n",
1542 + __func__, rc);
1543 +@@ -170,7 +171,8 @@ static void tpm2_flush_space(struct tpm_chip *chip)
1544 + for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
1545 + if (space->context_tbl[i] && ~space->context_tbl[i])
1546 + tpm2_flush_context_cmd(chip, space->context_tbl[i],
1547 +- TPM_TRANSMIT_UNLOCKED);
1548 ++ TPM_TRANSMIT_UNLOCKED |
1549 ++ TPM_TRANSMIT_RAW);
1550 +
1551 + tpm2_flush_sessions(chip, space);
1552 + }
1553 +@@ -377,7 +379,8 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,
1554 +
1555 + return 0;
1556 + out_no_slots:
1557 +- tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_UNLOCKED);
1558 ++ tpm2_flush_context_cmd(chip, phandle,
1559 ++ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW);
1560 + dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
1561 + phandle);
1562 + return -ENOMEM;
1563 +@@ -465,7 +468,8 @@ static int tpm2_save_space(struct tpm_chip *chip)
1564 + return rc;
1565 +
1566 + tpm2_flush_context_cmd(chip, space->context_tbl[i],
1567 +- TPM_TRANSMIT_UNLOCKED);
1568 ++ TPM_TRANSMIT_UNLOCKED |
1569 ++ TPM_TRANSMIT_RAW);
1570 + space->context_tbl[i] = ~0;
1571 + }
1572 +
1573 +diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
1574 +index 34fbc6cb097b..36952ef98f90 100644
1575 +--- a/drivers/char/tpm/tpm_crb.c
1576 ++++ b/drivers/char/tpm/tpm_crb.c
1577 +@@ -132,7 +132,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
1578 + }
1579 +
1580 + /**
1581 +- * crb_go_idle - request tpm crb device to go the idle state
1582 ++ * __crb_go_idle - request tpm crb device to go the idle state
1583 + *
1584 + * @dev: crb device
1585 + * @priv: crb private data
1586 +@@ -147,7 +147,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
1587 + *
1588 + * Return: 0 always
1589 + */
1590 +-static int crb_go_idle(struct device *dev, struct crb_priv *priv)
1591 ++static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
1592 + {
1593 + if ((priv->sm == ACPI_TPM2_START_METHOD) ||
1594 + (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
1595 +@@ -163,11 +163,20 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
1596 + dev_warn(dev, "goIdle timed out\n");
1597 + return -ETIME;
1598 + }
1599 ++
1600 + return 0;
1601 + }
1602 +
1603 ++static int crb_go_idle(struct tpm_chip *chip)
1604 ++{
1605 ++ struct device *dev = &chip->dev;
1606 ++ struct crb_priv *priv = dev_get_drvdata(dev);
1607 ++
1608 ++ return __crb_go_idle(dev, priv);
1609 ++}
1610 ++
1611 + /**
1612 +- * crb_cmd_ready - request tpm crb device to enter ready state
1613 ++ * __crb_cmd_ready - request tpm crb device to enter ready state
1614 + *
1615 + * @dev: crb device
1616 + * @priv: crb private data
1617 +@@ -181,7 +190,7 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
1618 + *
1619 + * Return: 0 on success -ETIME on timeout;
1620 + */
1621 +-static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
1622 ++static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
1623 + {
1624 + if ((priv->sm == ACPI_TPM2_START_METHOD) ||
1625 + (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
1626 +@@ -200,6 +209,14 @@ static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
1627 + return 0;
1628 + }
1629 +
1630 ++static int crb_cmd_ready(struct tpm_chip *chip)
1631 ++{
1632 ++ struct device *dev = &chip->dev;
1633 ++ struct crb_priv *priv = dev_get_drvdata(dev);
1634 ++
1635 ++ return __crb_cmd_ready(dev, priv);
1636 ++}
1637 ++
1638 + static int __crb_request_locality(struct device *dev,
1639 + struct crb_priv *priv, int loc)
1640 + {
1641 +@@ -401,6 +418,8 @@ static const struct tpm_class_ops tpm_crb = {
1642 + .send = crb_send,
1643 + .cancel = crb_cancel,
1644 + .req_canceled = crb_req_canceled,
1645 ++ .go_idle = crb_go_idle,
1646 ++ .cmd_ready = crb_cmd_ready,
1647 + .request_locality = crb_request_locality,
1648 + .relinquish_locality = crb_relinquish_locality,
1649 + .req_complete_mask = CRB_DRV_STS_COMPLETE,
1650 +@@ -520,7 +539,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
1651 + * PTT HW bug w/a: wake up the device to access
1652 + * possibly not retained registers.
1653 + */
1654 +- ret = crb_cmd_ready(dev, priv);
1655 ++ ret = __crb_cmd_ready(dev, priv);
1656 + if (ret)
1657 + goto out_relinquish_locality;
1658 +
1659 +@@ -565,7 +584,7 @@ out:
1660 + if (!ret)
1661 + priv->cmd_size = cmd_size;
1662 +
1663 +- crb_go_idle(dev, priv);
1664 ++ __crb_go_idle(dev, priv);
1665 +
1666 + out_relinquish_locality:
1667 +
1668 +@@ -628,32 +647,7 @@ static int crb_acpi_add(struct acpi_device *device)
1669 + chip->acpi_dev_handle = device->handle;
1670 + chip->flags = TPM_CHIP_FLAG_TPM2;
1671 +
1672 +- rc = __crb_request_locality(dev, priv, 0);
1673 +- if (rc)
1674 +- return rc;
1675 +-
1676 +- rc = crb_cmd_ready(dev, priv);
1677 +- if (rc)
1678 +- goto out;
1679 +-
1680 +- pm_runtime_get_noresume(dev);
1681 +- pm_runtime_set_active(dev);
1682 +- pm_runtime_enable(dev);
1683 +-
1684 +- rc = tpm_chip_register(chip);
1685 +- if (rc) {
1686 +- crb_go_idle(dev, priv);
1687 +- pm_runtime_put_noidle(dev);
1688 +- pm_runtime_disable(dev);
1689 +- goto out;
1690 +- }
1691 +-
1692 +- pm_runtime_put_sync(dev);
1693 +-
1694 +-out:
1695 +- __crb_relinquish_locality(dev, priv, 0);
1696 +-
1697 +- return rc;
1698 ++ return tpm_chip_register(chip);
1699 + }
1700 +
1701 + static int crb_acpi_remove(struct acpi_device *device)
1702 +@@ -663,52 +657,11 @@ static int crb_acpi_remove(struct acpi_device *device)
1703 +
1704 + tpm_chip_unregister(chip);
1705 +
1706 +- pm_runtime_disable(dev);
1707 +-
1708 + return 0;
1709 + }
1710 +
1711 +-static int __maybe_unused crb_pm_runtime_suspend(struct device *dev)
1712 +-{
1713 +- struct tpm_chip *chip = dev_get_drvdata(dev);
1714 +- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
1715 +-
1716 +- return crb_go_idle(dev, priv);
1717 +-}
1718 +-
1719 +-static int __maybe_unused crb_pm_runtime_resume(struct device *dev)
1720 +-{
1721 +- struct tpm_chip *chip = dev_get_drvdata(dev);
1722 +- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
1723 +-
1724 +- return crb_cmd_ready(dev, priv);
1725 +-}
1726 +-
1727 +-static int __maybe_unused crb_pm_suspend(struct device *dev)
1728 +-{
1729 +- int ret;
1730 +-
1731 +- ret = tpm_pm_suspend(dev);
1732 +- if (ret)
1733 +- return ret;
1734 +-
1735 +- return crb_pm_runtime_suspend(dev);
1736 +-}
1737 +-
1738 +-static int __maybe_unused crb_pm_resume(struct device *dev)
1739 +-{
1740 +- int ret;
1741 +-
1742 +- ret = crb_pm_runtime_resume(dev);
1743 +- if (ret)
1744 +- return ret;
1745 +-
1746 +- return tpm_pm_resume(dev);
1747 +-}
1748 +-
1749 + static const struct dev_pm_ops crb_pm = {
1750 +- SET_SYSTEM_SLEEP_PM_OPS(crb_pm_suspend, crb_pm_resume)
1751 +- SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
1752 ++ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
1753 + };
1754 +
1755 + static const struct acpi_device_id crb_device_ids[] = {
1756 +diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
1757 +index 740af90a9508..c5edf8f2fd19 100644
1758 +--- a/drivers/clk/clk-npcm7xx.c
1759 ++++ b/drivers/clk/clk-npcm7xx.c
1760 +@@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
1761 + if (!clk_base)
1762 + goto npcm7xx_init_error;
1763 +
1764 +- npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) *
1765 +- NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL);
1766 ++ npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws,
1767 ++ NPCM7XX_NUM_CLOCKS), GFP_KERNEL);
1768 + if (!npcm7xx_clk_data)
1769 + goto npcm7xx_init_np_err;
1770 +
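struct_size(p, member, n) yields the size of a structure ending in a flexible array plus n trailing elements; the old expression added sizeof(npcm7xx_clk_data), i.e. the size of the pointer, and so under-allocated. A userspace sketch of the computation (the kernel macro additionally saturates on arithmetic overflow, which this simplified version omits):

#include <stdio.h>
#include <stdlib.h>

struct clk_hw_onecell_data_sketch {
	unsigned int num;
	long hws[];  /* flexible array member */
};

/* simplified struct_size(): no overflow checking */
#define STRUCT_SIZE(ptr, member, n) \
	(sizeof(*(ptr)) + sizeof((ptr)->member[0]) * (n))

int main(void)
{
	struct clk_hw_onecell_data_sketch *d = NULL;
	size_t n = 8;
	size_t bytes = STRUCT_SIZE(d, hws, n); /* sizeof() does not evaluate d */

	d = calloc(1, bytes);
	if (!d)
		return 1;
	d->num = (unsigned int)n;
	printf("allocated %zu bytes for %u entries\n", bytes, d->num);
	free(d);
	return 0;
}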
1771 +diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
1772 +index bca10d618f0a..2a8634a52856 100644
1773 +--- a/drivers/clk/rockchip/clk-rk3399.c
1774 ++++ b/drivers/clk/rockchip/clk-rk3399.c
1775 +@@ -631,7 +631,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
1776 + MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT,
1777 + RK3399_CLKSEL_CON(31), 0, 2, MFLAGS),
1778 + COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT,
1779 +- RK3399_CLKSEL_CON(30), 8, 2, MFLAGS,
1780 ++ RK3399_CLKSEL_CON(31), 2, 1, MFLAGS,
1781 + RK3399_CLKGATE_CON(8), 12, GFLAGS),
1782 +
1783 + /* uart */
1784 +diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
1785 +index 55c0cc309198..7588a9eb0ee0 100644
1786 +--- a/drivers/gpu/drm/udl/udl_drv.h
1787 ++++ b/drivers/gpu/drm/udl/udl_drv.h
1788 +@@ -112,7 +112,7 @@ udl_fb_user_fb_create(struct drm_device *dev,
1789 + struct drm_file *file,
1790 + const struct drm_mode_fb_cmd2 *mode_cmd);
1791 +
1792 +-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
1793 ++int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
1794 + const char *front, char **urb_buf_ptr,
1795 + u32 byte_offset, u32 device_byte_offset, u32 byte_width,
1796 + int *ident_ptr, int *sent_ptr);
1797 +diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
1798 +index d5583190f3e4..8746eeeec44d 100644
1799 +--- a/drivers/gpu/drm/udl/udl_fb.c
1800 ++++ b/drivers/gpu/drm/udl/udl_fb.c
1801 +@@ -90,7 +90,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
1802 + int bytes_identical = 0;
1803 + struct urb *urb;
1804 + int aligned_x;
1805 +- int bpp = fb->base.format->cpp[0];
1806 ++ int log_bpp;
1807 ++
1808 ++ BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
1809 ++ log_bpp = __ffs(fb->base.format->cpp[0]);
1810 +
1811 + if (!fb->active_16)
1812 + return 0;
1813 +@@ -125,12 +128,12 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
1814 +
1815 + for (i = y; i < y + height ; i++) {
1816 + const int line_offset = fb->base.pitches[0] * i;
1817 +- const int byte_offset = line_offset + (x * bpp);
1818 +- const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
1819 +- if (udl_render_hline(dev, bpp, &urb,
1820 ++ const int byte_offset = line_offset + (x << log_bpp);
1821 ++ const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
1822 ++ if (udl_render_hline(dev, log_bpp, &urb,
1823 + (char *) fb->obj->vmapping,
1824 + &cmd, byte_offset, dev_byte_offset,
1825 +- width * bpp,
1826 ++ width << log_bpp,
1827 + &bytes_identical, &bytes_sent))
1828 + goto error;
1829 + }
1830 +@@ -149,7 +152,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
1831 + error:
1832 + atomic_add(bytes_sent, &udl->bytes_sent);
1833 + atomic_add(bytes_identical, &udl->bytes_identical);
1834 +- atomic_add(width*height*bpp, &udl->bytes_rendered);
1835 ++ atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
1836 + end_cycles = get_cycles();
1837 + atomic_add(((unsigned int) ((end_cycles - start_cycles)
1838 + >> 10)), /* Kcycles */
1839 +@@ -221,7 +224,7 @@ static int udl_fb_open(struct fb_info *info, int user)
1840 +
1841 + struct fb_deferred_io *fbdefio;
1842 +
1843 +- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
1844 ++ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
1845 +
1846 + if (fbdefio) {
1847 + fbdefio->delay = DL_DEFIO_WRITE_DELAY;
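Because the supported byte depths (2 and 4) are powers of two, the driver can carry log2(bpp) and replace every multiply and divide with a shift, which is what the log_bpp conversion above does. A userspace sketch of the equivalence; ffs() from <strings.h> stands in for the kernel's __ffs():

#include <stdio.h>
#include <strings.h>

int main(void)
{
	int bpp = 4;                /* e.g. 4 bytes per pixel */
	int log_bpp = ffs(bpp) - 1; /* kernel __ffs() is 0-based, ffs() 1-based */
	int x = 37;

	printf("log_bpp = %d\n", log_bpp);
	printf("%d * %d = %d == %d\n", x, bpp, x * bpp, x << log_bpp);
	printf("%d / %d = %d == %d\n", x * bpp, bpp, x, (x * bpp) >> log_bpp);
	return 0;
}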
1848 +diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
1849 +index d518de8f496b..7e9ad926926a 100644
1850 +--- a/drivers/gpu/drm/udl/udl_main.c
1851 ++++ b/drivers/gpu/drm/udl/udl_main.c
1852 +@@ -170,18 +170,13 @@ static void udl_free_urb_list(struct drm_device *dev)
1853 + struct list_head *node;
1854 + struct urb_node *unode;
1855 + struct urb *urb;
1856 +- int ret;
1857 + unsigned long flags;
1858 +
1859 + DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
1860 +
1861 + /* keep waiting and freeing, until we've got 'em all */
1862 + while (count--) {
1863 +-
1864 +- /* Getting interrupted means a leak, but ok at shutdown*/
1865 +- ret = down_interruptible(&udl->urbs.limit_sem);
1866 +- if (ret)
1867 +- break;
1868 ++ down(&udl->urbs.limit_sem);
1869 +
1870 + spin_lock_irqsave(&udl->urbs.lock, flags);
1871 +
1872 +@@ -205,17 +200,22 @@ static void udl_free_urb_list(struct drm_device *dev)
1873 + static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
1874 + {
1875 + struct udl_device *udl = dev->dev_private;
1876 +- int i = 0;
1877 + struct urb *urb;
1878 + struct urb_node *unode;
1879 + char *buf;
1880 ++ size_t wanted_size = count * size;
1881 +
1882 + spin_lock_init(&udl->urbs.lock);
1883 +
1884 ++retry:
1885 + udl->urbs.size = size;
1886 + INIT_LIST_HEAD(&udl->urbs.list);
1887 +
1888 +- while (i < count) {
1889 ++ sema_init(&udl->urbs.limit_sem, 0);
1890 ++ udl->urbs.count = 0;
1891 ++ udl->urbs.available = 0;
1892 ++
1893 ++ while (udl->urbs.count * size < wanted_size) {
1894 + unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
1895 + if (!unode)
1896 + break;
1897 +@@ -231,11 +231,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
1898 + }
1899 + unode->urb = urb;
1900 +
1901 +- buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
1902 ++ buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
1903 + &urb->transfer_dma);
1904 + if (!buf) {
1905 + kfree(unode);
1906 + usb_free_urb(urb);
1907 ++ if (size > PAGE_SIZE) {
1908 ++ size /= 2;
1909 ++ udl_free_urb_list(dev);
1910 ++ goto retry;
1911 ++ }
1912 + break;
1913 + }
1914 +
1915 +@@ -246,16 +251,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
1916 +
1917 + list_add_tail(&unode->entry, &udl->urbs.list);
1918 +
1919 +- i++;
1920 ++ up(&udl->urbs.limit_sem);
1921 ++ udl->urbs.count++;
1922 ++ udl->urbs.available++;
1923 + }
1924 +
1925 +- sema_init(&udl->urbs.limit_sem, i);
1926 +- udl->urbs.count = i;
1927 +- udl->urbs.available = i;
1928 +-
1929 +- DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
1930 ++ DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
1931 +
1932 +- return i;
1933 ++ return udl->urbs.count;
1934 + }
1935 +
1936 + struct urb *udl_get_urb(struct drm_device *dev)
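The retry logic above halves the per-URB buffer size (down to one page) whenever usb_alloc_coherent() fails, then rebuilds the whole list so the total transfer budget stays roughly constant. A userspace sketch of the fallback strategy, with try_alloc() as a stand-in for the coherent allocator:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096UL

/* Stand-in for usb_alloc_coherent(): pretend large buffers fail. */
static void *try_alloc(size_t size)
{
	return size > 16384 ? NULL : malloc(size);
}

int main(void)
{
	size_t wanted = 4 * 65536; /* total transfer budget in bytes */
	size_t size = 65536;       /* per-buffer size, halved on failure */
	size_t got;

retry:
	got = 0;
	while (got < wanted) {
		void *buf = try_alloc(size);

		if (!buf) {
			if (size > PAGE_SZ) {
				size /= 2;  /* smaller pieces, same budget */
				goto retry; /* free and restart, as the driver does */
			}
			break;
		}
		free(buf);
		got += size;
	}
	printf("settled on %zu buffers of %zu bytes\n", got / size, size);
	return 0;
}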
1937 +diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
1938 +index b992644c17e6..f3331d33547a 100644
1939 +--- a/drivers/gpu/drm/udl/udl_transfer.c
1940 ++++ b/drivers/gpu/drm/udl/udl_transfer.c
1941 +@@ -83,12 +83,12 @@ static inline u16 pixel32_to_be16(const uint32_t pixel)
1942 + ((pixel >> 8) & 0xf800));
1943 + }
1944 +
1945 +-static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp)
1946 ++static inline u16 get_pixel_val16(const uint8_t *pixel, int log_bpp)
1947 + {
1948 +- u16 pixel_val16 = 0;
1949 +- if (bpp == 2)
1950 ++ u16 pixel_val16;
1951 ++ if (log_bpp == 1)
1952 + pixel_val16 = *(const uint16_t *)pixel;
1953 +- else if (bpp == 4)
1954 ++ else
1955 + pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel);
1956 + return pixel_val16;
1957 + }
1958 +@@ -125,8 +125,9 @@ static void udl_compress_hline16(
1959 + const u8 *const pixel_end,
1960 + uint32_t *device_address_ptr,
1961 + uint8_t **command_buffer_ptr,
1962 +- const uint8_t *const cmd_buffer_end, int bpp)
1963 ++ const uint8_t *const cmd_buffer_end, int log_bpp)
1964 + {
1965 ++ const int bpp = 1 << log_bpp;
1966 + const u8 *pixel = *pixel_start_ptr;
1967 + uint32_t dev_addr = *device_address_ptr;
1968 + uint8_t *cmd = *command_buffer_ptr;
1969 +@@ -153,12 +154,12 @@ static void udl_compress_hline16(
1970 + raw_pixels_count_byte = cmd++; /* we'll know this later */
1971 + raw_pixel_start = pixel;
1972 +
1973 +- cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL,
1974 +- (unsigned long)(pixel_end - pixel) / bpp,
1975 +- (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp;
1976 ++ cmd_pixel_end = pixel + (min3(MAX_CMD_PIXELS + 1UL,
1977 ++ (unsigned long)(pixel_end - pixel) >> log_bpp,
1978 ++ (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) << log_bpp);
1979 +
1980 + prefetch_range((void *) pixel, cmd_pixel_end - pixel);
1981 +- pixel_val16 = get_pixel_val16(pixel, bpp);
1982 ++ pixel_val16 = get_pixel_val16(pixel, log_bpp);
1983 +
1984 + while (pixel < cmd_pixel_end) {
1985 + const u8 *const start = pixel;
1986 +@@ -170,7 +171,7 @@ static void udl_compress_hline16(
1987 + pixel += bpp;
1988 +
1989 + while (pixel < cmd_pixel_end) {
1990 +- pixel_val16 = get_pixel_val16(pixel, bpp);
1991 ++ pixel_val16 = get_pixel_val16(pixel, log_bpp);
1992 + if (pixel_val16 != repeating_pixel_val16)
1993 + break;
1994 + pixel += bpp;
1995 +@@ -179,10 +180,10 @@ static void udl_compress_hline16(
1996 + if (unlikely(pixel > start + bpp)) {
1997 + /* go back and fill in raw pixel count */
1998 + *raw_pixels_count_byte = (((start -
1999 +- raw_pixel_start) / bpp) + 1) & 0xFF;
2000 ++ raw_pixel_start) >> log_bpp) + 1) & 0xFF;
2001 +
2002 + /* immediately after raw data is repeat byte */
2003 +- *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
2004 ++ *cmd++ = (((pixel - start) >> log_bpp) - 1) & 0xFF;
2005 +
2006 + /* Then start another raw pixel span */
2007 + raw_pixel_start = pixel;
2008 +@@ -192,14 +193,14 @@ static void udl_compress_hline16(
2009 +
2010 + if (pixel > raw_pixel_start) {
2011 + /* finalize last RAW span */
2012 +- *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
2013 ++ *raw_pixels_count_byte = ((pixel - raw_pixel_start) >> log_bpp) & 0xFF;
2014 + } else {
2015 + /* undo unused byte */
2016 + cmd--;
2017 + }
2018 +
2019 +- *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
2020 +- dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
2021 ++ *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) >> log_bpp) & 0xFF;
2022 ++ dev_addr += ((pixel - cmd_pixel_start) >> log_bpp) * 2;
2023 + }
2024 +
2025 + if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
2026 +@@ -222,19 +223,19 @@ static void udl_compress_hline16(
2027 + * (that we can only write to, slowly, and can never read), and (optionally)
2028 + * our shadow copy that tracks what's been sent to that hardware buffer.
2029 + */
2030 +-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
2031 ++int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
2032 + const char *front, char **urb_buf_ptr,
2033 + u32 byte_offset, u32 device_byte_offset,
2034 + u32 byte_width,
2035 + int *ident_ptr, int *sent_ptr)
2036 + {
2037 + const u8 *line_start, *line_end, *next_pixel;
2038 +- u32 base16 = 0 + (device_byte_offset / bpp) * 2;
2039 ++ u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2;
2040 + struct urb *urb = *urb_ptr;
2041 + u8 *cmd = *urb_buf_ptr;
2042 + u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
2043 +
2044 +- BUG_ON(!(bpp == 2 || bpp == 4));
2045 ++ BUG_ON(!(log_bpp == 1 || log_bpp == 2));
2046 +
2047 + line_start = (u8 *) (front + byte_offset);
2048 + next_pixel = line_start;
2049 +@@ -244,7 +245,7 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
2050 +
2051 + udl_compress_hline16(&next_pixel,
2052 + line_end, &base16,
2053 +- (u8 **) &cmd, (u8 *) cmd_end, bpp);
2054 ++ (u8 **) &cmd, (u8 *) cmd_end, log_bpp);
2055 +
2056 + if (cmd >= cmd_end) {
2057 + int len = cmd - (u8 *) urb->transfer_buffer;
2058 +diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
2059 +index 17c6460ae351..577e2ede5a1a 100644
2060 +--- a/drivers/hwmon/k10temp.c
2061 ++++ b/drivers/hwmon/k10temp.c
2062 +@@ -105,6 +105,8 @@ static const struct tctl_offset tctl_offset_table[] = {
2063 + { 0x17, "AMD Ryzen Threadripper 1950", 10000 },
2064 + { 0x17, "AMD Ryzen Threadripper 1920", 10000 },
2065 + { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
2066 ++ { 0x17, "AMD Ryzen Threadripper 2950X", 27000 },
2067 ++ { 0x17, "AMD Ryzen Threadripper 2990WX", 27000 },
2068 + };
2069 +
2070 + static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
2071 +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
2072 +index f9d1349c3286..b89e8379d898 100644
2073 +--- a/drivers/hwmon/nct6775.c
2074 ++++ b/drivers/hwmon/nct6775.c
2075 +@@ -63,6 +63,7 @@
2076 + #include <linux/bitops.h>
2077 + #include <linux/dmi.h>
2078 + #include <linux/io.h>
2079 ++#include <linux/nospec.h>
2080 + #include "lm75.h"
2081 +
2082 + #define USE_ALTERNATE
2083 +@@ -2689,6 +2690,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr,
2084 + return err;
2085 + if (val > NUM_TEMP)
2086 + return -EINVAL;
2087 ++ val = array_index_nospec(val, NUM_TEMP + 1);
2088 + if (val && (!(data->have_temp & BIT(val - 1)) ||
2089 + !data->temp_src[val - 1]))
2090 + return -EINVAL;
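array_index_nospec() clamps an already-bounds-checked index with branch-free arithmetic so a mispredicted check cannot feed an out-of-range value into a speculative load (Spectre v1). A portable userspace approximation; the kernel helper uses an arch-specific mask, but the idea is the same:

#include <stdio.h>
#include <stddef.h>

/* Portable approximation of array_index_nospec(): the mask is all-ones
 * when idx < size and zero otherwise, so out-of-range indices collapse
 * to zero without a data-dependent branch. */
static size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(idx < size);

	return idx & mask;
}

int main(void)
{
	int temp_src[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	size_t val = 5; /* user-controlled index, already bounds-checked */

	if (val < 8) {
		val = index_nospec(val, 8);
		printf("temp_src[%zu] = %d\n", val, temp_src[val]);
	}
	return 0;
}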
2091 +diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
2092 +index f7a96bcf94a6..5349e22b5c78 100644
2093 +--- a/drivers/iommu/arm-smmu.c
2094 ++++ b/drivers/iommu/arm-smmu.c
2095 +@@ -2103,12 +2103,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
2096 + if (err)
2097 + return err;
2098 +
2099 +- if (smmu->version == ARM_SMMU_V2 &&
2100 +- smmu->num_context_banks != smmu->num_context_irqs) {
2101 +- dev_err(dev,
2102 +- "found only %d context interrupt(s) but %d required\n",
2103 +- smmu->num_context_irqs, smmu->num_context_banks);
2104 +- return -ENODEV;
2105 ++ if (smmu->version == ARM_SMMU_V2) {
2106 ++ if (smmu->num_context_banks > smmu->num_context_irqs) {
2107 ++ dev_err(dev,
2108 ++ "found only %d context irq(s) but %d required\n",
2109 ++ smmu->num_context_irqs, smmu->num_context_banks);
2110 ++ return -ENODEV;
2111 ++ }
2112 ++
2113 ++ /* Ignore superfluous interrupts */
2114 ++ smmu->num_context_irqs = smmu->num_context_banks;
2115 + }
2116 +
2117 + for (i = 0; i < smmu->num_global_irqs; ++i) {
2118 +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
2119 +index 7465f17e1559..38175ebd92d4 100644
2120 +--- a/drivers/misc/mei/main.c
2121 ++++ b/drivers/misc/mei/main.c
2122 +@@ -312,7 +312,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
2123 + }
2124 + }
2125 +
2126 +- *offset = 0;
2127 + cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
2128 + if (!cb) {
2129 + rets = -ENOMEM;
2130 +diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
2131 +index f4a5a317d4ae..e1086a010b88 100644
2132 +--- a/drivers/mtd/nand/raw/fsmc_nand.c
2133 ++++ b/drivers/mtd/nand/raw/fsmc_nand.c
2134 +@@ -740,7 +740,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2135 + for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
2136 + nand_read_page_op(chip, page, s * eccsize, NULL, 0);
2137 + chip->ecc.hwctl(mtd, NAND_ECC_READ);
2138 +- chip->read_buf(mtd, p, eccsize);
2139 ++ nand_read_data_op(chip, p, eccsize, false);
2140 +
2141 + for (j = 0; j < eccbytes;) {
2142 + struct mtd_oob_region oobregion;
2143 +diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
2144 +index ebb1d141b900..c88588815ca1 100644
2145 +--- a/drivers/mtd/nand/raw/marvell_nand.c
2146 ++++ b/drivers/mtd/nand/raw/marvell_nand.c
2147 +@@ -2677,6 +2677,21 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
2148 + return 0;
2149 + }
2150 +
2151 ++static void marvell_nfc_reset(struct marvell_nfc *nfc)
2152 ++{
2153 ++ /*
2154 ++ * ECC operations and interruptions are only enabled when specifically
2155 ++ * needed. ECC shall not be activated in the early stages (fails probe).
2156 ++ * Arbiter flag, even if marked as "reserved", must be set (empirical).
2157 ++ * SPARE_EN bit must always be set or ECC bytes will not be at the same
2158 ++ * offset in the read page and this will fail the protection.
2159 ++ */
2160 ++ writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
2161 ++ NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
2162 ++ writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
2163 ++ writel_relaxed(0, nfc->regs + NDECCCTRL);
2164 ++}
2165 ++
2166 + static int marvell_nfc_init(struct marvell_nfc *nfc)
2167 + {
2168 + struct device_node *np = nfc->dev->of_node;
2169 +@@ -2715,17 +2730,7 @@ static int marvell_nfc_init(struct marvell_nfc *nfc)
2170 + if (!nfc->caps->is_nfcv2)
2171 + marvell_nfc_init_dma(nfc);
2172 +
2173 +- /*
2174 +- * ECC operations and interruptions are only enabled when specifically
2175 +- * needed. ECC shall not be activated in the early stages (fails probe).
2176 +- * Arbiter flag, even if marked as "reserved", must be set (empirical).
2177 +- * SPARE_EN bit must always be set or ECC bytes will not be at the same
2178 +- * offset in the read page and this will fail the protection.
2179 +- */
2180 +- writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
2181 +- NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
2182 +- writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
2183 +- writel_relaxed(0, nfc->regs + NDECCCTRL);
2184 ++ marvell_nfc_reset(nfc);
2185 +
2186 + return 0;
2187 + }
2188 +@@ -2840,6 +2845,51 @@ static int marvell_nfc_remove(struct platform_device *pdev)
2189 + return 0;
2190 + }
2191 +
2192 ++static int __maybe_unused marvell_nfc_suspend(struct device *dev)
2193 ++{
2194 ++ struct marvell_nfc *nfc = dev_get_drvdata(dev);
2195 ++ struct marvell_nand_chip *chip;
2196 ++
2197 ++ list_for_each_entry(chip, &nfc->chips, node)
2198 ++ marvell_nfc_wait_ndrun(&chip->chip);
2199 ++
2200 ++ clk_disable_unprepare(nfc->reg_clk);
2201 ++ clk_disable_unprepare(nfc->core_clk);
2202 ++
2203 ++ return 0;
2204 ++}
2205 ++
2206 ++static int __maybe_unused marvell_nfc_resume(struct device *dev)
2207 ++{
2208 ++ struct marvell_nfc *nfc = dev_get_drvdata(dev);
2209 ++ int ret;
2210 ++
2211 ++ ret = clk_prepare_enable(nfc->core_clk);
2212 ++ if (ret < 0)
2213 ++ return ret;
2214 ++
2215 ++ if (!IS_ERR(nfc->reg_clk)) {
2216 ++ ret = clk_prepare_enable(nfc->reg_clk);
2217 ++ if (ret < 0)
2218 ++ return ret;
2219 ++ }
2220 ++
2221 ++ /*
2222 ++ * Reset nfc->selected_chip so the next command will cause the timing
2223 ++ * registers to be restored in marvell_nfc_select_chip().
2224 ++ */
2225 ++ nfc->selected_chip = NULL;
2226 ++
2227 ++ /* Reset registers that have lost their contents */
2228 ++ marvell_nfc_reset(nfc);
2229 ++
2230 ++ return 0;
2231 ++}
2232 ++
2233 ++static const struct dev_pm_ops marvell_nfc_pm_ops = {
2234 ++ SET_SYSTEM_SLEEP_PM_OPS(marvell_nfc_suspend, marvell_nfc_resume)
2235 ++};
2236 ++
2237 + static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
2238 + .max_cs_nb = 4,
2239 + .max_rb_nb = 2,
2240 +@@ -2924,6 +2974,7 @@ static struct platform_driver marvell_nfc_driver = {
2241 + .driver = {
2242 + .name = "marvell-nfc",
2243 + .of_match_table = marvell_nfc_of_ids,
2244 ++ .pm = &marvell_nfc_pm_ops,
2245 + },
2246 + .id_table = marvell_nfc_platform_ids,
2247 + .probe = marvell_nfc_probe,
2248 +diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
2249 +index d542908a0ebb..766df4134482 100644
2250 +--- a/drivers/mtd/nand/raw/nand_hynix.c
2251 ++++ b/drivers/mtd/nand/raw/nand_hynix.c
2252 +@@ -100,6 +100,16 @@ static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
2253 + struct mtd_info *mtd = nand_to_mtd(chip);
2254 + u16 column = ((u16)addr << 8) | addr;
2255 +
2256 ++ if (chip->exec_op) {
2257 ++ struct nand_op_instr instrs[] = {
2258 ++ NAND_OP_ADDR(1, &addr, 0),
2259 ++ NAND_OP_8BIT_DATA_OUT(1, &val, 0),
2260 ++ };
2261 ++ struct nand_operation op = NAND_OPERATION(instrs);
2262 ++
2263 ++ return nand_exec_op(chip, &op);
2264 ++ }
2265 ++
2266 + chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
2267 + chip->write_byte(mtd, val);
2268 +
2269 +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
2270 +index 6a5519f0ff25..49b4e70fefe7 100644
2271 +--- a/drivers/mtd/nand/raw/qcom_nandc.c
2272 ++++ b/drivers/mtd/nand/raw/qcom_nandc.c
2273 +@@ -213,6 +213,8 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
2274 + #define QPIC_PER_CW_CMD_SGL 32
2275 + #define QPIC_PER_CW_DATA_SGL 8
2276 +
2277 ++#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
2278 ++
2279 + /*
2280 + * Flags used in DMA descriptor preparation helper functions
2281 + * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
2282 +@@ -245,6 +247,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
2283 + * @tx_sgl_start - start index in data sgl for tx.
2284 + * @rx_sgl_pos - current index in data sgl for rx.
2285 + * @rx_sgl_start - start index in data sgl for rx.
2286 ++ * @wait_second_completion - wait for the second DMA desc completion before
2287 ++ * marking the NAND transfer as complete.
2288 ++ * @txn_done - completion for NAND transfer.
2289 ++ * @last_data_desc - last DMA desc in data channel (tx/rx).
2290 ++ * @last_cmd_desc - last DMA desc in command channel.
2291 + */
2292 + struct bam_transaction {
2293 + struct bam_cmd_element *bam_ce;
2294 +@@ -258,6 +265,10 @@ struct bam_transaction {
2295 + u32 tx_sgl_start;
2296 + u32 rx_sgl_pos;
2297 + u32 rx_sgl_start;
2298 ++ bool wait_second_completion;
2299 ++ struct completion txn_done;
2300 ++ struct dma_async_tx_descriptor *last_data_desc;
2301 ++ struct dma_async_tx_descriptor *last_cmd_desc;
2302 + };
2303 +
2304 + /*
2305 +@@ -504,6 +515,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
2306 +
2307 + bam_txn->data_sgl = bam_txn_buf;
2308 +
2309 ++ init_completion(&bam_txn->txn_done);
2310 ++
2311 + return bam_txn;
2312 + }
2313 +
2314 +@@ -523,11 +536,33 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
2315 + bam_txn->tx_sgl_start = 0;
2316 + bam_txn->rx_sgl_pos = 0;
2317 + bam_txn->rx_sgl_start = 0;
2318 ++ bam_txn->last_data_desc = NULL;
2319 ++ bam_txn->wait_second_completion = false;
2320 +
2321 + sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
2322 + QPIC_PER_CW_CMD_SGL);
2323 + sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
2324 + QPIC_PER_CW_DATA_SGL);
2325 ++
2326 ++ reinit_completion(&bam_txn->txn_done);
2327 ++}
2328 ++
2329 ++/* Callback for DMA descriptor completion */
2330 ++static void qpic_bam_dma_done(void *data)
2331 ++{
2332 ++ struct bam_transaction *bam_txn = data;
2333 ++
2334 ++ /*
2335 ++ * In case of a data transfer with NAND, two callbacks will be generated,
2336 ++ * one for the command channel and another for the data channel.
2337 ++ * If the current transaction has data descriptors
2338 ++ * (i.e. wait_second_completion is true), then set this to false
2339 ++ * and wait for the second DMA descriptor completion.
2340 ++ */
2341 ++ if (bam_txn->wait_second_completion)
2342 ++ bam_txn->wait_second_completion = false;
2343 ++ else
2344 ++ complete(&bam_txn->txn_done);
2345 + }
2346 +
2347 + static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
2348 +@@ -756,6 +791,12 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
2349 +
2350 + desc->dma_desc = dma_desc;
2351 +
2352 ++ /* update last data/command descriptor */
2353 ++ if (chan == nandc->cmd_chan)
2354 ++ bam_txn->last_cmd_desc = dma_desc;
2355 ++ else
2356 ++ bam_txn->last_data_desc = dma_desc;
2357 ++
2358 + list_add_tail(&desc->node, &nandc->desc_list);
2359 +
2360 + return 0;
2361 +@@ -1273,10 +1314,20 @@ static int submit_descs(struct qcom_nand_controller *nandc)
2362 + cookie = dmaengine_submit(desc->dma_desc);
2363 +
2364 + if (nandc->props->is_bam) {
2365 ++ bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
2366 ++ bam_txn->last_cmd_desc->callback_param = bam_txn;
2367 ++ if (bam_txn->last_data_desc) {
2368 ++ bam_txn->last_data_desc->callback = qpic_bam_dma_done;
2369 ++ bam_txn->last_data_desc->callback_param = bam_txn;
2370 ++ bam_txn->wait_second_completion = true;
2371 ++ }
2372 ++
2373 + dma_async_issue_pending(nandc->tx_chan);
2374 + dma_async_issue_pending(nandc->rx_chan);
2375 ++ dma_async_issue_pending(nandc->cmd_chan);
2376 +
2377 +- if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
2378 ++ if (!wait_for_completion_timeout(&bam_txn->txn_done,
2379 ++ QPIC_NAND_COMPLETION_TIMEOUT))
2380 + return -ETIMEDOUT;
2381 + } else {
2382 + if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
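The completion scheme added above arms up to two DMA callbacks per transaction (one on the command channel, one on the data channel), and only the last one signals txn_done. A userspace sketch of that two-callback handshake using pthreads; the names are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool wait_second, done;

/* Mirrors qpic_bam_dma_done(): the first callback only clears the flag,
 * the second one completes the wait. */
static void dma_done(const char *chan)
{
	pthread_mutex_lock(&lock);
	if (wait_second) {
		wait_second = false;
	} else {
		done = true;
		pthread_cond_signal(&cond);
	}
	printf("%s channel finished\n", chan);
	pthread_mutex_unlock(&lock);
}

static void *engine(void *arg)
{
	(void)arg;
	dma_done("command");
	dma_done("data");
	return NULL;
}

int main(void)
{
	pthread_t t;

	wait_second = true; /* this transaction has data descriptors */
	pthread_create(&t, NULL, engine, NULL);

	pthread_mutex_lock(&lock);
	while (!done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("transaction complete\n");
	return 0;
}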
2383 +diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
2384 +index cb987c2ecc6b..87131f663292 100644
2385 +--- a/drivers/net/wireless/broadcom/b43/leds.c
2386 ++++ b/drivers/net/wireless/broadcom/b43/leds.c
2387 +@@ -131,7 +131,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
2388 + led->wl = dev->wl;
2389 + led->index = led_index;
2390 + led->activelow = activelow;
2391 +- strncpy(led->name, name, sizeof(led->name));
2392 ++ strlcpy(led->name, name, sizeof(led->name));
2393 + atomic_set(&led->state, 0);
2394 +
2395 + led->led_dev.name = led->name;
2396 +diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c
2397 +index fd4565389c77..bc922118b6ac 100644
2398 +--- a/drivers/net/wireless/broadcom/b43legacy/leds.c
2399 ++++ b/drivers/net/wireless/broadcom/b43legacy/leds.c
2400 +@@ -101,7 +101,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev,
2401 + led->dev = dev;
2402 + led->index = led_index;
2403 + led->activelow = activelow;
2404 +- strncpy(led->name, name, sizeof(led->name));
2405 ++ strlcpy(led->name, name, sizeof(led->name));
2406 +
2407 + led->led_dev.name = led->name;
2408 + led->led_dev.default_trigger = default_trigger;
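strncpy() does not NUL-terminate the destination when the source fills the buffer, which is why both hunks above switch to strlcpy(). A userspace demonstration (a local my_strlcpy() is provided, since glibc does not ship strlcpy):

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0'; /* always terminated */
	}
	return len;
}

int main(void)
{
	char a[4], b[4];

	strncpy(a, "wlan", sizeof(a));    /* fills buffer, no terminator */
	my_strlcpy(b, "wlan", sizeof(b)); /* truncated to "wla" + NUL */

	printf("strncpy terminated: %s\n",
	       memchr(a, '\0', sizeof(a)) ? "yes" : "no");
	printf("strlcpy result: \"%s\"\n", b);
	return 0;
}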
2409 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2410 +index ddd441b1516a..e10b0d20c4a7 100644
2411 +--- a/drivers/nvme/host/pci.c
2412 ++++ b/drivers/nvme/host/pci.c
2413 +@@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
2414 + old_value = *dbbuf_db;
2415 + *dbbuf_db = value;
2416 +
2417 ++ /*
2418 ++ * Ensure that the doorbell is updated before reading the event
2419 ++ * index from memory. The controller needs to provide similar
2420 ++ * ordering to ensure the event index is updated before reading
2421 ++ * the doorbell.
2422 ++ */
2423 ++ mb();
2424 ++
2425 + if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
2426 + return false;
2427 + }
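The mb() added above enforces store-then-load ordering: the shadow doorbell write must be globally visible before the event index is read, and only a full barrier orders a store against a later load. A userspace sketch using C11 atomics, with the event test modelled on the driver's nvme_dbbuf_need_event() helper:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int dbbuf_db; /* shadow doorbell we write      */
static _Atomic unsigned int dbbuf_ei; /* event index the device writes */

static int need_event(unsigned int new, unsigned int old)
{
	unsigned int ei = atomic_load_explicit(&dbbuf_ei, memory_order_relaxed);

	return (unsigned short)(new - ei - 1) < (unsigned short)(new - old);
}

int main(void)
{
	unsigned int old = atomic_load_explicit(&dbbuf_db, memory_order_relaxed);

	atomic_store_explicit(&dbbuf_db, old + 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst); /* the mb() in the patch */
	printf("ring hardware doorbell: %d\n", need_event(old + 1, old));
	return 0;
}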
2428 +diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
2429 +index c3bdd90b1422..deb7870b3d1a 100644
2430 +--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
2431 ++++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
2432 +@@ -429,7 +429,7 @@ static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
2433 + const char *name;
2434 + int i, ret;
2435 +
2436 +- if (group > info->ngroups)
2437 ++ if (group >= info->ngroups)
2438 + return;
2439 +
2440 + seq_puts(s, "\n");
2441 +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
2442 +index 45b7cb01f410..307403decf76 100644
2443 +--- a/drivers/platform/x86/ideapad-laptop.c
2444 ++++ b/drivers/platform/x86/ideapad-laptop.c
2445 +@@ -1133,10 +1133,10 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
2446 + },
2447 + },
2448 + {
2449 +- .ident = "Lenovo Legion Y520-15IKBN",
2450 ++ .ident = "Lenovo Legion Y520-15IKB",
2451 + .matches = {
2452 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2453 +- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBN"),
2454 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKB"),
2455 + },
2456 + },
2457 + {
2458 +diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
2459 +index 8e3d0146ff8c..04791ea5d97b 100644
2460 +--- a/drivers/platform/x86/wmi.c
2461 ++++ b/drivers/platform/x86/wmi.c
2462 +@@ -895,7 +895,6 @@ static int wmi_dev_probe(struct device *dev)
2463 + struct wmi_driver *wdriver =
2464 + container_of(dev->driver, struct wmi_driver, driver);
2465 + int ret = 0;
2466 +- int count;
2467 + char *buf;
2468 +
2469 + if (ACPI_FAILURE(wmi_method_enable(wblock, 1)))
2470 +@@ -917,9 +916,8 @@ static int wmi_dev_probe(struct device *dev)
2471 + goto probe_failure;
2472 + }
2473 +
2474 +- count = get_order(wblock->req_buf_size);
2475 +- wblock->handler_data = (void *)__get_free_pages(GFP_KERNEL,
2476 +- count);
2477 ++ wblock->handler_data = kmalloc(wblock->req_buf_size,
2478 ++ GFP_KERNEL);
2479 + if (!wblock->handler_data) {
2480 + ret = -ENOMEM;
2481 + goto probe_failure;
2482 +@@ -964,8 +962,7 @@ static int wmi_dev_remove(struct device *dev)
2483 + if (wdriver->filter_callback) {
2484 + misc_deregister(&wblock->char_dev);
2485 + kfree(wblock->char_dev.name);
2486 +- free_pages((unsigned long)wblock->handler_data,
2487 +- get_order(wblock->req_buf_size));
2488 ++ kfree(wblock->handler_data);
2489 + }
2490 +
2491 + if (wdriver->remove)
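Switching from __get_free_pages() to kmalloc() avoids rounding the buffer up to a power-of-two number of pages and pairs the allocation with a plain kfree(). A userspace sketch of the rounding cost; the local get_order() mirrors the kernel helper for illustration only:

#include <stdio.h>

static int get_order(unsigned long size, unsigned long page)
{
	unsigned long n = (size + page - 1) / page;
	int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long page = 4096, size = 5 * 4096; /* 20 KiB request */
	int order = get_order(size, page);

	printf("order-%d allocation = %lu bytes for a %lu-byte buffer\n",
	       order, page << order, size); /* 32 KiB for a 20 KiB request */
	return 0;
}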
2492 +diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
2493 +index 28dc056eaafa..bc462d1ec963 100644
2494 +--- a/drivers/power/supply/generic-adc-battery.c
2495 ++++ b/drivers/power/supply/generic-adc-battery.c
2496 +@@ -241,10 +241,10 @@ static int gab_probe(struct platform_device *pdev)
2497 + struct power_supply_desc *psy_desc;
2498 + struct power_supply_config psy_cfg = {};
2499 + struct gab_platform_data *pdata = pdev->dev.platform_data;
2500 +- enum power_supply_property *properties;
2501 + int ret = 0;
2502 + int chan;
2503 +- int index = 0;
2504 ++ int index = ARRAY_SIZE(gab_props);
2505 ++ bool any = false;
2506 +
2507 + adc_bat = devm_kzalloc(&pdev->dev, sizeof(*adc_bat), GFP_KERNEL);
2508 + if (!adc_bat) {
2509 +@@ -278,8 +278,6 @@ static int gab_probe(struct platform_device *pdev)
2510 + }
2511 +
2512 + memcpy(psy_desc->properties, gab_props, sizeof(gab_props));
2513 +- properties = (enum power_supply_property *)
2514 +- ((char *)psy_desc->properties + sizeof(gab_props));
2515 +
2516 + /*
2517 + * getting channel from iio and copying the battery properties
2518 +@@ -293,15 +291,22 @@ static int gab_probe(struct platform_device *pdev)
2519 + adc_bat->channel[chan] = NULL;
2520 + } else {
2521 + /* copying properties for supported channels only */
2522 +- memcpy(properties + sizeof(*(psy_desc->properties)) * index,
2523 +- &gab_dyn_props[chan],
2524 +- sizeof(gab_dyn_props[chan]));
2525 +- index++;
2526 ++ int index2;
2527 ++
2528 ++ for (index2 = 0; index2 < index; index2++) {
2529 ++ if (psy_desc->properties[index2] ==
2530 ++ gab_dyn_props[chan])
2531 ++ break; /* already known */
2532 ++ }
2533 ++ if (index2 == index) /* really new */
2534 ++ psy_desc->properties[index++] =
2535 ++ gab_dyn_props[chan];
2536 ++ any = true;
2537 + }
2538 + }
2539 +
2540 + /* none of the channels are supported so let's bail out */
2541 +- if (index == 0) {
2542 ++ if (!any) {
2543 + ret = -ENODEV;
2544 + goto second_mem_fail;
2545 + }
2546 +@@ -312,7 +317,7 @@ static int gab_probe(struct platform_device *pdev)
2547 + * as some channels may not be supported by the device, so
2548 + * we need to take care of that.
2549 + */
2550 +- psy_desc->num_properties = ARRAY_SIZE(gab_props) + index;
2551 ++ psy_desc->num_properties = index;
2552 +
2553 + adc_bat->psy = power_supply_register(&pdev->dev, psy_desc, &psy_cfg);
2554 + if (IS_ERR(adc_bat->psy)) {
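The rewritten probe loop appends a dynamic property only if it is not already present, so num_properties ends up counting unique entries instead of blindly concatenating. A userspace sketch of the de-duplicating append:

#include <stdio.h>

int main(void)
{
	int props[8] = { 1, 2, 3 }; /* static properties, like gab_props */
	int nprops = 3;
	int dyn[4] = { 4, 2, 5, 4 }; /* candidates, duplicates included */

	for (int i = 0; i < 4; i++) {
		int j;

		for (j = 0; j < nprops; j++)
			if (props[j] == dyn[i])
				break; /* already known */
		if (j == nprops)
			props[nprops++] = dyn[i]; /* really new */
	}
	printf("unique properties: %d\n", nprops); /* prints 5 */
	return 0;
}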
2555 +diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
2556 +index f6d6a4ad9e8a..e976d073f28d 100644
2557 +--- a/drivers/regulator/arizona-ldo1.c
2558 ++++ b/drivers/regulator/arizona-ldo1.c
2559 +@@ -36,6 +36,8 @@ struct arizona_ldo1 {
2560 +
2561 + struct regulator_consumer_supply supply;
2562 + struct regulator_init_data init_data;
2563 ++
2564 ++ struct gpio_desc *ena_gpiod;
2565 + };
2566 +
2567 + static int arizona_ldo1_hc_list_voltage(struct regulator_dev *rdev,
2568 +@@ -253,12 +255,17 @@ static int arizona_ldo1_common_init(struct platform_device *pdev,
2569 + }
2570 + }
2571 +
2572 +- /* We assume that high output = regulator off */
2573 +- config.ena_gpiod = devm_gpiod_get_optional(&pdev->dev, "wlf,ldoena",
2574 +- GPIOD_OUT_HIGH);
2575 ++ /* We assume that high output = regulator off.
2576 ++ * Don't use devm, since we need to get the GPIO against the parent
2577 ++ * device, or cleanup would happen at the wrong time.
2578 ++ */
2579 ++ config.ena_gpiod = gpiod_get_optional(parent_dev, "wlf,ldoena",
2580 ++ GPIOD_OUT_LOW);
2581 + if (IS_ERR(config.ena_gpiod))
2582 + return PTR_ERR(config.ena_gpiod);
2583 +
2584 ++ ldo1->ena_gpiod = config.ena_gpiod;
2585 ++
2586 + if (pdata->init_data)
2587 + config.init_data = pdata->init_data;
2588 + else
2589 +@@ -276,6 +283,9 @@ static int arizona_ldo1_common_init(struct platform_device *pdev,
2590 + of_node_put(config.of_node);
2591 +
2592 + if (IS_ERR(ldo1->regulator)) {
2593 ++ if (config.ena_gpiod)
2594 ++ gpiod_put(config.ena_gpiod);
2595 ++
2596 + ret = PTR_ERR(ldo1->regulator);
2597 + dev_err(&pdev->dev, "Failed to register LDO1 supply: %d\n",
2598 + ret);
2599 +@@ -334,8 +344,19 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
2600 + return ret;
2601 + }
2602 +
2603 ++static int arizona_ldo1_remove(struct platform_device *pdev)
2604 ++{
2605 ++ struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev);
2606 ++
2607 ++ if (ldo1->ena_gpiod)
2608 ++ gpiod_put(ldo1->ena_gpiod);
2609 ++
2610 ++ return 0;
2611 ++}
2612 ++
2613 + static struct platform_driver arizona_ldo1_driver = {
2614 + .probe = arizona_ldo1_probe,
2615 ++ .remove = arizona_ldo1_remove,
2616 + .driver = {
2617 + .name = "arizona-ldo1",
2618 + },
2619 +diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
2620 +index f4ca72dd862f..9c7d9da42ba0 100644
2621 +--- a/drivers/s390/cio/qdio_main.c
2622 ++++ b/drivers/s390/cio/qdio_main.c
2623 +@@ -631,21 +631,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
2624 + unsigned long phys_aob = 0;
2625 +
2626 + if (!q->use_cq)
2627 +- goto out;
2628 ++ return 0;
2629 +
2630 + if (!q->aobs[bufnr]) {
2631 + struct qaob *aob = qdio_allocate_aob();
2632 + q->aobs[bufnr] = aob;
2633 + }
2634 + if (q->aobs[bufnr]) {
2635 +- q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
2636 + q->sbal_state[bufnr].aob = q->aobs[bufnr];
2637 + q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
2638 + phys_aob = virt_to_phys(q->aobs[bufnr]);
2639 + WARN_ON_ONCE(phys_aob & 0xFF);
2640 + }
2641 +
2642 +-out:
2643 ++ q->sbal_state[bufnr].flags = 0;
2644 + return phys_aob;
2645 + }
2646 +
2647 +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
2648 +index ff1d612f6fb9..41cdda7a926b 100644
2649 +--- a/drivers/scsi/libsas/sas_ata.c
2650 ++++ b/drivers/scsi/libsas/sas_ata.c
2651 +@@ -557,34 +557,46 @@ int sas_ata_init(struct domain_device *found_dev)
2652 + {
2653 + struct sas_ha_struct *ha = found_dev->port->ha;
2654 + struct Scsi_Host *shost = ha->core.shost;
2655 ++ struct ata_host *ata_host;
2656 + struct ata_port *ap;
2657 + int rc;
2658 +
2659 +- ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops);
2660 +- ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
2661 +- &sata_port_info,
2662 +- shost);
2663 ++ ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL);
2664 ++ if (!ata_host) {
2665 ++ SAS_DPRINTK("ata host alloc failed.\n");
2666 ++ return -ENOMEM;
2667 ++ }
2668 ++
2669 ++ ata_host_init(ata_host, ha->dev, &sas_sata_ops);
2670 ++
2671 ++ ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost);
2672 + if (!ap) {
2673 + SAS_DPRINTK("ata_sas_port_alloc failed.\n");
2674 +- return -ENODEV;
2675 ++ rc = -ENODEV;
2676 ++ goto free_host;
2677 + }
2678 +
2679 + ap->private_data = found_dev;
2680 + ap->cbl = ATA_CBL_SATA;
2681 + ap->scsi_host = shost;
2682 + rc = ata_sas_port_init(ap);
2683 +- if (rc) {
2684 +- ata_sas_port_destroy(ap);
2685 +- return rc;
2686 +- }
2687 +- rc = ata_sas_tport_add(found_dev->sata_dev.ata_host.dev, ap);
2688 +- if (rc) {
2689 +- ata_sas_port_destroy(ap);
2690 +- return rc;
2691 +- }
2692 ++ if (rc)
2693 ++ goto destroy_port;
2694 ++
2695 ++ rc = ata_sas_tport_add(ata_host->dev, ap);
2696 ++ if (rc)
2697 ++ goto destroy_port;
2698 ++
2699 ++ found_dev->sata_dev.ata_host = ata_host;
2700 + found_dev->sata_dev.ap = ap;
2701 +
2702 + return 0;
2703 ++
2704 ++destroy_port:
2705 ++ ata_sas_port_destroy(ap);
2706 ++free_host:
2707 ++ ata_host_put(ata_host);
2708 ++ return rc;
2709 + }
2710 +
2711 + void sas_ata_task_abort(struct sas_task *task)
2712 +diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
2713 +index 1ffca28fe6a8..0148ae62a52a 100644
2714 +--- a/drivers/scsi/libsas/sas_discover.c
2715 ++++ b/drivers/scsi/libsas/sas_discover.c
2716 +@@ -316,6 +316,8 @@ void sas_free_device(struct kref *kref)
2717 + if (dev_is_sata(dev) && dev->sata_dev.ap) {
2718 + ata_sas_tport_delete(dev->sata_dev.ap);
2719 + ata_sas_port_destroy(dev->sata_dev.ap);
2720 ++ ata_host_put(dev->sata_dev.ata_host);
2721 ++ dev->sata_dev.ata_host = NULL;
2722 + dev->sata_dev.ap = NULL;
2723 + }
2724 +
2725 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
2726 +index e44c91edf92d..3c8c17c0b547 100644
2727 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
2728 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
2729 +@@ -3284,6 +3284,7 @@ void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
2730 + st->cb_idx = 0xFF;
2731 + st->direct_io = 0;
2732 + atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
2733 ++ st->smid = 0;
2734 + }
2735 +
2736 + /**
2737 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2738 +index b8d131a455d0..f3d727076e1f 100644
2739 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2740 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2741 +@@ -1489,7 +1489,7 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2742 + scmd = scsi_host_find_tag(ioc->shost, unique_tag);
2743 + if (scmd) {
2744 + st = scsi_cmd_priv(scmd);
2745 +- if (st->cb_idx == 0xFF)
2746 ++ if (st->cb_idx == 0xFF || st->smid == 0)
2747 + scmd = NULL;
2748 + }
2749 + }
2750 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
2751 +index 3a143bb5ca72..6c71b20af9e3 100644
2752 +--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
2753 ++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
2754 +@@ -1936,12 +1936,12 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
2755 + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
2756 + __func__, ioc->name);
2757 + rc = -EFAULT;
2758 +- goto out;
2759 ++ goto job_done;
2760 + }
2761 +
2762 + rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
2763 + if (rc)
2764 +- goto out;
2765 ++ goto job_done;
2766 +
2767 + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
2768 + pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
2769 +@@ -2066,6 +2066,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
2770 + out:
2771 + ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
2772 + mutex_unlock(&ioc->transport_cmds.mutex);
2773 ++job_done:
2774 + bsg_job_done(job, rc, reslen);
2775 + }
2776 +
2777 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
2778 +index 1b19b954bbae..ec550ee0108e 100644
2779 +--- a/drivers/scsi/qla2xxx/qla_init.c
2780 ++++ b/drivers/scsi/qla2xxx/qla_init.c
2781 +@@ -382,7 +382,7 @@ qla2x00_async_adisc_sp_done(void *ptr, int res)
2782 + "Async done-%s res %x %8phC\n",
2783 + sp->name, res, sp->fcport->port_name);
2784 +
2785 +- sp->fcport->flags &= ~FCF_ASYNC_SENT;
2786 ++ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2787 +
2788 + memset(&ea, 0, sizeof(ea));
2789 + ea.event = FCME_ADISC_DONE;
2790 +diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
2791 +index dd93a22fe843..667055cbe155 100644
2792 +--- a/drivers/scsi/qla2xxx/qla_iocb.c
2793 ++++ b/drivers/scsi/qla2xxx/qla_iocb.c
2794 +@@ -2656,6 +2656,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2795 + ql_dbg(ql_dbg_io, vha, 0x3073,
2796 + "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2797 +
2798 ++ fcport->flags |= FCF_ASYNC_SENT;
2799 + sp->type = SRB_ELS_DCMD;
2800 + sp->name = "ELS_DCMD";
2801 + sp->fcport = fcport;
2802 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
2803 +index 7943b762c12d..87ef6714845b 100644
2804 +--- a/drivers/scsi/scsi_sysfs.c
2805 ++++ b/drivers/scsi/scsi_sysfs.c
2806 +@@ -722,8 +722,24 @@ static ssize_t
2807 + sdev_store_delete(struct device *dev, struct device_attribute *attr,
2808 + const char *buf, size_t count)
2809 + {
2810 +- if (device_remove_file_self(dev, attr))
2811 +- scsi_remove_device(to_scsi_device(dev));
2812 ++ struct kernfs_node *kn;
2813 ++
2814 ++ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
2815 ++ WARN_ON_ONCE(!kn);
2816 ++ /*
2817 ++ * Concurrent writes into the "delete" sysfs attribute may trigger
2818 ++ * concurrent calls to device_remove_file() and scsi_remove_device().
2819 ++ * device_remove_file() handles concurrent removal calls by
2820 ++ * serializing these and by ignoring the second and later removal
2821 ++ * attempts. Concurrent calls of scsi_remove_device() are
2822 ++ * serialized. The second and later calls of scsi_remove_device() are
2823 ++ * ignored because the first call of that function changes the device
2824 ++ * state into SDEV_DEL.
2825 ++ */
2826 ++ device_remove_file(dev, attr);
2827 ++ scsi_remove_device(to_scsi_device(dev));
2828 ++ if (kn)
2829 ++ sysfs_unbreak_active_protection(kn);
2830 + return count;
2831 + };
2832 + static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
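sdev_store_delete() runs from a sysfs ->store() callback, i.e. with an active reference held on the very attribute it is about to remove; sysfs_break_active_protection() drops that self-reference so scsi_remove_device() cannot deadlock waiting for it. A loose userspace analogy using a rwlock; this models the idea only, not the sysfs API:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t attr_lock = PTHREAD_RWLOCK_INITIALIZER;

static void store_delete(void)
{
	/* "break active protection": give up our own reader slot ... */
	pthread_rwlock_unlock(&attr_lock);

	/* ... so removal can take the writer side without self-deadlock */
	pthread_rwlock_wrlock(&attr_lock);
	printf("attribute removed\n");
	pthread_rwlock_unlock(&attr_lock);

	/* re-acquire, mirroring sysfs_unbreak_active_protection() */
	pthread_rwlock_rdlock(&attr_lock);
}

int main(void)
{
	pthread_rwlock_rdlock(&attr_lock); /* entering the ->store() callback */
	store_delete();
	pthread_rwlock_unlock(&attr_lock);
	return 0;
}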
2833 +diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
2834 +index c8999e38b005..8a3678c2e83c 100644
2835 +--- a/drivers/soc/qcom/rmtfs_mem.c
2836 ++++ b/drivers/soc/qcom/rmtfs_mem.c
2837 +@@ -184,6 +184,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
2838 + device_initialize(&rmtfs_mem->dev);
2839 + rmtfs_mem->dev.parent = &pdev->dev;
2840 + rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
2841 ++ rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
2842 +
2843 + rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr,
2844 + rmtfs_mem->size, MEMREMAP_WC);
2845 +@@ -206,8 +207,6 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
2846 + goto put_device;
2847 + }
2848 +
2849 +- rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
2850 +-
2851 + ret = of_property_read_u32(node, "qcom,vmid", &vmid);
2852 + if (ret < 0 && ret != -EINVAL) {
2853 + dev_err(&pdev->dev, "failed to parse qcom,vmid\n");
2854 +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
2855 +index 99501785cdc1..68b3eb00a9d0 100644
2856 +--- a/drivers/target/iscsi/iscsi_target_login.c
2857 ++++ b/drivers/target/iscsi/iscsi_target_login.c
2858 +@@ -348,8 +348,7 @@ static int iscsi_login_zero_tsih_s1(
2859 + pr_err("idr_alloc() for sess_idr failed\n");
2860 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
2861 + ISCSI_LOGIN_STATUS_NO_RESOURCES);
2862 +- kfree(sess);
2863 +- return -ENOMEM;
2864 ++ goto free_sess;
2865 + }
2866 +
2867 + sess->creation_time = get_jiffies_64();
2868 +@@ -365,20 +364,28 @@ static int iscsi_login_zero_tsih_s1(
2869 + ISCSI_LOGIN_STATUS_NO_RESOURCES);
2870 + pr_err("Unable to allocate memory for"
2871 + " struct iscsi_sess_ops.\n");
2872 +- kfree(sess);
2873 +- return -ENOMEM;
2874 ++ goto remove_idr;
2875 + }
2876 +
2877 + sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
2878 + if (IS_ERR(sess->se_sess)) {
2879 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
2880 + ISCSI_LOGIN_STATUS_NO_RESOURCES);
2881 +- kfree(sess->sess_ops);
2882 +- kfree(sess);
2883 +- return -ENOMEM;
2884 ++ goto free_ops;
2885 + }
2886 +
2887 + return 0;
2888 ++
2889 ++free_ops:
2890 ++ kfree(sess->sess_ops);
2891 ++remove_idr:
2892 ++ spin_lock_bh(&sess_idr_lock);
2893 ++ idr_remove(&sess_idr, sess->session_index);
2894 ++ spin_unlock_bh(&sess_idr_lock);
2895 ++free_sess:
2896 ++ kfree(sess);
2897 ++ conn->sess = NULL;
2898 ++ return -ENOMEM;
2899 + }
2900 +
2901 + static int iscsi_login_zero_tsih_s2(
2902 +@@ -1161,13 +1168,13 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
2903 + ISCSI_LOGIN_STATUS_INIT_ERR);
2904 + if (!zero_tsih || !conn->sess)
2905 + goto old_sess_out;
2906 +- if (conn->sess->se_sess)
2907 +- transport_free_session(conn->sess->se_sess);
2908 +- if (conn->sess->session_index != 0) {
2909 +- spin_lock_bh(&sess_idr_lock);
2910 +- idr_remove(&sess_idr, conn->sess->session_index);
2911 +- spin_unlock_bh(&sess_idr_lock);
2912 +- }
2913 ++
2914 ++ transport_free_session(conn->sess->se_sess);
2915 ++
2916 ++ spin_lock_bh(&sess_idr_lock);
2917 ++ idr_remove(&sess_idr, conn->sess->session_index);
2918 ++ spin_unlock_bh(&sess_idr_lock);
2919 ++
2920 + kfree(conn->sess->sess_ops);
2921 + kfree(conn->sess);
2922 + conn->sess = NULL;
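The login error paths above are reworked into a single unwind ladder: each failure jumps to a label that releases everything acquired so far, in reverse order, so no path leaks the idr slot or a half-built session. A minimal userspace sketch of the pattern:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *sess = NULL, *ops = NULL;
	int rc = -1;

	sess = malloc(64);
	if (!sess)
		goto out;

	ops = malloc(32);
	if (!ops)
		goto free_sess;

	if (1) /* pretend transport_init_session() failed */
		goto free_ops;

	rc = 0;
	goto out;

free_ops:           /* labels release resources in reverse order */
	free(ops);
free_sess:
	free(sess);
out:
	printf("rc = %d\n", rc);
	return rc ? EXIT_FAILURE : EXIT_SUCCESS;
}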
2923 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2924 +index 205092dc9390..dfed08e70ec1 100644
2925 +--- a/fs/btrfs/disk-io.c
2926 ++++ b/fs/btrfs/disk-io.c
2927 +@@ -961,8 +961,9 @@ static int btree_writepages(struct address_space *mapping,
2928 +
2929 + fs_info = BTRFS_I(mapping->host)->root->fs_info;
2930 + /* this is a bit racy, but that's ok */
2931 +- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
2932 +- BTRFS_DIRTY_METADATA_THRESH);
2933 ++ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
2934 ++ BTRFS_DIRTY_METADATA_THRESH,
2935 ++ fs_info->dirty_metadata_batch);
2936 + if (ret < 0)
2937 + return 0;
2938 + }
2939 +@@ -4150,8 +4151,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
2940 + if (flush_delayed)
2941 + btrfs_balance_delayed_items(fs_info);
2942 +
2943 +- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
2944 +- BTRFS_DIRTY_METADATA_THRESH);
2945 ++ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
2946 ++ BTRFS_DIRTY_METADATA_THRESH,
2947 ++ fs_info->dirty_metadata_batch);
2948 + if (ret > 0) {
2949 + balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
2950 + }
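
__percpu_counter_compare() can trust the cheap global approximation only when it differs from the comparison value by more than (number of CPUs x batch); otherwise it must fold in every per-CPU delta. The fix passes fs_info->dirty_metadata_batch explicitly because btrfs flushes dirty-metadata updates in much larger batches than the default, so a default-batch comparison underestimates the slack and can give a wrong answer. A rough single-file sketch of that error bound, assuming C11 atomics and ignoring the cross-thread races a real per-CPU implementation avoids:

  #include <stdatomic.h>

  #define NCPUS 4

  static _Atomic long global_count;     /* flushed total */
  static long local_delta[NCPUS];       /* each bounded by +/- batch */

  static void counter_add(int cpu, long n, long batch)
  {
          local_delta[cpu] += n;
          if (local_delta[cpu] >= batch || local_delta[cpu] <= -batch) {
                  atomic_fetch_add(&global_count, local_delta[cpu]);
                  local_delta[cpu] = 0;
          }
  }

  /* Returns <0, 0, >0 like percpu_counter_compare(). The approximation
   * is conclusive only outside the worst-case slack of NCPUS * batch;
   * pass a batch smaller than counter_add() uses and the fast path lies. */
  static int counter_compare(long rhs, long batch)
  {
          long approx = atomic_load(&global_count);

          if (approx - NCPUS * batch > rhs)
                  return 1;
          if (approx + NCPUS * batch < rhs)
                  return -1;

          for (int i = 0; i < NCPUS; i++) /* slow, exact path */
                  approx += local_delta[i];
          return (approx > rhs) - (approx < rhs);
  }
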
2951 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2952 +index 3d9fe58c0080..8aab7a6c1e58 100644
2953 +--- a/fs/btrfs/extent-tree.c
2954 ++++ b/fs/btrfs/extent-tree.c
2955 +@@ -4358,7 +4358,7 @@ commit_trans:
2956 + data_sinfo->flags, bytes, 1);
2957 + spin_unlock(&data_sinfo->lock);
2958 +
2959 +- return ret;
2960 ++ return 0;
2961 + }
2962 +
2963 + int btrfs_check_data_free_space(struct inode *inode,
2964 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2965 +index eba61bcb9bb3..071d949f69ec 100644
2966 +--- a/fs/btrfs/inode.c
2967 ++++ b/fs/btrfs/inode.c
2968 +@@ -6027,32 +6027,6 @@ err:
2969 + return ret;
2970 + }
2971 +
2972 +-int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
2973 +-{
2974 +- struct btrfs_root *root = BTRFS_I(inode)->root;
2975 +- struct btrfs_trans_handle *trans;
2976 +- int ret = 0;
2977 +- bool nolock = false;
2978 +-
2979 +- if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
2980 +- return 0;
2981 +-
2982 +- if (btrfs_fs_closing(root->fs_info) &&
2983 +- btrfs_is_free_space_inode(BTRFS_I(inode)))
2984 +- nolock = true;
2985 +-
2986 +- if (wbc->sync_mode == WB_SYNC_ALL) {
2987 +- if (nolock)
2988 +- trans = btrfs_join_transaction_nolock(root);
2989 +- else
2990 +- trans = btrfs_join_transaction(root);
2991 +- if (IS_ERR(trans))
2992 +- return PTR_ERR(trans);
2993 +- ret = btrfs_commit_transaction(trans);
2994 +- }
2995 +- return ret;
2996 +-}
2997 +-
2998 + /*
2999 + * This is somewhat expensive, updating the tree every time the
3000 + * inode changes. But, it is most likely to find the inode in cache.
3001 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
3002 +index c47f62b19226..b75b4abaa4a5 100644
3003 +--- a/fs/btrfs/send.c
3004 ++++ b/fs/btrfs/send.c
3005 +@@ -100,6 +100,7 @@ struct send_ctx {
3006 + u64 cur_inode_rdev;
3007 + u64 cur_inode_last_extent;
3008 + u64 cur_inode_next_write_offset;
3009 ++ bool ignore_cur_inode;
3010 +
3011 + u64 send_progress;
3012 +
3013 +@@ -5006,6 +5007,15 @@ static int send_hole(struct send_ctx *sctx, u64 end)
3014 + u64 len;
3015 + int ret = 0;
3016 +
3017 ++ /*
3018 ++ * A hole that starts at EOF or beyond it. Since we do not yet support
3019 ++ * fallocate (for extent preallocation and hole punching), sending a
3020 ++ * write of zeroes starting at EOF or beyond would later require issuing
3021 ++ * a truncate operation which would undo the write and achieve nothing.
3022 ++ */
3023 ++ if (offset >= sctx->cur_inode_size)
3024 ++ return 0;
3025 ++
3026 + if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
3027 + return send_update_extent(sctx, offset, end - offset);
3028 +
3029 +@@ -5799,6 +5809,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
3030 + int pending_move = 0;
3031 + int refs_processed = 0;
3032 +
3033 ++ if (sctx->ignore_cur_inode)
3034 ++ return 0;
3035 ++
3036 + ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
3037 + &refs_processed);
3038 + if (ret < 0)
3039 +@@ -5917,6 +5930,93 @@ out:
3040 + return ret;
3041 + }
3042 +
3043 ++struct parent_paths_ctx {
3044 ++ struct list_head *refs;
3045 ++ struct send_ctx *sctx;
3046 ++};
3047 ++
3048 ++static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
3049 ++ void *ctx)
3050 ++{
3051 ++ struct parent_paths_ctx *ppctx = ctx;
3052 ++
3053 ++ return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
3054 ++ ppctx->refs);
3055 ++}
3056 ++
3057 ++/*
3058 ++ * Issue unlink operations for all paths of the current inode found in the
3059 ++ * parent snapshot.
3060 ++ */
3061 ++static int btrfs_unlink_all_paths(struct send_ctx *sctx)
3062 ++{
3063 ++ LIST_HEAD(deleted_refs);
3064 ++ struct btrfs_path *path;
3065 ++ struct btrfs_key key;
3066 ++ struct parent_paths_ctx ctx;
3067 ++ int ret;
3068 ++
3069 ++ path = alloc_path_for_send();
3070 ++ if (!path)
3071 ++ return -ENOMEM;
3072 ++
3073 ++ key.objectid = sctx->cur_ino;
3074 ++ key.type = BTRFS_INODE_REF_KEY;
3075 ++ key.offset = 0;
3076 ++ ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3077 ++ if (ret < 0)
3078 ++ goto out;
3079 ++
3080 ++ ctx.refs = &deleted_refs;
3081 ++ ctx.sctx = sctx;
3082 ++
3083 ++ while (true) {
3084 ++ struct extent_buffer *eb = path->nodes[0];
3085 ++ int slot = path->slots[0];
3086 ++
3087 ++ if (slot >= btrfs_header_nritems(eb)) {
3088 ++ ret = btrfs_next_leaf(sctx->parent_root, path);
3089 ++ if (ret < 0)
3090 ++ goto out;
3091 ++ else if (ret > 0)
3092 ++ break;
3093 ++ continue;
3094 ++ }
3095 ++
3096 ++ btrfs_item_key_to_cpu(eb, &key, slot);
3097 ++ if (key.objectid != sctx->cur_ino)
3098 ++ break;
3099 ++ if (key.type != BTRFS_INODE_REF_KEY &&
3100 ++ key.type != BTRFS_INODE_EXTREF_KEY)
3101 ++ break;
3102 ++
3103 ++ ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
3104 ++ record_parent_ref, &ctx);
3105 ++ if (ret < 0)
3106 ++ goto out;
3107 ++
3108 ++ path->slots[0]++;
3109 ++ }
3110 ++
3111 ++ while (!list_empty(&deleted_refs)) {
3112 ++ struct recorded_ref *ref;
3113 ++
3114 ++ ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
3115 ++ ret = send_unlink(sctx, ref->full_path);
3116 ++ if (ret < 0)
3117 ++ goto out;
3118 ++ fs_path_free(ref->full_path);
3119 ++ list_del(&ref->list);
3120 ++ kfree(ref);
3121 ++ }
3122 ++ ret = 0;
3123 ++out:
3124 ++ btrfs_free_path(path);
3125 ++ if (ret)
3126 ++ __free_recorded_refs(&deleted_refs);
3127 ++ return ret;
3128 ++}
3129 ++
3130 + static int changed_inode(struct send_ctx *sctx,
3131 + enum btrfs_compare_tree_result result)
3132 + {
3133 +@@ -5931,6 +6031,7 @@ static int changed_inode(struct send_ctx *sctx,
3134 + sctx->cur_inode_new_gen = 0;
3135 + sctx->cur_inode_last_extent = (u64)-1;
3136 + sctx->cur_inode_next_write_offset = 0;
3137 ++ sctx->ignore_cur_inode = false;
3138 +
3139 + /*
3140 + * Set send_progress to current inode. This will tell all get_cur_xxx
3141 +@@ -5971,6 +6072,33 @@ static int changed_inode(struct send_ctx *sctx,
3142 + sctx->cur_inode_new_gen = 1;
3143 + }
3144 +
3145 ++ /*
3146 ++ * Normally we do not find inodes with a link count of zero (orphans)
3147 ++ * because the most common case is to create a snapshot and use it
3148 ++ * for a send operation. However, other less common use cases involve
3149 ++ * using a subvolume and sending it after turning it to RO mode just
3150 ++ * after deleting all hard links of a file while holding an open
3151 ++ * file descriptor against it, or turning a RO snapshot into RW mode,
3152 ++ * keeping an open file descriptor against a file, deleting it and
3153 ++ * then turning the snapshot back to RO mode before using it for a
3154 ++ * send operation. So if we find such cases, ignore the inode and all
3155 ++ * its items completely if it's a new inode, or, if it's a changed
3156 ++ * inode, make sure all its previous paths (from the parent snapshot)
3157 ++ * are unlinked and all the other inode items are ignored.
3158 ++ */
3159 ++ if (result == BTRFS_COMPARE_TREE_NEW ||
3160 ++ result == BTRFS_COMPARE_TREE_CHANGED) {
3161 ++ u32 nlinks;
3162 ++
3163 ++ nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
3164 ++ if (nlinks == 0) {
3165 ++ sctx->ignore_cur_inode = true;
3166 ++ if (result == BTRFS_COMPARE_TREE_CHANGED)
3167 ++ ret = btrfs_unlink_all_paths(sctx);
3168 ++ goto out;
3169 ++ }
3170 ++ }
3171 ++
3172 + if (result == BTRFS_COMPARE_TREE_NEW) {
3173 + sctx->cur_inode_gen = left_gen;
3174 + sctx->cur_inode_new = 1;
3175 +@@ -6309,15 +6437,17 @@ static int changed_cb(struct btrfs_path *left_path,
3176 + key->objectid == BTRFS_FREE_SPACE_OBJECTID)
3177 + goto out;
3178 +
3179 +- if (key->type == BTRFS_INODE_ITEM_KEY)
3180 ++ if (key->type == BTRFS_INODE_ITEM_KEY) {
3181 + ret = changed_inode(sctx, result);
3182 +- else if (key->type == BTRFS_INODE_REF_KEY ||
3183 +- key->type == BTRFS_INODE_EXTREF_KEY)
3184 +- ret = changed_ref(sctx, result);
3185 +- else if (key->type == BTRFS_XATTR_ITEM_KEY)
3186 +- ret = changed_xattr(sctx, result);
3187 +- else if (key->type == BTRFS_EXTENT_DATA_KEY)
3188 +- ret = changed_extent(sctx, result);
3189 ++ } else if (!sctx->ignore_cur_inode) {
3190 ++ if (key->type == BTRFS_INODE_REF_KEY ||
3191 ++ key->type == BTRFS_INODE_EXTREF_KEY)
3192 ++ ret = changed_ref(sctx, result);
3193 ++ else if (key->type == BTRFS_XATTR_ITEM_KEY)
3194 ++ ret = changed_xattr(sctx, result);
3195 ++ else if (key->type == BTRFS_EXTENT_DATA_KEY)
3196 ++ ret = changed_extent(sctx, result);
3197 ++ }
3198 +
3199 + out:
3200 + return ret;
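
The net effect of the send.c changes: an inode whose link count dropped to zero (an orphan kept alive only by an open file descriptor) contributes nothing to the send stream, except that a changed orphan must still emit unlink commands for every path it had in the parent snapshot. A compact sketch of that decision, with illustrative types standing in for the send context:

  enum change_result { CH_NEW, CH_CHANGED, CH_DELETED };

  struct send_state {
          int ignore_cur_inode;
  };

  /* Returns 0 on success or a negative error from the unlink callback. */
  static int on_changed_inode(struct send_state *s, enum change_result res,
                              unsigned int nlink,
                              int (*unlink_parent_paths)(struct send_state *))
  {
          s->ignore_cur_inode = 0;

          if ((res == CH_NEW || res == CH_CHANGED) && nlink == 0) {
                  /* emit none of the orphan's items... */
                  s->ignore_cur_inode = 1;
                  /* ...but drop the paths the parent snapshot still has */
                  if (res == CH_CHANGED)
                          return unlink_parent_paths(s);
          }
          return 0;
  }
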
3201 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
3202 +index 81107ad49f3a..bddfc28b27c0 100644
3203 +--- a/fs/btrfs/super.c
3204 ++++ b/fs/btrfs/super.c
3205 +@@ -2331,7 +2331,6 @@ static const struct super_operations btrfs_super_ops = {
3206 + .sync_fs = btrfs_sync_fs,
3207 + .show_options = btrfs_show_options,
3208 + .show_devname = btrfs_show_devname,
3209 +- .write_inode = btrfs_write_inode,
3210 + .alloc_inode = btrfs_alloc_inode,
3211 + .destroy_inode = btrfs_destroy_inode,
3212 + .statfs = btrfs_statfs,
3213 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3214 +index f8220ec02036..84b00a29d531 100644
3215 +--- a/fs/btrfs/tree-log.c
3216 ++++ b/fs/btrfs/tree-log.c
3217 +@@ -1291,6 +1291,46 @@ again:
3218 + return ret;
3219 + }
3220 +
3221 ++static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
3222 ++ const u8 ref_type, const char *name,
3223 ++ const int namelen)
3224 ++{
3225 ++ struct btrfs_key key;
3226 ++ struct btrfs_path *path;
3227 ++ const u64 parent_id = btrfs_ino(BTRFS_I(dir));
3228 ++ int ret;
3229 ++
3230 ++ path = btrfs_alloc_path();
3231 ++ if (!path)
3232 ++ return -ENOMEM;
3233 ++
3234 ++ key.objectid = btrfs_ino(BTRFS_I(inode));
3235 ++ key.type = ref_type;
3236 ++ if (key.type == BTRFS_INODE_REF_KEY)
3237 ++ key.offset = parent_id;
3238 ++ else
3239 ++ key.offset = btrfs_extref_hash(parent_id, name, namelen);
3240 ++
3241 ++ ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
3242 ++ if (ret < 0)
3243 ++ goto out;
3244 ++ if (ret > 0) {
3245 ++ ret = 0;
3246 ++ goto out;
3247 ++ }
3248 ++ if (key.type == BTRFS_INODE_EXTREF_KEY)
3249 ++ ret = btrfs_find_name_in_ext_backref(path->nodes[0],
3250 ++ path->slots[0], parent_id,
3251 ++ name, namelen, NULL);
3252 ++ else
3253 ++ ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
3254 ++ name, namelen, NULL);
3255 ++
3256 ++out:
3257 ++ btrfs_free_path(path);
3258 ++ return ret;
3259 ++}
3260 ++
3261 + /*
3262 + * replay one inode back reference item found in the log tree.
3263 + * eb, slot and key refer to the buffer and key found in the log tree.
3264 +@@ -1400,6 +1440,32 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
3265 + }
3266 + }
3267 +
3268 ++ /*
3269 ++ * If a reference item already exists for this inode
3270 ++ * with the same parent and name, but different index,
3271 ++ * drop it and the corresponding directory index entries
3272 ++ * from the parent before adding the new reference item
3273 ++ * and dir index entries, otherwise we would fail with
3274 ++ * -EEXIST returned from btrfs_add_link() below.
3275 ++ */
3276 ++ ret = btrfs_inode_ref_exists(inode, dir, key->type,
3277 ++ name, namelen);
3278 ++ if (ret > 0) {
3279 ++ ret = btrfs_unlink_inode(trans, root,
3280 ++ BTRFS_I(dir),
3281 ++ BTRFS_I(inode),
3282 ++ name, namelen);
3283 ++ /*
3284 ++ * If we dropped the link count to 0, bump it so
3285 ++ * that later the iput() on the inode will not
3286 ++ * free it. We will fixup the link count later.
3287 ++ */
3288 ++ if (!ret && inode->i_nlink == 0)
3289 ++ inc_nlink(inode);
3290 ++ }
3291 ++ if (ret < 0)
3292 ++ goto out;
3293 ++
3294 + /* insert our name */
3295 + ret = btrfs_add_link(trans, BTRFS_I(dir),
3296 + BTRFS_I(inode),
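
The add_inode_ref() replay now tolerates a pre-existing (parent, name) reference with a different index: it unlinks the stale link first (bumping nlink if it would drop to zero, so the inode survives until the later fixup pass) and then re-adds the logged one, instead of failing with -EEXIST. A small userspace sketch of the underlying "remove the conflicting entry before insert" idempotent-replay pattern, with a flat table standing in for the directory items:

  #include <string.h>

  struct link_entry { char name[32]; unsigned long index; int used; };

  /* Replay "name -> index": if the name exists under a different index,
   * drop it first so the insert below cannot collide. */
  static int replay_link(struct link_entry *tbl, int n, const char *name,
                         unsigned long index)
  {
          int free_slot = -1;

          for (int i = 0; i < n; i++) {
                  if (!tbl[i].used) { free_slot = i; continue; }
                  if (strcmp(tbl[i].name, name) == 0) {
                          if (tbl[i].index == index)
                                  return 0;        /* already replayed */
                          tbl[i].used = 0;         /* stale ref: unlink */
                          free_slot = i;
                  }
          }
          if (free_slot < 0)
                  return -1;
          strncpy(tbl[free_slot].name, name, sizeof(tbl[free_slot].name) - 1);
          tbl[free_slot].name[sizeof(tbl[free_slot].name) - 1] = '\0';
          tbl[free_slot].index = index;
          tbl[free_slot].used = 1;
          return 0;
  }
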
3297 +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
3298 +index bfe999505815..991bfb271908 100644
3299 +--- a/fs/cifs/cifs_debug.c
3300 ++++ b/fs/cifs/cifs_debug.c
3301 +@@ -160,25 +160,41 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
3302 + seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
3303 + seq_printf(m, "Features:");
3304 + #ifdef CONFIG_CIFS_DFS_UPCALL
3305 +- seq_printf(m, " dfs");
3306 ++ seq_printf(m, " DFS");
3307 + #endif
3308 + #ifdef CONFIG_CIFS_FSCACHE
3309 +- seq_printf(m, " fscache");
3310 ++ seq_printf(m, ",FSCACHE");
3311 ++#endif
3312 ++#ifdef CONFIG_CIFS_SMB_DIRECT
3313 ++ seq_printf(m, ",SMB_DIRECT");
3314 ++#endif
3315 ++#ifdef CONFIG_CIFS_STATS2
3316 ++ seq_printf(m, ",STATS2");
3317 ++#elif defined(CONFIG_CIFS_STATS)
3318 ++ seq_printf(m, ",STATS");
3319 ++#endif
3320 ++#ifdef CONFIG_CIFS_DEBUG2
3321 ++ seq_printf(m, ",DEBUG2");
3322 ++#elif defined(CONFIG_CIFS_DEBUG)
3323 ++ seq_printf(m, ",DEBUG");
3324 ++#endif
3325 ++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
3326 ++ seq_printf(m, ",ALLOW_INSECURE_LEGACY");
3327 + #endif
3328 + #ifdef CONFIG_CIFS_WEAK_PW_HASH
3329 +- seq_printf(m, " lanman");
3330 ++ seq_printf(m, ",WEAK_PW_HASH");
3331 + #endif
3332 + #ifdef CONFIG_CIFS_POSIX
3333 +- seq_printf(m, " posix");
3334 ++ seq_printf(m, ",CIFS_POSIX");
3335 + #endif
3336 + #ifdef CONFIG_CIFS_UPCALL
3337 +- seq_printf(m, " spnego");
3338 ++ seq_printf(m, ",UPCALL(SPNEGO)");
3339 + #endif
3340 + #ifdef CONFIG_CIFS_XATTR
3341 +- seq_printf(m, " xattr");
3342 ++ seq_printf(m, ",XATTR");
3343 + #endif
3344 + #ifdef CONFIG_CIFS_ACL
3345 +- seq_printf(m, " acl");
3346 ++ seq_printf(m, ",ACL");
3347 + #endif
3348 + seq_putc(m, '\n');
3349 + seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
3350 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
3351 +index d5aa7ae917bf..69ec5427769c 100644
3352 +--- a/fs/cifs/cifsfs.c
3353 ++++ b/fs/cifs/cifsfs.c
3354 +@@ -209,14 +209,16 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
3355 +
3356 + xid = get_xid();
3357 +
3358 +- /*
3359 +- * PATH_MAX may be too long - it would presumably be total path,
3360 +- * but note that some servers (including Samba 3) have a shorter
3361 +- * maximum path.
3362 +- *
3363 +- * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
3364 +- */
3365 +- buf->f_namelen = PATH_MAX;
3366 ++ if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
3367 ++ buf->f_namelen =
3368 ++ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
3369 ++ else
3370 ++ buf->f_namelen = PATH_MAX;
3371 ++
3372 ++ buf->f_fsid.val[0] = tcon->vol_serial_number;
3373 ++ /* use part of the create time for more randomness, see man statfs */
3374 ++ buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
3375 ++
3376 + buf->f_files = 0; /* undefined */
3377 + buf->f_ffree = 0; /* unlimited */
3378 +
3379 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
3380 +index c923c7854027..4b45d3ef3f9d 100644
3381 +--- a/fs/cifs/cifsglob.h
3382 ++++ b/fs/cifs/cifsglob.h
3383 +@@ -913,6 +913,7 @@ cap_unix(struct cifs_ses *ses)
3384 +
3385 + struct cached_fid {
3386 + bool is_valid:1; /* Do we have a useable root fid */
3387 ++ struct kref refcount;
3388 + struct cifs_fid *fid;
3389 + struct mutex fid_mutex;
3390 + struct cifs_tcon *tcon;
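
Adding a struct kref to cached_fid (used by the smb2ops.c changes further down) turns the cached root handle into a reference-counted object: open_shroot() takes a reference per user plus one for the cache itself, and the SMB2_close is issued only when the last reference is dropped. A userspace analogue with C11 atomics (illustrative; the kernel uses kref_get()/kref_put()):

  #include <stdatomic.h>
  #include <stdio.h>

  struct cached_handle {
          atomic_int refcount;
          int is_valid;
  };

  static void handle_release(struct cached_handle *h)
  {
          if (h->is_valid) {
                  h->is_valid = 0;
                  printf("closing cached handle\n"); /* SMB2_close() here */
          }
  }

  static void handle_get(struct cached_handle *h)
  {
          atomic_fetch_add(&h->refcount, 1);
  }

  static void handle_put(struct cached_handle *h)
  {
          /* release runs exactly once, on the last put */
          if (atomic_fetch_sub(&h->refcount, 1) == 1)
                  handle_release(h);
  }
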
3391 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
3392 +index a2cfb33e85c1..9051b9dfd590 100644
3393 +--- a/fs/cifs/inode.c
3394 ++++ b/fs/cifs/inode.c
3395 +@@ -1122,6 +1122,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
3396 + if (!server->ops->set_file_info)
3397 + return -ENOSYS;
3398 +
3399 ++ info_buf.Pad = 0;
3400 ++
3401 + if (attrs->ia_valid & ATTR_ATIME) {
3402 + set_time = true;
3403 + info_buf.LastAccessTime =
3404 +diff --git a/fs/cifs/link.c b/fs/cifs/link.c
3405 +index de41f96aba49..2148b0f60e5e 100644
3406 +--- a/fs/cifs/link.c
3407 ++++ b/fs/cifs/link.c
3408 +@@ -396,7 +396,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
3409 + struct cifs_io_parms io_parms;
3410 + int buf_type = CIFS_NO_BUFFER;
3411 + __le16 *utf16_path;
3412 +- __u8 oplock = SMB2_OPLOCK_LEVEL_II;
3413 ++ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3414 + struct smb2_file_all_info *pfile_info = NULL;
3415 +
3416 + oparms.tcon = tcon;
3417 +@@ -459,7 +459,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
3418 + struct cifs_io_parms io_parms;
3419 + int create_options = CREATE_NOT_DIR;
3420 + __le16 *utf16_path;
3421 +- __u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
3422 ++ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3423 + struct kvec iov[2];
3424 +
3425 + if (backup_cred(cifs_sb))
3426 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
3427 +index 8b0502cd39af..aa23c00367ec 100644
3428 +--- a/fs/cifs/sess.c
3429 ++++ b/fs/cifs/sess.c
3430 +@@ -398,6 +398,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
3431 + goto setup_ntlmv2_ret;
3432 + }
3433 + *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
3434 ++ if (!*pbuffer) {
3435 ++ rc = -ENOMEM;
3436 ++ cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
3437 ++ *buflen = 0;
3438 ++ goto setup_ntlmv2_ret;
3439 ++ }
3440 + sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
3441 +
3442 + memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
3443 +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
3444 +index d01ad706d7fc..1eef1791d0c4 100644
3445 +--- a/fs/cifs/smb2inode.c
3446 ++++ b/fs/cifs/smb2inode.c
3447 +@@ -120,7 +120,9 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
3448 + break;
3449 + }
3450 +
3451 +- if (use_cached_root_handle == false)
3452 ++ if (use_cached_root_handle)
3453 ++ close_shroot(&tcon->crfid);
3454 ++ else
3455 + rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3456 + if (tmprc)
3457 + rc = tmprc;
3458 +@@ -281,7 +283,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
3459 + int rc;
3460 +
3461 + if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
3462 +- (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
3463 ++ (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
3464 + (buf->Attributes == 0))
3465 + return 0; /* would be a no op, no sense sending this */
3466 +
3467 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3468 +index ea92a38b2f08..ee6c4a952ce9 100644
3469 +--- a/fs/cifs/smb2ops.c
3470 ++++ b/fs/cifs/smb2ops.c
3471 +@@ -466,21 +466,36 @@ out:
3472 + return rc;
3473 + }
3474 +
3475 +-void
3476 +-smb2_cached_lease_break(struct work_struct *work)
3477 ++static void
3478 ++smb2_close_cached_fid(struct kref *ref)
3479 + {
3480 +- struct cached_fid *cfid = container_of(work,
3481 +- struct cached_fid, lease_break);
3482 +- mutex_lock(&cfid->fid_mutex);
3483 ++ struct cached_fid *cfid = container_of(ref, struct cached_fid,
3484 ++ refcount);
3485 ++
3486 + if (cfid->is_valid) {
3487 + cifs_dbg(FYI, "clear cached root file handle\n");
3488 + SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
3489 + cfid->fid->volatile_fid);
3490 + cfid->is_valid = false;
3491 + }
3492 ++}
3493 ++
3494 ++void close_shroot(struct cached_fid *cfid)
3495 ++{
3496 ++ mutex_lock(&cfid->fid_mutex);
3497 ++ kref_put(&cfid->refcount, smb2_close_cached_fid);
3498 + mutex_unlock(&cfid->fid_mutex);
3499 + }
3500 +
3501 ++void
3502 ++smb2_cached_lease_break(struct work_struct *work)
3503 ++{
3504 ++ struct cached_fid *cfid = container_of(work,
3505 ++ struct cached_fid, lease_break);
3506 ++
3507 ++ close_shroot(cfid);
3508 ++}
3509 ++
3510 + /*
3511 + * Open the directory at the root of a share
3512 + */
3513 +@@ -495,6 +510,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
3514 + if (tcon->crfid.is_valid) {
3515 + cifs_dbg(FYI, "found a cached root file handle\n");
3516 + memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
3517 ++ kref_get(&tcon->crfid.refcount);
3518 + mutex_unlock(&tcon->crfid.fid_mutex);
3519 + return 0;
3520 + }
3521 +@@ -511,6 +527,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
3522 + memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
3523 + tcon->crfid.tcon = tcon;
3524 + tcon->crfid.is_valid = true;
3525 ++ kref_init(&tcon->crfid.refcount);
3526 ++ kref_get(&tcon->crfid.refcount);
3527 + }
3528 + mutex_unlock(&tcon->crfid.fid_mutex);
3529 + return rc;
3530 +@@ -548,10 +566,15 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
3531 + FS_ATTRIBUTE_INFORMATION);
3532 + SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
3533 + FS_DEVICE_INFORMATION);
3534 ++ SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
3535 ++ FS_VOLUME_INFORMATION);
3536 + SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
3537 + FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
3538 + if (no_cached_open)
3539 + SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3540 ++ else
3541 ++ close_shroot(&tcon->crfid);
3542 ++
3543 + return;
3544 + }
3545 +
3546 +@@ -1353,6 +1376,13 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
3547 +
3548 + }
3549 +
3550 ++/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
3551 ++#define GMT_TOKEN_SIZE 50
3552 ++
3553 ++/*
3554 ++ * Input buffer contains (empty) struct smb_snapshot array with size filled in
3555 ++ * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
3556 ++ */
3557 + static int
3558 + smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
3559 + struct cifsFileInfo *cfile, void __user *ioc_buf)
3560 +@@ -1382,14 +1412,27 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
3561 + kfree(retbuf);
3562 + return rc;
3563 + }
3564 +- if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
3565 +- rc = -ERANGE;
3566 +- kfree(retbuf);
3567 +- return rc;
3568 +- }
3569 +
3570 +- if (ret_data_len > snapshot_in.snapshot_array_size)
3571 +- ret_data_len = snapshot_in.snapshot_array_size;
3572 ++ /*
3573 ++ * Check for min size, ie not large enough to fit even one GMT
3574 ++ * token (snapshot). On the first ioctl some users may pass in
3575 ++ * smaller size (or zero) to simply get the size of the array
3576 ++ * so the user space caller can allocate sufficient memory
3577 ++ * and retry the ioctl again with larger array size sufficient
3578 ++ * to hold all of the snapshot GMT tokens on the second try.
3579 ++ */
3580 ++ if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
3581 ++ ret_data_len = sizeof(struct smb_snapshot_array);
3582 ++
3583 ++ /*
3584 ++ * We return struct SRV_SNAPSHOT_ARRAY, followed by
3585 ++ * the snapshot array (of 50-byte GMT tokens), each
3586 ++ * representing an available previous version of the data
3587 ++ */
3588 ++ if (ret_data_len > (snapshot_in.snapshot_array_size +
3589 ++ sizeof(struct smb_snapshot_array)))
3590 ++ ret_data_len = snapshot_in.snapshot_array_size +
3591 ++ sizeof(struct smb_snapshot_array);
3592 +
3593 + if (copy_to_user(ioc_buf, retbuf, ret_data_len))
3594 + rc = -EFAULT;
3595 +@@ -3366,6 +3409,11 @@ struct smb_version_operations smb311_operations = {
3596 + .query_all_EAs = smb2_query_eas,
3597 + .set_EA = smb2_set_ea,
3598 + #endif /* CIFS_XATTR */
3599 ++#ifdef CONFIG_CIFS_ACL
3600 ++ .get_acl = get_smb2_acl,
3601 ++ .get_acl_by_fid = get_smb2_acl_by_fid,
3602 ++ .set_acl = set_smb2_acl,
3603 ++#endif /* CIFS_ACL */
3604 + .next_header = smb2_next_header,
3605 + };
3606 + #endif /* CIFS_SMB311 */
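
The rewritten smb3_enum_snapshots() follows the usual two-call ioctl convention: a first call with a too-small (even zero-sized) buffer returns just the fixed header with the required size filled in, and a second call with a correctly sized buffer receives the header plus the 50-byte GMT tokens. A caller-side sketch of that convention, with a hypothetical fetch() callback and header struct standing in for the real ioctl and SRV_SNAPSHOT_ARRAY:

  #include <stdlib.h>

  struct snap_array_hdr { unsigned int count, returned, total_bytes; };

  /* fetch(buf, insize, outlen) stands in for the ioctl; returns <0 on error */
  static void *enum_snapshots(int (*fetch)(void *, unsigned, unsigned *))
  {
          struct snap_array_hdr hdr = { 0 };
          unsigned outlen = 0;
          void *buf;

          /* 1st call: undersized buffer, server reports the required size */
          if (fetch(&hdr, sizeof(hdr), &outlen) < 0)
                  return NULL;

          /* 2nd call: header plus the token payload */
          buf = calloc(1, sizeof(hdr) + hdr.total_bytes);
          if (!buf)
                  return NULL;
          if (fetch(buf, sizeof(hdr) + hdr.total_bytes, &outlen) < 0) {
                  free(buf);
                  return NULL;
          }
          return buf;
  }
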
3607 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3608 +index 3c92678cb45b..ffce77e00a58 100644
3609 +--- a/fs/cifs/smb2pdu.c
3610 ++++ b/fs/cifs/smb2pdu.c
3611 +@@ -4046,6 +4046,9 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
3612 + } else if (level == FS_SECTOR_SIZE_INFORMATION) {
3613 + max_len = sizeof(struct smb3_fs_ss_info);
3614 + min_len = sizeof(struct smb3_fs_ss_info);
3615 ++ } else if (level == FS_VOLUME_INFORMATION) {
3616 ++ max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
3617 ++ min_len = sizeof(struct smb3_fs_vol_info);
3618 + } else {
3619 + cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
3620 + return -EINVAL;
3621 +@@ -4090,6 +4093,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
3622 + tcon->ss_flags = le32_to_cpu(ss_info->Flags);
3623 + tcon->perf_sector_size =
3624 + le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
3625 ++ } else if (level == FS_VOLUME_INFORMATION) {
3626 ++ struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
3627 ++ (offset + (char *)rsp);
3628 ++ tcon->vol_serial_number = vol_info->VolumeSerialNumber;
3629 ++ tcon->vol_create_time = vol_info->VolumeCreationTime;
3630 + }
3631 +
3632 + qfsattr_exit:
3633 +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
3634 +index a671adcc44a6..c2a4526512b5 100644
3635 +--- a/fs/cifs/smb2pdu.h
3636 ++++ b/fs/cifs/smb2pdu.h
3637 +@@ -1248,6 +1248,17 @@ struct smb3_fs_ss_info {
3638 + __le32 ByteOffsetForPartitionAlignment;
3639 + } __packed;
3640 +
3641 ++/* volume info struct - see MS-FSCC 2.5.9 */
3642 ++#define MAX_VOL_LABEL_LEN 32
3643 ++struct smb3_fs_vol_info {
3644 ++ __le64 VolumeCreationTime;
3645 ++ __u32 VolumeSerialNumber;
3646 ++ __le32 VolumeLabelLength; /* includes trailing null */
3647 ++ __u8 SupportsObjects; /* True if, like NTFS, it supports objects */
3648 ++ __u8 Reserved;
3649 ++ __u8 VolumeLabel[0]; /* variable len */
3650 ++} __packed;
3651 ++
3652 + /* partial list of QUERY INFO levels */
3653 + #define FILE_DIRECTORY_INFORMATION 1
3654 + #define FILE_FULL_DIRECTORY_INFORMATION 2
3655 +diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
3656 +index 6e6a4f2ec890..c1520b48d1e1 100644
3657 +--- a/fs/cifs/smb2proto.h
3658 ++++ b/fs/cifs/smb2proto.h
3659 +@@ -68,6 +68,7 @@ extern int smb3_handle_read_data(struct TCP_Server_Info *server,
3660 +
3661 + extern int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
3662 + struct cifs_fid *pfid);
3663 ++extern void close_shroot(struct cached_fid *cfid);
3664 + extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst,
3665 + struct smb2_file_all_info *src);
3666 + extern int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
3667 +diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
3668 +index 719d55e63d88..bf61c3774830 100644
3669 +--- a/fs/cifs/smb2transport.c
3670 ++++ b/fs/cifs/smb2transport.c
3671 +@@ -173,7 +173,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
3672 + struct kvec *iov = rqst->rq_iov;
3673 + struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
3674 + struct cifs_ses *ses;
3675 +- struct shash_desc *shash = &server->secmech.sdeschmacsha256->shash;
3676 ++ struct shash_desc *shash;
3677 + struct smb_rqst drqst;
3678 +
3679 + ses = smb2_find_smb_ses(server, shdr->SessionId);
3680 +@@ -187,7 +187,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
3681 +
3682 + rc = smb2_crypto_shash_allocate(server);
3683 + if (rc) {
3684 +- cifs_dbg(VFS, "%s: shah256 alloc failed\n", __func__);
3685 ++ cifs_dbg(VFS, "%s: sha256 alloc failed\n", __func__);
3686 + return rc;
3687 + }
3688 +
3689 +@@ -198,6 +198,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
3690 + return rc;
3691 + }
3692 +
3693 ++ shash = &server->secmech.sdeschmacsha256->shash;
3694 + rc = crypto_shash_init(shash);
3695 + if (rc) {
3696 + cifs_dbg(VFS, "%s: Could not init sha256", __func__);
3697 +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
3698 +index aa52d87985aa..e5d6ee61ff48 100644
3699 +--- a/fs/ext4/balloc.c
3700 ++++ b/fs/ext4/balloc.c
3701 +@@ -426,9 +426,9 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
3702 + }
3703 + bh = sb_getblk(sb, bitmap_blk);
3704 + if (unlikely(!bh)) {
3705 +- ext4_error(sb, "Cannot get buffer for block bitmap - "
3706 +- "block_group = %u, block_bitmap = %llu",
3707 +- block_group, bitmap_blk);
3708 ++ ext4_warning(sb, "Cannot get buffer for block bitmap - "
3709 ++ "block_group = %u, block_bitmap = %llu",
3710 ++ block_group, bitmap_blk);
3711 + return ERR_PTR(-ENOMEM);
3712 + }
3713 +
3714 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
3715 +index f336cbc6e932..796aa609bcb9 100644
3716 +--- a/fs/ext4/ialloc.c
3717 ++++ b/fs/ext4/ialloc.c
3718 +@@ -138,9 +138,9 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
3719 + }
3720 + bh = sb_getblk(sb, bitmap_blk);
3721 + if (unlikely(!bh)) {
3722 +- ext4_error(sb, "Cannot read inode bitmap - "
3723 +- "block_group = %u, inode_bitmap = %llu",
3724 +- block_group, bitmap_blk);
3725 ++ ext4_warning(sb, "Cannot read inode bitmap - "
3726 ++ "block_group = %u, inode_bitmap = %llu",
3727 ++ block_group, bitmap_blk);
3728 + return ERR_PTR(-ENOMEM);
3729 + }
3730 + if (bitmap_uptodate(bh))
3731 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
3732 +index 2a4c25c4681d..116ff68c5bd4 100644
3733 +--- a/fs/ext4/namei.c
3734 ++++ b/fs/ext4/namei.c
3735 +@@ -1398,6 +1398,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
3736 + goto cleanup_and_exit;
3737 + dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
3738 + "falling back\n"));
3739 ++ ret = NULL;
3740 + }
3741 + nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
3742 + if (!nblocks) {
3743 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3744 +index b7f7922061be..130c12974e28 100644
3745 +--- a/fs/ext4/super.c
3746 ++++ b/fs/ext4/super.c
3747 +@@ -776,26 +776,26 @@ void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
3748 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3749 + struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3750 + struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
3751 ++ int ret;
3752 +
3753 +- if ((flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) &&
3754 +- !EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) {
3755 +- percpu_counter_sub(&sbi->s_freeclusters_counter,
3756 +- grp->bb_free);
3757 +- set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
3758 +- &grp->bb_state);
3759 ++ if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
3760 ++ ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
3761 ++ &grp->bb_state);
3762 ++ if (!ret)
3763 ++ percpu_counter_sub(&sbi->s_freeclusters_counter,
3764 ++ grp->bb_free);
3765 + }
3766 +
3767 +- if ((flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) &&
3768 +- !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
3769 +- if (gdp) {
3770 ++ if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
3771 ++ ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
3772 ++ &grp->bb_state);
3773 ++ if (!ret && gdp) {
3774 + int count;
3775 +
3776 + count = ext4_free_inodes_count(sb, gdp);
3777 + percpu_counter_sub(&sbi->s_freeinodes_counter,
3778 + count);
3779 + }
3780 +- set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
3781 +- &grp->bb_state);
3782 + }
3783 + }
3784 +
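
The rewrite makes the ext4 corruption accounting idempotent: ext4_test_and_set_bit() atomically marks the group corrupt and reports whether it was already marked, so the free-cluster and free-inode counters are decremented at most once even if two CPUs report corruption concurrently. An equivalent userspace sketch with C11 atomics:

  #include <stdatomic.h>

  #define BBITMAP_CORRUPT (1u << 0)

  struct group { atomic_uint state; };
  static atomic_long free_clusters;

  static void mark_corrupt(struct group *g, long bb_free)
  {
          /* fetch_or returns the old value: subtract only on the
           * 0 -> 1 transition, never twice */
          unsigned int old = atomic_fetch_or(&g->state, BBITMAP_CORRUPT);

          if (!(old & BBITMAP_CORRUPT))
                  atomic_fetch_sub(&free_clusters, bb_free);
  }
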
3785 +diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
3786 +index f34da0bb8f17..b970a200f20c 100644
3787 +--- a/fs/ext4/sysfs.c
3788 ++++ b/fs/ext4/sysfs.c
3789 +@@ -274,8 +274,12 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
3790 + case attr_pointer_ui:
3791 + if (!ptr)
3792 + return 0;
3793 +- return snprintf(buf, PAGE_SIZE, "%u\n",
3794 +- *((unsigned int *) ptr));
3795 ++ if (a->attr_ptr == ptr_ext4_super_block_offset)
3796 ++ return snprintf(buf, PAGE_SIZE, "%u\n",
3797 ++ le32_to_cpup(ptr));
3798 ++ else
3799 ++ return snprintf(buf, PAGE_SIZE, "%u\n",
3800 ++ *((unsigned int *) ptr));
3801 + case attr_pointer_atomic:
3802 + if (!ptr)
3803 + return 0;
3804 +@@ -308,7 +312,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
3805 + ret = kstrtoul(skip_spaces(buf), 0, &t);
3806 + if (ret)
3807 + return ret;
3808 +- *((unsigned int *) ptr) = t;
3809 ++ if (a->attr_ptr == ptr_ext4_super_block_offset)
3810 ++ *((__le32 *) ptr) = cpu_to_le32(t);
3811 ++ else
3812 ++ *((unsigned int *) ptr) = t;
3813 + return len;
3814 + case attr_inode_readahead:
3815 + return inode_readahead_blks_store(sbi, buf, len);
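
Superblock fields live on disk in little-endian form, so the sysfs show/store paths above must convert with le32_to_cpup()/cpu_to_le32() rather than dereferencing the pointer as a native unsigned int, which only happens to work on little-endian hosts. A sketch of the distinction with portable byte-order helpers (the kernel provides its own):

  #include <stdint.h>

  static uint32_t le32_load(const unsigned char *p)
  {
          return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
                 (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
  }

  static void le32_store(unsigned char *p, uint32_t v)
  {
          p[0] = (unsigned char)v;
          p[1] = (unsigned char)(v >> 8);
          p[2] = (unsigned char)(v >> 16);
          p[3] = (unsigned char)(v >> 24);
  }
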
3816 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
3817 +index 723df14f4084..f36fc5d5b257 100644
3818 +--- a/fs/ext4/xattr.c
3819 ++++ b/fs/ext4/xattr.c
3820 +@@ -190,6 +190,8 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
3821 + struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
3822 + if ((void *)next >= end)
3823 + return -EFSCORRUPTED;
3824 ++ if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
3825 ++ return -EFSCORRUPTED;
3826 + e = next;
3827 + }
3828 +
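
The added xattr check rejects on-disk entries whose name contains an embedded NUL before e_name_len bytes: strnlen(name, len) != len means the recorded length does not match the actual string, a telltale sign of a corrupted or crafted block. A standalone sketch of the same validation:

  #include <string.h>

  /* Returns 0 if the fixed-length name field is self-consistent,
   * -1 if it embeds a NUL before the recorded length (corruption). */
  static int xattr_name_ok(const char *name, size_t recorded_len)
  {
          return strnlen(name, recorded_len) == recorded_len ? 0 : -1;
  }
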
3829 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
3830 +index c6b88fa85e2e..4a9ace7280b9 100644
3831 +--- a/fs/fuse/dev.c
3832 ++++ b/fs/fuse/dev.c
3833 +@@ -127,6 +127,16 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
3834 + return !fc->initialized || (for_background && fc->blocked);
3835 + }
3836 +
3837 ++static void fuse_drop_waiting(struct fuse_conn *fc)
3838 ++{
3839 ++ if (fc->connected) {
3840 ++ atomic_dec(&fc->num_waiting);
3841 ++ } else if (atomic_dec_and_test(&fc->num_waiting)) {
3842 ++ /* wake up aborters */
3843 ++ wake_up_all(&fc->blocked_waitq);
3844 ++ }
3845 ++}
3846 ++
3847 + static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
3848 + bool for_background)
3849 + {
3850 +@@ -175,7 +185,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
3851 + return req;
3852 +
3853 + out:
3854 +- atomic_dec(&fc->num_waiting);
3855 ++ fuse_drop_waiting(fc);
3856 + return ERR_PTR(err);
3857 + }
3858 +
3859 +@@ -285,7 +295,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
3860 +
3861 + if (test_bit(FR_WAITING, &req->flags)) {
3862 + __clear_bit(FR_WAITING, &req->flags);
3863 +- atomic_dec(&fc->num_waiting);
3864 ++ fuse_drop_waiting(fc);
3865 + }
3866 +
3867 + if (req->stolen_file)
3868 +@@ -371,7 +381,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
3869 + struct fuse_iqueue *fiq = &fc->iq;
3870 +
3871 + if (test_and_set_bit(FR_FINISHED, &req->flags))
3872 +- return;
3873 ++ goto put_request;
3874 +
3875 + spin_lock(&fiq->waitq.lock);
3876 + list_del_init(&req->intr_entry);
3877 +@@ -400,6 +410,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
3878 + wake_up(&req->waitq);
3879 + if (req->end)
3880 + req->end(fc, req);
3881 ++put_request:
3882 + fuse_put_request(fc, req);
3883 + }
3884 +
3885 +@@ -1944,12 +1955,15 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
3886 + if (!fud)
3887 + return -EPERM;
3888 +
3889 ++ pipe_lock(pipe);
3890 ++
3891 + bufs = kmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
3892 + GFP_KERNEL);
3893 +- if (!bufs)
3894 ++ if (!bufs) {
3895 ++ pipe_unlock(pipe);
3896 + return -ENOMEM;
3897 ++ }
3898 +
3899 +- pipe_lock(pipe);
3900 + nbuf = 0;
3901 + rem = 0;
3902 + for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
3903 +@@ -2105,6 +2119,7 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
3904 + set_bit(FR_ABORTED, &req->flags);
3905 + if (!test_bit(FR_LOCKED, &req->flags)) {
3906 + set_bit(FR_PRIVATE, &req->flags);
3907 ++ __fuse_get_request(req);
3908 + list_move(&req->list, &to_end1);
3909 + }
3910 + spin_unlock(&req->waitq.lock);
3911 +@@ -2131,7 +2146,6 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
3912 +
3913 + while (!list_empty(&to_end1)) {
3914 + req = list_first_entry(&to_end1, struct fuse_req, list);
3915 +- __fuse_get_request(req);
3916 + list_del_init(&req->list);
3917 + request_end(fc, req);
3918 + }
3919 +@@ -2142,6 +2156,11 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
3920 + }
3921 + EXPORT_SYMBOL_GPL(fuse_abort_conn);
3922 +
3923 ++void fuse_wait_aborted(struct fuse_conn *fc)
3924 ++{
3925 ++ wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
3926 ++}
3927 ++
3928 + int fuse_dev_release(struct inode *inode, struct file *file)
3929 + {
3930 + struct fuse_dev *fud = fuse_get_dev(file);
3931 +@@ -2149,9 +2168,15 @@ int fuse_dev_release(struct inode *inode, struct file *file)
3932 + if (fud) {
3933 + struct fuse_conn *fc = fud->fc;
3934 + struct fuse_pqueue *fpq = &fud->pq;
3935 ++ LIST_HEAD(to_end);
3936 +
3937 ++ spin_lock(&fpq->lock);
3938 + WARN_ON(!list_empty(&fpq->io));
3939 +- end_requests(fc, &fpq->processing);
3940 ++ list_splice_init(&fpq->processing, &to_end);
3941 ++ spin_unlock(&fpq->lock);
3942 ++
3943 ++ end_requests(fc, &to_end);
3944 ++
3945 + /* Are we the last open device? */
3946 + if (atomic_dec_and_test(&fc->dev_count)) {
3947 + WARN_ON(fc->iq.fasync != NULL);
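
fuse_drop_waiting() pairs with the new fuse_wait_aborted(): once the connection is aborted, whichever request drops num_waiting to zero wakes the thread blocked in fuse_wait_aborted(), so unmount cannot return while requests are still in flight. A userspace analogue of the count-and-wake shape using pthreads:

  #include <pthread.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t all_done = PTHREAD_COND_INITIALIZER;
  static int num_waiting;
  static int connected = 1;

  static void abort_conn(void)
  {
          pthread_mutex_lock(&lock);
          connected = 0;
          pthread_mutex_unlock(&lock);
  }

  static void drop_waiting(void)
  {
          pthread_mutex_lock(&lock);
          if (--num_waiting == 0 && !connected)
                  pthread_cond_broadcast(&all_done); /* wake the aborter */
          pthread_mutex_unlock(&lock);
  }

  static void wait_aborted(void)
  {
          pthread_mutex_lock(&lock);
          while (num_waiting != 0)
                  pthread_cond_wait(&all_done, &lock);
          pthread_mutex_unlock(&lock);
  }
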
3948 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
3949 +index 56231b31f806..606909ed5f21 100644
3950 +--- a/fs/fuse/dir.c
3951 ++++ b/fs/fuse/dir.c
3952 +@@ -355,11 +355,12 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
3953 + struct inode *inode;
3954 + struct dentry *newent;
3955 + bool outarg_valid = true;
3956 ++ bool locked;
3957 +
3958 +- fuse_lock_inode(dir);
3959 ++ locked = fuse_lock_inode(dir);
3960 + err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
3961 + &outarg, &inode);
3962 +- fuse_unlock_inode(dir);
3963 ++ fuse_unlock_inode(dir, locked);
3964 + if (err == -ENOENT) {
3965 + outarg_valid = false;
3966 + err = 0;
3967 +@@ -1340,6 +1341,7 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
3968 + struct fuse_conn *fc = get_fuse_conn(inode);
3969 + struct fuse_req *req;
3970 + u64 attr_version = 0;
3971 ++ bool locked;
3972 +
3973 + if (is_bad_inode(inode))
3974 + return -EIO;
3975 +@@ -1367,9 +1369,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
3976 + fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
3977 + FUSE_READDIR);
3978 + }
3979 +- fuse_lock_inode(inode);
3980 ++ locked = fuse_lock_inode(inode);
3981 + fuse_request_send(fc, req);
3982 +- fuse_unlock_inode(inode);
3983 ++ fuse_unlock_inode(inode, locked);
3984 + nbytes = req->out.args[0].size;
3985 + err = req->out.h.error;
3986 + fuse_put_request(fc, req);
3987 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
3988 +index a201fb0ac64f..aa23749a943b 100644
3989 +--- a/fs/fuse/file.c
3990 ++++ b/fs/fuse/file.c
3991 +@@ -866,6 +866,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
3992 + }
3993 +
3994 + if (WARN_ON(req->num_pages >= req->max_pages)) {
3995 ++ unlock_page(page);
3996 + fuse_put_request(fc, req);
3997 + return -EIO;
3998 + }
3999 +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
4000 +index 5256ad333b05..f78e9614bb5f 100644
4001 +--- a/fs/fuse/fuse_i.h
4002 ++++ b/fs/fuse/fuse_i.h
4003 +@@ -862,6 +862,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
4004 +
4005 + /* Abort all requests */
4006 + void fuse_abort_conn(struct fuse_conn *fc, bool is_abort);
4007 ++void fuse_wait_aborted(struct fuse_conn *fc);
4008 +
4009 + /**
4010 + * Invalidate inode attributes
4011 +@@ -974,8 +975,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
4012 +
4013 + void fuse_set_initialized(struct fuse_conn *fc);
4014 +
4015 +-void fuse_unlock_inode(struct inode *inode);
4016 +-void fuse_lock_inode(struct inode *inode);
4017 ++void fuse_unlock_inode(struct inode *inode, bool locked);
4018 ++bool fuse_lock_inode(struct inode *inode);
4019 +
4020 + int fuse_setxattr(struct inode *inode, const char *name, const void *value,
4021 + size_t size, int flags);
4022 +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
4023 +index a24df8861b40..2dbd487390a3 100644
4024 +--- a/fs/fuse/inode.c
4025 ++++ b/fs/fuse/inode.c
4026 +@@ -357,15 +357,21 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
4027 + return 0;
4028 + }
4029 +
4030 +-void fuse_lock_inode(struct inode *inode)
4031 ++bool fuse_lock_inode(struct inode *inode)
4032 + {
4033 +- if (!get_fuse_conn(inode)->parallel_dirops)
4034 ++ bool locked = false;
4035 ++
4036 ++ if (!get_fuse_conn(inode)->parallel_dirops) {
4037 + mutex_lock(&get_fuse_inode(inode)->mutex);
4038 ++ locked = true;
4039 ++ }
4040 ++
4041 ++ return locked;
4042 + }
4043 +
4044 +-void fuse_unlock_inode(struct inode *inode)
4045 ++void fuse_unlock_inode(struct inode *inode, bool locked)
4046 + {
4047 +- if (!get_fuse_conn(inode)->parallel_dirops)
4048 ++ if (locked)
4049 + mutex_unlock(&get_fuse_inode(inode)->mutex);
4050 + }
4051 +
4052 +@@ -391,9 +397,6 @@ static void fuse_put_super(struct super_block *sb)
4053 + {
4054 + struct fuse_conn *fc = get_fuse_conn_super(sb);
4055 +
4056 +- fuse_send_destroy(fc);
4057 +-
4058 +- fuse_abort_conn(fc, false);
4059 + mutex_lock(&fuse_mutex);
4060 + list_del(&fc->entry);
4061 + fuse_ctl_remove_conn(fc);
4062 +@@ -1210,16 +1213,25 @@ static struct dentry *fuse_mount(struct file_system_type *fs_type,
4063 + return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
4064 + }
4065 +
4066 +-static void fuse_kill_sb_anon(struct super_block *sb)
4067 ++static void fuse_sb_destroy(struct super_block *sb)
4068 + {
4069 + struct fuse_conn *fc = get_fuse_conn_super(sb);
4070 +
4071 + if (fc) {
4072 ++ fuse_send_destroy(fc);
4073 ++
4074 ++ fuse_abort_conn(fc, false);
4075 ++ fuse_wait_aborted(fc);
4076 ++
4077 + down_write(&fc->killsb);
4078 + fc->sb = NULL;
4079 + up_write(&fc->killsb);
4080 + }
4081 ++}
4082 +
4083 ++static void fuse_kill_sb_anon(struct super_block *sb)
4084 ++{
4085 ++ fuse_sb_destroy(sb);
4086 + kill_anon_super(sb);
4087 + }
4088 +
4089 +@@ -1242,14 +1254,7 @@ static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
4090 +
4091 + static void fuse_kill_sb_blk(struct super_block *sb)
4092 + {
4093 +- struct fuse_conn *fc = get_fuse_conn_super(sb);
4094 +-
4095 +- if (fc) {
4096 +- down_write(&fc->killsb);
4097 +- fc->sb = NULL;
4098 +- up_write(&fc->killsb);
4099 +- }
4100 +-
4101 ++ fuse_sb_destroy(sb);
4102 + kill_block_super(sb);
4103 + }
4104 +
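
fuse_lock_inode() used to re-test fc->parallel_dirops on unlock; if that flag could change between lock and unlock, the mutex would be left held or unlocked without being taken. Returning whether the lock was actually acquired makes the pair self-consistent regardless of later flag changes. A generic sketch of the pattern:

  #include <pthread.h>
  #include <stdbool.h>

  static bool maybe_lock(pthread_mutex_t *m, bool serialize)
  {
          if (serialize)
                  pthread_mutex_lock(m);
          return serialize;  /* caller passes this to maybe_unlock() */
  }

  static void maybe_unlock(pthread_mutex_t *m, bool locked)
  {
          if (locked)
                  pthread_mutex_unlock(m);
  }
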
4105 +diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
4106 +index 5c13f29bfcdb..118fa197a35f 100644
4107 +--- a/fs/sysfs/file.c
4108 ++++ b/fs/sysfs/file.c
4109 +@@ -405,6 +405,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
4110 + }
4111 + EXPORT_SYMBOL_GPL(sysfs_chmod_file);
4112 +
4113 ++/**
4114 ++ * sysfs_break_active_protection - break "active" protection
4115 ++ * @kobj: The kernel object @attr is associated with.
4116 ++ * @attr: The attribute to break the "active" protection for.
4117 ++ *
4118 ++ * With sysfs, just like kernfs, deletion of an attribute is postponed until
4119 ++ * all active .show() and .store() callbacks have finished unless this function
4120 ++ * is called. Hence this function is useful in methods that implement self
4121 ++ * deletion.
4122 ++ */
4123 ++struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
4124 ++ const struct attribute *attr)
4125 ++{
4126 ++ struct kernfs_node *kn;
4127 ++
4128 ++ kobject_get(kobj);
4129 ++ kn = kernfs_find_and_get(kobj->sd, attr->name);
4130 ++ if (kn)
4131 ++ kernfs_break_active_protection(kn);
4132 ++ return kn;
4133 ++}
4134 ++EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
4135 ++
4136 ++/**
4137 ++ * sysfs_unbreak_active_protection - restore "active" protection
4138 ++ * @kn: Pointer returned by sysfs_break_active_protection().
4139 ++ *
4140 ++ * Undo the effects of sysfs_break_active_protection(). Since this function
4141 ++ * calls kernfs_put() on the kernfs node that corresponds to the 'attr'
4142 ++ * argument passed to sysfs_break_active_protection(), and since that
4143 ++ * attribute may have been removed between the sysfs_break_active_protection()
4144 ++ * and sysfs_unbreak_active_protection() calls, it is not safe to access @kn
4145 ++ * after this function has returned.
4146 ++ */
4147 ++void sysfs_unbreak_active_protection(struct kernfs_node *kn)
4148 ++{
4149 ++ struct kobject *kobj = kn->parent->priv;
4150 ++
4151 ++ kernfs_unbreak_active_protection(kn);
4152 ++ kernfs_put(kn);
4153 ++ kobject_put(kobj);
4154 ++}
4155 ++EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection);
4156 ++
4157 + /**
4158 + * sysfs_remove_file_ns - remove an object attribute with a custom ns tag
4159 + * @kobj: object we're acting for
4160 +diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
4161 +index c9e5a6621b95..c44703f471b3 100644
4162 +--- a/include/drm/i915_drm.h
4163 ++++ b/include/drm/i915_drm.h
4164 +@@ -95,7 +95,9 @@ extern struct resource intel_graphics_stolen_res;
4165 + #define I845_TSEG_SIZE_512K (2 << 1)
4166 + #define I845_TSEG_SIZE_1M (3 << 1)
4167 +
4168 +-#define INTEL_BSM 0x5c
4169 ++#define INTEL_BSM 0x5c
4170 ++#define INTEL_GEN11_BSM_DW0 0xc0
4171 ++#define INTEL_GEN11_BSM_DW1 0xc4
4172 + #define INTEL_BSM_MASK (-(1u << 20))
4173 +
4174 + #endif /* _I915_DRM_H_ */
4175 +diff --git a/include/linux/libata.h b/include/linux/libata.h
4176 +index 32f247cb5e9e..bc4f87cbe7f4 100644
4177 +--- a/include/linux/libata.h
4178 ++++ b/include/linux/libata.h
4179 +@@ -1111,6 +1111,8 @@ extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
4180 + extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
4181 + const struct ata_port_info * const * ppi, int n_ports);
4182 + extern int ata_slave_link_init(struct ata_port *ap);
4183 ++extern void ata_host_get(struct ata_host *host);
4184 ++extern void ata_host_put(struct ata_host *host);
4185 + extern int ata_host_start(struct ata_host *host);
4186 + extern int ata_host_register(struct ata_host *host,
4187 + struct scsi_host_template *sht);
4188 +diff --git a/include/linux/printk.h b/include/linux/printk.h
4189 +index 6d7e800affd8..3ede9f46a494 100644
4190 +--- a/include/linux/printk.h
4191 ++++ b/include/linux/printk.h
4192 +@@ -148,9 +148,13 @@ void early_printk(const char *s, ...) { }
4193 + #ifdef CONFIG_PRINTK_NMI
4194 + extern void printk_nmi_enter(void);
4195 + extern void printk_nmi_exit(void);
4196 ++extern void printk_nmi_direct_enter(void);
4197 ++extern void printk_nmi_direct_exit(void);
4198 + #else
4199 + static inline void printk_nmi_enter(void) { }
4200 + static inline void printk_nmi_exit(void) { }
4201 ++static inline void printk_nmi_direct_enter(void) { }
4202 ++static inline void printk_nmi_direct_exit(void) { }
4203 + #endif /* PRINTK_NMI */
4204 +
4205 + #ifdef CONFIG_PRINTK
4206 +diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
4207 +index b8bfdc173ec0..3c12198c0103 100644
4208 +--- a/include/linux/sysfs.h
4209 ++++ b/include/linux/sysfs.h
4210 +@@ -237,6 +237,9 @@ int __must_check sysfs_create_files(struct kobject *kobj,
4211 + const struct attribute **attr);
4212 + int __must_check sysfs_chmod_file(struct kobject *kobj,
4213 + const struct attribute *attr, umode_t mode);
4214 ++struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
4215 ++ const struct attribute *attr);
4216 ++void sysfs_unbreak_active_protection(struct kernfs_node *kn);
4217 + void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
4218 + const void *ns);
4219 + bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
4220 +@@ -350,6 +353,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
4221 + return 0;
4222 + }
4223 +
4224 ++static inline struct kernfs_node *
4225 ++sysfs_break_active_protection(struct kobject *kobj,
4226 ++ const struct attribute *attr)
4227 ++{
4228 ++ return NULL;
4229 ++}
4230 ++
4231 ++static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
4232 ++{
4233 ++}
4234 ++
4235 + static inline void sysfs_remove_file_ns(struct kobject *kobj,
4236 + const struct attribute *attr,
4237 + const void *ns)
4238 +diff --git a/include/linux/tpm.h b/include/linux/tpm.h
4239 +index 06639fb6ab85..8eb5e5ebe136 100644
4240 +--- a/include/linux/tpm.h
4241 ++++ b/include/linux/tpm.h
4242 +@@ -43,6 +43,8 @@ struct tpm_class_ops {
4243 + u8 (*status) (struct tpm_chip *chip);
4244 + bool (*update_timeouts)(struct tpm_chip *chip,
4245 + unsigned long *timeout_cap);
4246 ++ int (*go_idle)(struct tpm_chip *chip);
4247 ++ int (*cmd_ready)(struct tpm_chip *chip);
4248 + int (*request_locality)(struct tpm_chip *chip, int loc);
4249 + int (*relinquish_locality)(struct tpm_chip *chip, int loc);
4250 + void (*clk_enable)(struct tpm_chip *chip, bool value);
4251 +diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
4252 +index 225ab7783dfd..3de3b10da19a 100644
4253 +--- a/include/scsi/libsas.h
4254 ++++ b/include/scsi/libsas.h
4255 +@@ -161,7 +161,7 @@ struct sata_device {
4256 + u8 port_no; /* port number, if this is a PM (Port) */
4257 +
4258 + struct ata_port *ap;
4259 +- struct ata_host ata_host;
4260 ++ struct ata_host *ata_host;
4261 + struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
4262 + u8 fis[ATA_RESP_FIS_SIZE];
4263 + };
4264 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
4265 +index ea619021d901..f3183ad10d96 100644
4266 +--- a/kernel/kprobes.c
4267 ++++ b/kernel/kprobes.c
4268 +@@ -710,9 +710,7 @@ static void reuse_unused_kprobe(struct kprobe *ap)
4269 + * there is still a relative jump) and disabled.
4270 + */
4271 + op = container_of(ap, struct optimized_kprobe, kp);
4272 +- if (unlikely(list_empty(&op->list)))
4273 +- printk(KERN_WARNING "Warning: found a stray unused "
4274 +- "aggrprobe@%p\n", ap->addr);
4275 ++ WARN_ON_ONCE(list_empty(&op->list));
4276 + /* Enable the probe again */
4277 + ap->flags &= ~KPROBE_FLAG_DISABLED;
4278 + /* Optimize it again (remove from op->list) */
4279 +@@ -985,7 +983,8 @@ static int arm_kprobe_ftrace(struct kprobe *p)
4280 + ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
4281 + (unsigned long)p->addr, 0, 0);
4282 + if (ret) {
4283 +- pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
4284 ++ pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
4285 ++ p->addr, ret);
4286 + return ret;
4287 + }
4288 +
4289 +@@ -1025,7 +1024,8 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
4290 +
4291 + ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
4292 + (unsigned long)p->addr, 1, 0);
4293 +- WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
4294 ++ WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
4295 ++ p->addr, ret);
4296 + return ret;
4297 + }
4298 + #else /* !CONFIG_KPROBES_ON_FTRACE */
4299 +@@ -2169,11 +2169,12 @@ out:
4300 + }
4301 + EXPORT_SYMBOL_GPL(enable_kprobe);
4302 +
4303 ++/* Caller must NOT call this in the usual path. This is only for critical cases. */
4304 + void dump_kprobe(struct kprobe *kp)
4305 + {
4306 +- printk(KERN_WARNING "Dumping kprobe:\n");
4307 +- printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
4308 +- kp->symbol_name, kp->addr, kp->offset);
4309 ++ pr_err("Dumping kprobe:\n");
4310 ++ pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
4311 ++ kp->symbol_name, kp->offset, kp->addr);
4312 + }
4313 + NOKPROBE_SYMBOL(dump_kprobe);
4314 +
4315 +@@ -2196,11 +2197,8 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
4316 + entry = arch_deref_entry_point((void *)*iter);
4317 +
4318 + if (!kernel_text_address(entry) ||
4319 +- !kallsyms_lookup_size_offset(entry, &size, &offset)) {
4320 +- pr_err("Failed to find blacklist at %p\n",
4321 +- (void *)entry);
4322 ++ !kallsyms_lookup_size_offset(entry, &size, &offset))
4323 + continue;
4324 +- }
4325 +
4326 + ent = kmalloc(sizeof(*ent), GFP_KERNEL);
4327 + if (!ent)
4328 +@@ -2428,8 +2426,16 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
4329 + struct kprobe_blacklist_entry *ent =
4330 + list_entry(v, struct kprobe_blacklist_entry, list);
4331 +
4332 +- seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
4333 +- (void *)ent->end_addr, (void *)ent->start_addr);
4334 ++ /*
4335 ++ * If /proc/kallsyms is not showing kernel address, we won't
4336 ++ * show them here either.
4337 ++ */
4338 ++ if (!kallsyms_show_value())
4339 ++ seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
4340 ++ (void *)ent->start_addr);
4341 ++ else
4342 ++ seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
4343 ++ (void *)ent->end_addr, (void *)ent->start_addr);
4344 + return 0;
4345 + }
4346 +
4347 +@@ -2611,7 +2617,7 @@ static int __init debugfs_kprobe_init(void)
4348 + if (!dir)
4349 + return -ENOMEM;
4350 +
4351 +- file = debugfs_create_file("list", 0444, dir, NULL,
4352 ++ file = debugfs_create_file("list", 0400, dir, NULL,
4353 + &debugfs_kprobes_operations);
4354 + if (!file)
4355 + goto error;
4356 +@@ -2621,7 +2627,7 @@ static int __init debugfs_kprobe_init(void)
4357 + if (!file)
4358 + goto error;
4359 +
4360 +- file = debugfs_create_file("blacklist", 0444, dir, NULL,
4361 ++ file = debugfs_create_file("blacklist", 0400, dir, NULL,
4362 + &debugfs_kprobe_blacklist_ops);
4363 + if (!file)
4364 + goto error;
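
The kprobes changes consistently stop leaking raw kernel pointers: %p becomes %pS (symbol plus offset), the debugfs files drop world-readability (0444 to 0400), and the blacklist dump zeroes the address range unless kallsyms_show_value() says the caller may see it. A userspace sketch of the gating idea, with euid standing in for the kernel's permission check:

  #include <stdio.h>
  #include <unistd.h>

  static void print_range(unsigned long start, unsigned long end,
                          const char *sym)
  {
          /* stand-in for kallsyms_show_value(): only root sees addresses */
          if (geteuid() != 0)
                  printf("0x%lx-0x%lx\t%s\n", 0UL, 0UL, sym);
          else
                  printf("0x%lx-0x%lx\t%s\n", start, end, sym);
  }
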
4365 +diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
4366 +index 2a7d04049af4..0f1898820cba 100644
4367 +--- a/kernel/printk/internal.h
4368 ++++ b/kernel/printk/internal.h
4369 +@@ -19,11 +19,16 @@
4370 + #ifdef CONFIG_PRINTK
4371 +
4372 + #define PRINTK_SAFE_CONTEXT_MASK 0x3fffffff
4373 +-#define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000
4374 ++#define PRINTK_NMI_DIRECT_CONTEXT_MASK 0x40000000
4375 + #define PRINTK_NMI_CONTEXT_MASK 0x80000000
4376 +
4377 + extern raw_spinlock_t logbuf_lock;
4378 +
4379 ++__printf(5, 0)
4380 ++int vprintk_store(int facility, int level,
4381 ++ const char *dict, size_t dictlen,
4382 ++ const char *fmt, va_list args);
4383 ++
4384 + __printf(1, 0) int vprintk_default(const char *fmt, va_list args);
4385 + __printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
4386 + __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
4387 +@@ -54,6 +59,8 @@ void __printk_safe_exit(void);
4388 + local_irq_enable(); \
4389 + } while (0)
4390 +
4391 ++void defer_console_output(void);
4392 ++
4393 + #else
4394 +
4395 + __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
4396 +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
4397 +index 247808333ba4..1d1513215c22 100644
4398 +--- a/kernel/printk/printk.c
4399 ++++ b/kernel/printk/printk.c
4400 +@@ -1824,28 +1824,16 @@ static size_t log_output(int facility, int level, enum log_flags lflags, const c
4401 + return log_store(facility, level, lflags, 0, dict, dictlen, text, text_len);
4402 + }
4403 +
4404 +-asmlinkage int vprintk_emit(int facility, int level,
4405 +- const char *dict, size_t dictlen,
4406 +- const char *fmt, va_list args)
4407 ++/* Must be called under logbuf_lock. */
4408 ++int vprintk_store(int facility, int level,
4409 ++ const char *dict, size_t dictlen,
4410 ++ const char *fmt, va_list args)
4411 + {
4412 + static char textbuf[LOG_LINE_MAX];
4413 + char *text = textbuf;
4414 + size_t text_len;
4415 + enum log_flags lflags = 0;
4416 +- unsigned long flags;
4417 +- int printed_len;
4418 +- bool in_sched = false;
4419 +-
4420 +- if (level == LOGLEVEL_SCHED) {
4421 +- level = LOGLEVEL_DEFAULT;
4422 +- in_sched = true;
4423 +- }
4424 +-
4425 +- boot_delay_msec(level);
4426 +- printk_delay();
4427 +
4428 +- /* This stops the holder of console_sem just where we want him */
4429 +- logbuf_lock_irqsave(flags);
4430 + /*
4431 + * The printf needs to come first; we need the syslog
4432 + * prefix which might be passed-in as a parameter.
4433 +@@ -1886,8 +1874,29 @@ asmlinkage int vprintk_emit(int facility, int level,
4434 + if (dict)
4435 + lflags |= LOG_PREFIX|LOG_NEWLINE;
4436 +
4437 +- printed_len = log_output(facility, level, lflags, dict, dictlen, text, text_len);
4438 ++ return log_output(facility, level, lflags,
4439 ++ dict, dictlen, text, text_len);
4440 ++}
4441 +
4442 ++asmlinkage int vprintk_emit(int facility, int level,
4443 ++ const char *dict, size_t dictlen,
4444 ++ const char *fmt, va_list args)
4445 ++{
4446 ++ int printed_len;
4447 ++ bool in_sched = false;
4448 ++ unsigned long flags;
4449 ++
4450 ++ if (level == LOGLEVEL_SCHED) {
4451 ++ level = LOGLEVEL_DEFAULT;
4452 ++ in_sched = true;
4453 ++ }
4454 ++
4455 ++ boot_delay_msec(level);
4456 ++ printk_delay();
4457 ++
4458 ++ /* This stops the holder of console_sem just where we want him */
4459 ++ logbuf_lock_irqsave(flags);
4460 ++ printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args);
4461 + logbuf_unlock_irqrestore(flags);
4462 +
4463 + /* If called from the scheduler, we can not call up(). */
4464 +@@ -2878,16 +2887,20 @@ void wake_up_klogd(void)
4465 + preempt_enable();
4466 + }
4467 +
4468 +-int vprintk_deferred(const char *fmt, va_list args)
4469 ++void defer_console_output(void)
4470 + {
4471 +- int r;
4472 +-
4473 +- r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
4474 +-
4475 + preempt_disable();
4476 + __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
4477 + irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
4478 + preempt_enable();
4479 ++}
4480 ++
4481 ++int vprintk_deferred(const char *fmt, va_list args)
4482 ++{
4483 ++ int r;
4484 ++
4485 ++ r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
4486 ++ defer_console_output();
4487 +
4488 + return r;
4489 + }
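
Splitting vprintk_store() out of vprintk_emit() separates "format and append to the ring buffer under logbuf_lock" from "take the lock, then kick consoles", so a caller that already owns the lock (the NMI path in printk_safe.c below) can store directly. A generic sketch of the split, with a pthread mutex standing in for logbuf_lock:

  #include <pthread.h>
  #include <stdarg.h>
  #include <stdio.h>

  static pthread_mutex_t logbuf_lock = PTHREAD_MUTEX_INITIALIZER;
  static char logbuf[4096];
  static size_t loglen;

  /* must be called with logbuf_lock held */
  static int log_store(const char *fmt, va_list ap)
  {
          size_t room = sizeof(logbuf) - loglen;
          int n = vsnprintf(logbuf + loglen, room, fmt, ap);

          if (n > 0)
                  loglen += ((size_t)n >= room) ? room - 1 : (size_t)n;
          return n;
  }

  static int log_emit(const char *fmt, ...)
  {
          va_list ap;
          int n;

          va_start(ap, fmt);
          pthread_mutex_lock(&logbuf_lock);
          n = log_store(fmt, ap);          /* store under the lock... */
          pthread_mutex_unlock(&logbuf_lock);
          va_end(ap);
          /* ...then notify consumers outside the lock */
          return n;
  }
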
4490 +diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
4491 +index d7d091309054..a0a74c533e4b 100644
4492 +--- a/kernel/printk/printk_safe.c
4493 ++++ b/kernel/printk/printk_safe.c
4494 +@@ -308,24 +308,33 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
4495 +
4496 + void printk_nmi_enter(void)
4497 + {
4498 +- /*
4499 +- * The size of the extra per-CPU buffer is limited. Use it only when
4500 +- * the main one is locked. If this CPU is not in the safe context,
4501 +- * the lock must be taken on another CPU and we could wait for it.
4502 +- */
4503 +- if ((this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) &&
4504 +- raw_spin_is_locked(&logbuf_lock)) {
4505 +- this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
4506 +- } else {
4507 +- this_cpu_or(printk_context, PRINTK_NMI_DEFERRED_CONTEXT_MASK);
4508 +- }
4509 ++ this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
4510 + }
4511 +
4512 + void printk_nmi_exit(void)
4513 + {
4514 +- this_cpu_and(printk_context,
4515 +- ~(PRINTK_NMI_CONTEXT_MASK |
4516 +- PRINTK_NMI_DEFERRED_CONTEXT_MASK));
4517 ++ this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
4518 ++}
4519 ++
4520 ++/*
4521 ++ * Marks code that might produce many messages in NMI context,
4522 ++ * where the risk of losing them is more critical than eventual
4523 ++ * reordering.
4524 ++ *
4525 ++ * It takes effect only when called in NMI context. Then printk()
4526 ++ * will try to store the messages into the main logbuf directly
4527 ++ * and use the per-CPU buffers only as a fallback when the lock
4528 ++ * is not available.
4529 ++ */
4530 ++void printk_nmi_direct_enter(void)
4531 ++{
4532 ++ if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
4533 ++ this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
4534 ++}
4535 ++
4536 ++void printk_nmi_direct_exit(void)
4537 ++{
4538 ++ this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
4539 + }
4540 +
4541 + #else
4542 +@@ -363,6 +372,20 @@ void __printk_safe_exit(void)
4543 +
4544 + __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
4545 + {
4546 ++ /*
4547 ++ * Try to use the main logbuf even in NMI. But avoid calling console
4548 ++ * drivers that might have their own locks.
4549 ++ */
4550 ++ if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
4551 ++ raw_spin_trylock(&logbuf_lock)) {
4552 ++ int len;
4553 ++
4554 ++ len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
4555 ++ raw_spin_unlock(&logbuf_lock);
4556 ++ defer_console_output();
4557 ++ return len;
4558 ++ }
4559 ++
4560 + /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
4561 + if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
4562 + return vprintk_nmi(fmt, args);
4563 +@@ -371,13 +394,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
4564 + if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
4565 + return vprintk_safe(fmt, args);
4566 +
4567 +- /*
4568 +- * Use the main logbuf when logbuf_lock is available in NMI.
4569 +- * But avoid calling console drivers that might have their own locks.
4570 +- */
4571 +- if (this_cpu_read(printk_context) & PRINTK_NMI_DEFERRED_CONTEXT_MASK)
4572 +- return vprintk_deferred(fmt, args);
4573 +-
4574 + /* No obstacles. */
4575 + return vprintk_default(fmt, args);
4576 + }
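
With the PRINTK_NMI_DIRECT flag set, vprintk_func() now tries logbuf_lock from NMI and stores straight into the main ring buffer, falling back to the small per-CPU buffer only when the lock is contended; possible reordering is accepted in exchange for not losing messages. A compilable toy of that trylock-with-fallback pattern (the mutex and the flat buffers are stand-ins for logbuf_lock and the per-CPU NMI buffer):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t logbuf_lock = PTHREAD_MUTEX_INITIALIZER;
    static char main_log[256], nmi_log[256];

    /* Stand-in for the new fast path in vprintk_func(). */
    static void nmi_direct_log(const char *msg)
    {
        if (pthread_mutex_trylock(&logbuf_lock) == 0) {
            strncat(main_log, msg,
                    sizeof(main_log) - strlen(main_log) - 1);
            pthread_mutex_unlock(&logbuf_lock);
            /* defer_console_output() would be called here */
        } else {
            /* lock contended: use the limited fallback buffer */
            strncat(nmi_log, msg,
                    sizeof(nmi_log) - strlen(nmi_log) - 1);
        }
    }

    int main(void)
    {
        nmi_direct_log("stored directly\n");
        /* a default (non-recursive) mutex makes the trylock below
         * fail, simulating contention */
        pthread_mutex_lock(&logbuf_lock);
        nmi_direct_log("falls back\n");
        pthread_mutex_unlock(&logbuf_lock);
        printf("main buffer: %sfallback buffer: %s", main_log, nmi_log);
        return 0;
    }
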
4577 +diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
4578 +index e190d1ef3a23..067cb83f37ea 100644
4579 +--- a/kernel/stop_machine.c
4580 ++++ b/kernel/stop_machine.c
4581 +@@ -81,6 +81,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
4582 + unsigned long flags;
4583 + bool enabled;
4584 +
4585 ++ preempt_disable();
4586 + raw_spin_lock_irqsave(&stopper->lock, flags);
4587 + enabled = stopper->enabled;
4588 + if (enabled)
4589 +@@ -90,6 +91,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
4590 + raw_spin_unlock_irqrestore(&stopper->lock, flags);
4591 +
4592 + wake_up_q(&wakeq);
4593 ++ preempt_enable();
4594 +
4595 + return enabled;
4596 + }
4597 +@@ -236,13 +238,24 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
4598 + struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
4599 + DEFINE_WAKE_Q(wakeq);
4600 + int err;
4601 ++
4602 + retry:
4603 ++ /*
4604 ++ * The waking up of stopper threads has to happen in the same
4605 ++ * scheduling context as the queueing. Otherwise, there is a
4606 ++ * possibility of one of the above stoppers being woken up by another
4607 ++ * CPU, and preempting us. This will cause us to not wake up the other
4608 ++ * stopper forever.
4609 ++ */
4610 ++ preempt_disable();
4611 + raw_spin_lock_irq(&stopper1->lock);
4612 + raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
4613 +
4614 +- err = -ENOENT;
4615 +- if (!stopper1->enabled || !stopper2->enabled)
4616 ++ if (!stopper1->enabled || !stopper2->enabled) {
4617 ++ err = -ENOENT;
4618 + goto unlock;
4619 ++ }
4620 ++
4621 + /*
4622 + * Ensure that if we race with __stop_cpus() the stoppers won't get
4623 + * queued up in reverse order leading to system deadlock.
4624 +@@ -253,36 +266,30 @@ retry:
4625 + * It can be falsely true but it is safe to spin until it is cleared,
4626 + * queue_stop_cpus_work() does everything under preempt_disable().
4627 + */
4628 +- err = -EDEADLK;
4629 +- if (unlikely(stop_cpus_in_progress))
4630 +- goto unlock;
4631 ++ if (unlikely(stop_cpus_in_progress)) {
4632 ++ err = -EDEADLK;
4633 ++ goto unlock;
4634 ++ }
4635 +
4636 + err = 0;
4637 + __cpu_stop_queue_work(stopper1, work1, &wakeq);
4638 + __cpu_stop_queue_work(stopper2, work2, &wakeq);
4639 +- /*
4640 +- * The waking up of stopper threads has to happen
4641 +- * in the same scheduling context as the queueing.
4642 +- * Otherwise, there is a possibility of one of the
4643 +- * above stoppers being woken up by another CPU,
4644 +- * and preempting us. This will cause us to n ot
4645 +- * wake up the other stopper forever.
4646 +- */
4647 +- preempt_disable();
4648 ++
4649 + unlock:
4650 + raw_spin_unlock(&stopper2->lock);
4651 + raw_spin_unlock_irq(&stopper1->lock);
4652 +
4653 + if (unlikely(err == -EDEADLK)) {
4654 ++ preempt_enable();
4655 ++
4656 + while (stop_cpus_in_progress)
4657 + cpu_relax();
4658 ++
4659 + goto retry;
4660 + }
4661 +
4662 +- if (!err) {
4663 +- wake_up_q(&wakeq);
4664 +- preempt_enable();
4665 +- }
4666 ++ wake_up_q(&wakeq);
4667 ++ preempt_enable();
4668 +
4669 + return err;
4670 + }
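
The point of the stop_machine changes is ordering: the works are queued and the stopper threads woken inside a single preemption-disabled region, so a stopper woken early on another CPU cannot preempt this task before the second wakeup is issued. A stub-only sketch of that shape (preempt_disable()/preempt_enable() are no-ops here; the kernel versions really disable preemption, and wake_q batches the wakeups):

    #include <pthread.h>
    #include <stdio.h>

    static void preempt_disable(void) { /* stub */ }
    static void preempt_enable(void)  { /* stub */ }

    static pthread_mutex_t stopper_lock = PTHREAD_MUTEX_INITIALIZER;
    static int work_queued;

    /* Shape of the fixed cpu_stop_queue_work(): queue under the lock,
     * wake after dropping it, all before re-enabling preemption. */
    static void cpu_stop_queue_work(void)
    {
        preempt_disable();
        pthread_mutex_lock(&stopper_lock);
        work_queued = 1;                /* __cpu_stop_queue_work() */
        pthread_mutex_unlock(&stopper_lock);
        puts("wake_up_q(&wakeq)");      /* still non-preemptible */
        preempt_enable();
    }

    int main(void)
    {
        cpu_stop_queue_work();
        printf("queued=%d\n", work_queued);
        return 0;
    }
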
4671 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4672 +index 823687997b01..176debd3481b 100644
4673 +--- a/kernel/trace/trace.c
4674 ++++ b/kernel/trace/trace.c
4675 +@@ -8288,6 +8288,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
4676 + tracing_off();
4677 +
4678 + local_irq_save(flags);
4679 ++ printk_nmi_direct_enter();
4680 +
4681 + /* Simulate the iterator */
4682 + trace_init_global_iter(&iter);
4683 +@@ -8367,7 +8368,8 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
4684 + for_each_tracing_cpu(cpu) {
4685 + atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
4686 + }
4687 +- atomic_dec(&dump_running);
4688 ++ atomic_dec(&dump_running);
4689 ++ printk_nmi_direct_exit();
4690 + local_irq_restore(flags);
4691 + }
4692 + EXPORT_SYMBOL_GPL(ftrace_dump);
4693 +diff --git a/kernel/watchdog.c b/kernel/watchdog.c
4694 +index 576d18045811..51f5a64d9ec2 100644
4695 +--- a/kernel/watchdog.c
4696 ++++ b/kernel/watchdog.c
4697 +@@ -266,7 +266,7 @@ static void __touch_watchdog(void)
4698 + * entering idle state. This should only be used for scheduler events.
4699 + * Use touch_softlockup_watchdog() for everything else.
4700 + */
4701 +-void touch_softlockup_watchdog_sched(void)
4702 ++notrace void touch_softlockup_watchdog_sched(void)
4703 + {
4704 + /*
4705 + * Preemption can be enabled. It doesn't matter which CPU's timestamp
4706 +@@ -275,7 +275,7 @@ void touch_softlockup_watchdog_sched(void)
4707 + raw_cpu_write(watchdog_touch_ts, 0);
4708 + }
4709 +
4710 +-void touch_softlockup_watchdog(void)
4711 ++notrace void touch_softlockup_watchdog(void)
4712 + {
4713 + touch_softlockup_watchdog_sched();
4714 + wq_watchdog_touch(raw_smp_processor_id());
4715 +diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
4716 +index e449a23e9d59..4ece6028007a 100644
4717 +--- a/kernel/watchdog_hld.c
4718 ++++ b/kernel/watchdog_hld.c
4719 +@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask;
4720 + static unsigned long hardlockup_allcpu_dumped;
4721 + static atomic_t watchdog_cpus = ATOMIC_INIT(0);
4722 +
4723 +-void arch_touch_nmi_watchdog(void)
4724 ++notrace void arch_touch_nmi_watchdog(void)
4725 + {
4726 + /*
4727 + * Using __raw here because some code paths have
4728 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
4729 +index 78b192071ef7..5f78c6e41796 100644
4730 +--- a/kernel/workqueue.c
4731 ++++ b/kernel/workqueue.c
4732 +@@ -5559,7 +5559,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
4733 + mod_timer(&wq_watchdog_timer, jiffies + thresh);
4734 + }
4735 +
4736 +-void wq_watchdog_touch(int cpu)
4737 ++notrace void wq_watchdog_touch(int cpu)
4738 + {
4739 + if (cpu >= 0)
4740 + per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
4741 +diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
4742 +index 61a6b5aab07e..15ca78e1c7d4 100644
4743 +--- a/lib/nmi_backtrace.c
4744 ++++ b/lib/nmi_backtrace.c
4745 +@@ -87,11 +87,9 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
4746 +
4747 + bool nmi_cpu_backtrace(struct pt_regs *regs)
4748 + {
4749 +- static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
4750 + int cpu = smp_processor_id();
4751 +
4752 + if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
4753 +- arch_spin_lock(&lock);
4754 + if (regs && cpu_in_idle(instruction_pointer(regs))) {
4755 + pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
4756 + cpu, (void *)instruction_pointer(regs));
4757 +@@ -102,7 +100,6 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
4758 + else
4759 + dump_stack();
4760 + }
4761 +- arch_spin_unlock(&lock);
4762 + cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
4763 + return true;
4764 + }
4765 +diff --git a/lib/vsprintf.c b/lib/vsprintf.c
4766 +index a48aaa79d352..cda186230287 100644
4767 +--- a/lib/vsprintf.c
4768 ++++ b/lib/vsprintf.c
4769 +@@ -1942,6 +1942,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
4770 + case 'F':
4771 + return device_node_string(buf, end, ptr, spec, fmt + 1);
4772 + }
4773 ++ break;
4774 + case 'x':
4775 + return pointer_string(buf, end, ptr, spec);
4776 + }
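
The one-line vsprintf.c change adds a missing break: without it, a %pO conversion with an unrecognized subtype falls straight through into the %px case and prints the raw, unhashed pointer value. A self-contained illustration of the fallthrough bug (a hypothetical reduction, not the kernel's pointer() function):

    #include <stdio.h>

    static const char *classify(char fmt, char sub)
    {
        switch (fmt) {
        case 'O':
            switch (sub) {
            case 'F':
                return "device node";
            }
            break;  /* the fix: stop 'O' leaking into 'x' */
        case 'x':
            return "raw pointer";
        }
        return "unrecognized";
    }

    int main(void)
    {
        /* prints "unrecognized"; without the break it would print
         * "raw pointer" for an unknown %pO subtype */
        puts(classify('O', 'Q'));
        return 0;
    }
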
4777 +diff --git a/mm/memory.c b/mm/memory.c
4778 +index 0e356dd923c2..86d4329acb05 100644
4779 +--- a/mm/memory.c
4780 ++++ b/mm/memory.c
4781 +@@ -245,9 +245,6 @@ static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
4782 +
4783 + tlb_flush(tlb);
4784 + mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
4785 +-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
4786 +- tlb_table_flush(tlb);
4787 +-#endif
4788 + __tlb_reset_range(tlb);
4789 + }
4790 +
4791 +@@ -255,6 +252,9 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
4792 + {
4793 + struct mmu_gather_batch *batch;
4794 +
4795 ++#ifdef CONFIG_HAVE_RCU_TABLE_FREE
4796 ++ tlb_table_flush(tlb);
4797 ++#endif
4798 + for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
4799 + free_pages_and_swap_cache(batch->pages, batch->nr);
4800 + batch->nr = 0;
4801 +@@ -330,6 +330,21 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
4802 + * See the comment near struct mmu_table_batch.
4803 + */
4804 +
4805 ++/*
4806 ++ * If we want tlb_remove_table() to imply TLB invalidates.
4807 ++ */
4808 ++static inline void tlb_table_invalidate(struct mmu_gather *tlb)
4809 ++{
4810 ++#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
4811 ++ /*
4812 ++ * Invalidate page-table caches used by hardware walkers. Then we still
4813 ++ * need to RCU-sched wait while freeing the pages because software
4814 ++ * walkers can still be in-flight.
4815 ++ */
4816 ++ tlb_flush_mmu_tlbonly(tlb);
4817 ++#endif
4818 ++}
4819 ++
4820 + static void tlb_remove_table_smp_sync(void *arg)
4821 + {
4822 + /* Simply deliver the interrupt */
4823 +@@ -366,6 +381,7 @@ void tlb_table_flush(struct mmu_gather *tlb)
4824 + struct mmu_table_batch **batch = &tlb->batch;
4825 +
4826 + if (*batch) {
4827 ++ tlb_table_invalidate(tlb);
4828 + call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
4829 + *batch = NULL;
4830 + }
4831 +@@ -387,11 +403,13 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
4832 + if (*batch == NULL) {
4833 + *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
4834 + if (*batch == NULL) {
4835 ++ tlb_table_invalidate(tlb);
4836 + tlb_remove_table_one(table);
4837 + return;
4838 + }
4839 + (*batch)->nr = 0;
4840 + }
4841 ++
4842 + (*batch)->tables[(*batch)->nr++] = table;
4843 + if ((*batch)->nr == MAX_TABLE_BATCH)
4844 + tlb_table_flush(tlb);
4845 +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
4846 +index 16161a36dc73..e8d1024dc547 100644
4847 +--- a/net/sunrpc/xprtrdma/verbs.c
4848 ++++ b/net/sunrpc/xprtrdma/verbs.c
4849 +@@ -280,7 +280,6 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
4850 + ++xprt->rx_xprt.connect_cookie;
4851 + connstate = -ECONNABORTED;
4852 + connected:
4853 +- xprt->rx_buf.rb_credits = 1;
4854 + ep->rep_connected = connstate;
4855 + rpcrdma_conn_func(ep);
4856 + wake_up_all(&ep->rep_connect_wait);
4857 +@@ -755,6 +754,7 @@ retry:
4858 + }
4859 +
4860 + ep->rep_connected = 0;
4861 ++ rpcrdma_post_recvs(r_xprt, true);
4862 +
4863 + rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
4864 + if (rc) {
4865 +@@ -773,8 +773,6 @@ retry:
4866 +
4867 + dprintk("RPC: %s: connected\n", __func__);
4868 +
4869 +- rpcrdma_post_recvs(r_xprt, true);
4870 +-
4871 + out:
4872 + if (rc)
4873 + ep->rep_connected = rc;
4874 +@@ -1171,6 +1169,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
4875 + list_add(&req->rl_list, &buf->rb_send_bufs);
4876 + }
4877 +
4878 ++ buf->rb_credits = 1;
4879 + buf->rb_posted_receives = 0;
4880 + INIT_LIST_HEAD(&buf->rb_recv_bufs);
4881 +
4882 +diff --git a/scripts/kernel-doc b/scripts/kernel-doc
4883 +index 0057d8eafcc1..8f0f508a78e9 100755
4884 +--- a/scripts/kernel-doc
4885 ++++ b/scripts/kernel-doc
4886 +@@ -1062,7 +1062,7 @@ sub dump_struct($$) {
4887 + my $x = shift;
4888 + my $file = shift;
4889 +
4890 +- if ($x =~ /(struct|union)\s+(\w+)\s*{(.*)}/) {
4891 ++ if ($x =~ /(struct|union)\s+(\w+)\s*\{(.*)\}/) {
4892 + my $decl_type = $1;
4893 + $declaration_name = $2;
4894 + my $members = $3;
4895 +@@ -1148,20 +1148,20 @@ sub dump_struct($$) {
4896 + }
4897 + }
4898 + }
4899 +- $members =~ s/(struct|union)([^\{\};]+)\{([^\{\}]*)}([^\{\}\;]*)\;/$newmember/;
4900 ++ $members =~ s/(struct|union)([^\{\};]+)\{([^\{\}]*)\}([^\{\}\;]*)\;/$newmember/;
4901 + }
4902 +
4903 + # Ignore other nested elements, like enums
4904 +- $members =~ s/({[^\{\}]*})//g;
4905 ++ $members =~ s/(\{[^\{\}]*\})//g;
4906 +
4907 + create_parameterlist($members, ';', $file, $declaration_name);
4908 + check_sections($file, $declaration_name, $decl_type, $sectcheck, $struct_actual);
4909 +
4910 + # Adjust declaration for better display
4911 +- $declaration =~ s/([{;])/$1\n/g;
4912 +- $declaration =~ s/}\s+;/};/g;
4913 ++ $declaration =~ s/([\{;])/$1\n/g;
4914 ++ $declaration =~ s/\}\s+;/};/g;
4915 + # Better handle inlined enums
4916 +- do {} while ($declaration =~ s/(enum\s+{[^}]+),([^\n])/$1,\n$2/);
4917 ++ do {} while ($declaration =~ s/(enum\s+\{[^\}]+),([^\n])/$1,\n$2/);
4918 +
4919 + my @def_args = split /\n/, $declaration;
4920 + my $level = 1;
4921 +@@ -1171,12 +1171,12 @@ sub dump_struct($$) {
4922 + $clause =~ s/\s+$//;
4923 + $clause =~ s/\s+/ /;
4924 + next if (!$clause);
4925 +- $level-- if ($clause =~ m/(})/ && $level > 1);
4926 ++ $level-- if ($clause =~ m/(\})/ && $level > 1);
4927 + if (!($clause =~ m/^\s*#/)) {
4928 + $declaration .= "\t" x $level;
4929 + }
4930 + $declaration .= "\t" . $clause . "\n";
4931 +- $level++ if ($clause =~ m/({)/ && !($clause =~m/}/));
4932 ++ $level++ if ($clause =~ m/(\{)/ && !($clause =~m/\}/));
4933 + }
4934 + output_declaration($declaration_name,
4935 + 'struct',
4936 +@@ -1244,7 +1244,7 @@ sub dump_enum($$) {
4937 + # strip #define macros inside enums
4938 + $x =~ s@#\s*((define|ifdef)\s+|endif)[^;]*;@@gos;
4939 +
4940 +- if ($x =~ /enum\s+(\w+)\s*{(.*)}/) {
4941 ++ if ($x =~ /enum\s+(\w+)\s*\{(.*)\}/) {
4942 + $declaration_name = $1;
4943 + my $members = $2;
4944 + my %_members;
4945 +@@ -1785,7 +1785,7 @@ sub process_proto_type($$) {
4946 + }
4947 +
4948 + while (1) {
4949 +- if ( $x =~ /([^{};]*)([{};])(.*)/ ) {
4950 ++ if ( $x =~ /([^\{\};]*)([\{\};])(.*)/ ) {
4951 + if( length $prototype ) {
4952 + $prototype .= " "
4953 + }
4954 +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
4955 +index 2fcdd84021a5..86c7805da997 100644
4956 +--- a/sound/soc/codecs/wm_adsp.c
4957 ++++ b/sound/soc/codecs/wm_adsp.c
4958 +@@ -2642,7 +2642,10 @@ int wm_adsp2_preloader_get(struct snd_kcontrol *kcontrol,
4959 + struct snd_ctl_elem_value *ucontrol)
4960 + {
4961 + struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
4962 +- struct wm_adsp *dsp = snd_soc_component_get_drvdata(component);
4963 ++ struct wm_adsp *dsps = snd_soc_component_get_drvdata(component);
4964 ++ struct soc_mixer_control *mc =
4965 ++ (struct soc_mixer_control *)kcontrol->private_value;
4966 ++ struct wm_adsp *dsp = &dsps[mc->shift - 1];
4967 +
4968 + ucontrol->value.integer.value[0] = dsp->preloaded;
4969 +
4970 +@@ -2654,10 +2657,11 @@ int wm_adsp2_preloader_put(struct snd_kcontrol *kcontrol,
4971 + struct snd_ctl_elem_value *ucontrol)
4972 + {
4973 + struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
4974 +- struct wm_adsp *dsp = snd_soc_component_get_drvdata(component);
4975 ++ struct wm_adsp *dsps = snd_soc_component_get_drvdata(component);
4976 + struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
4977 + struct soc_mixer_control *mc =
4978 + (struct soc_mixer_control *)kcontrol->private_value;
4979 ++ struct wm_adsp *dsp = &dsps[mc->shift - 1];
4980 + char preload[32];
4981 +
4982 + snprintf(preload, ARRAY_SIZE(preload), "DSP%u Preload", mc->shift);
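
The wm_adsp change matters because snd_soc_component_get_drvdata() returns an array of DSP instances on these codecs, and both preloader callbacks previously acted on element 0 regardless of which control was touched; mc->shift carries the 1-based DSP number. A toy model of the indexing fix (stand-in types, not the ASoC API):

    #include <stdio.h>

    struct wm_adsp { int preloaded; };

    int main(void)
    {
        struct wm_adsp dsps[2] = { { 0 }, { 1 } };      /* DSP1, DSP2 */
        unsigned int shift = 2;                         /* control for DSP2 */
        struct wm_adsp *dsp = &dsps[shift - 1];         /* not &dsps[0] */

        printf("DSP%u preloaded=%d\n", shift, dsp->preloaded);
        return 0;
    }
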
4983 +diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c
4984 +index 77e7dcf969d0..d70fcd4a1adf 100644
4985 +--- a/sound/soc/sirf/sirf-usp.c
4986 ++++ b/sound/soc/sirf/sirf-usp.c
4987 +@@ -370,10 +370,9 @@ static int sirf_usp_pcm_probe(struct platform_device *pdev)
4988 + platform_set_drvdata(pdev, usp);
4989 +
4990 + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4991 +- base = devm_ioremap(&pdev->dev, mem_res->start,
4992 +- resource_size(mem_res));
4993 +- if (base == NULL)
4994 +- return -ENOMEM;
4995 ++ base = devm_ioremap_resource(&pdev->dev, mem_res);
4996 ++ if (IS_ERR(base))
4997 ++ return PTR_ERR(base);
4998 + usp->regmap = devm_regmap_init_mmio(&pdev->dev, base,
4999 + &sirf_usp_regmap_config);
5000 + if (IS_ERR(usp->regmap))
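
devm_ioremap_resource() differs from a bare devm_ioremap() in that it also validates the resource (catching a NULL mem_res) and reports failure as an ERR_PTR-encoded errno instead of NULL, hence the switch from a NULL check to IS_ERR()/PTR_ERR(). A compilable sketch of the ERR_PTR convention (simplified from the idea in include/linux/err.h; the values and helpers here are stand-ins):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Errors are encoded as pointers into the top 4095 bytes of the
     * address space, which no valid mapping can occupy. */
    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *fake_ioremap_resource(int fail)
    {
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
    }

    int main(void)
    {
        void *base = fake_ioremap_resource(1);

        if (IS_ERR(base)) {
            printf("mapping failed: %ld\n", PTR_ERR(base));
            return 1;
        }
        return 0;
    }
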
5001 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
5002 +index 5e7ae47a9658..5feae9666822 100644
5003 +--- a/sound/soc/soc-pcm.c
5004 ++++ b/sound/soc/soc-pcm.c
5005 +@@ -1694,6 +1694,14 @@ static u64 dpcm_runtime_base_format(struct snd_pcm_substream *substream)
5006 + int i;
5007 +
5008 + for (i = 0; i < be->num_codecs; i++) {
5009 ++ /*
5010 ++ * Skip CODECs which don't support the current stream
5011 ++ * type. See soc_pcm_init_runtime_hw() for more details
5012 ++ */
5013 ++ if (!snd_soc_dai_stream_valid(be->codec_dais[i],
5014 ++ stream))
5015 ++ continue;
5016 ++
5017 + codec_dai_drv = be->codec_dais[i]->driver;
5018 + if (stream == SNDRV_PCM_STREAM_PLAYBACK)
5019 + codec_stream = &codec_dai_drv->playback;
5020 +diff --git a/sound/soc/zte/zx-tdm.c b/sound/soc/zte/zx-tdm.c
5021 +index dc955272f58b..389272eeba9a 100644
5022 +--- a/sound/soc/zte/zx-tdm.c
5023 ++++ b/sound/soc/zte/zx-tdm.c
5024 +@@ -144,8 +144,8 @@ static void zx_tdm_rx_dma_en(struct zx_tdm_info *tdm, bool on)
5025 + #define ZX_TDM_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000)
5026 +
5027 + #define ZX_TDM_FMTBIT \
5028 +- (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_MU_LAW | \
5029 +- SNDRV_PCM_FORMAT_A_LAW)
5030 ++ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_MU_LAW | \
5031 ++ SNDRV_PCM_FMTBIT_A_LAW)
5032 +
5033 + static int zx_tdm_dai_probe(struct snd_soc_dai *dai)
5034 + {
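
The zx-tdm fix is a units bug: SNDRV_PCM_FORMAT_* constants are plain enum indices, while SNDRV_PCM_FMTBIT_* are the corresponding bit masks (1ULL << index), and the formats field expects masks. OR-ing raw indices into the mask advertised a bogus format set. A small demonstration (the index values mirror include/uapi/sound/asound.h but are hard-coded stand-ins here):

    #include <stdio.h>

    enum { FMT_S16_LE = 2, FMT_MU_LAW = 12, FMT_A_LAW = 13 };
    #define FMTBIT(x) (1ULL << (x))

    int main(void)
    {
        /* indices OR'd in by mistake: collapses to 0xd */
        unsigned long long wrong =
            FMTBIT(FMT_S16_LE) | FMT_MU_LAW | FMT_A_LAW;
        /* masks, as the fixed code uses: 0x3004 */
        unsigned long long right =
            FMTBIT(FMT_S16_LE) | FMTBIT(FMT_MU_LAW) | FMTBIT(FMT_A_LAW);

        printf("wrong=%#llx right=%#llx\n", wrong, right);
        return 0;
    }
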
5035 +diff --git a/tools/perf/arch/s390/util/kvm-stat.c b/tools/perf/arch/s390/util/kvm-stat.c
5036 +index d233e2eb9592..aaabab5e2830 100644
5037 +--- a/tools/perf/arch/s390/util/kvm-stat.c
5038 ++++ b/tools/perf/arch/s390/util/kvm-stat.c
5039 +@@ -102,7 +102,7 @@ const char * const kvm_skip_events[] = {
5040 +
5041 + int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
5042 + {
5043 +- if (strstr(cpuid, "IBM/S390")) {
5044 ++ if (strstr(cpuid, "IBM")) {
5045 + kvm->exit_reasons = sie_exit_reasons;
5046 + kvm->exit_reasons_isa = "SIE";
5047 + } else
5048 +diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
5049 +index bd3d57f40f1b..17cecc96f735 100644
5050 +--- a/virt/kvm/arm/arch_timer.c
5051 ++++ b/virt/kvm/arm/arch_timer.c
5052 +@@ -295,9 +295,9 @@ static void phys_timer_emulate(struct kvm_vcpu *vcpu)
5053 + struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
5054 +
5055 + /*
5056 +- * If the timer can fire now we have just raised the IRQ line and we
5057 +- * don't need to have a soft timer scheduled for the future. If the
5058 +- * timer cannot fire at all, then we also don't need a soft timer.
5059 ++ * If the timer can fire now, we don't need to have a soft timer
5060 ++ * scheduled for the future. If the timer cannot fire at all,
5061 ++ * then we also don't need a soft timer.
5062 + */
5063 + if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) {
5064 + soft_timer_cancel(&timer->phys_timer, NULL);
5065 +@@ -332,10 +332,10 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
5066 + level = kvm_timer_should_fire(vtimer);
5067 + kvm_timer_update_irq(vcpu, level, vtimer);
5068 +
5069 ++ phys_timer_emulate(vcpu);
5070 ++
5071 + if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
5072 + kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
5073 +-
5074 +- phys_timer_emulate(vcpu);
5075 + }
5076 +
5077 + static void vtimer_save_state(struct kvm_vcpu *vcpu)
5078 +@@ -487,6 +487,7 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
5079 + {
5080 + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
5081 + struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
5082 ++ struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
5083 +
5084 + if (unlikely(!timer->enabled))
5085 + return;
5086 +@@ -502,6 +503,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
5087 +
5088 + /* Set the background timer for the physical timer emulation. */
5089 + phys_timer_emulate(vcpu);
5090 ++
5091 ++ /* If the timer fired while we weren't running, inject it now */
5092 ++ if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
5093 ++ kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
5094 + }
5095 +
5096 + bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
5097 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
5098 +index 1d90d79706bd..c2b95a22959b 100644
5099 +--- a/virt/kvm/arm/mmu.c
5100 ++++ b/virt/kvm/arm/mmu.c
5101 +@@ -1015,19 +1015,35 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
5102 + pmd = stage2_get_pmd(kvm, cache, addr);
5103 + VM_BUG_ON(!pmd);
5104 +
5105 +- /*
5106 +- * Mapping in huge pages should only happen through a fault. If a
5107 +- * page is merged into a transparent huge page, the individual
5108 +- * subpages of that huge page should be unmapped through MMU
5109 +- * notifiers before we get here.
5110 +- *
5111 +- * Merging of CompoundPages is not supported; they should become
5112 +- * splitting first, unmapped, merged, and mapped back in on-demand.
5113 +- */
5114 +- VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
5115 +-
5116 + old_pmd = *pmd;
5117 + if (pmd_present(old_pmd)) {
5118 ++ /*
5119 ++ * Multiple vcpus faulting on the same PMD entry, can
5120 ++ * lead to them sequentially updating the PMD with the
5121 ++ * same value. Following the break-before-make
5122 ++ * (pmd_clear() followed by tlb_flush()) process can
5123 ++ * hinder forward progress due to refaults generated
5124 ++ * on missing translations.
5125 ++ *
5126 ++ * Skip updating the page table if the entry is
5127 ++ * unchanged.
5128 ++ */
5129 ++ if (pmd_val(old_pmd) == pmd_val(*new_pmd))
5130 ++ return 0;
5131 ++
5132 ++ /*
5133 ++ * Mapping in huge pages should only happen through a
5134 ++ * fault. If a page is merged into a transparent huge
5135 ++ * page, the individual subpages of that huge page
5136 ++ * should be unmapped through MMU notifiers before we
5137 ++ * get here.
5138 ++ *
5139 ++ * Merging of CompoundPages is not supported; they
5140 ++ * should first be split, then unmapped, merged,
5141 ++ * and mapped back in on-demand.
5142 ++ */
5143 ++ VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
5144 ++
5145 + pmd_clear(pmd);
5146 + kvm_tlb_flush_vmid_ipa(kvm, addr);
5147 + } else {
5148 +@@ -1102,6 +1118,10 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
5149 + /* Create 2nd stage page table mapping - Level 3 */
5150 + old_pte = *pte;
5151 + if (pte_present(old_pte)) {
5152 ++ /* Skip page table update if there is no change */
5153 ++ if (pte_val(old_pte) == pte_val(*new_pte))
5154 ++ return 0;
5155 ++
5156 + kvm_set_pte(pte, __pte(0));
5157 + kvm_tlb_flush_vmid_ipa(kvm, addr);
5158 + } else {