Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 14 Nov 2018 14:01:05
Message-Id: 1542204041.e8f7b595d24caaeec8ddbb979a63ef70832c51dc.mpagano@gentoo
commit: e8f7b595d24caaeec8ddbb979a63ef70832c51dc
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Sep 5 15:28:34 2018 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Nov 14 14:00:41 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e8f7b595d24caaeec8ddbb979a63ef70832c51dc

Linux patch 4.14.68

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

0000_README | 4 +
1067_linux-4.14.68.patch | 5725 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5729 insertions(+)

diff --git a/0000_README b/0000_README
index de230d4..4fd9ed9 100644
--- a/0000_README
+++ b/0000_README
@@ -311,6 +311,10 @@ Patch: 1066_linux-4.14.67.patch
 From: http://www.kernel.org
 Desc: Linux 4.14.67

+Patch: 1067_linux-4.14.68.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.68
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1067_linux-4.14.68.patch b/1067_linux-4.14.68.patch
new file mode 100644
index 0000000..4089fa8
--- /dev/null
+++ b/1067_linux-4.14.68.patch
@@ -0,0 +1,5725 @@
+diff --git a/Makefile b/Makefile
+index 4dad2d1c24ba..3da579058926 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 67
++SUBLEVEL = 68
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+@@ -490,9 +490,13 @@ KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+ endif
+
+ RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
++RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
+ RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
++RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
+ RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
++RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
+ export RETPOLINE_CFLAGS
++export RETPOLINE_VDSO_CFLAGS
+
+ ifeq ($(config-targets),1)
+ # ===========================================================================
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 4e01862f58e4..40dc31fea90c 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -336,6 +336,9 @@ config HAVE_ARCH_JUMP_LABEL
+ config HAVE_RCU_TABLE_FREE
+ bool
+
++config HAVE_RCU_TABLE_INVALIDATE
++ bool
++
+ config ARCH_HAVE_NMI_SAFE_CMPXCHG
+ bool
+
+diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
+index 5c8caf85c350..8ff066090680 100644
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -45,6 +45,9 @@ config ARC
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZMA
+
++config ARCH_HAS_CACHE_LINE_SIZE
++ def_bool y
++
+ config MIGHT_HAVE_PCI
+ bool
+
+diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
+index 8486f328cc5d..ff7d3232764a 100644
+--- a/arch/arc/include/asm/cache.h
++++ b/arch/arc/include/asm/cache.h
+@@ -48,7 +48,9 @@
+ })
+
+ /* Largest line length for either L1 or L2 is 128 bytes */
+-#define ARCH_DMA_MINALIGN 128
++#define SMP_CACHE_BYTES 128
++#define cache_line_size() SMP_CACHE_BYTES
++#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
+
+ extern void arc_cache_init(void);
+ extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
+index d5da2115d78a..03d6bb0f4e13 100644
+--- a/arch/arc/include/asm/delay.h
++++ b/arch/arc/include/asm/delay.h
+@@ -17,8 +17,11 @@
+ #ifndef __ASM_ARC_UDELAY_H
+ #define __ASM_ARC_UDELAY_H
+
++#include <asm-generic/types.h>
+ #include <asm/param.h> /* HZ */
+
++extern unsigned long loops_per_jiffy;
++
+ static inline void __delay(unsigned long loops)
+ {
+ __asm__ __volatile__(
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index eee924dfffa6..d14499500106 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -1035,7 +1035,7 @@ void flush_cache_mm(struct mm_struct *mm)
+ void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+ unsigned long pfn)
+ {
+- unsigned int paddr = pfn << PAGE_SHIFT;
++ phys_addr_t paddr = pfn << PAGE_SHIFT;
+
+ u_vaddr &= PAGE_MASK;
+
+@@ -1055,8 +1055,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long u_vaddr)
+ {
+ /* TBD: do we really need to clear the kernel mapping */
+- __flush_dcache_page(page_address(page), u_vaddr);
+- __flush_dcache_page(page_address(page), page_address(page));
++ __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
++ __flush_dcache_page((phys_addr_t)page_address(page),
++ (phys_addr_t)page_address(page));
+
+ }
+
+diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
+index 0c7d11022d0f..4f6a1673b3a6 100644
+--- a/arch/arc/plat-eznps/include/plat/ctop.h
++++ b/arch/arc/plat-eznps/include/plat/ctop.h
+@@ -21,6 +21,7 @@
+ #error "Incorrect ctop.h include"
+ #endif
+
++#include <linux/types.h>
+ #include <soc/nps/common.h>
+
+ /* core auxiliary registers */
+@@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst {
+ };
+
+ /* AUX registers definition */
++struct nps_host_reg_aux_dpc {
++ union {
++ struct {
++ u32 ien:1, men:1, hen:1, reserved:29;
++ };
++ u32 value;
++ };
++};
++
+ struct nps_host_reg_aux_udmc {
+ union {
+ struct {
+diff --git a/arch/arc/plat-eznps/mtm.c b/arch/arc/plat-eznps/mtm.c
+index 2388de3d09ef..ed0077ef666e 100644
+--- a/arch/arc/plat-eznps/mtm.c
++++ b/arch/arc/plat-eznps/mtm.c
+@@ -15,6 +15,8 @@
+ */
+
+ #include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
+ #include <linux/io.h>
+ #include <linux/log2.h>
+ #include <asm/arcregs.h>
+@@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu)
+ /* Verify and set the value of the mtm hs counter */
+ static int __init set_mtm_hs_ctr(char *ctr_str)
+ {
+- long hs_ctr;
++ int hs_ctr;
+ int ret;
+
+- ret = kstrtol(ctr_str, 0, &hs_ctr);
++ ret = kstrtoint(ctr_str, 0, &hs_ctr);
+
+ if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) {
+ pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n",
+diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
+index 52d1cd14fda4..091e9a3c2dcb 100644
+--- a/arch/arm/probes/kprobes/core.c
++++ b/arch/arm/probes/kprobes/core.c
+@@ -291,8 +291,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
+ break;
+ case KPROBE_REENTER:
+ /* A nested probe was hit in FIQ, it is a BUG */
+- pr_warn("Unrecoverable kprobe detected at %p.\n",
+- p->addr);
++ pr_warn("Unrecoverable kprobe detected.\n");
++ dump_kprobe(p);
+ /* fall through */
+ default:
+ /* impossible cases */
+diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
+index 1c98a87786ca..a10d7187ad2c 100644
+--- a/arch/arm/probes/kprobes/test-core.c
++++ b/arch/arm/probes/kprobes/test-core.c
+@@ -1517,7 +1517,6 @@ fail:
+ print_registers(&result_regs);
+
+ if (mem) {
+- pr_err("current_stack=%p\n", current_stack);
+ pr_err("expected_memory:\n");
+ print_memory(expected_memory, mem_size);
+ pr_err("result_memory:\n");
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index d70e409e2b0c..efac2202b16e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -331,7 +331,7 @@
+ reg = <0x0 0xff120000 0x0 0x100>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
+- clock-names = "sclk_uart", "pclk_uart";
++ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac 4>, <&dmac 5>;
+ #dma-cells = <2>;
+ pinctrl-names = "default";
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index d849d9804011..22a5921562c7 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
+ break;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+- pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
++ pr_warn("Unrecoverable kprobe detected.\n");
+ dump_kprobe(p);
+ BUG();
+ break;
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 1190d90e01e6..caa295cd5d09 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -287,7 +287,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
+ #ifdef CONFIG_HAVE_ARCH_PFN_VALID
+ int pfn_valid(unsigned long pfn)
+ {
+- return memblock_is_map_memory(pfn << PAGE_SHIFT);
++ phys_addr_t addr = pfn << PAGE_SHIFT;
++
++ if ((addr >> PAGE_SHIFT) != pfn)
++ return 0;
++ return memblock_is_map_memory(addr);
+ }
+ EXPORT_SYMBOL(pfn_valid);
+ #endif
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index a96d97a806c9..5977884b008e 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -155,15 +155,11 @@ cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap
+ cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap
+ cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
+ cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
+-cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
+- -Wa,-mips32 -Wa,--trap
+-cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
+- -Wa,-mips32r2 -Wa,--trap
++cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
++cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap
+ cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
+-cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+- -Wa,-mips64 -Wa,--trap
+-cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+- -Wa,-mips64r2 -Wa,--trap
++cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap
++cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap
+ cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
+ cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
+ cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \
+diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
+index 8c9cbf13d32a..6054d49e608e 100644
+--- a/arch/mips/bcm47xx/setup.c
++++ b/arch/mips/bcm47xx/setup.c
+@@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
+ */
+ if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
+ cpu_wait = NULL;
+-
+- /*
+- * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
+- * Enable ExternalSync for sync instruction to take effect
+- */
+- set_c0_config7(MIPS_CONF7_ES);
+ break;
+ #endif
+ }
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index 60c787d943b0..a6810923b3f0 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -680,8 +680,6 @@
+ #define MIPS_CONF7_WII (_ULCAST_(1) << 31)
+
+ #define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
+-/* ExternalSync */
+-#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
+
+ #define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
+ #define MIPS_CONF7_AR (_ULCAST_(1) << 16)
+@@ -2747,7 +2745,6 @@ __BUILD_SET_C0(status)
+ __BUILD_SET_C0(cause)
+ __BUILD_SET_C0(config)
+ __BUILD_SET_C0(config5)
+-__BUILD_SET_C0(config7)
+ __BUILD_SET_C0(intcontrol)
+ __BUILD_SET_C0(intctl)
+ __BUILD_SET_C0(srsmap)
+diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
+index 95b8c471f572..eb1f6030ab85 100644
+--- a/arch/mips/include/asm/processor.h
++++ b/arch/mips/include/asm/processor.h
+@@ -141,7 +141,7 @@ struct mips_fpu_struct {
+
+ #define NUM_DSP_REGS 6
+
+-typedef __u32 dspreg_t;
++typedef unsigned long dspreg_t;
+
+ struct mips_dsp_state {
+ dspreg_t dspr[NUM_DSP_REGS];
+@@ -388,7 +388,20 @@ unsigned long get_wchan(struct task_struct *p);
+ #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
+ #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
+
++#ifdef CONFIG_CPU_LOONGSON3
++/*
++ * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
++ * tight read loop is executed, because reads take priority over writes & the
++ * hardware (incorrectly) doesn't ensure that writes will eventually occur.
++ *
++ * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
++ * flush from cpu_relax() such that any pending writes will become visible as
++ * expected.
++ */
++#define cpu_relax() smp_mb()
++#else
+ #define cpu_relax() barrier()
++#endif
+
+ /*
+ * Return_address is a replacement for __builtin_return_address(count)
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index e058cd300713..efffdf2464ab 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -847,7 +847,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ goto out;
+ }
+ dregs = __get_dsp_regs(child);
+- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
++ tmp = dregs[addr - DSP_BASE];
+ break;
+ }
+ case DSP_CONTROL:
+diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
+index 89026d33a07b..6990240785f6 100644
+--- a/arch/mips/kernel/ptrace32.c
++++ b/arch/mips/kernel/ptrace32.c
+@@ -141,7 +141,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ goto out;
+ }
+ dregs = __get_dsp_regs(child);
+- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
++ tmp = dregs[addr - DSP_BASE];
+ break;
+ }
+ case DSP_CONTROL:
+diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
+index 111ad475aa0c..4c2483f410c2 100644
+--- a/arch/mips/lib/multi3.c
++++ b/arch/mips/lib/multi3.c
+@@ -4,12 +4,12 @@
+ #include "libgcc.h"
+
+ /*
+- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
+- * specific case only we'll implement it here.
++ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
++ * that specific case only we implement that intrinsic here.
+ *
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
+ */
+-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
++#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
+
+ /* multiply 64-bit values, low 64-bits returned */
+ static inline long long notrace dmulu(long long a, long long b)
+diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
+index 254634fb3fc7..fee1e1f8c9d3 100644
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -322,6 +322,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
+ u64 imm64;
+ u8 *func;
+ u32 true_cond;
++ u32 tmp_idx;
+
+ /*
+ * addrs[] maps a BPF bytecode address into a real offset from
+@@ -681,11 +682,7 @@ emit_clear:
+ case BPF_STX | BPF_XADD | BPF_W:
+ /* Get EA into TMP_REG_1 */
+ PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
+- /* error if EA is not word-aligned */
+- PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
+- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
+- PPC_LI(b2p[BPF_REG_0], 0);
+- PPC_JMP(exit_addr);
++ tmp_idx = ctx->idx * 4;
+ /* load value from memory into TMP_REG_2 */
+ PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
+ /* add value from src_reg into this */
+@@ -693,32 +690,16 @@ emit_clear:
+ /* store result back */
+ PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+ /* we're done if this succeeded */
+- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+- /* otherwise, let's try once more */
+- PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
+- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
+- PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+- /* exit if the store was not successful */
+- PPC_LI(b2p[BPF_REG_0], 0);
+- PPC_BCC(COND_NE, exit_addr);
++ PPC_BCC_SHORT(COND_NE, tmp_idx);
+ break;
+ /* *(u64 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_DW:
+ PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
+- /* error if EA is not doubleword-aligned */
+- PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
+- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
+- PPC_LI(b2p[BPF_REG_0], 0);
+- PPC_JMP(exit_addr);
+- PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
+- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
+- PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
++ tmp_idx = ctx->idx * 4;
+ PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
+ PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
+ PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+- PPC_LI(b2p[BPF_REG_0], 0);
+- PPC_BCC(COND_NE, exit_addr);
++ PPC_BCC_SHORT(COND_NE, tmp_idx);
+ break;
+
+ /*
+diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
+index de11ecc99c7c..9c9970a5dfb1 100644
+--- a/arch/s390/include/asm/qdio.h
++++ b/arch/s390/include/asm/qdio.h
+@@ -262,7 +262,6 @@ struct qdio_outbuf_state {
+ void *user;
+ };
+
+-#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
+ #define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
+
+ #define CHSC_AC1_INITIATE_INPUTQ 0x80
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index 242b78c0a9ec..40f1888bc4ab 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -486,6 +486,8 @@ retry:
+ /* No reason to continue if interrupted by SIGKILL. */
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ fault = VM_FAULT_SIGNAL;
++ if (flags & FAULT_FLAG_RETRY_NOWAIT)
++ goto out_up;
+ goto out;
+ }
+ if (unlikely(fault & VM_FAULT_ERROR))
+diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
+index 382153ff17e3..dc3cede7f2ec 100644
+--- a/arch/s390/mm/page-states.c
++++ b/arch/s390/mm/page-states.c
+@@ -271,7 +271,7 @@ void arch_set_page_states(int make_stable)
+ list_for_each(l, &zone->free_area[order].free_list[t]) {
+ page = list_entry(l, struct page, lru);
+ if (make_stable)
+- set_page_stable_dat(page, 0);
++ set_page_stable_dat(page, order);
+ else
+ set_page_unused(page, order);
+ }
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 45f1ea117128..6b1474fa99ab 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -518,8 +518,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
+ /* br %r1 */
+ _EMIT2(0x07f1);
+ } else {
+- /* larl %r1,.+14 */
+- EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+ /* ex 0,S390_lowcore.br_r1_tampoline */
+ EMIT4_DISP(0x44000000, REG_0, REG_0,
+ offsetof(struct lowcore, br_r1_trampoline));
+diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
+index 06a80434cfe6..5bd374491f94 100644
+--- a/arch/s390/numa/numa.c
++++ b/arch/s390/numa/numa.c
+@@ -134,26 +134,14 @@ void __init numa_setup(void)
+ {
+ pr_info("NUMA mode: %s\n", mode->name);
+ nodes_clear(node_possible_map);
++ /* Initially attach all possible CPUs to node 0. */
++ cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
+ if (mode->setup)
+ mode->setup();
+ numa_setup_memory();
+ memblock_dump_all();
+ }
+
+-/*
+- * numa_init_early() - Initialization initcall
+- *
+- * This runs when only one CPU is online and before the first
+- * topology update is called for by the scheduler.
+- */
+-static int __init numa_init_early(void)
+-{
+- /* Attach all possible CPUs to node 0 for now. */
+- cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
+- return 0;
+-}
+-early_initcall(numa_init_early);
+-
+ /*
+ * numa_init_late() - Initialization initcall
+ *
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 0fe649c0d542..960c4a362d8c 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -420,6 +420,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ hwirq = 0;
+ for_each_pci_msi_entry(msi, pdev) {
+ rc = -EIO;
++ if (hwirq >= msi_vecs)
++ break;
+ irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
+ if (irq < 0)
+ return -ENOMEM;
+diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
+index 80ddc01f57ac..fcbc0c0aa087 100644
+--- a/arch/sparc/include/asm/Kbuild
++++ b/arch/sparc/include/asm/Kbuild
+@@ -14,6 +14,7 @@ generic-y += local64.h
+ generic-y += mcs_spinlock.h
+ generic-y += mm-arch-hooks.h
+ generic-y += module.h
++generic-y += msi.h
+ generic-y += preempt.h
+ generic-y += rwsem.h
+ generic-y += serial.h
+diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
+index 3b397081047a..83aaf4888999 100644
+--- a/arch/sparc/kernel/time_64.c
++++ b/arch/sparc/kernel/time_64.c
+@@ -813,7 +813,7 @@ static void __init get_tick_patch(void)
+ }
+ }
+
+-static void init_tick_ops(struct sparc64_tick_ops *ops)
++static void __init init_tick_ops(struct sparc64_tick_ops *ops)
+ {
+ unsigned long freq, quotient, tick;
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 1c63a4b5320d..2af0af33362a 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -170,6 +170,7 @@ config X86
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RCU_TABLE_FREE
++ select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
+ select HAVE_STACK_VALIDATION if X86_64
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 98018a621f6b..3a250ca2406c 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -104,9 +104,13 @@ define cmd_check_data_rel
+ done
+ endef
+
++# We need to run two commands under "if_changed", so merge them into a
++# single invocation.
++quiet_cmd_check-and-link-vmlinux = LD $@
++ cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
++
+ $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
+- $(call if_changed,check_data_rel)
+- $(call if_changed,ld)
++ $(call if_changed,check-and-link-vmlinux)
+
+ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
+ $(obj)/vmlinux.bin: vmlinux FORCE
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index c366c0adeb40..b545bf9d2328 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -74,9 +74,9 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
+ CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
+ $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+ -fno-omit-frame-pointer -foptimize-sibling-calls \
+- -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
++ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+
+-$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
++$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+
+ #
+ # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
+@@ -147,11 +147,13 @@ KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
++KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
+ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
+ KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
+ KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+ KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
++KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+ $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+
+ $(obj)/vdso32.so.dbg: FORCE \
+diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
+index 786fd875de92..8c51844694e2 100644
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
+ {
+ struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
+ struct perf_event *event = pcpu->event;
+- struct hw_perf_event *hwc = &event->hw;
++ struct hw_perf_event *hwc;
+ struct perf_sample_data data;
+ struct perf_raw_record raw;
+ struct pt_regs regs;
+@@ -602,6 +602,10 @@ fail:
+ return 0;
+ }
+
++ if (WARN_ON_ONCE(!event))
++ goto fail;
++
++ hwc = &event->hw;
+ msr = hwc->config_base;
+ buf = ibs_data.regs;
+ rdmsrl(msr, *buf);
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 717c9219d00e..e5097dc85a06 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2462,7 +2462,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
+
+ perf_callchain_store(entry, regs->ip);
+
+- if (!current->mm)
++ if (!nmi_uaccess_okay())
+ return;
+
+ if (perf_callchain_user32(regs, entry))
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index c14f2a74b2be..15450a675031 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void)
+ return flags;
+ }
+
+-static inline void native_restore_fl(unsigned long flags)
++extern inline void native_restore_fl(unsigned long flags);
++extern inline void native_restore_fl(unsigned long flags)
+ {
+ asm volatile("push %0 ; popf"
+ : /* no output */
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 0e856c0628b3..b12c8d70dd33 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
+ /* Index into per_cpu list: */
+ u16 cpu_index;
+ u32 microcode;
++ /* Address space bits used by the cache internally */
++ u8 x86_cache_bits;
+ } __randomize_layout;
+
+ struct cpuid_regs {
+@@ -180,9 +182,9 @@ extern const struct seq_operations cpuinfo_op;
+
+ extern void cpu_detect(struct cpuinfo_x86 *c);
+
+-static inline unsigned long l1tf_pfn_limit(void)
++static inline unsigned long long l1tf_pfn_limit(void)
+ {
+- return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
++ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
+ }
+
+ extern void early_cpu_init(void);
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 875ca99b82ee..5f00ecb9d251 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -175,8 +175,16 @@ struct tlb_state {
+ * are on. This means that it may not match current->active_mm,
+ * which will contain the previous user mm when we're in lazy TLB
+ * mode even if we've already switched back to swapper_pg_dir.
++ *
++ * During switch_mm_irqs_off(), loaded_mm will be set to
++ * LOADED_MM_SWITCHING during the brief interrupts-off window
++ * when CR3 and loaded_mm would otherwise be inconsistent. This
++ * is for nmi_uaccess_okay()'s benefit.
+ */
+ struct mm_struct *loaded_mm;
+
++#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
++
+ u16 loaded_mm_asid;
+ u16 next_asid;
+ /* last user mm's ctx id */
+@@ -246,6 +254,38 @@ struct tlb_state {
+ };
+ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+
++/*
++ * Blindly accessing user memory from NMI context can be dangerous
++ * if we're in the middle of switching the current user task or
++ * switching the loaded mm. It can also be dangerous if we
++ * interrupted some kernel code that was temporarily using a
++ * different mm.
++ */
++static inline bool nmi_uaccess_okay(void)
++{
++ struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
++ struct mm_struct *current_mm = current->mm;
++
++ VM_WARN_ON_ONCE(!loaded_mm);
++
++ /*
++ * The condition we want to check is
++ * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
++ * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
++ * is supposed to be reasonably fast.
++ *
++ * Instead, we check the almost equivalent but somewhat conservative
++ * condition below, and we rely on the fact that switch_mm_irqs_off()
++ * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
++ */
++ if (loaded_mm != current_mm)
++ return false;
++
++ VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
++
++ return true;
++}
++
+ /* Initialize cr4 shadow for this CPU. */
+ static inline void cr4_init_shadow(void)
+ {
+diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
+index 52250681f68c..d92ccff4e615 100644
+--- a/arch/x86/include/asm/vgtod.h
++++ b/arch/x86/include/asm/vgtod.h
+@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void)
+ *
+ * If RDPID is available, use it.
+ */
+- alternative_io ("lsl %[p],%[seg]",
++ alternative_io ("lsl %[seg],%[p]",
+ ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+ X86_FEATURE_RDPID,
+ [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index d07addb99b71..3e435f88621d 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -652,6 +652,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
+ enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+
++/*
++ * These CPUs all support 44bits physical address space internally in the
++ * cache but CPUID can report a smaller number of physical address bits.
++ *
++ * The L1TF mitigation uses the top most address bit for the inversion of
++ * non present PTEs. When the installed memory reaches into the top most
++ * address bit due to memory holes, which has been observed on machines
++ * which report 36bits physical address bits and have 32G RAM installed,
++ * then the mitigation range check in l1tf_select_mitigation() triggers.
++ * This is a false positive because the mitigation is still possible due to
++ * the fact that the cache uses 44bit internally. Use the cache bits
++ * instead of the reported physical bits and adjust them on the affected
++ * machines to 44bit if the reported bits are less than 44.
++ */
++static void override_cache_bits(struct cpuinfo_x86 *c)
++{
++ if (c->x86 != 6)
++ return;
++
++ switch (c->x86_model) {
++ case INTEL_FAM6_NEHALEM:
++ case INTEL_FAM6_WESTMERE:
++ case INTEL_FAM6_SANDYBRIDGE:
++ case INTEL_FAM6_IVYBRIDGE:
++ case INTEL_FAM6_HASWELL_CORE:
++ case INTEL_FAM6_HASWELL_ULT:
++ case INTEL_FAM6_HASWELL_GT3E:
++ case INTEL_FAM6_BROADWELL_CORE:
++ case INTEL_FAM6_BROADWELL_GT3E:
++ case INTEL_FAM6_SKYLAKE_MOBILE:
++ case INTEL_FAM6_SKYLAKE_DESKTOP:
++ case INTEL_FAM6_KABYLAKE_MOBILE:
++ case INTEL_FAM6_KABYLAKE_DESKTOP:
++ if (c->x86_cache_bits < 44)
++ c->x86_cache_bits = 44;
++ break;
++ }
++}
++
+ static void __init l1tf_select_mitigation(void)
+ {
+ u64 half_pa;
+@@ -659,6 +698,8 @@ static void __init l1tf_select_mitigation(void)
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return;
+
++ override_cache_bits(&boot_cpu_data);
++
+ switch (l1tf_mitigation) {
+ case L1TF_MITIGATION_OFF:
+ case L1TF_MITIGATION_FLUSH_NOWARN:
+@@ -678,14 +719,13 @@ static void __init l1tf_select_mitigation(void)
+ return;
+ #endif
+
+- /*
+- * This is extremely unlikely to happen because almost all
+- * systems have far more MAX_PA/2 than RAM can be fit into
+- * DIMM slots.
+- */
+ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+ if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
++ pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
++ half_pa);
++ pr_info("However, doing so will make a part of your RAM unusable.\n");
++ pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
+ return;
+ }
+
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index dd02ee4fa8cd..7d2a7890a823 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -890,6 +890,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+ }
+ }
+ #endif
++ c->x86_cache_bits = c->x86_phys_bits;
+ }
+
+ static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 278be092b300..574dcdc092ab 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+ return false;
+
++ if (c->x86 != 6)
++ return false;
++
+ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+ if (c->x86_model == spectre_bad_microcodes[i].model &&
+ c->x86_stepping == spectre_bad_microcodes[i].stepping)
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index a2d8a3908670..224de37821e4 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -17,6 +17,7 @@
+ #include <linux/bug.h>
+ #include <linux/nmi.h>
+ #include <linux/sysfs.h>
++#include <linux/kasan.h>
+
+ #include <asm/cpu_entry_area.h>
+ #include <asm/stacktrace.h>
+@@ -298,7 +299,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ * We're not going to return, but we might be on an IST stack or
+ * have very little stack space left. Rewind the stack and kill
+ * the task.
++ * Before we rewind the stack, we have to tell KASAN that we're going to
++ * reuse the task stack and that existing poisons are invalid.
+ */
++ kasan_unpoison_task_stack(current);
+ rewind_stack_do_exit(signr);
+ }
+ NOKPROBE_SYMBOL(oops_end);
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index fa093b77689f..cbeecfcc66d6 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -370,6 +370,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+ start_thread_common(regs, new_ip, new_sp,
+ __USER_CS, __USER_DS, 0);
+ }
++EXPORT_SYMBOL_GPL(start_thread);
+
+ #ifdef CONFIG_COMPAT
+ void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 282bbcbf3b6a..f6bebcec60b4 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5067,8 +5067,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+
+ clgi();
+
+- local_irq_enable();
+-
+ /*
+ * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+ * it's non-zero. Since vmentry is serialising on affected CPUs, there
+@@ -5077,6 +5075,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ */
+ x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+
++ local_irq_enable();
++
+ asm volatile (
+ "push %%" _ASM_BP "; \n\t"
+ "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
+@@ -5199,12 +5199,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+- x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+-
+ reload_tss(vcpu);
+
+ local_irq_disable();
+
++ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
++
+ vcpu->arch.cr2 = svm->vmcb->save.cr2;
+ vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+ vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index f015ca3997d9..8958b35f6008 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8108,21 +8108,20 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
+ /* Emulate the VMPTRST instruction */
+ static int handle_vmptrst(struct kvm_vcpu *vcpu)
+ {
+- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+- u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+- gva_t vmcs_gva;
++ unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
++ u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
++ gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
+ struct x86_exception e;
++ gva_t gva;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+- if (get_vmx_mem_address(vcpu, exit_qualification,
+- vmx_instruction_info, true, &vmcs_gva))
++ if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
+ return 1;
+ /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
+- if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
+- (void *)&to_vmx(vcpu)->nested.current_vmptr,
+- sizeof(u64), &e)) {
++ if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
++ sizeof(gpa_t), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+@@ -9171,9 +9170,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+ * information but as all relevant affected CPUs have 32KiB L1D cache size
+ * there is no point in doing so.
+ */
+-#define L1D_CACHE_ORDER 4
+-static void *vmx_l1d_flush_pages;
+-
+ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+ {
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
+diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
+index c8c6ad0d58b8..3f435d7fca5e 100644
+--- a/arch/x86/lib/usercopy.c
++++ b/arch/x86/lib/usercopy.c
+@@ -7,6 +7,8 @@
+ #include <linux/uaccess.h>
+ #include <linux/export.h>
+
++#include <asm/tlbflush.h>
++
+ /*
+ * We rely on the nested NMI work to allow atomic faults from the NMI path; the
+ * nested NMI paths are careful to preserve CR2.
+@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+ if (__range_not_ok(from, n, TASK_SIZE))
+ return n;
+
++ if (!nmi_uaccess_okay())
++ return n;
++
+ /*
+ * Even though this function is typically called from NMI/IRQ context
+ * disable pagefaults so that its behaviour is consistent even when
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 37f60dfd7e4e..94b8d90830d1 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -892,7 +892,7 @@ unsigned long max_swapfile_size(void)
+
+ if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+ /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
+- unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
++ unsigned long long l1tf_limit = l1tf_pfn_limit();
+ /*
+ * We encode swap offsets also with 3 bits below those for pfn
+ * which makes the usable limit higher.
+@@ -900,7 +900,7 @@ unsigned long max_swapfile_size(void)
+ #if CONFIG_PGTABLE_LEVELS > 2
+ l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
+ #endif
+- pages = min_t(unsigned long, l1tf_limit, pages);
++ pages = min_t(unsigned long long, l1tf_limit, pages);
+ }
+ return pages;
+ }
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 5f4805d69aab..53f1c18b15bd 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -191,7 +191,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+ /* If it's real memory always allow */
+ if (pfn_valid(pfn))
+ return true;
+- if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
++ if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
+ return false;
+ return true;
+ }
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 0c936435ea93..83a3f4c935fc 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -292,6 +292,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+
+ choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+
++ /* Let nmi_uaccess_okay() know that we're changing CR3. */
++ this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
++ barrier();
++
+ if (need_flush) {
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
+@@ -322,6 +326,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ if (next != &init_mm)
+ this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
++ /* Make sure we write CR3 before loaded_mm. */
++ barrier();
++
+ this_cpu_write(cpu_tlbstate.loaded_mm, next);
+ this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+ }
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index 8e2e4757adcb..5a42ae4078c2 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
+ int of_pm_clk_add_clks(struct device *dev)
+ {
+ struct clk **clks;
+- unsigned int i, count;
++ int i, count;
+ int ret;
+
+ if (!dev || !dev->of_node)
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 5feba04ab940..5e55d03d3d01 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -112,12 +112,16 @@ struct nbd_device {
+ struct task_struct *task_setup;
+ };
+
++#define NBD_CMD_REQUEUED 1
++
+ struct nbd_cmd {
+ struct nbd_device *nbd;
++ struct mutex lock;
+ int index;
+ int cookie;
+- struct completion send_complete;
+ blk_status_t status;
++ unsigned long flags;
++ u32 cmd_cookie;
+ };
+
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
+@@ -146,6 +150,35 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
+ return disk_to_dev(nbd->disk);
+ }
+
++static void nbd_requeue_cmd(struct nbd_cmd *cmd)
++{
++ struct request *req = blk_mq_rq_from_pdu(cmd);
++
++ if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
++ blk_mq_requeue_request(req, true);
++}
++
++#define NBD_COOKIE_BITS 32
++
++static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
++{
++ struct request *req = blk_mq_rq_from_pdu(cmd);
++ u32 tag = blk_mq_unique_tag(req);
++ u64 cookie = cmd->cmd_cookie;
++
++ return (cookie << NBD_COOKIE_BITS) | tag;
++}
++
++static u32 nbd_handle_to_tag(u64 handle)
++{
++ return (u32)handle;
++}
++
++static u32 nbd_handle_to_cookie(u64 handle)
++{
++ return (u32)(handle >> NBD_COOKIE_BITS);
++}
++
+ static const char *nbdcmd_to_ascii(int cmd)
+ {
+ switch (cmd) {
+@@ -306,6 +339,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
+ }
+ config = nbd->config;
+
++ if (!mutex_trylock(&cmd->lock))
++ return BLK_EH_RESET_TIMER;
++
+ if (config->num_connections > 1) {
+ dev_err_ratelimited(nbd_to_dev(nbd),
+ "Connection timed out, retrying\n");
+@@ -328,7 +364,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+ mutex_unlock(&nsock->tx_lock);
+ }
+- blk_mq_requeue_request(req, true);
++ mutex_unlock(&cmd->lock);
++ nbd_requeue_cmd(cmd);
+ nbd_config_put(nbd);
+ return BLK_EH_NOT_HANDLED;
+ }
+@@ -338,6 +375,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
+ }
+ set_bit(NBD_TIMEDOUT, &config->runtime_flags);
+ cmd->status = BLK_STS_IOERR;
++ mutex_unlock(&cmd->lock);
+ sock_shutdown(nbd);
+ nbd_config_put(nbd);
+
+@@ -414,9 +452,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ struct iov_iter from;
+ unsigned long size = blk_rq_bytes(req);
+ struct bio *bio;
++ u64 handle;
+ u32 type;
+ u32 nbd_cmd_flags = 0;
+- u32 tag = blk_mq_unique_tag(req);
+ int sent = nsock->sent, skip = 0;
+
+ iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+@@ -458,6 +496,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ goto send_pages;
+ }
+ iov_iter_advance(&from, sent);
++ } else {
++ cmd->cmd_cookie++;
+ }
+ cmd->index = index;
+ cmd->cookie = nsock->cookie;
+@@ -466,7 +506,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
+ request.len = htonl(size);
+ }
+- memcpy(request.handle, &tag, sizeof(tag));
++ handle = nbd_cmd_handle(cmd);
++ memcpy(request.handle, &handle, sizeof(handle));
+
+ dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
+ cmd, nbdcmd_to_ascii(type),
+@@ -484,6 +525,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ nsock->pending = req;
+ nsock->sent = sent;
+ }
++ set_bit(NBD_CMD_REQUEUED, &cmd->flags);
+ return BLK_STS_RESOURCE;
+ }
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+@@ -525,6 +567,7 @@ send_pages:
+ */
+ nsock->pending = req;
+ nsock->sent = sent;
++ set_bit(NBD_CMD_REQUEUED, &cmd->flags);
+ return BLK_STS_RESOURCE;
+ }
+ dev_err(disk_to_dev(nbd->disk),
+@@ -557,10 +600,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
+ struct nbd_reply reply;
+ struct nbd_cmd *cmd;
+ struct request *req = NULL;
++ u64 handle;
+ u16 hwq;
+ u32 tag;
+ struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
+ struct iov_iter to;
++ int ret = 0;
+
+ reply.magic = 0;
+ iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
+@@ -578,8 +623,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
+ return ERR_PTR(-EPROTO);
+ }
+
+- memcpy(&tag, reply.handle, sizeof(u32));
+-
++ memcpy(&handle, reply.handle, sizeof(handle));
++ tag = nbd_handle_to_tag(handle);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ if (hwq < nbd->tag_set.nr_hw_queues)
+ req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
+@@ -590,11 +635,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
+ return ERR_PTR(-ENOENT);
+ }
+ cmd = blk_mq_rq_to_pdu(req);
++
++ mutex_lock(&cmd->lock);
++ if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
++ dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
++ req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
++ ret = -ENOENT;
++ goto out;
++ }
++ if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
++ dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
++ req);
++ ret = -ENOENT;
++ goto out;
++ }
+ if (ntohl(reply.error)) {
+ dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
+ ntohl(reply.error));
+ cmd->status = BLK_STS_IOERR;
+- return cmd;
++ goto out;
+ }
+
+ dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
+@@ -619,18 +678,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
+ if (nbd_disconnected(config) ||
+ config->num_connections <= 1) {
+ cmd->status = BLK_STS_IOERR;
+- return cmd;
++ goto out;
+ }
+- return ERR_PTR(-EIO);
++ ret = -EIO;
++ goto out;
+ }
+ dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
+ cmd, bvec.bv_len);
+ }
+- } else {
+- /* See the comment in nbd_queue_rq. */
+- wait_for_completion(&cmd->send_complete);
+ }
+- return cmd;
++out:
++ mutex_unlock(&cmd->lock);
++ return ret ? ERR_PTR(ret) : cmd;
+ }
+
+ static void recv_work(struct work_struct *work)
+@@ -793,7 +852,7 @@ again:
+ */
+ blk_mq_start_request(req);
+ if (unlikely(nsock->pending && nsock->pending != req)) {
+- blk_mq_requeue_request(req, true);
++ nbd_requeue_cmd(cmd);
+ ret = 0;
+ goto out;
+ }
+@@ -806,7 +865,7 @@ again:
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Request send failed, requeueing\n");
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+- blk_mq_requeue_request(req, true);
++ nbd_requeue_cmd(cmd);
+ ret = 0;
+ }
+ out:
+@@ -830,7 +889,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ * that the server is misbehaving (or there was an error) before we're
+ * done sending everything over the wire.
+ */
+- init_completion(&cmd->send_complete);
++ mutex_lock(&cmd->lock);
++ clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
+
+ /* We can be called directly from the user space process, which means we
+ * could possibly have signals pending so our sendmsg will fail. In
+@@ -842,7 +902,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ ret = BLK_STS_IOERR;
+ else if (!ret)
+ ret = BLK_STS_OK;
+- complete(&cmd->send_complete);
++ mutex_unlock(&cmd->lock);
+
+ return ret;
+ }
+@@ -1446,6 +1506,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ {
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ cmd->nbd = set->driver_data;
++ cmd->flags = 0;
++ mutex_init(&cmd->lock);
+ return 0;
+ }
+
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index bfc566d3f31a..8cfa10ab7abc 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2542,7 +2542,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
+ if (!CDROM_CAN(CDC_SELECT_DISC) ||
+ (arg == CDSL_CURRENT || arg == CDSL_NONE))
+ return cdi->ops->drive_status(cdi, CDSL_CURRENT);
+- if (((int)arg >= cdi->capacity))
++ if (arg >= cdi->capacity)
+ return -EINVAL;
+ return cdrom_slot_status(cdi, arg);
+ }
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index dba5259def60..86b526b7d990 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -423,7 +423,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
+ header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
+ TSS2_RESMGR_TPM_RC_LAYER);
+- return bufsiz;
++ return sizeof(*header);
+ }
+
+ if (bufsiz > TPM_BUFSIZE)
+diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
+index 6847120b61cd..62d0a69f8da0 100644
+--- a/drivers/clk/rockchip/clk-rk3399.c
++++ b/drivers/clk/rockchip/clk-rk3399.c
+@@ -630,7 +630,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
+ MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(31), 0, 2, MFLAGS),
+ COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT,
+- RK3399_CLKSEL_CON(30), 8, 2, MFLAGS,
++ RK3399_CLKSEL_CON(31), 2, 1, MFLAGS,
+ RK3399_CLKGATE_CON(8), 12, GFLAGS),
+
+ /* uart */
+diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
+index 02ba5f2aa0e6..cd777c75291d 100644
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -27,21 +27,23 @@
+ #include <asm/switch_to.h>
+ #include <crypto/aes.h>
+ #include <crypto/scatterwalk.h>
++#include <crypto/skcipher.h>
++
+ #include "aesp8-ppc.h"
+
+ struct p8_aes_ctr_ctx {
+- struct crypto_blkcipher *fallback;
++ struct crypto_skcipher *fallback;
+ struct aes_key enc_key;
+ };
+
+ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
+ {
+ const char *alg = crypto_tfm_alg_name(tfm);
+- struct crypto_blkcipher *fallback;
++ struct crypto_skcipher *fallback;
+ struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+
+- fallback =
+- crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
++ fallback = crypto_alloc_skcipher(alg, 0,
++ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ printk(KERN_ERR
+ "Failed to allocate transformation for '%s': %ld\n",
+@@ -49,9 +51,9 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
+ return PTR_ERR(fallback);
+ }
+
+- crypto_blkcipher_set_flags(
++ crypto_skcipher_set_flags(
+ fallback,
+- crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
++ crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+ ctx->fallback = fallback;
+
+ return 0;
+@@ -62,7 +64,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
+ struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+- crypto_free_blkcipher(ctx->fallback);
++ crypto_free_skcipher(ctx->fallback);
+ ctx->fallback = NULL;
+ }
+ }
+@@ -81,7 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
+ pagefault_enable();
+ preempt_enable();
+
+- ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
++ ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ return ret;
+ }
+
+@@ -115,15 +117,14 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
+ struct blkcipher_walk walk;
+ struct p8_aes_ctr_ctx *ctx =
+ crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+- struct blkcipher_desc fallback_desc = {
+- .tfm = ctx->fallback,
+- .info = desc->info,
+- .flags = desc->flags
+- };
+
+ if (in_interrupt()) {
+- ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
+- nbytes);
++ SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
++ skcipher_request_set_tfm(req, ctx->fallback);
++ skcipher_request_set_callback(req, desc->flags, NULL, NULL);
++ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
++ ret = crypto_skcipher_encrypt(req);
++ skcipher_request_zero(req);
+ } else {
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
1501 +diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
1502 +index d6f3d9ee1350..70b3c556f6cf 100644
1503 +--- a/drivers/gpio/gpiolib-acpi.c
1504 ++++ b/drivers/gpio/gpiolib-acpi.c
1505 +@@ -25,6 +25,7 @@
1506 +
1507 + struct acpi_gpio_event {
1508 + struct list_head node;
1509 ++ struct list_head initial_sync_list;
1510 + acpi_handle handle;
1511 + unsigned int pin;
1512 + unsigned int irq;
1513 +@@ -50,6 +51,9 @@ struct acpi_gpio_chip {
1514 + struct list_head events;
1515 + };
1516 +
1517 ++static LIST_HEAD(acpi_gpio_initial_sync_list);
1518 ++static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
1519 ++
1520 + static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
1521 + {
1522 + if (!gc->parent)
1523 +@@ -142,6 +146,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
1524 + return gpiochip_get_desc(chip, offset);
1525 + }
1526 +
1527 ++static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
1528 ++{
1529 ++ mutex_lock(&acpi_gpio_initial_sync_list_lock);
1530 ++ list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
1531 ++ mutex_unlock(&acpi_gpio_initial_sync_list_lock);
1532 ++}
1533 ++
1534 ++static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
1535 ++{
1536 ++ mutex_lock(&acpi_gpio_initial_sync_list_lock);
1537 ++ if (!list_empty(&event->initial_sync_list))
1538 ++ list_del_init(&event->initial_sync_list);
1539 ++ mutex_unlock(&acpi_gpio_initial_sync_list_lock);
1540 ++}
1541 ++
1542 + static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
1543 + {
1544 + struct acpi_gpio_event *event = data;
1545 +@@ -193,7 +212,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
1546 + irq_handler_t handler = NULL;
1547 + struct gpio_desc *desc;
1548 + unsigned long irqflags;
1549 +- int ret, pin, irq;
1550 ++ int ret, pin, irq, value;
1551 +
1552 + if (!acpi_gpio_get_irq_resource(ares, &agpio))
1553 + return AE_OK;
1554 +@@ -228,6 +247,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
1555 +
1556 + gpiod_direction_input(desc);
1557 +
1558 ++ value = gpiod_get_value(desc);
1559 ++
1560 + ret = gpiochip_lock_as_irq(chip, pin);
1561 + if (ret) {
1562 + dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
1563 +@@ -269,6 +290,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
1564 + event->irq = irq;
1565 + event->pin = pin;
1566 + event->desc = desc;
1567 ++ INIT_LIST_HEAD(&event->initial_sync_list);
1568 +
1569 + ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
1570 + "ACPI:Event", event);
1571 +@@ -283,6 +305,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
1572 + enable_irq_wake(irq);
1573 +
1574 + list_add_tail(&event->node, &acpi_gpio->events);
1575 ++
1576 ++ /*
1577 ++ * Make sure we trigger the initial state of the IRQ when using RISING
1578 ++ * or FALLING. Note we run the handlers on late_init, the AML code
1579 ++ * may refer to OperationRegions from other (builtin) drivers which
1580 ++ * may be probed after us.
1581 ++ */
1582 ++ if (handler == acpi_gpio_irq_handler &&
1583 ++ (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
1584 ++ ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
1585 ++ acpi_gpio_add_to_initial_sync_list(event);
1586 ++
1587 + return AE_OK;
1588 +
1589 + fail_free_event:
1590 +@@ -355,6 +389,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
1591 + list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
1592 + struct gpio_desc *desc;
1593 +
1594 ++ acpi_gpio_del_from_initial_sync_list(event);
1595 ++
1596 + if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
1597 + disable_irq_wake(event->irq);
1598 +
1599 +@@ -1210,3 +1246,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
1600 +
1601 + return con_id == NULL;
1602 + }
1603 ++
1604 ++/* Sync the initial state of handlers after all builtin drivers have probed */
1605 ++static int acpi_gpio_initial_sync(void)
1606 ++{
1607 ++ struct acpi_gpio_event *event, *ep;
1608 ++
1609 ++ mutex_lock(&acpi_gpio_initial_sync_list_lock);
1610 ++ list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
1611 ++ initial_sync_list) {
1612 ++ acpi_evaluate_object(event->handle, NULL, NULL, NULL);
1613 ++ list_del_init(&event->initial_sync_list);
1614 ++ }
1615 ++ mutex_unlock(&acpi_gpio_initial_sync_list_lock);
1616 ++
1617 ++ return 0;
1618 ++}
1619 ++/* We must use _sync so that this runs after the first deferred_probe run */
1620 ++late_initcall_sync(acpi_gpio_initial_sync);
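
The gpiolib-acpi change above queues qualifying events on a global, mutex-protected list when the interrupt is requested, then replays them once from late_initcall_sync(), after built-in drivers have probed and the OperationRegions their AML handlers touch exist. A userspace analog of that register-now, run-once-later pattern (pthread mutex and a singly linked list standing in for the kernel primitives; all names hypothetical, error handling elided):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	int pin;                  /* payload: which pin was already asserted */
	struct event *next;
};

static struct event *pending;  /* plays the acpi_gpio_initial_sync_list role */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void record_initial_event(int pin)   /* "add to initial sync list" */
{
	struct event *e = malloc(sizeof(*e));
	e->pin = pin;
	pthread_mutex_lock(&lock);
	e->next = pending;
	pending = e;
	pthread_mutex_unlock(&lock);
}

static void initial_sync(void)              /* the late_initcall_sync body */
{
	pthread_mutex_lock(&lock);
	while (pending) {
		struct event *e = pending;
		pending = e->next;
		printf("replaying initial event for pin %d\n", e->pin);
		free(e);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	record_initial_event(3);   /* edge already asserted at request time */
	record_initial_event(7);
	initial_sync();            /* runs once, after "probing" is done */
	return 0;
}
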
1621 +diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
1622 +index b2431aee7887..f5091827628a 100644
1623 +--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
1624 ++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
1625 +@@ -424,6 +424,18 @@ static void adv7511_hpd_work(struct work_struct *work)
1626 + else
1627 + status = connector_status_disconnected;
1628 +
1629 ++ /*
1630 ++ * The bridge resets its registers on unplug. So when we get a plug
1631 ++ * event and we're already supposed to be powered, cycle the bridge to
1632 ++ * restore its state.
1633 ++ */
1634 ++ if (status == connector_status_connected &&
1635 ++ adv7511->connector.status == connector_status_disconnected &&
1636 ++ adv7511->powered) {
1637 ++ regcache_mark_dirty(adv7511->regmap);
1638 ++ adv7511_power_on(adv7511);
1639 ++ }
1640 ++
1641 + if (adv7511->connector.status != status) {
1642 + adv7511->connector.status = status;
1643 + drm_kms_helper_hotplug_event(adv7511->connector.dev);
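
The adv7511 hunk leans on regmap's register cache: marking the cache dirty and re-running the power-on path rewrites every cached register, restoring the state the bridge lost while unplugged. The shadow-cache replay idea in isolation (a toy device model, not the regmap API):

#include <stdio.h>
#include <string.h>

#define NREGS 4

static unsigned char hw[NREGS];     /* device registers (reset on unplug) */
static unsigned char cache[NREGS];  /* what the driver believes it set */

static void write_reg(int reg, unsigned char val)
{
	cache[reg] = val;           /* the cache tracks every write */
	hw[reg] = val;
}

static void device_unplug_replug(void)
{
	memset(hw, 0, sizeof(hw));  /* bridge loses its registers */
}

static void replay_cache(void)      /* "mark dirty, then re-sync" */
{
	for (int reg = 0; reg < NREGS; reg++)
		hw[reg] = cache[reg];
}

int main(void)
{
	write_reg(1, 0xa5);
	device_unplug_replug();
	printf("after replug: hw[1]=0x%02x\n", hw[1]);  /* 0x00, state lost */
	replay_cache();
	printf("after replay: hw[1]=0x%02x\n", hw[1]);  /* 0xa5, restored */
	return 0;
}
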
1644 +diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
1645 +index 56dd7a9a8e25..dd5312b02a8d 100644
1646 +--- a/drivers/gpu/drm/imx/imx-ldb.c
1647 ++++ b/drivers/gpu/drm/imx/imx-ldb.c
1648 +@@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
1649 + return PTR_ERR(imx_ldb->regmap);
1650 + }
1651 +
1652 ++ /* disable LDB by resetting the control register to POR default */
1653 ++ regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
1654 ++
1655 + imx_ldb->dev = dev;
1656 +
1657 + if (of_id)
1658 +@@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
1659 + if (ret || i < 0 || i > 1)
1660 + return -EINVAL;
1661 +
1662 ++ if (!of_device_is_available(child))
1663 ++ continue;
1664 ++
1665 + if (dual && i > 0) {
1666 + dev_warn(dev, "dual-channel mode, ignoring second output\n");
1667 + continue;
1668 + }
1669 +
1670 +- if (!of_device_is_available(child))
1671 +- continue;
1672 +-
1673 + channel = &imx_ldb->channel[i];
1674 + channel->ldb = imx_ldb;
1675 + channel->chno = i;
1676 +diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
1677 +index 2a75ab80527a..2c149b841cf1 100644
1678 +--- a/drivers/gpu/drm/udl/udl_drv.h
1679 ++++ b/drivers/gpu/drm/udl/udl_drv.h
1680 +@@ -110,7 +110,7 @@ udl_fb_user_fb_create(struct drm_device *dev,
1681 + struct drm_file *file,
1682 + const struct drm_mode_fb_cmd2 *mode_cmd);
1683 +
1684 +-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
1685 ++int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
1686 + const char *front, char **urb_buf_ptr,
1687 + u32 byte_offset, u32 device_byte_offset, u32 byte_width,
1688 + int *ident_ptr, int *sent_ptr);
1689 +diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
1690 +index d5583190f3e4..8746eeeec44d 100644
1691 +--- a/drivers/gpu/drm/udl/udl_fb.c
1692 ++++ b/drivers/gpu/drm/udl/udl_fb.c
1693 +@@ -90,7 +90,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
1694 + int bytes_identical = 0;
1695 + struct urb *urb;
1696 + int aligned_x;
1697 +- int bpp = fb->base.format->cpp[0];
1698 ++ int log_bpp;
1699 ++
1700 ++ BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
1701 ++ log_bpp = __ffs(fb->base.format->cpp[0]);
1702 +
1703 + if (!fb->active_16)
1704 + return 0;
1705 +@@ -125,12 +128,12 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
1706 +
1707 + for (i = y; i < y + height ; i++) {
1708 + const int line_offset = fb->base.pitches[0] * i;
1709 +- const int byte_offset = line_offset + (x * bpp);
1710 +- const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
1711 +- if (udl_render_hline(dev, bpp, &urb,
1712 ++ const int byte_offset = line_offset + (x << log_bpp);
1713 ++ const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
1714 ++ if (udl_render_hline(dev, log_bpp, &urb,
1715 + (char *) fb->obj->vmapping,
1716 + &cmd, byte_offset, dev_byte_offset,
1717 +- width * bpp,
1718 ++ width << log_bpp,
1719 + &bytes_identical, &bytes_sent))
1720 + goto error;
1721 + }
1722 +@@ -149,7 +152,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
1723 + error:
1724 + atomic_add(bytes_sent, &udl->bytes_sent);
1725 + atomic_add(bytes_identical, &udl->bytes_identical);
1726 +- atomic_add(width*height*bpp, &udl->bytes_rendered);
1727 ++ atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
1728 + end_cycles = get_cycles();
1729 + atomic_add(((unsigned int) ((end_cycles - start_cycles)
1730 + >> 10)), /* Kcycles */
1731 +@@ -221,7 +224,7 @@ static int udl_fb_open(struct fb_info *info, int user)
1732 +
1733 + struct fb_deferred_io *fbdefio;
1734 +
1735 +- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
1736 ++ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
1737 +
1738 + if (fbdefio) {
1739 + fbdefio->delay = DL_DEFIO_WRITE_DELAY;
1740 +diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
1741 +index 0328b2c7b210..f8ea3c99b523 100644
1742 +--- a/drivers/gpu/drm/udl/udl_main.c
1743 ++++ b/drivers/gpu/drm/udl/udl_main.c
1744 +@@ -169,18 +169,13 @@ static void udl_free_urb_list(struct drm_device *dev)
1745 + struct list_head *node;
1746 + struct urb_node *unode;
1747 + struct urb *urb;
1748 +- int ret;
1749 + unsigned long flags;
1750 +
1751 + DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
1752 +
1753 + /* keep waiting and freeing, until we've got 'em all */
1754 + while (count--) {
1755 +-
1756 +- /* Getting interrupted means a leak, but ok at shutdown*/
1757 +- ret = down_interruptible(&udl->urbs.limit_sem);
1758 +- if (ret)
1759 +- break;
1760 ++ down(&udl->urbs.limit_sem);
1761 +
1762 + spin_lock_irqsave(&udl->urbs.lock, flags);
1763 +
1764 +@@ -204,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev)
1765 + static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
1766 + {
1767 + struct udl_device *udl = dev->dev_private;
1768 +- int i = 0;
1769 + struct urb *urb;
1770 + struct urb_node *unode;
1771 + char *buf;
1772 ++ size_t wanted_size = count * size;
1773 +
1774 + spin_lock_init(&udl->urbs.lock);
1775 +
1776 ++retry:
1777 + udl->urbs.size = size;
1778 + INIT_LIST_HEAD(&udl->urbs.list);
1779 +
1780 +- while (i < count) {
1781 ++ sema_init(&udl->urbs.limit_sem, 0);
1782 ++ udl->urbs.count = 0;
1783 ++ udl->urbs.available = 0;
1784 ++
1785 ++ while (udl->urbs.count * size < wanted_size) {
1786 + unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
1787 + if (!unode)
1788 + break;
1789 +@@ -230,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
1790 + }
1791 + unode->urb = urb;
1792 +
1793 +- buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
1794 ++ buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
1795 + &urb->transfer_dma);
1796 + if (!buf) {
1797 + kfree(unode);
1798 + usb_free_urb(urb);
1799 ++ if (size > PAGE_SIZE) {
1800 ++ size /= 2;
1801 ++ udl_free_urb_list(dev);
1802 ++ goto retry;
1803 ++ }
1804 + break;
1805 + }
1806 +
1807 +@@ -245,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
1808 +
1809 + list_add_tail(&unode->entry, &udl->urbs.list);
1810 +
1811 +- i++;
1812 ++ up(&udl->urbs.limit_sem);
1813 ++ udl->urbs.count++;
1814 ++ udl->urbs.available++;
1815 + }
1816 +
1817 +- sema_init(&udl->urbs.limit_sem, i);
1818 +- udl->urbs.count = i;
1819 +- udl->urbs.available = i;
1820 +-
1821 +- DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
1822 ++ DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
1823 +
1824 +- return i;
1825 ++ return udl->urbs.count;
1826 + }
1827 +
1828 + struct urb *udl_get_urb(struct drm_device *dev)
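
udl_alloc_urb_list() now remembers the total transfer size it wanted and, when usb_alloc_coherent() fails, frees what it has, halves the per-buffer size, and retries down to PAGE_SIZE instead of ending up with too few URBs. The shape of that fallback loop as a hedged sketch (plain malloc standing in for coherent DMA memory; the failure threshold is invented):

#include <stdio.h>
#include <stdlib.h>

#define FLOOR 4096                  /* stand-in for PAGE_SIZE */

static void *try_alloc(size_t size)
{
	if (size > 16384)           /* pretend large coherent buffers fail */
		return NULL;
	return malloc(size);
}

int main(void)
{
	size_t size = 65536, wanted = 4 * size;  /* count * size, fixed up front */
	size_t got;

retry:
	got = 0;
	while (got < wanted) {
		void *buf = try_alloc(size);
		if (!buf) {
			if (size > FLOOR) {  /* shrink and start over */
				size /= 2;
				goto retry;
			}
			break;               /* at the floor: keep what we have */
		}
		/* sketch only: the driver queues buf on a list and frees
		 * the whole list before retrying with the smaller size */
		free(buf);
		got += size;
	}
	printf("allocated %zu bytes in %zu-byte buffers\n", got, size);
	return 0;
}
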
1829 +diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
1830 +index b992644c17e6..f3331d33547a 100644
1831 +--- a/drivers/gpu/drm/udl/udl_transfer.c
1832 ++++ b/drivers/gpu/drm/udl/udl_transfer.c
1833 +@@ -83,12 +83,12 @@ static inline u16 pixel32_to_be16(const uint32_t pixel)
1834 + ((pixel >> 8) & 0xf800));
1835 + }
1836 +
1837 +-static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp)
1838 ++static inline u16 get_pixel_val16(const uint8_t *pixel, int log_bpp)
1839 + {
1840 +- u16 pixel_val16 = 0;
1841 +- if (bpp == 2)
1842 ++ u16 pixel_val16;
1843 ++ if (log_bpp == 1)
1844 + pixel_val16 = *(const uint16_t *)pixel;
1845 +- else if (bpp == 4)
1846 ++ else
1847 + pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel);
1848 + return pixel_val16;
1849 + }
1850 +@@ -125,8 +125,9 @@ static void udl_compress_hline16(
1851 + const u8 *const pixel_end,
1852 + uint32_t *device_address_ptr,
1853 + uint8_t **command_buffer_ptr,
1854 +- const uint8_t *const cmd_buffer_end, int bpp)
1855 ++ const uint8_t *const cmd_buffer_end, int log_bpp)
1856 + {
1857 ++ const int bpp = 1 << log_bpp;
1858 + const u8 *pixel = *pixel_start_ptr;
1859 + uint32_t dev_addr = *device_address_ptr;
1860 + uint8_t *cmd = *command_buffer_ptr;
1861 +@@ -153,12 +154,12 @@ static void udl_compress_hline16(
1862 + raw_pixels_count_byte = cmd++; /* we'll know this later */
1863 + raw_pixel_start = pixel;
1864 +
1865 +- cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL,
1866 +- (unsigned long)(pixel_end - pixel) / bpp,
1867 +- (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp;
1868 ++ cmd_pixel_end = pixel + (min3(MAX_CMD_PIXELS + 1UL,
1869 ++ (unsigned long)(pixel_end - pixel) >> log_bpp,
1870 ++ (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) << log_bpp);
1871 +
1872 + prefetch_range((void *) pixel, cmd_pixel_end - pixel);
1873 +- pixel_val16 = get_pixel_val16(pixel, bpp);
1874 ++ pixel_val16 = get_pixel_val16(pixel, log_bpp);
1875 +
1876 + while (pixel < cmd_pixel_end) {
1877 + const u8 *const start = pixel;
1878 +@@ -170,7 +171,7 @@ static void udl_compress_hline16(
1879 + pixel += bpp;
1880 +
1881 + while (pixel < cmd_pixel_end) {
1882 +- pixel_val16 = get_pixel_val16(pixel, bpp);
1883 ++ pixel_val16 = get_pixel_val16(pixel, log_bpp);
1884 + if (pixel_val16 != repeating_pixel_val16)
1885 + break;
1886 + pixel += bpp;
1887 +@@ -179,10 +180,10 @@ static void udl_compress_hline16(
1888 + if (unlikely(pixel > start + bpp)) {
1889 + /* go back and fill in raw pixel count */
1890 + *raw_pixels_count_byte = (((start -
1891 +- raw_pixel_start) / bpp) + 1) & 0xFF;
1892 ++ raw_pixel_start) >> log_bpp) + 1) & 0xFF;
1893 +
1894 + /* immediately after raw data is repeat byte */
1895 +- *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
1896 ++ *cmd++ = (((pixel - start) >> log_bpp) - 1) & 0xFF;
1897 +
1898 + /* Then start another raw pixel span */
1899 + raw_pixel_start = pixel;
1900 +@@ -192,14 +193,14 @@ static void udl_compress_hline16(
1901 +
1902 + if (pixel > raw_pixel_start) {
1903 + /* finalize last RAW span */
1904 +- *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
1905 ++ *raw_pixels_count_byte = ((pixel - raw_pixel_start) >> log_bpp) & 0xFF;
1906 + } else {
1907 + /* undo unused byte */
1908 + cmd--;
1909 + }
1910 +
1911 +- *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
1912 +- dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
1913 ++ *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) >> log_bpp) & 0xFF;
1914 ++ dev_addr += ((pixel - cmd_pixel_start) >> log_bpp) * 2;
1915 + }
1916 +
1917 + if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
1918 +@@ -222,19 +223,19 @@ static void udl_compress_hline16(
1919 + * (that we can only write to, slowly, and can never read), and (optionally)
1920 + * our shadow copy that tracks what's been sent to that hardware buffer.
1921 + */
1922 +-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
1923 ++int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
1924 + const char *front, char **urb_buf_ptr,
1925 + u32 byte_offset, u32 device_byte_offset,
1926 + u32 byte_width,
1927 + int *ident_ptr, int *sent_ptr)
1928 + {
1929 + const u8 *line_start, *line_end, *next_pixel;
1930 +- u32 base16 = 0 + (device_byte_offset / bpp) * 2;
1931 ++ u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2;
1932 + struct urb *urb = *urb_ptr;
1933 + u8 *cmd = *urb_buf_ptr;
1934 + u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
1935 +
1936 +- BUG_ON(!(bpp == 2 || bpp == 4));
1937 ++ BUG_ON(!(log_bpp == 1 || log_bpp == 2));
1938 +
1939 + line_start = (u8 *) (front + byte_offset);
1940 + next_pixel = line_start;
1941 +@@ -244,7 +245,7 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
1942 +
1943 + udl_compress_hline16(&next_pixel,
1944 + line_end, &base16,
1945 +- (u8 **) &cmd, (u8 *) cmd_end, bpp);
1946 ++ (u8 **) &cmd, (u8 *) cmd_end, log_bpp);
1947 +
1948 + if (cmd >= cmd_end) {
1949 + int len = cmd - (u8 *) urb->transfer_buffer;
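
The udl series replaces every multiply and divide by bpp (2 or 4 bytes per pixel) with a shift by log_bpp, derived once via __ffs() after the BUG_ON confirms the value is a power of two. The equivalence is easy to check standalone (__builtin_ctz standing in for __ffs):

#include <assert.h>
#include <stdio.h>

/* For power-of-two bpp: x * bpp == x << log_bpp and x / bpp == x >> log_bpp,
 * where log_bpp is the index of the lowest (and only) set bit. */
int main(void)
{
	for (unsigned bpp = 2; bpp <= 4; bpp += 2) {
		assert((bpp & (bpp - 1)) == 0);        /* power of two, like the BUG_ON */
		unsigned log_bpp = __builtin_ctz(bpp);  /* 2 -> 1, 4 -> 2 */
		for (unsigned x = 0; x < 1000; x++) {
			assert(x * bpp == x << log_bpp);
			assert(x / bpp == x >> log_bpp);
		}
		printf("bpp=%u log_bpp=%u ok\n", bpp, log_bpp);
	}
	return 0;
}
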
1950 +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
1951 +index 5f87764d7015..ca9941fa741b 100644
1952 +--- a/drivers/hwmon/nct6775.c
1953 ++++ b/drivers/hwmon/nct6775.c
1954 +@@ -63,6 +63,7 @@
1955 + #include <linux/bitops.h>
1956 + #include <linux/dmi.h>
1957 + #include <linux/io.h>
1958 ++#include <linux/nospec.h>
1959 + #include "lm75.h"
1960 +
1961 + #define USE_ALTERNATE
1962 +@@ -2642,6 +2643,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr,
1963 + return err;
1964 + if (val > NUM_TEMP)
1965 + return -EINVAL;
1966 ++ val = array_index_nospec(val, NUM_TEMP + 1);
1967 + if (val && (!(data->have_temp & BIT(val - 1)) ||
1968 + !data->temp_src[val - 1]))
1969 + return -EINVAL;
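
The nct6775 hunk clamps the user-supplied index with array_index_nospec() even though the architectural bounds check already rejected out-of-range values: the clamp also holds on mispredicted, speculative paths (Spectre v1). The generic mask the kernel falls back on is branch-free arithmetic; sketched standalone (relies on arithmetic right shift of a negative value, as the kernel's generic version does):

#include <assert.h>
#include <stdio.h>

/* All-ones when 0 <= index < size, zero otherwise, with no branch. */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1 - index)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	unsigned long size = 25;                    /* e.g. NUM_TEMP + 1 */
	for (unsigned long i = 0; i < 2 * size; i++) {
		unsigned long clamped = i & index_mask(i, size);
		assert(clamped == (i < size ? i : 0));  /* out of range -> 0 */
	}
	printf("mask clamps indexes to [0, %lu)\n", size);
	return 0;
}
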
1970 +diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
1971 +index b8c43535f16c..5cf670f57be7 100644
1972 +--- a/drivers/i2c/busses/i2c-davinci.c
1973 ++++ b/drivers/i2c/busses/i2c-davinci.c
1974 +@@ -234,12 +234,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
1975 + /*
1976 + * It's not always possible to have 1 to 2 ratio when d=7, so fall back
1977 + * to minimal possible clkh in this case.
1978 ++ *
1979 ++ * Note:
1980 ++ * CLKH is not allowed to be 0, in this case I2C clock is not generated
1981 ++ * at all
1982 + */
1983 +- if (clk >= clkl + d) {
1984 ++ if (clk > clkl + d) {
1985 + clkh = clk - clkl - d;
1986 + clkl -= d;
1987 + } else {
1988 +- clkh = 0;
1989 ++ clkh = 1;
1990 + clkl = clk - (d << 1);
1991 + }
1992 +
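
The i2c-davinci fix guarantees CLKH is programmed as at least 1, since a zero CLKH stops SCL generation entirely: the >= becomes > and the fallback now trades a slightly shorter low phase for a minimal high phase. The fallback in isolation, with made-up divider inputs hitting the former clkh == 0 edge case:

#include <stdio.h>

/* Sketch of the divider fallback: never let clkh reach 0. */
static void pick_dividers(int clk, int clkl, int d, int *clkh_out, int *clkl_out)
{
	if (clk > clkl + d) {        /* was ">=", which allowed clkh == 0 */
		*clkh_out = clk - clkl - d;
		*clkl_out = clkl - d;
	} else {
		*clkh_out = 1;       /* was 0: no I2C clock at all */
		*clkl_out = clk - (d << 1);
	}
}

int main(void)
{
	int clkh, clkl;
	pick_dividers(20, 13, 7, &clkh, &clkl);  /* clk == clkl + d edge case */
	printf("clkh=%d clkl=%d\n", clkh, clkl); /* clkh=1, not 0 */
	return 0;
}
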
1993 +diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
1994 +index 6f2fe63e8f5a..7b961c9c62ef 100644
1995 +--- a/drivers/i2c/i2c-core-base.c
1996 ++++ b/drivers/i2c/i2c-core-base.c
1997 +@@ -638,7 +638,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
1998 + static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
1999 + unsigned int flags)
2000 + {
2001 +- rt_mutex_lock(&adapter->bus_lock);
2002 ++ rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
2003 + }
2004 +
2005 + /**
2006 +diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
2007 +index 9669ca4937b8..7ba31f6bf148 100644
2008 +--- a/drivers/i2c/i2c-mux.c
2009 ++++ b/drivers/i2c/i2c-mux.c
2010 +@@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
2011 + struct i2c_mux_priv *priv = adapter->algo_data;
2012 + struct i2c_adapter *parent = priv->muxc->parent;
2013 +
2014 +- rt_mutex_lock(&parent->mux_lock);
2015 ++ rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
2016 + if (!(flags & I2C_LOCK_ROOT_ADAPTER))
2017 + return;
2018 + i2c_lock_bus(parent, flags);
2019 +@@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
2020 + struct i2c_mux_priv *priv = adapter->algo_data;
2021 + struct i2c_adapter *parent = priv->muxc->parent;
2022 +
2023 +- rt_mutex_lock(&parent->mux_lock);
2024 ++ rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
2025 + i2c_lock_bus(parent, flags);
2026 + }
2027 +
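
Both i2c hunks above switch to rt_mutex_lock_nested() keyed by i2c_adapter_depth(), giving lockdep a distinct subclass per mux level so that taking a parent adapter's lock while holding a child's does not look like recursive self-deadlock. The depth itself is just a walk up the parent chain; a standalone sketch (struct and names invented):

#include <stdio.h>

/* Subclass = number of mux levels above the adapter, analogous to
 * i2c_adapter_depth(); each level's lock then gets its own lockdep class. */
struct adapter {
	const char *name;
	struct adapter *parent;   /* NULL for a root adapter */
};

static unsigned adapter_depth(const struct adapter *a)
{
	unsigned depth = 0;
	while (a->parent) {
		a = a->parent;
		depth++;
	}
	return depth;
}

int main(void)
{
	struct adapter root = { "i2c-0", NULL };
	struct adapter mux  = { "i2c-0-mux0", &root };
	struct adapter leaf = { "i2c-0-mux0-mux1", &mux };

	printf("%s depth=%u\n", root.name, adapter_depth(&root)); /* 0 */
	printf("%s depth=%u\n", leaf.name, adapter_depth(&leaf)); /* 2 */
	return 0;
}
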
2028 +diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
2029 +index 3bdb799d3b4b..2c436376f13e 100644
2030 +--- a/drivers/iommu/arm-smmu.c
2031 ++++ b/drivers/iommu/arm-smmu.c
2032 +@@ -2100,12 +2100,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
2033 + if (err)
2034 + return err;
2035 +
2036 +- if (smmu->version == ARM_SMMU_V2 &&
2037 +- smmu->num_context_banks != smmu->num_context_irqs) {
2038 +- dev_err(dev,
2039 +- "found only %d context interrupt(s) but %d required\n",
2040 +- smmu->num_context_irqs, smmu->num_context_banks);
2041 +- return -ENODEV;
2042 ++ if (smmu->version == ARM_SMMU_V2) {
2043 ++ if (smmu->num_context_banks > smmu->num_context_irqs) {
2044 ++ dev_err(dev,
2045 ++ "found only %d context irq(s) but %d required\n",
2046 ++ smmu->num_context_irqs, smmu->num_context_banks);
2047 ++ return -ENODEV;
2048 ++ }
2049 ++
2050 ++ /* Ignore superfluous interrupts */
2051 ++ smmu->num_context_irqs = smmu->num_context_banks;
2052 + }
2053 +
2054 + for (i = 0; i < smmu->num_global_irqs; ++i) {
2055 +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
2056 +index 22efc039f302..8d1d40dbf744 100644
2057 +--- a/drivers/misc/mei/main.c
2058 ++++ b/drivers/misc/mei/main.c
2059 +@@ -291,7 +291,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
2060 + goto out;
2061 + }
2062 +
2063 +- *offset = 0;
2064 + cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
2065 + if (!cb) {
2066 + rets = -ENOMEM;
2067 +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
2068 +index ca3fa82316c2..d3ce904e929e 100644
2069 +--- a/drivers/net/can/m_can/m_can.c
2070 ++++ b/drivers/net/can/m_can/m_can.c
2071 +@@ -1637,8 +1637,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
2072 + priv->can.clock.freq = clk_get_rate(cclk);
2073 + priv->mram_base = mram_addr;
2074 +
2075 +- m_can_of_parse_mram(priv, mram_config_vals);
2076 +-
2077 + platform_set_drvdata(pdev, dev);
2078 + SET_NETDEV_DEV(dev, &pdev->dev);
2079 +
2080 +@@ -1649,6 +1647,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
2081 + goto failed_free_dev;
2082 + }
2083 +
2084 ++ m_can_of_parse_mram(priv, mram_config_vals);
2085 ++
2086 + devm_can_led_init(dev);
2087 +
2088 + dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n",
2089 +@@ -1698,8 +1698,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
2090 +
2091 + pinctrl_pm_select_default_state(dev);
2092 +
2093 +- m_can_init_ram(priv);
2094 +-
2095 + priv->can.state = CAN_STATE_ERROR_ACTIVE;
2096 +
2097 + if (netif_running(ndev)) {
2098 +@@ -1709,6 +1707,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
2099 + if (ret)
2100 + return ret;
2101 +
2102 ++ m_can_init_ram(priv);
2103 + m_can_start(ndev);
2104 + netif_device_attach(ndev);
2105 + netif_start_queue(ndev);
2106 +diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
2107 +index c7427bdd3a4b..2949a381a94d 100644
2108 +--- a/drivers/net/can/mscan/mpc5xxx_can.c
2109 ++++ b/drivers/net/can/mscan/mpc5xxx_can.c
2110 +@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
2111 + return 0;
2112 + }
2113 + cdm = of_iomap(np_cdm, 0);
2114 ++ if (!cdm) {
2115 ++ of_node_put(np_cdm);
2116 ++ dev_err(&ofdev->dev, "can't map clock node!\n");
2117 ++ return 0;
2118 ++ }
2119 +
2120 + if (in_8(&cdm->ipb_clk_sel) & 0x1)
2121 + freq *= 2;
2122 +diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
2123 +index 5b7658bcf020..5c3ef9fc8207 100644
2124 +--- a/drivers/net/ethernet/3com/Kconfig
2125 ++++ b/drivers/net/ethernet/3com/Kconfig
2126 +@@ -32,7 +32,7 @@ config EL3
2127 +
2128 + config 3C515
2129 + tristate "3c515 ISA \"Fast EtherLink\""
2130 +- depends on ISA && ISA_DMA_API
2131 ++ depends on ISA && ISA_DMA_API && !PPC32
2132 + ---help---
2133 + If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
2134 + network card, say Y here.
2135 +diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
2136 +index d5c15e8bb3de..a8e8f4e9c1bb 100644
2137 +--- a/drivers/net/ethernet/amd/Kconfig
2138 ++++ b/drivers/net/ethernet/amd/Kconfig
2139 +@@ -44,7 +44,7 @@ config AMD8111_ETH
2140 +
2141 + config LANCE
2142 + tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
2143 +- depends on ISA && ISA_DMA_API && !ARM
2144 ++ depends on ISA && ISA_DMA_API && !ARM && !PPC32
2145 + ---help---
2146 + If you have a network (Ethernet) card of this type, say Y here.
2147 + Some LinkSys cards are of this type.
2148 +@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
2149 +
2150 + config NI65
2151 + tristate "NI6510 support"
2152 +- depends on ISA && ISA_DMA_API && !ARM
2153 ++ depends on ISA && ISA_DMA_API && !ARM && !PPC32
2154 + ---help---
2155 + If you have a network (Ethernet) card of this type, say Y here.
2156 +
2157 +diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
2158 +index 8c9986f3fc01..3615c2a06fda 100644
2159 +--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
2160 ++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
2161 +@@ -1685,6 +1685,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
2162 + skb = build_skb(page_address(page) + adapter->rx_page_offset,
2163 + adapter->rx_frag_size);
2164 + if (likely(skb)) {
2165 ++ skb_reserve(skb, NET_SKB_PAD);
2166 + adapter->rx_page_offset += adapter->rx_frag_size;
2167 + if (adapter->rx_page_offset >= PAGE_SIZE)
2168 + adapter->rx_page = NULL;
2169 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
2170 +index 1e33abde4a3e..3fd1085a093f 100644
2171 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
2172 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
2173 +@@ -3387,14 +3387,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
2174 + DP(BNX2X_MSG_ETHTOOL,
2175 + "rss re-configured, UDP 4-tupple %s\n",
2176 + udp_rss_requested ? "enabled" : "disabled");
2177 +- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
2178 ++ if (bp->state == BNX2X_STATE_OPEN)
2179 ++ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
2180 ++ true);
2181 + } else if ((info->flow_type == UDP_V6_FLOW) &&
2182 + (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
2183 + bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
2184 + DP(BNX2X_MSG_ETHTOOL,
2185 + "rss re-configured, UDP 4-tupple %s\n",
2186 + udp_rss_requested ? "enabled" : "disabled");
2187 +- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
2188 ++ if (bp->state == BNX2X_STATE_OPEN)
2189 ++ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
2190 ++ true);
2191 + }
2192 + return 0;
2193 +
2194 +@@ -3508,7 +3512,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
2195 + bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
2196 + }
2197 +
2198 +- return bnx2x_config_rss_eth(bp, false);
2199 ++ if (bp->state == BNX2X_STATE_OPEN)
2200 ++ return bnx2x_config_rss_eth(bp, false);
2201 ++
2202 ++ return 0;
2203 + }
2204 +
2205 + /**
2206 +diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
2207 +index 5ab912937aff..ec0b545197e2 100644
2208 +--- a/drivers/net/ethernet/cirrus/Kconfig
2209 ++++ b/drivers/net/ethernet/cirrus/Kconfig
2210 +@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
2211 + config CS89x0
2212 + tristate "CS89x0 support"
2213 + depends on ISA || EISA || ARM
2214 ++ depends on !PPC32
2215 + ---help---
2216 + Support for CS89x0 chipset based Ethernet cards. If you have a
2217 + network (Ethernet) card of this type, say Y and read the file
2218 +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
2219 +index 800edfbd36c1..2bfaf3e118b1 100644
2220 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c
2221 ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
2222 +@@ -2007,28 +2007,42 @@ static int enic_stop(struct net_device *netdev)
2223 + return 0;
2224 + }
2225 +
2226 ++static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
2227 ++{
2228 ++ bool running = netif_running(netdev);
2229 ++ int err = 0;
2230 ++
2231 ++ ASSERT_RTNL();
2232 ++ if (running) {
2233 ++ err = enic_stop(netdev);
2234 ++ if (err)
2235 ++ return err;
2236 ++ }
2237 ++
2238 ++ netdev->mtu = new_mtu;
2239 ++
2240 ++ if (running) {
2241 ++ err = enic_open(netdev);
2242 ++ if (err)
2243 ++ return err;
2244 ++ }
2245 ++
2246 ++ return 0;
2247 ++}
2248 ++
2249 + static int enic_change_mtu(struct net_device *netdev, int new_mtu)
2250 + {
2251 + struct enic *enic = netdev_priv(netdev);
2252 +- int running = netif_running(netdev);
2253 +
2254 + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
2255 + return -EOPNOTSUPP;
2256 +
2257 +- if (running)
2258 +- enic_stop(netdev);
2259 +-
2260 +- netdev->mtu = new_mtu;
2261 +-
2262 + if (netdev->mtu > enic->port_mtu)
2263 + netdev_warn(netdev,
2264 +- "interface MTU (%d) set higher than port MTU (%d)\n",
2265 +- netdev->mtu, enic->port_mtu);
2266 ++ "interface MTU (%d) set higher than port MTU (%d)\n",
2267 ++ netdev->mtu, enic->port_mtu);
2268 +
2269 +- if (running)
2270 +- enic_open(netdev);
2271 +-
2272 +- return 0;
2273 ++ return _enic_change_mtu(netdev, new_mtu);
2274 + }
2275 +
2276 + static void enic_change_mtu_work(struct work_struct *work)
2277 +@@ -2036,47 +2050,9 @@ static void enic_change_mtu_work(struct work_struct *work)
2278 + struct enic *enic = container_of(work, struct enic, change_mtu_work);
2279 + struct net_device *netdev = enic->netdev;
2280 + int new_mtu = vnic_dev_mtu(enic->vdev);
2281 +- int err;
2282 +- unsigned int i;
2283 +-
2284 +- new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
2285 +
2286 + rtnl_lock();
2287 +-
2288 +- /* Stop RQ */
2289 +- del_timer_sync(&enic->notify_timer);
2290 +-
2291 +- for (i = 0; i < enic->rq_count; i++)
2292 +- napi_disable(&enic->napi[i]);
2293 +-
2294 +- vnic_intr_mask(&enic->intr[0]);
2295 +- enic_synchronize_irqs(enic);
2296 +- err = vnic_rq_disable(&enic->rq[0]);
2297 +- if (err) {
2298 +- rtnl_unlock();
2299 +- netdev_err(netdev, "Unable to disable RQ.\n");
2300 +- return;
2301 +- }
2302 +- vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
2303 +- vnic_cq_clean(&enic->cq[0]);
2304 +- vnic_intr_clean(&enic->intr[0]);
2305 +-
2306 +- /* Fill RQ with new_mtu-sized buffers */
2307 +- netdev->mtu = new_mtu;
2308 +- vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
2309 +- /* Need at least one buffer on ring to get going */
2310 +- if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
2311 +- rtnl_unlock();
2312 +- netdev_err(netdev, "Unable to alloc receive buffers.\n");
2313 +- return;
2314 +- }
2315 +-
2316 +- /* Start RQ */
2317 +- vnic_rq_enable(&enic->rq[0]);
2318 +- napi_enable(&enic->napi[0]);
2319 +- vnic_intr_unmask(&enic->intr[0]);
2320 +- enic_notify_timer_start(enic);
2321 +-
2322 ++ (void)_enic_change_mtu(netdev, new_mtu);
2323 + rtnl_unlock();
2324 +
2325 + netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
2326 +@@ -2867,7 +2843,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2327 + */
2328 +
2329 + enic->port_mtu = enic->config.mtu;
2330 +- (void)enic_change_mtu(netdev, enic->port_mtu);
2331 +
2332 + err = enic_set_mac_addr(netdev, enic->mac_addr);
2333 + if (err) {
2334 +@@ -2954,6 +2929,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2335 + /* MTU range: 68 - 9000 */
2336 + netdev->min_mtu = ENIC_MIN_MTU;
2337 + netdev->max_mtu = ENIC_MAX_MTU;
2338 ++ netdev->mtu = enic->port_mtu;
2339 +
2340 + err = register_netdev(netdev);
2341 + if (err) {
2342 +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
2343 +index eb53bd93065e..a696b5b2d40e 100644
2344 +--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
2345 ++++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
2346 +@@ -981,6 +981,7 @@ static int nic_dev_init(struct pci_dev *pdev)
2347 + hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
2348 + nic_dev, link_status_event_handler);
2349 +
2350 ++ SET_NETDEV_DEV(netdev, &pdev->dev);
2351 + err = register_netdev(netdev);
2352 + if (err) {
2353 + dev_err(&pdev->dev, "Failed to register netdev\n");
2354 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
2355 +index 91fe03617106..72496060e332 100644
2356 +--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
2357 ++++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
2358 +@@ -79,7 +79,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
2359 + return NFP_REPR_TYPE_VF;
2360 + }
2361 +
2362 +- return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC;
2363 ++ return __NFP_REPR_TYPE_MAX;
2364 + }
2365 +
2366 + static struct net_device *
2367 +@@ -90,6 +90,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
2368 + u8 port = 0;
2369 +
2370 + repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
2371 ++ if (repr_type > NFP_REPR_TYPE_MAX)
2372 ++ return NULL;
2373 +
2374 + reprs = rcu_dereference(app->reprs[repr_type]);
2375 + if (!reprs)
2376 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
2377 +index c5452b445c37..83c1c4fa102b 100644
2378 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
2379 ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
2380 +@@ -663,7 +663,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
2381 +
2382 + p_ramrod->common.update_approx_mcast_flg = 1;
2383 + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
2384 +- u32 *p_bins = (u32 *)p_params->bins;
2385 ++ u32 *p_bins = p_params->bins;
2386 +
2387 + p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
2388 + }
2389 +@@ -1474,8 +1474,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
2390 + enum spq_mode comp_mode,
2391 + struct qed_spq_comp_cb *p_comp_data)
2392 + {
2393 +- unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
2394 + struct vport_update_ramrod_data *p_ramrod = NULL;
2395 ++ u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
2396 + struct qed_spq_entry *p_ent = NULL;
2397 + struct qed_sp_init_data init_data;
2398 + u8 abs_vport_id = 0;
2399 +@@ -1511,26 +1511,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
2400 + /* explicitly clear out the entire vector */
2401 + memset(&p_ramrod->approx_mcast.bins, 0,
2402 + sizeof(p_ramrod->approx_mcast.bins));
2403 +- memset(bins, 0, sizeof(unsigned long) *
2404 +- ETH_MULTICAST_MAC_BINS_IN_REGS);
2405 ++ memset(bins, 0, sizeof(bins));
2406 + /* filter ADD op is explicit set op and it removes
2407 + * any existing filters for the vport
2408 + */
2409 + if (p_filter_cmd->opcode == QED_FILTER_ADD) {
2410 + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
2411 +- u32 bit;
2412 ++ u32 bit, nbits;
2413 +
2414 + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
2415 +- __set_bit(bit, bins);
2416 ++ nbits = sizeof(u32) * BITS_PER_BYTE;
2417 ++ bins[bit / nbits] |= 1 << (bit % nbits);
2418 + }
2419 +
2420 + /* Convert to correct endianity */
2421 + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
2422 + struct vport_update_ramrod_mcast *p_ramrod_bins;
2423 +- u32 *p_bins = (u32 *)bins;
2424 +
2425 + p_ramrod_bins = &p_ramrod->approx_mcast;
2426 +- p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
2427 ++ p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
2428 + }
2429 + }
2430 +
2431 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
2432 +index cc1f248551c9..91d383f3a661 100644
2433 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
2434 ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
2435 +@@ -214,7 +214,7 @@ struct qed_sp_vport_update_params {
2436 + u8 anti_spoofing_en;
2437 + u8 update_accept_any_vlan_flg;
2438 + u8 accept_any_vlan;
2439 +- unsigned long bins[8];
2440 ++ u32 bins[8];
2441 + struct qed_rss_params *rss_params;
2442 + struct qed_filter_accept_flags accept_flags;
2443 + struct qed_sge_tpa_params *sge_tpa_params;
2444 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
2445 +index 376485d99357..3c469355f5a4 100644
2446 +--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
2447 ++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
2448 +@@ -1182,6 +1182,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
2449 + break;
2450 + default:
2451 + p_link->speed = 0;
2452 ++ p_link->link_up = 0;
2453 + }
2454 +
2455 + if (p_link->link_up && p_link->speed)
2456 +@@ -1279,9 +1280,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
2457 + phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
2458 + phy_cfg.adv_speed = params->speed.advertised_speeds;
2459 + phy_cfg.loopback_mode = params->loopback_mode;
2460 +- if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
2461 +- if (params->eee.enable)
2462 +- phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
2463 ++
2464 ++ /* There are MFWs that share this capability regardless of whether
2465 ++ * this is feasible or not. And given that at the very least adv_caps
2466 ++ * would be set internally by qed, we want to make sure LFA would
2467 ++ * still work.
2468 ++ */
2469 ++ if ((p_hwfn->mcp_info->capabilities &
2470 ++ FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
2471 ++ phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
2472 + if (params->eee.tx_lpi_enable)
2473 + phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
2474 + if (params->eee.adv_caps & QED_EEE_1G_ADV)
2475 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2476 +index d08fe350ab6c..c6411158afd7 100644
2477 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2478 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2479 +@@ -2826,7 +2826,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2480 +
2481 + p_data->update_approx_mcast_flg = 1;
2482 + memcpy(p_data->bins, p_mcast_tlv->bins,
2483 +- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2484 ++ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2485 + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2486 + }
2487 +
2488 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
2489 +index 91b5e9f02a62..6eb85db69f9a 100644
2490 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
2491 ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
2492 +@@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
2493 + resp_size += sizeof(struct pfvf_def_resp_tlv);
2494 +
2495 + memcpy(p_mcast_tlv->bins, p_params->bins,
2496 +- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2497 ++ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2498 + }
2499 +
2500 + update_rx = p_params->accept_flags.update_rx_mode_config;
2501 +@@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
2502 + u32 bit;
2503 +
2504 + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
2505 +- __set_bit(bit, sp_params.bins);
2506 ++ sp_params.bins[bit / 32] |= 1 << (bit % 32);
2507 + }
2508 + }
2509 +
2510 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
2511 +index 97d44dfb38ca..1e93c712fa34 100644
2512 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
2513 ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
2514 +@@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
2515 + struct channel_tlv tl;
2516 + u8 padding[4];
2517 +
2518 +- u64 bins[8];
2519 ++ /* There are only 256 approx bins, and in HSI they're divided into
2520 ++ * 32-bit values. As old VFs used to set-bit to the values on its side,
2521 ++ * the upper half of the array is never expected to contain any data.
2522 ++ */
2523 ++ u64 bins[4];
2524 ++ u64 obsolete_bins[4];
2525 + };
2526 +
2527 + struct vfpf_vport_update_accept_param_tlv {
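
The qed series stops building the 256-bit multicast bin vector with __set_bit() on unsigned long words and sets bits in u32 words directly: on big-endian 64-bit hosts the unsigned long layout differs once the buffer is reinterpreted as u32s for cpu_to_le32(). The replacement packing, checked standalone:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bit N of the 256-bit vector lands in u32 word N/32, bit N%32 --
 * the layout the firmware interface expects regardless of host word
 * size or endianness. */
#define BINS_WORDS 8                 /* 8 x u32 = 256 bins */

static void set_bin(uint32_t *bins, unsigned bit)
{
	unsigned nbits = 32;
	bins[bit / nbits] |= 1u << (bit % nbits);
}

int main(void)
{
	uint32_t bins[BINS_WORDS] = { 0 };

	set_bin(bins, 0);
	set_bin(bins, 33);
	set_bin(bins, 255);

	assert(bins[0] == 0x1);          /* bit 0 in word 0 */
	assert(bins[1] == 0x2);          /* bit 33 -> word 1, bit 1 */
	assert(bins[7] == 0x80000000u);  /* bit 255 -> word 7, bit 31 */
	printf("bin packing ok\n");
	return 0;
}
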
2528 +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
2529 +index 16c3bfbe1992..757a3b37ae8a 100644
2530 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
2531 ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
2532 +@@ -218,6 +218,7 @@ issue:
2533 + ret = of_mdiobus_register(bus, np1);
2534 + if (ret) {
2535 + mdiobus_free(bus);
2536 ++ lp->mii_bus = NULL;
2537 + return ret;
2538 + }
2539 + return 0;
2540 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2541 +index 6d3811c869fd..31684f3382f6 100644
2542 +--- a/drivers/net/usb/qmi_wwan.c
2543 ++++ b/drivers/net/usb/qmi_wwan.c
2544 +@@ -1245,7 +1245,7 @@ static const struct usb_device_id products[] = {
2545 + {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
2546 + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
2547 + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
2548 +- {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */
2549 ++ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
2550 + {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
2551 + {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
2552 + {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
2553 +diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
2554 +index 4698450c77d1..bb43d176eb4e 100644
2555 +--- a/drivers/net/wan/lmc/lmc_main.c
2556 ++++ b/drivers/net/wan/lmc/lmc_main.c
2557 +@@ -1371,7 +1371,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
2558 + case 0x001:
2559 + printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
2560 + break;
2561 +- case 0x010:
2562 ++ case 0x002:
2563 + printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
2564 + break;
2565 + default:
2566 +diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
2567 +index cb987c2ecc6b..87131f663292 100644
2568 +--- a/drivers/net/wireless/broadcom/b43/leds.c
2569 ++++ b/drivers/net/wireless/broadcom/b43/leds.c
2570 +@@ -131,7 +131,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
2571 + led->wl = dev->wl;
2572 + led->index = led_index;
2573 + led->activelow = activelow;
2574 +- strncpy(led->name, name, sizeof(led->name));
2575 ++ strlcpy(led->name, name, sizeof(led->name));
2576 + atomic_set(&led->state, 0);
2577 +
2578 + led->led_dev.name = led->name;
2579 +diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c
2580 +index fd4565389c77..bc922118b6ac 100644
2581 +--- a/drivers/net/wireless/broadcom/b43legacy/leds.c
2582 ++++ b/drivers/net/wireless/broadcom/b43legacy/leds.c
2583 +@@ -101,7 +101,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev,
2584 + led->dev = dev;
2585 + led->index = led_index;
2586 + led->activelow = activelow;
2587 +- strncpy(led->name, name, sizeof(led->name));
2588 ++ strlcpy(led->name, name, sizeof(led->name));
2589 +
2590 + led->led_dev.name = led->name;
2591 + led->led_dev.default_trigger = default_trigger;
2592 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2593 +index a67d03716510..afb99876fa9e 100644
2594 +--- a/drivers/nvme/host/pci.c
2595 ++++ b/drivers/nvme/host/pci.c
2596 +@@ -306,6 +306,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
2597 + old_value = *dbbuf_db;
2598 + *dbbuf_db = value;
2599 +
2600 ++ /*
2601 ++ * Ensure that the doorbell is updated before reading the event
2602 ++ * index from memory. The controller needs to provide similar
2603 ++ * ordering to ensure the event index is updated before reading
2604 ++ * the doorbell.
2605 ++ */
2606 ++ mb();
2607 ++
2608 + if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
2609 + return false;
2610 + }
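
The nvme hunk puts a full barrier between the shadow-doorbell store and the event-index load, matching the ordering the controller provides from its side, so neither party acts on a stale pairing. The store-fence-load shape in portable C11 atomics (a sketch of the ordering only; the real eligibility test is nvme_dbbuf_need_event()):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int dbbuf_db; /* shadow doorbell (host writes) */
static _Atomic unsigned int dbbuf_ei; /* event index (device writes) */

static int need_event(unsigned int new_db)
{
	atomic_store_explicit(&dbbuf_db, new_db, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);   /* the added mb() */
	unsigned int ei = atomic_load_explicit(&dbbuf_ei,
					       memory_order_relaxed);
	return ei == new_db;  /* placeholder for the real window check */
}

int main(void)
{
	atomic_store(&dbbuf_ei, 5);
	printf("need_event=%d\n", need_event(5));
	return 0;
}
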
2611 +diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
2612 +index a4e9f430d452..e2cca91fd266 100644
2613 +--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
2614 ++++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
2615 +@@ -433,7 +433,7 @@ static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
2616 + const char *name;
2617 + int i, ret;
2618 +
2619 +- if (group > info->ngroups)
2620 ++ if (group >= info->ngroups)
2621 + return;
2622 +
2623 + seq_puts(s, "\n");
2624 +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
2625 +index fe98d4ac0df3..e1e7e587b45b 100644
2626 +--- a/drivers/platform/x86/ideapad-laptop.c
2627 ++++ b/drivers/platform/x86/ideapad-laptop.c
2628 +@@ -1097,10 +1097,10 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
2629 + },
2630 + },
2631 + {
2632 +- .ident = "Lenovo Legion Y520-15IKBN",
2633 ++ .ident = "Lenovo Legion Y520-15IKB",
2634 + .matches = {
2635 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2636 +- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBN"),
2637 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKB"),
2638 + },
2639 + },
2640 + {
2641 +diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
2642 +index 37e523374fe0..371b5ec70087 100644
2643 +--- a/drivers/power/supply/generic-adc-battery.c
2644 ++++ b/drivers/power/supply/generic-adc-battery.c
2645 +@@ -243,10 +243,10 @@ static int gab_probe(struct platform_device *pdev)
2646 + struct power_supply_desc *psy_desc;
2647 + struct power_supply_config psy_cfg = {};
2648 + struct gab_platform_data *pdata = pdev->dev.platform_data;
2649 +- enum power_supply_property *properties;
2650 + int ret = 0;
2651 + int chan;
2652 +- int index = 0;
2653 ++ int index = ARRAY_SIZE(gab_props);
2654 ++ bool any = false;
2655 +
2656 + adc_bat = devm_kzalloc(&pdev->dev, sizeof(*adc_bat), GFP_KERNEL);
2657 + if (!adc_bat) {
2658 +@@ -280,8 +280,6 @@ static int gab_probe(struct platform_device *pdev)
2659 + }
2660 +
2661 + memcpy(psy_desc->properties, gab_props, sizeof(gab_props));
2662 +- properties = (enum power_supply_property *)
2663 +- ((char *)psy_desc->properties + sizeof(gab_props));
2664 +
2665 + /*
2666 + * getting channel from iio and copying the battery properties
2667 +@@ -295,15 +293,22 @@ static int gab_probe(struct platform_device *pdev)
2668 + adc_bat->channel[chan] = NULL;
2669 + } else {
2670 + /* copying properties for supported channels only */
2671 +- memcpy(properties + sizeof(*(psy_desc->properties)) * index,
2672 +- &gab_dyn_props[chan],
2673 +- sizeof(gab_dyn_props[chan]));
2674 +- index++;
2675 ++ int index2;
2676 ++
2677 ++ for (index2 = 0; index2 < index; index2++) {
2678 ++ if (psy_desc->properties[index2] ==
2679 ++ gab_dyn_props[chan])
2680 ++ break; /* already known */
2681 ++ }
2682 ++ if (index2 == index) /* really new */
2683 ++ psy_desc->properties[index++] =
2684 ++ gab_dyn_props[chan];
2685 ++ any = true;
2686 + }
2687 + }
2688 +
2689 + /* none of the channels are supported so let's bail out */
2690 +- if (index == 0) {
2691 ++ if (!any) {
2692 + ret = -ENODEV;
2693 + goto second_mem_fail;
2694 + }
2695 +@@ -314,7 +319,7 @@ static int gab_probe(struct platform_device *pdev)
2696 + * as some channels may not be supported by the device. So
2697 + * we need to take care of that.
2698 + */
2699 +- psy_desc->num_properties = ARRAY_SIZE(gab_props) + index;
2700 ++ psy_desc->num_properties = index;
2701 +
2702 + adc_bat->psy = power_supply_register(&pdev->dev, psy_desc, &psy_cfg);
2703 + if (IS_ERR(adc_bat->psy)) {
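
The generic-adc-battery rework appends a channel's dynamic property only if it is not already present and derives num_properties from the final write index, instead of assuming one new property per channel. The append-if-new step in isolation (integer stand-ins for the property enum):

#include <stdio.h>

/* Scan the filled prefix, append only unseen values, return the new count. */
static int append_unique(int *arr, int count, int val)
{
	for (int i = 0; i < count; i++)
		if (arr[i] == val)
			return count;  /* already known */
	arr[count] = val;
	return count + 1;
}

int main(void)
{
	int props[8];
	int n = 0;

	n = append_unique(props, n, 4);    /* stand-in property IDs */
	n = append_unique(props, n, 7);
	n = append_unique(props, n, 4);    /* duplicate channel property */

	printf("num_properties=%d\n", n);  /* 2, not 3 */
	return 0;
}
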
2704 +diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
2705 +index 8941e7caaf4d..c7afdbded26b 100644
2706 +--- a/drivers/s390/cio/qdio_main.c
2707 ++++ b/drivers/s390/cio/qdio_main.c
2708 +@@ -641,21 +641,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
2709 + unsigned long phys_aob = 0;
2710 +
2711 + if (!q->use_cq)
2712 +- goto out;
2713 ++ return 0;
2714 +
2715 + if (!q->aobs[bufnr]) {
2716 + struct qaob *aob = qdio_allocate_aob();
2717 + q->aobs[bufnr] = aob;
2718 + }
2719 + if (q->aobs[bufnr]) {
2720 +- q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
2721 + q->sbal_state[bufnr].aob = q->aobs[bufnr];
2722 + q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
2723 + phys_aob = virt_to_phys(q->aobs[bufnr]);
2724 + WARN_ON_ONCE(phys_aob & 0xFF);
2725 + }
2726 +
2727 +-out:
2728 ++ q->sbal_state[bufnr].flags = 0;
2729 + return phys_aob;
2730 + }
2731 +
2732 +diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
2733 +index fff6f1851dc1..03019e07abb9 100644
2734 +--- a/drivers/scsi/fcoe/fcoe_ctlr.c
2735 ++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
2736 +@@ -754,9 +754,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
2737 + case ELS_LOGO:
2738 + if (fip->mode == FIP_MODE_VN2VN) {
2739 + if (fip->state != FIP_ST_VNMP_UP)
2740 +- return -EINVAL;
2741 ++ goto drop;
2742 + if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
2743 +- return -EINVAL;
2744 ++ goto drop;
2745 + } else {
2746 + if (fip->state != FIP_ST_ENABLED)
2747 + return 0;
2748 +@@ -799,9 +799,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
2749 + fip->send(fip, skb);
2750 + return -EINPROGRESS;
2751 + drop:
2752 +- kfree_skb(skb);
2753 + LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
2754 + op, ntoh24(fh->fh_d_id));
2755 ++ kfree_skb(skb);
2756 + return -EINVAL;
2757 + }
2758 + EXPORT_SYMBOL(fcoe_ctlr_els_send);
2759 +diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
2760 +index 31d31aad3de1..89b1f1af2fd4 100644
2761 +--- a/drivers/scsi/libfc/fc_rport.c
2762 ++++ b/drivers/scsi/libfc/fc_rport.c
2763 +@@ -2164,6 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
2764 + FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
2765 + fc_rport_state(rdata));
2766 +
2767 ++ rdata->flags &= ~FC_RP_STARTED;
2768 + fc_rport_enter_delete(rdata, RPORT_EV_STOP);
2769 + mutex_unlock(&rdata->rp_mutex);
2770 + kref_put(&rdata->kref, fc_rport_destroy);
2771 +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
2772 +index bddbe2da5283..cf8a15e54d83 100644
2773 +--- a/drivers/scsi/libiscsi.c
2774 ++++ b/drivers/scsi/libiscsi.c
2775 +@@ -284,11 +284,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
2776 + */
2777 + if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
2778 + iscsi_conn_printk(KERN_INFO, conn,
2779 +- "task [op %x/%x itt "
2780 ++ "task [op %x itt "
2781 + "0x%x/0x%x] "
2782 + "rejected.\n",
2783 +- task->hdr->opcode, opcode,
2784 +- task->itt, task->hdr_itt);
2785 ++ opcode, task->itt,
2786 ++ task->hdr_itt);
2787 + return -EACCES;
2788 + }
2789 + /*
2790 +@@ -297,10 +297,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
2791 + */
2792 + if (conn->session->fast_abort) {
2793 + iscsi_conn_printk(KERN_INFO, conn,
2794 +- "task [op %x/%x itt "
2795 ++ "task [op %x itt "
2796 + "0x%x/0x%x] fast abort.\n",
2797 +- task->hdr->opcode, opcode,
2798 +- task->itt, task->hdr_itt);
2799 ++ opcode, task->itt,
2800 ++ task->hdr_itt);
2801 + return -EACCES;
2802 + }
2803 + break;
2804 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
2805 +index d3940c5d079d..63dd9bc21ff2 100644
2806 +--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
2807 ++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
2808 +@@ -1936,12 +1936,12 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
2809 + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
2810 + __func__, ioc->name);
2811 + rc = -EFAULT;
2812 +- goto out;
2813 ++ goto job_done;
2814 + }
2815 +
2816 + rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
2817 + if (rc)
2818 +- goto out;
2819 ++ goto job_done;
2820 +
2821 + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
2822 + pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
2823 +@@ -2066,6 +2066,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
2824 + out:
2825 + ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
2826 + mutex_unlock(&ioc->transport_cmds.mutex);
2827 ++job_done:
2828 + bsg_job_done(job, rc, reslen);
2829 + }
2830 +
2831 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
2832 +index 40406c162d0d..8ce12ffcbb7a 100644
2833 +--- a/drivers/scsi/scsi_sysfs.c
2834 ++++ b/drivers/scsi/scsi_sysfs.c
2835 +@@ -721,8 +721,24 @@ static ssize_t
2836 + sdev_store_delete(struct device *dev, struct device_attribute *attr,
2837 + const char *buf, size_t count)
2838 + {
2839 +- if (device_remove_file_self(dev, attr))
2840 +- scsi_remove_device(to_scsi_device(dev));
2841 ++ struct kernfs_node *kn;
2842 ++
2843 ++ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
2844 ++ WARN_ON_ONCE(!kn);
2845 ++ /*
2846 ++ * Concurrent writes into the "delete" sysfs attribute may trigger
2847 ++ * concurrent calls to device_remove_file() and scsi_remove_device().
2848 ++ * device_remove_file() handles concurrent removal calls by
2849 ++ * serializing these and by ignoring the second and later removal
2850 ++ * attempts. Concurrent calls of scsi_remove_device() are
2851 ++ * serialized. The second and later calls of scsi_remove_device() are
2852 ++ * ignored because the first call of that function changes the device
2853 ++ * state into SDEV_DEL.
2854 ++ */
2855 ++ device_remove_file(dev, attr);
2856 ++ scsi_remove_device(to_scsi_device(dev));
2857 ++ if (kn)
2858 ++ sysfs_unbreak_active_protection(kn);
2859 + return count;
2860 + };
2861 + static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
2862 +diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
2863 +index 777e5f1e52d1..0cd947f78b5b 100644
2864 +--- a/drivers/scsi/vmw_pvscsi.c
2865 ++++ b/drivers/scsi/vmw_pvscsi.c
2866 +@@ -561,9 +561,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
2867 + (btstat == BTSTAT_SUCCESS ||
2868 + btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
2869 + btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
2870 +- cmd->result = (DID_OK << 16) | sdstat;
2871 +- if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
2872 +- cmd->result |= (DRIVER_SENSE << 24);
2873 ++ if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
2874 ++ cmd->result = (DID_RESET << 16);
2875 ++ } else {
2876 ++ cmd->result = (DID_OK << 16) | sdstat;
2877 ++ if (sdstat == SAM_STAT_CHECK_CONDITION &&
2878 ++ cmd->sense_buffer)
2879 ++ cmd->result |= (DRIVER_SENSE << 24);
2880 ++ }
2881 + } else
2882 + switch (btstat) {
2883 + case BTSTAT_SUCCESS:
2884 +diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
2885 +index 9e2f0421a01e..0bf6643cca07 100644
2886 +--- a/drivers/staging/media/omap4iss/iss_video.c
2887 ++++ b/drivers/staging/media/omap4iss/iss_video.c
2888 +@@ -11,7 +11,6 @@
2889 + * (at your option) any later version.
2890 + */
2891 +
2892 +-#include <asm/cacheflush.h>
2893 + #include <linux/clk.h>
2894 + #include <linux/mm.h>
2895 + #include <linux/pagemap.h>
2896 +@@ -24,6 +23,8 @@
2897 + #include <media/v4l2-ioctl.h>
2898 + #include <media/v4l2-mc.h>
2899 +
2900 ++#include <asm/cacheflush.h>
2901 ++
2902 + #include "iss_video.h"
2903 + #include "iss.h"
2904 +
2905 +diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
2906 +index 514986b57c2d..25eb3891e34b 100644
2907 +--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
2908 ++++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
2909 +@@ -652,6 +652,7 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
2910 + struct iscsi_param *param;
2911 + u32 mrdsl, mbl;
2912 + u32 max_npdu, max_iso_npdu;
2913 ++ u32 max_iso_payload;
2914 +
2915 + if (conn->login->leading_connection) {
2916 + param = iscsi_find_param_from_key(MAXBURSTLENGTH,
2917 +@@ -670,8 +671,10 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
2918 + mrdsl = conn_ops->MaxRecvDataSegmentLength;
2919 + max_npdu = mbl / mrdsl;
2920 +
2921 +- max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD /
2922 +- (ISCSI_HDR_LEN + mrdsl +
2923 ++ max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
2924 ++
2925 ++ max_iso_npdu = max_iso_payload /
2926 ++ (ISCSI_HDR_LEN + mrdsl +
2927 + cxgbit_digest_len[csk->submode]);
2928 +
2929 + csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
2930 +@@ -741,6 +744,9 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
2931 + if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
2932 + conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
2933 +
2934 ++ if (cxgbit_set_digest(csk))
2935 ++ return -1;
2936 ++
2937 + if (conn->login->leading_connection) {
2938 + param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
2939 + conn->param_list);
2940 +@@ -764,7 +770,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
2941 + if (is_t5(cdev->lldi.adapter_type))
2942 + goto enable_ddp;
2943 + else
2944 +- goto enable_digest;
2945 ++ return 0;
2946 + }
2947 +
2948 + if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
2949 +@@ -781,10 +787,6 @@ enable_ddp:
2950 + }
2951 + }
2952 +
2953 +-enable_digest:
2954 +- if (cxgbit_set_digest(csk))
2955 +- return -1;
2956 +-
2957 + return 0;
2958 + }
2959 +
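Rounding the ISO payload budget down to a multiple of the connection's emss before dividing means a burst of PDUs never ends in a partial TCP segment; the hunk also moves cxgbit_set_digest() earlier so the digest length used in the divisor is already known. A standalone illustration of the arithmetic (the numbers are made up; only the rounddown-then-divide shape matches the driver):

#include <stdio.h>

#define rounddown(x, y)  (((x) / (y)) * (y))

int main(void)
{
    unsigned int payload_cap = 65535;   /* CXGBIT_MAX_ISO_PAYLOAD-like cap */
    unsigned int emss = 1448;           /* effective mss */
    unsigned int hdr = 48, mrdsl = 8192, digest = 8;

    unsigned int max_iso_payload = rounddown(payload_cap, emss);
    unsigned int max_iso_npdu = max_iso_payload / (hdr + mrdsl + digest);

    printf("payload %u -> %u, npdu %u\n",
           payload_cap, max_iso_payload, max_iso_npdu);
    return 0;
}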
2960 +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
2961 +index dc13afbd4c88..98e27da34f3c 100644
2962 +--- a/drivers/target/iscsi/iscsi_target_login.c
2963 ++++ b/drivers/target/iscsi/iscsi_target_login.c
2964 +@@ -345,8 +345,7 @@ static int iscsi_login_zero_tsih_s1(
2965 + pr_err("idr_alloc() for sess_idr failed\n");
2966 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
2967 + ISCSI_LOGIN_STATUS_NO_RESOURCES);
2968 +- kfree(sess);
2969 +- return -ENOMEM;
2970 ++ goto free_sess;
2971 + }
2972 +
2973 + sess->creation_time = get_jiffies_64();
2974 +@@ -362,20 +361,28 @@ static int iscsi_login_zero_tsih_s1(
2975 + ISCSI_LOGIN_STATUS_NO_RESOURCES);
2976 + pr_err("Unable to allocate memory for"
2977 + " struct iscsi_sess_ops.\n");
2978 +- kfree(sess);
2979 +- return -ENOMEM;
2980 ++ goto remove_idr;
2981 + }
2982 +
2983 + sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
2984 + if (IS_ERR(sess->se_sess)) {
2985 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
2986 + ISCSI_LOGIN_STATUS_NO_RESOURCES);
2987 +- kfree(sess->sess_ops);
2988 +- kfree(sess);
2989 +- return -ENOMEM;
2990 ++ goto free_ops;
2991 + }
2992 +
2993 + return 0;
2994 ++
2995 ++free_ops:
2996 ++ kfree(sess->sess_ops);
2997 ++remove_idr:
2998 ++ spin_lock_bh(&sess_idr_lock);
2999 ++ idr_remove(&sess_idr, sess->session_index);
3000 ++ spin_unlock_bh(&sess_idr_lock);
3001 ++free_sess:
3002 ++ kfree(sess);
3003 ++ conn->sess = NULL;
3004 ++ return -ENOMEM;
3005 + }
3006 +
3007 + static int iscsi_login_zero_tsih_s2(
3008 +@@ -1162,13 +1169,13 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
3009 + ISCSI_LOGIN_STATUS_INIT_ERR);
3010 + if (!zero_tsih || !conn->sess)
3011 + goto old_sess_out;
3012 +- if (conn->sess->se_sess)
3013 +- transport_free_session(conn->sess->se_sess);
3014 +- if (conn->sess->session_index != 0) {
3015 +- spin_lock_bh(&sess_idr_lock);
3016 +- idr_remove(&sess_idr, conn->sess->session_index);
3017 +- spin_unlock_bh(&sess_idr_lock);
3018 +- }
3019 ++
3020 ++ transport_free_session(conn->sess->se_sess);
3021 ++
3022 ++ spin_lock_bh(&sess_idr_lock);
3023 ++ idr_remove(&sess_idr, conn->sess->session_index);
3024 ++ spin_unlock_bh(&sess_idr_lock);
3025 ++
3026 + kfree(conn->sess->sess_ops);
3027 + kfree(conn->sess);
3028 + conn->sess = NULL;
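The first login hunk collapses three duplicated kfree()-and-return exits into a single unwind ladder that also releases the idr slot allocated earlier, and the second hunk drops the now-unneeded conditionals from the failed-login teardown. A generic sketch of the ladder idiom with stand-in resources (nothing here is the target stack's API):

#include <stdio.h>
#include <stdlib.h>

struct sess { void *ops; int idr_slot; };

static int next_slot;
static int alloc_idr_slot(void) { return next_slot++; }
static void free_idr_slot(int s) { printf("freed slot %d\n", s); }

static int setup_session(struct sess **out, int fail_at)
{
    struct sess *s = calloc(1, sizeof(*s));

    if (!s)
        return -1;

    s->idr_slot = alloc_idr_slot();
    if (fail_at == 1)
        goto remove_idr;

    s->ops = malloc(64);
    if (!s->ops)
        goto remove_idr;
    if (fail_at == 2)
        goto free_ops;

    *out = s;
    return 0;

free_ops:                       /* each label undoes one earlier step */
    free(s->ops);
remove_idr:
    free_idr_slot(s->idr_slot);
    free(s);
    *out = NULL;                /* mirrors conn->sess = NULL above */
    return -1;
}

int main(void)
{
    struct sess *s;
    setup_session(&s, 2);
    return 0;
}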
3029 +diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
3030 +index 97cb2dfd6369..d063f0401f84 100644
3031 +--- a/drivers/usb/gadget/function/f_uac2.c
3032 ++++ b/drivers/usb/gadget/function/f_uac2.c
3033 +@@ -442,14 +442,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
3034 + };
3035 +
3036 + struct cntrl_cur_lay3 {
3037 +- __u32 dCUR;
3038 ++ __le32 dCUR;
3039 + };
3040 +
3041 + struct cntrl_range_lay3 {
3042 +- __u16 wNumSubRanges;
3043 +- __u32 dMIN;
3044 +- __u32 dMAX;
3045 +- __u32 dRES;
3046 ++ __le16 wNumSubRanges;
3047 ++ __le32 dMIN;
3048 ++ __le32 dMAX;
3049 ++ __le32 dRES;
3050 + } __packed;
3051 +
3052 + static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
3053 +@@ -563,13 +563,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
3054 + agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
3055 + if (!agdev->out_ep) {
3056 + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
3057 +- return ret;
3058 ++ return -ENODEV;
3059 + }
3060 +
3061 + agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
3062 + if (!agdev->in_ep) {
3063 + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
3064 +- return ret;
3065 ++ return -ENODEV;
3066 + }
3067 +
3068 + agdev->in_ep_maxpsize = max_t(u16,
3069 +@@ -707,9 +707,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
3070 + memset(&c, 0, sizeof(struct cntrl_cur_lay3));
3071 +
3072 + if (entity_id == USB_IN_CLK_ID)
3073 +- c.dCUR = p_srate;
3074 ++ c.dCUR = cpu_to_le32(p_srate);
3075 + else if (entity_id == USB_OUT_CLK_ID)
3076 +- c.dCUR = c_srate;
3077 ++ c.dCUR = cpu_to_le32(c_srate);
3078 +
3079 + value = min_t(unsigned, w_length, sizeof c);
3080 + memcpy(req->buf, &c, value);
3081 +@@ -746,15 +746,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
3082 +
3083 + if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
3084 + if (entity_id == USB_IN_CLK_ID)
3085 +- r.dMIN = p_srate;
3086 ++ r.dMIN = cpu_to_le32(p_srate);
3087 + else if (entity_id == USB_OUT_CLK_ID)
3088 +- r.dMIN = c_srate;
3089 ++ r.dMIN = cpu_to_le32(c_srate);
3090 + else
3091 + return -EOPNOTSUPP;
3092 +
3093 + r.dMAX = r.dMIN;
3094 + r.dRES = 0;
3095 +- r.wNumSubRanges = 1;
3096 ++ r.wNumSubRanges = cpu_to_le16(1);
3097 +
3098 + value = min_t(unsigned, w_length, sizeof r);
3099 + memcpy(req->buf, &r, value);
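Marking the control-payload fields __le16/__le32 and converting with cpu_to_le16/cpu_to_le32 makes the wire format explicit: USB control transfers are little-endian, so big-endian hosts must byte-swap (little-endian hosts compile the conversion away). A portable userspace equivalent of the conversion, assuming a hypothetical 48 kHz sample rate:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cntrl_cur_lay3 { uint32_t dCUR; };

static uint32_t cpu_to_le32(uint32_t v)
{
    uint8_t b[4] = { v, v >> 8, v >> 16, v >> 24 };
    uint32_t out;

    memcpy(&out, b, 4);    /* byte order now fixed regardless of host */
    return out;
}

int main(void)
{
    struct cntrl_cur_lay3 c = { .dCUR = cpu_to_le32(48000) };
    const uint8_t *p = (const uint8_t *)&c;

    /* 48000 == 0xBB80, so the wire bytes are always 80 bb 00 00 */
    printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
    return 0;
}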
3100 +diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
3101 +index 3971bbab88bd..d3a639297e06 100644
3102 +--- a/drivers/usb/gadget/function/u_audio.c
3103 ++++ b/drivers/usb/gadget/function/u_audio.c
3104 +@@ -41,9 +41,6 @@ struct uac_req {
3105 + struct uac_rtd_params {
3106 + struct snd_uac_chip *uac; /* parent chip */
3107 + bool ep_enabled; /* if the ep is enabled */
3108 +- /* Size of the ring buffer */
3109 +- size_t dma_bytes;
3110 +- unsigned char *dma_area;
3111 +
3112 + struct snd_pcm_substream *ss;
3113 +
3114 +@@ -52,8 +49,6 @@ struct uac_rtd_params {
3115 +
3116 + void *rbuf;
3117 +
3118 +- size_t period_size;
3119 +-
3120 + unsigned max_psize; /* MaxPacketSize of endpoint */
3121 + struct uac_req *ureq;
3122 +
3123 +@@ -93,12 +88,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
3124 + static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
3125 + {
3126 + unsigned pending;
3127 +- unsigned long flags;
3128 ++ unsigned long flags, flags2;
3129 + unsigned int hw_ptr;
3130 +- bool update_alsa = false;
3131 + int status = req->status;
3132 + struct uac_req *ur = req->context;
3133 + struct snd_pcm_substream *substream;
3134 ++ struct snd_pcm_runtime *runtime;
3135 + struct uac_rtd_params *prm = ur->pp;
3136 + struct snd_uac_chip *uac = prm->uac;
3137 +
3138 +@@ -120,6 +115,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
3139 + if (!substream)
3140 + goto exit;
3141 +
3142 ++ snd_pcm_stream_lock_irqsave(substream, flags2);
3143 ++
3144 ++ runtime = substream->runtime;
3145 ++ if (!runtime || !snd_pcm_running(substream)) {
3146 ++ snd_pcm_stream_unlock_irqrestore(substream, flags2);
3147 ++ goto exit;
3148 ++ }
3149 ++
3150 + spin_lock_irqsave(&prm->lock, flags);
3151 +
3152 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3153 +@@ -146,43 +149,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
3154 + req->actual = req->length;
3155 + }
3156 +
3157 +- pending = prm->hw_ptr % prm->period_size;
3158 +- pending += req->actual;
3159 +- if (pending >= prm->period_size)
3160 +- update_alsa = true;
3161 +-
3162 + hw_ptr = prm->hw_ptr;
3163 +- prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
3164 +
3165 + spin_unlock_irqrestore(&prm->lock, flags);
3166 +
3167 + /* Pack USB load in ALSA ring buffer */
3168 +- pending = prm->dma_bytes - hw_ptr;
3169 ++ pending = runtime->dma_bytes - hw_ptr;
3170 +
3171 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3172 + if (unlikely(pending < req->actual)) {
3173 +- memcpy(req->buf, prm->dma_area + hw_ptr, pending);
3174 +- memcpy(req->buf + pending, prm->dma_area,
3175 ++ memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
3176 ++ memcpy(req->buf + pending, runtime->dma_area,
3177 + req->actual - pending);
3178 + } else {
3179 +- memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
3180 ++ memcpy(req->buf, runtime->dma_area + hw_ptr,
3181 ++ req->actual);
3182 + }
3183 + } else {
3184 + if (unlikely(pending < req->actual)) {
3185 +- memcpy(prm->dma_area + hw_ptr, req->buf, pending);
3186 +- memcpy(prm->dma_area, req->buf + pending,
3187 ++ memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
3188 ++ memcpy(runtime->dma_area, req->buf + pending,
3189 + req->actual - pending);
3190 + } else {
3191 +- memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
3192 ++ memcpy(runtime->dma_area + hw_ptr, req->buf,
3193 ++ req->actual);
3194 + }
3195 + }
3196 +
3197 ++ spin_lock_irqsave(&prm->lock, flags);
3198 ++ /* update hw_ptr after data is copied to memory */
3199 ++ prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
3200 ++ hw_ptr = prm->hw_ptr;
3201 ++ spin_unlock_irqrestore(&prm->lock, flags);
3202 ++ snd_pcm_stream_unlock_irqrestore(substream, flags2);
3203 ++
3204 ++ if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
3205 ++ snd_pcm_period_elapsed(substream);
3206 ++
3207 + exit:
3208 + if (usb_ep_queue(ep, req, GFP_ATOMIC))
3209 + dev_err(uac->card->dev, "%d Error!\n", __LINE__);
3210 +-
3211 +- if (update_alsa)
3212 +- snd_pcm_period_elapsed(substream);
3213 + }
3214 +
3215 + static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
3216 +@@ -245,40 +251,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
3217 + static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
3218 + struct snd_pcm_hw_params *hw_params)
3219 + {
3220 +- struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
3221 +- struct uac_rtd_params *prm;
3222 +- int err;
3223 +-
3224 +- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3225 +- prm = &uac->p_prm;
3226 +- else
3227 +- prm = &uac->c_prm;
3228 +-
3229 +- err = snd_pcm_lib_malloc_pages(substream,
3230 ++ return snd_pcm_lib_malloc_pages(substream,
3231 + params_buffer_bytes(hw_params));
3232 +- if (err >= 0) {
3233 +- prm->dma_bytes = substream->runtime->dma_bytes;
3234 +- prm->dma_area = substream->runtime->dma_area;
3235 +- prm->period_size = params_period_bytes(hw_params);
3236 +- }
3237 +-
3238 +- return err;
3239 + }
3240 +
3241 + static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
3242 + {
3243 +- struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
3244 +- struct uac_rtd_params *prm;
3245 +-
3246 +- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3247 +- prm = &uac->p_prm;
3248 +- else
3249 +- prm = &uac->c_prm;
3250 +-
3251 +- prm->dma_area = NULL;
3252 +- prm->dma_bytes = 0;
3253 +- prm->period_size = 0;
3254 +-
3255 + return snd_pcm_lib_free_pages(substream);
3256 + }
3257 +
3258 +@@ -604,15 +582,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
3259 + if (err < 0)
3260 + goto snd_fail;
3261 +
3262 +- strcpy(pcm->name, pcm_name);
3263 ++ strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
3264 + pcm->private_data = uac;
3265 + uac->pcm = pcm;
3266 +
3267 + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
3268 + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
3269 +
3270 +- strcpy(card->driver, card_name);
3271 +- strcpy(card->shortname, card_name);
3272 ++ strlcpy(card->driver, card_name, sizeof(card->driver));
3273 ++ strlcpy(card->shortname, card_name, sizeof(card->shortname));
3274 + sprintf(card->longname, "%s %i", card_name, card->dev->id);
3275 +
3276 + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
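The u_audio rework drops the driver's shadow copies of the ALSA buffer geometry and copies straight into the runtime's DMA area under the stream lock, advancing hw_ptr only after the data has landed and signalling snd_pcm_period_elapsed() when a period boundary was crossed (the strlcpy changes are independent bounds hardening). The core of the copy is a circular-buffer memcpy that splits at the wrap point; a toy standalone version:

#include <stdio.h>
#include <string.h>

static void copy_to_ring(char *ring, size_t ring_sz, size_t hw_ptr,
                         const char *src, size_t len)
{
    size_t room = ring_sz - hw_ptr;    /* bytes until the wrap point */

    if (len > room) {
        memcpy(ring + hw_ptr, src, room);          /* tail of the ring */
        memcpy(ring, src + room, len - room);      /* wrap to the head */
    } else {
        memcpy(ring + hw_ptr, src, len);
    }
}

int main(void)
{
    char ring[8] = "--------";

    copy_to_ring(ring, sizeof(ring), 6, "ABCD", 4);
    printf("%.8s\n", ring);    /* prints CD----AB */
    return 0;
}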
3277 +diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
3278 +index 118ad70f1af0..84b227ede082 100644
3279 +--- a/drivers/usb/gadget/udc/r8a66597-udc.c
3280 ++++ b/drivers/usb/gadget/udc/r8a66597-udc.c
3281 +@@ -835,11 +835,11 @@ static void init_controller(struct r8a66597 *r8a66597)
3282 +
3283 + r8a66597_bset(r8a66597, XCKE, SYSCFG0);
3284 +
3285 +- msleep(3);
3286 ++ mdelay(3);
3287 +
3288 + r8a66597_bset(r8a66597, PLLC, SYSCFG0);
3289 +
3290 +- msleep(1);
3291 ++ mdelay(1);
3292 +
3293 + r8a66597_bset(r8a66597, SCKE, SYSCFG0);
3294 +
3295 +@@ -1193,7 +1193,7 @@ __acquires(r8a66597->lock)
3296 + r8a66597->ep0_req->length = 2;
3297 + /* AV: what happens if we get called again before that gets through? */
3298 + spin_unlock(&r8a66597->lock);
3299 +- r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
3300 ++ r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
3301 + spin_lock(&r8a66597->lock);
3302 + }
3303 +
3304 +diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
3305 +index cf8f40ae6e01..9b4354a00ca7 100644
3306 +--- a/drivers/usb/phy/phy-fsl-usb.c
3307 ++++ b/drivers/usb/phy/phy-fsl-usb.c
3308 +@@ -874,6 +874,7 @@ int usb_otg_start(struct platform_device *pdev)
3309 + if (pdata->init && pdata->init(pdev) != 0)
3310 + return -EINVAL;
3311 +
3312 ++#ifdef CONFIG_PPC32
3313 + if (pdata->big_endian_mmio) {
3314 + _fsl_readl = _fsl_readl_be;
3315 + _fsl_writel = _fsl_writel_be;
3316 +@@ -881,6 +882,7 @@ int usb_otg_start(struct platform_device *pdev)
3317 + _fsl_readl = _fsl_readl_le;
3318 + _fsl_writel = _fsl_writel_le;
3319 + }
3320 ++#endif
3321 +
3322 + /* request irq */
3323 + p_otg->irq = platform_get_irq(pdev, 0);
3324 +@@ -971,7 +973,7 @@ int usb_otg_start(struct platform_device *pdev)
3325 + /*
3326 + * state file in sysfs
3327 + */
3328 +-static int show_fsl_usb2_otg_state(struct device *dev,
3329 ++static ssize_t show_fsl_usb2_otg_state(struct device *dev,
3330 + struct device_attribute *attr, char *buf)
3331 + {
3332 + struct otg_fsm *fsm = &fsl_otg_dev->fsm;
3333 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
3334 +index b475d1ebbbbf..5cf1bbe9754c 100644
3335 +--- a/fs/btrfs/disk-io.c
3336 ++++ b/fs/btrfs/disk-io.c
3337 +@@ -1098,8 +1098,9 @@ static int btree_writepages(struct address_space *mapping,
3338 +
3339 + fs_info = BTRFS_I(mapping->host)->root->fs_info;
3340 + /* this is a bit racy, but that's ok */
3341 +- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
3342 +- BTRFS_DIRTY_METADATA_THRESH);
3343 ++ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
3344 ++ BTRFS_DIRTY_METADATA_THRESH,
3345 ++ fs_info->dirty_metadata_batch);
3346 + if (ret < 0)
3347 + return 0;
3348 + }
3349 +@@ -4030,8 +4031,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
3350 + if (flush_delayed)
3351 + btrfs_balance_delayed_items(fs_info);
3352 +
3353 +- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
3354 +- BTRFS_DIRTY_METADATA_THRESH);
3355 ++ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
3356 ++ BTRFS_DIRTY_METADATA_THRESH,
3357 ++ fs_info->dirty_metadata_batch);
3358 + if (ret > 0) {
3359 + balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
3360 + }
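Switching to __percpu_counter_compare() with fs_info->dirty_metadata_batch matters because each CPU batches updates locally: near the threshold, the cheap global value can be wrong by up to batch * nr_cpus, and only a full fold-up gives an exact answer. A toy model of why the batch-aware compare is needed (simplified; the real helper sums under a lock):

#include <stdio.h>

#define NR_CPUS 4

struct pcpu_counter { long global; long local[NR_CPUS]; };

static void pcpu_add(struct pcpu_counter *c, int cpu, long v, long batch)
{
    c->local[cpu] += v;
    if (c->local[cpu] >= batch || c->local[cpu] <= -batch) {
        c->global += c->local[cpu];    /* fold into the shared sum */
        c->local[cpu] = 0;
    }
}

static long pcpu_sum(const struct pcpu_counter *c)
{
    long sum = c->global;
    for (int i = 0; i < NR_CPUS; i++)
        sum += c->local[i];
    return sum;
}

static int pcpu_compare(const struct pcpu_counter *c, long rhs, long batch)
{
    /* fast path: only trustworthy outside the slack window */
    if (c->global - batch * NR_CPUS > rhs)
        return 1;
    if (c->global + batch * NR_CPUS < rhs)
        return -1;
    /* inside the window: do the exact (slow) sum */
    return (pcpu_sum(c) > rhs) - (pcpu_sum(c) < rhs);
}

int main(void)
{
    struct pcpu_counter c = { 0 };

    pcpu_add(&c, 0, 30, 32);    /* stays local: below the batch */
    pcpu_add(&c, 1, 30, 32);    /* global still reads 0 */
    printf("cmp vs 50: %d\n", pcpu_compare(&c, 50, 32));    /* 1 */
    return 0;
}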
3361 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3362 +index 53487102081d..bbabe37c2e8c 100644
3363 +--- a/fs/btrfs/extent-tree.c
3364 ++++ b/fs/btrfs/extent-tree.c
3365 +@@ -4407,7 +4407,7 @@ commit_trans:
3366 + data_sinfo->flags, bytes, 1);
3367 + spin_unlock(&data_sinfo->lock);
3368 +
3369 +- return ret;
3370 ++ return 0;
3371 + }
3372 +
3373 + int btrfs_check_data_free_space(struct inode *inode,
3374 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3375 +index 28a58f40f3a4..e8bfafa25a71 100644
3376 +--- a/fs/btrfs/inode.c
3377 ++++ b/fs/btrfs/inode.c
3378 +@@ -6152,32 +6152,6 @@ err:
3379 + return ret;
3380 + }
3381 +
3382 +-int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
3383 +-{
3384 +- struct btrfs_root *root = BTRFS_I(inode)->root;
3385 +- struct btrfs_trans_handle *trans;
3386 +- int ret = 0;
3387 +- bool nolock = false;
3388 +-
3389 +- if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
3390 +- return 0;
3391 +-
3392 +- if (btrfs_fs_closing(root->fs_info) &&
3393 +- btrfs_is_free_space_inode(BTRFS_I(inode)))
3394 +- nolock = true;
3395 +-
3396 +- if (wbc->sync_mode == WB_SYNC_ALL) {
3397 +- if (nolock)
3398 +- trans = btrfs_join_transaction_nolock(root);
3399 +- else
3400 +- trans = btrfs_join_transaction(root);
3401 +- if (IS_ERR(trans))
3402 +- return PTR_ERR(trans);
3403 +- ret = btrfs_commit_transaction(trans);
3404 +- }
3405 +- return ret;
3406 +-}
3407 +-
3408 + /*
3409 + * This is somewhat expensive, updating the tree every time the
3410 + * inode changes. But, it is most likely to find the inode in cache.
3411 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
3412 +index 8e3ce81d3f44..fe960d5e8913 100644
3413 +--- a/fs/btrfs/super.c
3414 ++++ b/fs/btrfs/super.c
3415 +@@ -2271,7 +2271,6 @@ static const struct super_operations btrfs_super_ops = {
3416 + .sync_fs = btrfs_sync_fs,
3417 + .show_options = btrfs_show_options,
3418 + .show_devname = btrfs_show_devname,
3419 +- .write_inode = btrfs_write_inode,
3420 + .alloc_inode = btrfs_alloc_inode,
3421 + .destroy_inode = btrfs_destroy_inode,
3422 + .statfs = btrfs_statfs,
3423 +diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
3424 +index 3978b324cbca..5f2f67d220fa 100644
3425 +--- a/fs/cachefiles/namei.c
3426 ++++ b/fs/cachefiles/namei.c
3427 +@@ -195,7 +195,6 @@ wait_for_old_object:
3428 + pr_err("\n");
3429 + pr_err("Error: Unexpected object collision\n");
3430 + cachefiles_printk_object(object, xobject);
3431 +- BUG();
3432 + }
3433 + atomic_inc(&xobject->usage);
3434 + write_unlock(&cache->active_lock);
3435 +diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
3436 +index 18d7aa61ef0f..199eb396a1bb 100644
3437 +--- a/fs/cachefiles/rdwr.c
3438 ++++ b/fs/cachefiles/rdwr.c
3439 +@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
3440 + struct cachefiles_one_read *monitor =
3441 + container_of(wait, struct cachefiles_one_read, monitor);
3442 + struct cachefiles_object *object;
3443 ++ struct fscache_retrieval *op = monitor->op;
3444 + struct wait_bit_key *key = _key;
3445 + struct page *page = wait->private;
3446 +
3447 +@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
3448 + list_del(&wait->entry);
3449 +
3450 + /* move onto the action list and queue for FS-Cache thread pool */
3451 +- ASSERT(monitor->op);
3452 ++ ASSERT(op);
3453 +
3454 +- object = container_of(monitor->op->op.object,
3455 +- struct cachefiles_object, fscache);
3456 ++ /* We need to temporarily bump the usage count as we don't own a ref
3457 ++ * here otherwise cachefiles_read_copier() may free the op between the
3458 ++ * monitor being enqueued on the op->to_do list and the op getting
3459 ++ * enqueued on the work queue.
3460 ++ */
3461 ++ fscache_get_retrieval(op);
3462 +
3463 ++ object = container_of(op->op.object, struct cachefiles_object, fscache);
3464 + spin_lock(&object->work_lock);
3465 +- list_add_tail(&monitor->op_link, &monitor->op->to_do);
3466 ++ list_add_tail(&monitor->op_link, &op->to_do);
3467 + spin_unlock(&object->work_lock);
3468 +
3469 +- fscache_enqueue_retrieval(monitor->op);
3470 ++ fscache_enqueue_retrieval(op);
3471 ++ fscache_put_retrieval(op);
3472 + return 0;
3473 + }
3474 +
3475 +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
3476 +index cbb9534b89b4..53c9c49f0fbb 100644
3477 +--- a/fs/cifs/cifs_debug.c
3478 ++++ b/fs/cifs/cifs_debug.c
3479 +@@ -123,25 +123,41 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
3480 + seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
3481 + seq_printf(m, "Features:");
3482 + #ifdef CONFIG_CIFS_DFS_UPCALL
3483 +- seq_printf(m, " dfs");
3484 ++ seq_printf(m, " DFS");
3485 + #endif
3486 + #ifdef CONFIG_CIFS_FSCACHE
3487 +- seq_printf(m, " fscache");
3488 ++ seq_printf(m, ",FSCACHE");
3489 ++#endif
3490 ++#ifdef CONFIG_CIFS_SMB_DIRECT
3491 ++ seq_printf(m, ",SMB_DIRECT");
3492 ++#endif
3493 ++#ifdef CONFIG_CIFS_STATS2
3494 ++ seq_printf(m, ",STATS2");
3495 ++#elif defined(CONFIG_CIFS_STATS)
3496 ++ seq_printf(m, ",STATS");
3497 ++#endif
3498 ++#ifdef CONFIG_CIFS_DEBUG2
3499 ++ seq_printf(m, ",DEBUG2");
3500 ++#elif defined(CONFIG_CIFS_DEBUG)
3501 ++ seq_printf(m, ",DEBUG");
3502 ++#endif
3503 ++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
3504 ++ seq_printf(m, ",ALLOW_INSECURE_LEGACY");
3505 + #endif
3506 + #ifdef CONFIG_CIFS_WEAK_PW_HASH
3507 +- seq_printf(m, " lanman");
3508 ++ seq_printf(m, ",WEAK_PW_HASH");
3509 + #endif
3510 + #ifdef CONFIG_CIFS_POSIX
3511 +- seq_printf(m, " posix");
3512 ++ seq_printf(m, ",CIFS_POSIX");
3513 + #endif
3514 + #ifdef CONFIG_CIFS_UPCALL
3515 +- seq_printf(m, " spnego");
3516 ++ seq_printf(m, ",UPCALL(SPNEGO)");
3517 + #endif
3518 + #ifdef CONFIG_CIFS_XATTR
3519 +- seq_printf(m, " xattr");
3520 ++ seq_printf(m, ",XATTR");
3521 + #endif
3522 + #ifdef CONFIG_CIFS_ACL
3523 +- seq_printf(m, " acl");
3524 ++ seq_printf(m, ",ACL");
3525 + #endif
3526 + seq_putc(m, '\n');
3527 + seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
3528 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
3529 +index 490c5fc9e69c..44a7b2dea688 100644
3530 +--- a/fs/cifs/cifsfs.c
3531 ++++ b/fs/cifs/cifsfs.c
3532 +@@ -197,14 +197,16 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
3533 +
3534 + xid = get_xid();
3535 +
3536 +- /*
3537 +- * PATH_MAX may be too long - it would presumably be total path,
3538 +- * but note that some servers (includinng Samba 3) have a shorter
3539 +- * maximum path.
3540 +- *
3541 +- * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
3542 +- */
3543 +- buf->f_namelen = PATH_MAX;
3544 ++ if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
3545 ++ buf->f_namelen =
3546 ++ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
3547 ++ else
3548 ++ buf->f_namelen = PATH_MAX;
3549 ++
3550 ++ buf->f_fsid.val[0] = tcon->vol_serial_number;
3551 ++ /* are using part of create time for more randomness, see man statfs */
3552 ++ buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
3553 ++
3554 + buf->f_files = 0; /* undefined */
3555 + buf->f_ffree = 0; /* unlimited */
3556 +
3557 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
3558 +index 0c7b7e2a0919..caf9cf91b825 100644
3559 +--- a/fs/cifs/inode.c
3560 ++++ b/fs/cifs/inode.c
3561 +@@ -1122,6 +1122,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
3562 + if (!server->ops->set_file_info)
3563 + return -ENOSYS;
3564 +
3565 ++ info_buf.Pad = 0;
3566 ++
3567 + if (attrs->ia_valid & ATTR_ATIME) {
3568 + set_time = true;
3569 + info_buf.LastAccessTime =
3570 +diff --git a/fs/cifs/link.c b/fs/cifs/link.c
3571 +index 889a840172eb..9451a7f6893d 100644
3572 +--- a/fs/cifs/link.c
3573 ++++ b/fs/cifs/link.c
3574 +@@ -396,7 +396,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
3575 + struct cifs_io_parms io_parms;
3576 + int buf_type = CIFS_NO_BUFFER;
3577 + __le16 *utf16_path;
3578 +- __u8 oplock = SMB2_OPLOCK_LEVEL_II;
3579 ++ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3580 + struct smb2_file_all_info *pfile_info = NULL;
3581 +
3582 + oparms.tcon = tcon;
3583 +@@ -458,7 +458,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
3584 + struct cifs_io_parms io_parms;
3585 + int create_options = CREATE_NOT_DIR;
3586 + __le16 *utf16_path;
3587 +- __u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
3588 ++ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3589 + struct kvec iov[2];
3590 +
3591 + if (backup_cred(cifs_sb))
3592 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
3593 +index 8b0502cd39af..aa23c00367ec 100644
3594 +--- a/fs/cifs/sess.c
3595 ++++ b/fs/cifs/sess.c
3596 +@@ -398,6 +398,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
3597 + goto setup_ntlmv2_ret;
3598 + }
3599 + *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
3600 ++ if (!*pbuffer) {
3601 ++ rc = -ENOMEM;
3602 ++ cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
3603 ++ *buflen = 0;
3604 ++ goto setup_ntlmv2_ret;
3605 ++ }
3606 + sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
3607 +
3608 + memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
3609 +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
3610 +index 1238cd3552f9..0267d8cbc996 100644
3611 +--- a/fs/cifs/smb2inode.c
3612 ++++ b/fs/cifs/smb2inode.c
3613 +@@ -267,7 +267,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
3614 + int rc;
3615 +
3616 + if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
3617 +- (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
3618 ++ (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
3619 + (buf->Attributes == 0))
3620 + return 0; /* would be a no op, no sense sending this */
3621 +
3622 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3623 +index 83267ac3a3f0..e9f246fe9d80 100644
3624 +--- a/fs/cifs/smb2ops.c
3625 ++++ b/fs/cifs/smb2ops.c
3626 +@@ -332,6 +332,8 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
3627 + FS_ATTRIBUTE_INFORMATION);
3628 + SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
3629 + FS_DEVICE_INFORMATION);
3630 ++ SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
3631 ++ FS_VOLUME_INFORMATION);
3632 + SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
3633 + FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
3634 + SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3635 +@@ -1129,6 +1131,13 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
3636 +
3637 + }
3638 +
3639 ++/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
3640 ++#define GMT_TOKEN_SIZE 50
3641 ++
3642 ++/*
3643 ++ * Input buffer contains (empty) struct smb_snapshot array with size filled in
3644 ++ * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
3645 ++ */
3646 + static int
3647 + smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
3648 + struct cifsFileInfo *cfile, void __user *ioc_buf)
3649 +@@ -1158,14 +1167,27 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
3650 + kfree(retbuf);
3651 + return rc;
3652 + }
3653 +- if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
3654 +- rc = -ERANGE;
3655 +- kfree(retbuf);
3656 +- return rc;
3657 +- }
3658 +
3659 +- if (ret_data_len > snapshot_in.snapshot_array_size)
3660 +- ret_data_len = snapshot_in.snapshot_array_size;
3661 ++ /*
3662 ++ * Check for min size, ie not large enough to fit even one GMT
3663 ++ * token (snapshot). On the first ioctl some users may pass in
3664 ++ * smaller size (or zero) to simply get the size of the array
3665 ++ * so the user space caller can allocate sufficient memory
3666 ++ * and retry the ioctl again with larger array size sufficient
3667 ++ * to hold all of the snapshot GMT tokens on the second try.
3668 ++ */
3669 ++ if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
3670 ++ ret_data_len = sizeof(struct smb_snapshot_array);
3671 ++
3672 ++ /*
3673 ++ * We return struct SRV_SNAPSHOT_ARRAY, followed by
3674 ++ * the snapshot array (of 50 byte GMT tokens) each
3675 ++ * representing an available previous version of the data
3676 ++ */
3677 ++ if (ret_data_len > (snapshot_in.snapshot_array_size +
3678 ++ sizeof(struct smb_snapshot_array)))
3679 ++ ret_data_len = snapshot_in.snapshot_array_size +
3680 ++ sizeof(struct smb_snapshot_array);
3681 +
3682 + if (copy_to_user(ioc_buf, retbuf, ret_data_len))
3683 + rc = -EFAULT;
3684 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3685 +index 71b81980787f..e317e9a400c1 100644
3686 +--- a/fs/cifs/smb2pdu.c
3687 ++++ b/fs/cifs/smb2pdu.c
3688 +@@ -3455,6 +3455,9 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
3689 + } else if (level == FS_SECTOR_SIZE_INFORMATION) {
3690 + max_len = sizeof(struct smb3_fs_ss_info);
3691 + min_len = sizeof(struct smb3_fs_ss_info);
3692 ++ } else if (level == FS_VOLUME_INFORMATION) {
3693 ++ max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
3694 ++ min_len = sizeof(struct smb3_fs_vol_info);
3695 + } else {
3696 + cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
3697 + return -EINVAL;
3698 +@@ -3495,6 +3498,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
3699 + tcon->ss_flags = le32_to_cpu(ss_info->Flags);
3700 + tcon->perf_sector_size =
3701 + le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
3702 ++ } else if (level == FS_VOLUME_INFORMATION) {
3703 ++ struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
3704 ++ (offset + (char *)rsp);
3705 ++ tcon->vol_serial_number = vol_info->VolumeSerialNumber;
3706 ++ tcon->vol_create_time = vol_info->VolumeCreationTime;
3707 + }
3708 +
3709 + qfsattr_exit:
3710 +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
3711 +index c2ec934be968..e52454059725 100644
3712 +--- a/fs/cifs/smb2pdu.h
3713 ++++ b/fs/cifs/smb2pdu.h
3714 +@@ -1108,6 +1108,17 @@ struct smb3_fs_ss_info {
3715 + __le32 ByteOffsetForPartitionAlignment;
3716 + } __packed;
3717 +
3718 ++/* volume info struct - see MS-FSCC 2.5.9 */
3719 ++#define MAX_VOL_LABEL_LEN 32
3720 ++struct smb3_fs_vol_info {
3721 ++ __le64 VolumeCreationTime;
3722 ++ __u32 VolumeSerialNumber;
3723 ++ __le32 VolumeLabelLength; /* includes trailing null */
3724 ++ __u8 SupportsObjects; /* True if eg like NTFS, supports objects */
3725 ++ __u8 Reserved;
3726 ++ __u8 VolumeLabel[0]; /* variable len */
3727 ++} __packed;
3728 ++
3729 + /* partial list of QUERY INFO levels */
3730 + #define FILE_DIRECTORY_INFORMATION 1
3731 + #define FILE_FULL_DIRECTORY_INFORMATION 2
3732 +diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
3733 +index 27b9a76a0dfa..638ad4743477 100644
3734 +--- a/fs/ext4/mmp.c
3735 ++++ b/fs/ext4/mmp.c
3736 +@@ -186,11 +186,8 @@ static int kmmpd(void *data)
3737 + goto exit_thread;
3738 + }
3739 +
3740 +- if (sb_rdonly(sb)) {
3741 +- ext4_warning(sb, "kmmpd being stopped since filesystem "
3742 +- "has been remounted as readonly.");
3743 +- goto exit_thread;
3744 +- }
3745 ++ if (sb_rdonly(sb))
3746 ++ break;
3747 +
3748 + diff = jiffies - last_update_time;
3749 + if (diff < mmp_update_interval * HZ)
3750 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
3751 +index 6747861f9b70..1db39e12e02b 100644
3752 +--- a/fs/ext4/namei.c
3753 ++++ b/fs/ext4/namei.c
3754 +@@ -1397,6 +1397,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
3755 + goto cleanup_and_exit;
3756 + dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
3757 + "falling back\n"));
3758 ++ ret = NULL;
3759 + }
3760 + nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
3761 + if (!nblocks) {
3762 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3763 +index f30d2bf40471..b4fb085261fd 100644
3764 +--- a/fs/ext4/super.c
3765 ++++ b/fs/ext4/super.c
3766 +@@ -5163,6 +5163,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
3767 +
3768 + if (sbi->s_journal)
3769 + ext4_mark_recovery_complete(sb, es);
3770 ++ if (sbi->s_mmp_tsk)
3771 ++ kthread_stop(sbi->s_mmp_tsk);
3772 + } else {
3773 + /* Make sure we can mount this feature set readwrite */
3774 + if (ext4_has_feature_readonly(sb) ||
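Taken together, the two ext4 hunks change kmmpd's read-only handling from a warning-plus-error-exit into a clean loop break, with the remount path now stopping the thread explicitly via kthread_stop(). A pthread model of such a stoppable worker (usleep stands in for the jiffies-based MMP interval):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool should_stop, read_only;

static void *kmmpd(void *arg)
{
    (void)arg;
    while (!atomic_load(&should_stop)) {    /* kthread_should_stop() */
        if (atomic_load(&read_only))
            break;          /* clean exit, no warning needed */
        usleep(1000);       /* write MMP block, then sleep */
    }
    printf("mmp thread exiting\n");
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, kmmpd, NULL);
    atomic_store(&read_only, 1);    /* remount ro */
    pthread_join(t, NULL);          /* kthread_stop() analogue */
    return 0;
}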
3775 +diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
3776 +index e21afd52e7d7..bdfc2a2de8f2 100644
3777 +--- a/fs/ext4/sysfs.c
3778 ++++ b/fs/ext4/sysfs.c
3779 +@@ -278,8 +278,12 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
3780 + case attr_pointer_ui:
3781 + if (!ptr)
3782 + return 0;
3783 +- return snprintf(buf, PAGE_SIZE, "%u\n",
3784 +- *((unsigned int *) ptr));
3785 ++ if (a->attr_ptr == ptr_ext4_super_block_offset)
3786 ++ return snprintf(buf, PAGE_SIZE, "%u\n",
3787 ++ le32_to_cpup(ptr));
3788 ++ else
3789 ++ return snprintf(buf, PAGE_SIZE, "%u\n",
3790 ++ *((unsigned int *) ptr));
3791 + case attr_pointer_atomic:
3792 + if (!ptr)
3793 + return 0;
3794 +@@ -312,7 +316,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
3795 + ret = kstrtoul(skip_spaces(buf), 0, &t);
3796 + if (ret)
3797 + return ret;
3798 +- *((unsigned int *) ptr) = t;
3799 ++ if (a->attr_ptr == ptr_ext4_super_block_offset)
3800 ++ *((__le32 *) ptr) = cpu_to_le32(t);
3801 ++ else
3802 ++ *((unsigned int *) ptr) = t;
3803 + return len;
3804 + case attr_inode_readahead:
3805 + return inode_readahead_blks_store(a, sbi, buf, len);
3806 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
3807 +index c7c8c16ccd93..9bc50eef6127 100644
3808 +--- a/fs/ext4/xattr.c
3809 ++++ b/fs/ext4/xattr.c
3810 +@@ -189,6 +189,8 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
3811 + struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
3812 + if ((void *)next >= end)
3813 + return -EFSCORRUPTED;
3814 ++ if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
3815 ++ return -EFSCORRUPTED;
3816 + e = next;
3817 + }
3818 +
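The added strnlen() check catches on-disk corruption where e_name_len claims more bytes than the name actually holds before an embedded NUL. A standalone illustration of the consistency test:

#include <stdio.h>
#include <string.h>

static int name_len_valid(const char *name, size_t claimed_len)
{
    /* strnlen stops at the first NUL, so a mismatch means the
     * claimed length runs past the real string */
    return strnlen(name, claimed_len) == claimed_len;
}

int main(void)
{
    printf("%d\n", name_len_valid("user", 4));     /* 1: consistent */
    printf("%d\n", name_len_valid("us\0r", 4));    /* 0: corrupted */
    return 0;
}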
3819 +diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
3820 +index de67745e1cd7..77946d6f617d 100644
3821 +--- a/fs/fscache/operation.c
3822 ++++ b/fs/fscache/operation.c
3823 +@@ -66,7 +66,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
3824 + ASSERT(op->processor != NULL);
3825 + ASSERT(fscache_object_is_available(op->object));
3826 + ASSERTCMP(atomic_read(&op->usage), >, 0);
3827 +- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
3828 ++ ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
3829 ++ op->state, ==, FSCACHE_OP_ST_CANCELLED);
3830 +
3831 + fscache_stat(&fscache_n_op_enqueue);
3832 + switch (op->flags & FSCACHE_OP_TYPE) {
3833 +@@ -481,7 +482,8 @@ void fscache_put_operation(struct fscache_operation *op)
3834 + struct fscache_cache *cache;
3835 +
3836 + _enter("{OBJ%x OP%x,%d}",
3837 +- op->object->debug_id, op->debug_id, atomic_read(&op->usage));
3838 ++ op->object ? op->object->debug_id : 0,
3839 ++ op->debug_id, atomic_read(&op->usage));
3840 +
3841 + ASSERTCMP(atomic_read(&op->usage), >, 0);
3842 +
3843 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
3844 +index 261fd13a75c6..ee8105af4001 100644
3845 +--- a/fs/fuse/dev.c
3846 ++++ b/fs/fuse/dev.c
3847 +@@ -131,6 +131,16 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
3848 + return !fc->initialized || (for_background && fc->blocked);
3849 + }
3850 +
3851 ++static void fuse_drop_waiting(struct fuse_conn *fc)
3852 ++{
3853 ++ if (fc->connected) {
3854 ++ atomic_dec(&fc->num_waiting);
3855 ++ } else if (atomic_dec_and_test(&fc->num_waiting)) {
3856 ++ /* wake up aborters */
3857 ++ wake_up_all(&fc->blocked_waitq);
3858 ++ }
3859 ++}
3860 ++
3861 + static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
3862 + bool for_background)
3863 + {
3864 +@@ -171,7 +181,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
3865 + return req;
3866 +
3867 + out:
3868 +- atomic_dec(&fc->num_waiting);
3869 ++ fuse_drop_waiting(fc);
3870 + return ERR_PTR(err);
3871 + }
3872 +
3873 +@@ -278,7 +288,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
3874 +
3875 + if (test_bit(FR_WAITING, &req->flags)) {
3876 + __clear_bit(FR_WAITING, &req->flags);
3877 +- atomic_dec(&fc->num_waiting);
3878 ++ fuse_drop_waiting(fc);
3879 + }
3880 +
3881 + if (req->stolen_file)
3882 +@@ -364,7 +374,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
3883 + struct fuse_iqueue *fiq = &fc->iq;
3884 +
3885 + if (test_and_set_bit(FR_FINISHED, &req->flags))
3886 +- return;
3887 ++ goto put_request;
3888 +
3889 + spin_lock(&fiq->waitq.lock);
3890 + list_del_init(&req->intr_entry);
3891 +@@ -393,6 +403,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
3892 + wake_up(&req->waitq);
3893 + if (req->end)
3894 + req->end(fc, req);
3895 ++put_request:
3896 + fuse_put_request(fc, req);
3897 + }
3898 +
3899 +@@ -1941,11 +1952,14 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
3900 + if (!fud)
3901 + return -EPERM;
3902 +
3903 ++ pipe_lock(pipe);
3904 ++
3905 + bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
3906 +- if (!bufs)
3907 ++ if (!bufs) {
3908 ++ pipe_unlock(pipe);
3909 + return -ENOMEM;
3910 ++ }
3911 +
3912 +- pipe_lock(pipe);
3913 + nbuf = 0;
3914 + rem = 0;
3915 + for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
3916 +@@ -2100,6 +2114,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
3917 + set_bit(FR_ABORTED, &req->flags);
3918 + if (!test_bit(FR_LOCKED, &req->flags)) {
3919 + set_bit(FR_PRIVATE, &req->flags);
3920 ++ __fuse_get_request(req);
3921 + list_move(&req->list, &to_end1);
3922 + }
3923 + spin_unlock(&req->waitq.lock);
3924 +@@ -2126,7 +2141,6 @@ void fuse_abort_conn(struct fuse_conn *fc)
3925 +
3926 + while (!list_empty(&to_end1)) {
3927 + req = list_first_entry(&to_end1, struct fuse_req, list);
3928 +- __fuse_get_request(req);
3929 + list_del_init(&req->list);
3930 + request_end(fc, req);
3931 + }
3932 +@@ -2137,6 +2151,11 @@ void fuse_abort_conn(struct fuse_conn *fc)
3933 + }
3934 + EXPORT_SYMBOL_GPL(fuse_abort_conn);
3935 +
3936 ++void fuse_wait_aborted(struct fuse_conn *fc)
3937 ++{
3938 ++ wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
3939 ++}
3940 ++
3941 + int fuse_dev_release(struct inode *inode, struct file *file)
3942 + {
3943 + struct fuse_dev *fud = fuse_get_dev(file);
3944 +@@ -2144,9 +2163,15 @@ int fuse_dev_release(struct inode *inode, struct file *file)
3945 + if (fud) {
3946 + struct fuse_conn *fc = fud->fc;
3947 + struct fuse_pqueue *fpq = &fud->pq;
3948 ++ LIST_HEAD(to_end);
3949 +
3950 ++ spin_lock(&fpq->lock);
3951 + WARN_ON(!list_empty(&fpq->io));
3952 +- end_requests(fc, &fpq->processing);
3953 ++ list_splice_init(&fpq->processing, &to_end);
3954 ++ spin_unlock(&fpq->lock);
3955 ++
3956 ++ end_requests(fc, &to_end);
3957 ++
3958 + /* Are we the last open device? */
3959 + if (atomic_dec_and_test(&fc->dev_count)) {
3960 + WARN_ON(fc->iq.fasync != NULL);
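The new fuse_drop_waiting()/fuse_wait_aborted() pair turns num_waiting into a handshake: after an abort, teardown blocks until the last counted request has been dropped, and that last dropper issues the wake-up. A pthread condition-variable model of the handshake (two fake in-flight requests):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t blocked_waitq = PTHREAD_COND_INITIALIZER;
static int num_waiting = 2;    /* pretend two requests in flight */

static void fuse_drop_waiting(void)
{
    pthread_mutex_lock(&lock);
    if (--num_waiting == 0)
        pthread_cond_broadcast(&blocked_waitq);    /* wake aborters */
    pthread_mutex_unlock(&lock);
}

static void *requester(void *arg)
{
    (void)arg;
    fuse_drop_waiting();    /* request torn down */
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, requester, NULL);
    pthread_create(&b, NULL, requester, NULL);

    pthread_mutex_lock(&lock);
    while (num_waiting > 0)                        /* fuse_wait_aborted() */
        pthread_cond_wait(&blocked_waitq, &lock);
    pthread_mutex_unlock(&lock);
    printf("all requests gone, safe to free\n");

    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}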
3961 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
3962 +index 7a980b4462d9..29868c35c19a 100644
3963 +--- a/fs/fuse/dir.c
3964 ++++ b/fs/fuse/dir.c
3965 +@@ -355,11 +355,12 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
3966 + struct inode *inode;
3967 + struct dentry *newent;
3968 + bool outarg_valid = true;
3969 ++ bool locked;
3970 +
3971 +- fuse_lock_inode(dir);
3972 ++ locked = fuse_lock_inode(dir);
3973 + err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
3974 + &outarg, &inode);
3975 +- fuse_unlock_inode(dir);
3976 ++ fuse_unlock_inode(dir, locked);
3977 + if (err == -ENOENT) {
3978 + outarg_valid = false;
3979 + err = 0;
3980 +@@ -1332,6 +1333,7 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
3981 + struct fuse_conn *fc = get_fuse_conn(inode);
3982 + struct fuse_req *req;
3983 + u64 attr_version = 0;
3984 ++ bool locked;
3985 +
3986 + if (is_bad_inode(inode))
3987 + return -EIO;
3988 +@@ -1359,9 +1361,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
3989 + fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
3990 + FUSE_READDIR);
3991 + }
3992 +- fuse_lock_inode(inode);
3993 ++ locked = fuse_lock_inode(inode);
3994 + fuse_request_send(fc, req);
3995 +- fuse_unlock_inode(inode);
3996 ++ fuse_unlock_inode(inode, locked);
3997 + nbytes = req->out.args[0].size;
3998 + err = req->out.h.error;
3999 + fuse_put_request(fc, req);
4000 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
4001 +index cb7dff5c45d7..fb4738ef162f 100644
4002 +--- a/fs/fuse/file.c
4003 ++++ b/fs/fuse/file.c
4004 +@@ -866,6 +866,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
4005 + }
4006 +
4007 + if (WARN_ON(req->num_pages >= req->max_pages)) {
4008 ++ unlock_page(page);
4009 + fuse_put_request(fc, req);
4010 + return -EIO;
4011 + }
4012 +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
4013 +index d5773ca67ad2..e105640153ce 100644
4014 +--- a/fs/fuse/fuse_i.h
4015 ++++ b/fs/fuse/fuse_i.h
4016 +@@ -852,6 +852,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
4017 +
4018 + /* Abort all requests */
4019 + void fuse_abort_conn(struct fuse_conn *fc);
4020 ++void fuse_wait_aborted(struct fuse_conn *fc);
4021 +
4022 + /**
4023 + * Invalidate inode attributes
4024 +@@ -964,8 +965,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
4025 +
4026 + void fuse_set_initialized(struct fuse_conn *fc);
4027 +
4028 +-void fuse_unlock_inode(struct inode *inode);
4029 +-void fuse_lock_inode(struct inode *inode);
4030 ++void fuse_unlock_inode(struct inode *inode, bool locked);
4031 ++bool fuse_lock_inode(struct inode *inode);
4032 +
4033 + int fuse_setxattr(struct inode *inode, const char *name, const void *value,
4034 + size_t size, int flags);
4035 +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
4036 +index a13ecefa9cd1..ffb61787d77a 100644
4037 +--- a/fs/fuse/inode.c
4038 ++++ b/fs/fuse/inode.c
4039 +@@ -357,15 +357,21 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
4040 + return 0;
4041 + }
4042 +
4043 +-void fuse_lock_inode(struct inode *inode)
4044 ++bool fuse_lock_inode(struct inode *inode)
4045 + {
4046 +- if (!get_fuse_conn(inode)->parallel_dirops)
4047 ++ bool locked = false;
4048 ++
4049 ++ if (!get_fuse_conn(inode)->parallel_dirops) {
4050 + mutex_lock(&get_fuse_inode(inode)->mutex);
4051 ++ locked = true;
4052 ++ }
4053 ++
4054 ++ return locked;
4055 + }
4056 +
4057 +-void fuse_unlock_inode(struct inode *inode)
4058 ++void fuse_unlock_inode(struct inode *inode, bool locked)
4059 + {
4060 +- if (!get_fuse_conn(inode)->parallel_dirops)
4061 ++ if (locked)
4062 + mutex_unlock(&get_fuse_inode(inode)->mutex);
4063 + }
4064 +
4065 +@@ -391,9 +397,6 @@ static void fuse_put_super(struct super_block *sb)
4066 + {
4067 + struct fuse_conn *fc = get_fuse_conn_super(sb);
4068 +
4069 +- fuse_send_destroy(fc);
4070 +-
4071 +- fuse_abort_conn(fc);
4072 + mutex_lock(&fuse_mutex);
4073 + list_del(&fc->entry);
4074 + fuse_ctl_remove_conn(fc);
4075 +@@ -1190,16 +1193,25 @@ static struct dentry *fuse_mount(struct file_system_type *fs_type,
4076 + return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
4077 + }
4078 +
4079 +-static void fuse_kill_sb_anon(struct super_block *sb)
4080 ++static void fuse_sb_destroy(struct super_block *sb)
4081 + {
4082 + struct fuse_conn *fc = get_fuse_conn_super(sb);
4083 +
4084 + if (fc) {
4085 ++ fuse_send_destroy(fc);
4086 ++
4087 ++ fuse_abort_conn(fc);
4088 ++ fuse_wait_aborted(fc);
4089 ++
4090 + down_write(&fc->killsb);
4091 + fc->sb = NULL;
4092 + up_write(&fc->killsb);
4093 + }
4094 ++}
4095 +
4096 ++static void fuse_kill_sb_anon(struct super_block *sb)
4097 ++{
4098 ++ fuse_sb_destroy(sb);
4099 + kill_anon_super(sb);
4100 + }
4101 +
4102 +@@ -1222,14 +1234,7 @@ static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
4103 +
4104 + static void fuse_kill_sb_blk(struct super_block *sb)
4105 + {
4106 +- struct fuse_conn *fc = get_fuse_conn_super(sb);
4107 +-
4108 +- if (fc) {
4109 +- down_write(&fc->killsb);
4110 +- fc->sb = NULL;
4111 +- up_write(&fc->killsb);
4112 +- }
4113 +-
4114 ++ fuse_sb_destroy(sb);
4115 + kill_block_super(sb);
4116 + }
4117 +
4118 +diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
4119 +index fcff2e0487fe..f1c1430ae721 100644
4120 +--- a/fs/squashfs/file.c
4121 ++++ b/fs/squashfs/file.c
4122 +@@ -374,13 +374,29 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
4123 + return squashfs_block_size(size);
4124 + }
4125 +
4126 ++void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
4127 ++{
4128 ++ int copied;
4129 ++ void *pageaddr;
4130 ++
4131 ++ pageaddr = kmap_atomic(page);
4132 ++ copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
4133 ++ memset(pageaddr + copied, 0, PAGE_SIZE - copied);
4134 ++ kunmap_atomic(pageaddr);
4135 ++
4136 ++ flush_dcache_page(page);
4137 ++ if (copied == avail)
4138 ++ SetPageUptodate(page);
4139 ++ else
4140 ++ SetPageError(page);
4141 ++}
4142 ++
4143 + /* Copy data into page cache */
4144 + void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
4145 + int bytes, int offset)
4146 + {
4147 + struct inode *inode = page->mapping->host;
4148 + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
4149 +- void *pageaddr;
4150 + int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
4151 + int start_index = page->index & ~mask, end_index = start_index | mask;
4152 +
4153 +@@ -406,12 +422,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
4154 + if (PageUptodate(push_page))
4155 + goto skip_page;
4156 +
4157 +- pageaddr = kmap_atomic(push_page);
4158 +- squashfs_copy_data(pageaddr, buffer, offset, avail);
4159 +- memset(pageaddr + avail, 0, PAGE_SIZE - avail);
4160 +- kunmap_atomic(pageaddr);
4161 +- flush_dcache_page(push_page);
4162 +- SetPageUptodate(push_page);
4163 ++ squashfs_fill_page(push_page, buffer, offset, avail);
4164 + skip_page:
4165 + unlock_page(push_page);
4166 + if (i != page->index)
4167 +@@ -420,10 +431,9 @@ skip_page:
4168 + }
4169 +
4170 + /* Read datablock stored packed inside a fragment (tail-end packed block) */
4171 +-static int squashfs_readpage_fragment(struct page *page)
4172 ++static int squashfs_readpage_fragment(struct page *page, int expected)
4173 + {
4174 + struct inode *inode = page->mapping->host;
4175 +- struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
4176 + struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
4177 + squashfs_i(inode)->fragment_block,
4178 + squashfs_i(inode)->fragment_size);
4179 +@@ -434,23 +444,16 @@ static int squashfs_readpage_fragment(struct page *page)
4180 + squashfs_i(inode)->fragment_block,
4181 + squashfs_i(inode)->fragment_size);
4182 + else
4183 +- squashfs_copy_cache(page, buffer, i_size_read(inode) &
4184 +- (msblk->block_size - 1),
4185 ++ squashfs_copy_cache(page, buffer, expected,
4186 + squashfs_i(inode)->fragment_offset);
4187 +
4188 + squashfs_cache_put(buffer);
4189 + return res;
4190 + }
4191 +
4192 +-static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
4193 ++static int squashfs_readpage_sparse(struct page *page, int expected)
4194 + {
4195 +- struct inode *inode = page->mapping->host;
4196 +- struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
4197 +- int bytes = index == file_end ?
4198 +- (i_size_read(inode) & (msblk->block_size - 1)) :
4199 +- msblk->block_size;
4200 +-
4201 +- squashfs_copy_cache(page, NULL, bytes, 0);
4202 ++ squashfs_copy_cache(page, NULL, expected, 0);
4203 + return 0;
4204 + }
4205 +
4206 +@@ -460,6 +463,9 @@ static int squashfs_readpage(struct file *file, struct page *page)
4207 + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
4208 + int index = page->index >> (msblk->block_log - PAGE_SHIFT);
4209 + int file_end = i_size_read(inode) >> msblk->block_log;
4210 ++ int expected = index == file_end ?
4211 ++ (i_size_read(inode) & (msblk->block_size - 1)) :
4212 ++ msblk->block_size;
4213 + int res;
4214 + void *pageaddr;
4215 +
4216 +@@ -478,11 +484,11 @@ static int squashfs_readpage(struct file *file, struct page *page)
4217 + goto error_out;
4218 +
4219 + if (bsize == 0)
4220 +- res = squashfs_readpage_sparse(page, index, file_end);
4221 ++ res = squashfs_readpage_sparse(page, expected);
4222 + else
4223 +- res = squashfs_readpage_block(page, block, bsize);
4224 ++ res = squashfs_readpage_block(page, block, bsize, expected);
4225 + } else
4226 +- res = squashfs_readpage_fragment(page);
4227 ++ res = squashfs_readpage_fragment(page, expected);
4228 +
4229 + if (!res)
4230 + return 0;
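squashfs_fill_page() centralises the copy/zero-tail/flag sequence that the cache and direct read paths previously duplicated, and the `expected` plumbing lets a short decompression mark the page as an error instead of silently exposing stale or zeroed bytes as file data. A sketch of the helper's contract (PAGE_SIZE fixed at 4096 just for the example):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static int fill_page(char *page, const char *data, int copied, int expected)
{
    memcpy(page, data, copied);
    memset(page + copied, 0, PAGE_SIZE - copied);    /* zero the tail */
    return copied == expected;    /* 1: uptodate, 0: page error */
}

int main(void)
{
    static char page[PAGE_SIZE];
    char block[100] = { 0 };

    printf("uptodate=%d\n", fill_page(page, block, 100, 100));
    printf("uptodate=%d\n", fill_page(page, block, 80, 100));  /* short */
    return 0;
}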
4231 +diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
4232 +index f2310d2a2019..a9ba8d96776a 100644
4233 +--- a/fs/squashfs/file_cache.c
4234 ++++ b/fs/squashfs/file_cache.c
4235 +@@ -20,7 +20,7 @@
4236 + #include "squashfs.h"
4237 +
4238 + /* Read separately compressed datablock and memcopy into page cache */
4239 +-int squashfs_readpage_block(struct page *page, u64 block, int bsize)
4240 ++int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
4241 + {
4242 + struct inode *i = page->mapping->host;
4243 + struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
4244 +@@ -31,7 +31,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize)
4245 + ERROR("Unable to read page, block %llx, size %x\n", block,
4246 + bsize);
4247 + else
4248 +- squashfs_copy_cache(page, buffer, buffer->length, 0);
4249 ++ squashfs_copy_cache(page, buffer, expected, 0);
4250 +
4251 + squashfs_cache_put(buffer);
4252 + return res;
4253 +diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
4254 +index cb485d8e0e91..80db1b86a27c 100644
4255 +--- a/fs/squashfs/file_direct.c
4256 ++++ b/fs/squashfs/file_direct.c
4257 +@@ -21,10 +21,11 @@
4258 + #include "page_actor.h"
4259 +
4260 + static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
4261 +- int pages, struct page **page);
4262 ++ int pages, struct page **page, int bytes);
4263 +
4264 + /* Read separately compressed datablock directly into page cache */
4265 +-int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
4266 ++int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
4267 ++ int expected)
4268 +
4269 + {
4270 + struct inode *inode = target_page->mapping->host;
4271 +@@ -83,7 +84,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
4272 + * using an intermediate buffer.
4273 + */
4274 + res = squashfs_read_cache(target_page, block, bsize, pages,
4275 +- page);
4276 ++ page, expected);
4277 + if (res < 0)
4278 + goto mark_errored;
4279 +
4280 +@@ -95,6 +96,11 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
4281 + if (res < 0)
4282 + goto mark_errored;
4283 +
4284 ++ if (res != expected) {
4285 ++ res = -EIO;
4286 ++ goto mark_errored;
4287 ++ }
4288 ++
4289 + /* Last page may have trailing bytes not filled */
4290 + bytes = res % PAGE_SIZE;
4291 + if (bytes) {
4292 +@@ -138,13 +144,12 @@ out:
4293 +
4294 +
4295 + static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
4296 +- int pages, struct page **page)
4297 ++ int pages, struct page **page, int bytes)
4298 + {
4299 + struct inode *i = target_page->mapping->host;
4300 + struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
4301 + block, bsize);
4302 +- int bytes = buffer->length, res = buffer->error, n, offset = 0;
4303 +- void *pageaddr;
4304 ++ int res = buffer->error, n, offset = 0;
4305 +
4306 + if (res) {
4307 + ERROR("Unable to read page, block %llx, size %x\n", block,
4308 +@@ -159,12 +164,7 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
4309 + if (page[n] == NULL)
4310 + continue;
4311 +
4312 +- pageaddr = kmap_atomic(page[n]);
4313 +- squashfs_copy_data(pageaddr, buffer, offset, avail);
4314 +- memset(pageaddr + avail, 0, PAGE_SIZE - avail);
4315 +- kunmap_atomic(pageaddr);
4316 +- flush_dcache_page(page[n]);
4317 +- SetPageUptodate(page[n]);
4318 ++ squashfs_fill_page(page[n], buffer, offset, avail);
4319 + unlock_page(page[n]);
4320 + if (page[n] != target_page)
4321 + put_page(page[n]);
4322 +diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
4323 +index 887d6d270080..f89f8a74c6ce 100644
4324 +--- a/fs/squashfs/squashfs.h
4325 ++++ b/fs/squashfs/squashfs.h
4326 +@@ -67,11 +67,12 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
4327 + u64, u64, unsigned int);
4328 +
4329 + /* file.c */
4330 ++void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
4331 + void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
4332 + int);
4333 +
4334 + /* file_xxx.c */
4335 +-extern int squashfs_readpage_block(struct page *, u64, int);
4336 ++extern int squashfs_readpage_block(struct page *, u64, int, int);
4337 +
4338 + /* id.c */
4339 + extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
4340 +diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
4341 +index 39c75a86c67f..666986b95c5d 100644
4342 +--- a/fs/sysfs/file.c
4343 ++++ b/fs/sysfs/file.c
4344 +@@ -407,6 +407,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
4345 + }
4346 + EXPORT_SYMBOL_GPL(sysfs_chmod_file);
4347 +
4348 ++/**
4349 ++ * sysfs_break_active_protection - break "active" protection
4350 ++ * @kobj: The kernel object @attr is associated with.
4351 ++ * @attr: The attribute to break the "active" protection for.
4352 ++ *
4353 ++ * With sysfs, just like kernfs, deletion of an attribute is postponed until
4354 ++ * all active .show() and .store() callbacks have finished unless this function
4355 ++ * is called. Hence this function is useful in methods that implement self
4356 ++ * deletion.
4357 ++ */
4358 ++struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
4359 ++ const struct attribute *attr)
4360 ++{
4361 ++ struct kernfs_node *kn;
4362 ++
4363 ++ kobject_get(kobj);
4364 ++ kn = kernfs_find_and_get(kobj->sd, attr->name);
4365 ++ if (kn)
4366 ++ kernfs_break_active_protection(kn);
4367 ++ return kn;
4368 ++}
4369 ++EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
4370 ++
4371 ++/**
4372 ++ * sysfs_unbreak_active_protection - restore "active" protection
4373 ++ * @kn: Pointer returned by sysfs_break_active_protection().
4374 ++ *
4375 ++ * Undo the effects of sysfs_break_active_protection(). Since this function
4376 ++ * calls kernfs_put() on the kernfs node that corresponds to the 'attr'
4377 ++ * argument passed to sysfs_break_active_protection() that attribute may have
4378 ++ * been removed between the sysfs_break_active_protection() and
4379 ++ * sysfs_unbreak_active_protection() calls, it is not safe to access @kn after
4380 ++ * this function has returned.
4381 ++ */
4382 ++void sysfs_unbreak_active_protection(struct kernfs_node *kn)
4383 ++{
4384 ++ struct kobject *kobj = kn->parent->priv;
4385 ++
4386 ++ kernfs_unbreak_active_protection(kn);
4387 ++ kernfs_put(kn);
4388 ++ kobject_put(kobj);
4389 ++}
4390 ++EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection);
4391 ++
4392 + /**
4393 + * sysfs_remove_file_ns - remove an object attribute with a custom ns tag
4394 + * @kobj: object we're acting for
4395 +diff --git a/include/linux/printk.h b/include/linux/printk.h
4396 +index 335926039adc..6106befed756 100644
4397 +--- a/include/linux/printk.h
4398 ++++ b/include/linux/printk.h
4399 +@@ -150,9 +150,13 @@ void early_printk(const char *s, ...) { }
4400 + #ifdef CONFIG_PRINTK_NMI
4401 + extern void printk_nmi_enter(void);
4402 + extern void printk_nmi_exit(void);
4403 ++extern void printk_nmi_direct_enter(void);
4404 ++extern void printk_nmi_direct_exit(void);
4405 + #else
4406 + static inline void printk_nmi_enter(void) { }
4407 + static inline void printk_nmi_exit(void) { }
4408 ++static inline void printk_nmi_direct_enter(void) { }
4409 ++static inline void printk_nmi_direct_exit(void) { }
4410 + #endif /* PRINTK_NMI */
4411 +
4412 + #ifdef CONFIG_PRINTK
4413 +diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
4414 +index 1b92a28dd672..6fd615a0eea9 100644
4415 +--- a/include/linux/rtmutex.h
4416 ++++ b/include/linux/rtmutex.h
4417 +@@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
4418 + extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
4419 + extern void rt_mutex_destroy(struct rt_mutex *lock);
4420 +
4421 ++#ifdef CONFIG_DEBUG_LOCK_ALLOC
4422 ++extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
4423 ++#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
4424 ++#else
4425 + extern void rt_mutex_lock(struct rt_mutex *lock);
4426 ++#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
4427 ++#endif
4428 ++
4429 + extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
4430 + extern int rt_mutex_timed_lock(struct rt_mutex *lock,
4431 + struct hrtimer_sleeper *timeout);
4432 +diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
4433 +index 40839c02d28c..cca19bb200bd 100644
4434 +--- a/include/linux/sysfs.h
4435 ++++ b/include/linux/sysfs.h
4436 +@@ -239,6 +239,9 @@ int __must_check sysfs_create_files(struct kobject *kobj,
4437 + const struct attribute **attr);
4438 + int __must_check sysfs_chmod_file(struct kobject *kobj,
4439 + const struct attribute *attr, umode_t mode);
4440 ++struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
4441 ++ const struct attribute *attr);
4442 ++void sysfs_unbreak_active_protection(struct kernfs_node *kn);
4443 + void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
4444 + const void *ns);
4445 + bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
4446 +@@ -352,6 +355,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
4447 + return 0;
4448 + }
4449 +
4450 ++static inline struct kernfs_node *
4451 ++sysfs_break_active_protection(struct kobject *kobj,
4452 ++ const struct attribute *attr)
4453 ++{
4454 ++ return NULL;
4455 ++}
4456 ++
4457 ++static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
4458 ++{
4459 ++}
4460 ++
4461 + static inline void sysfs_remove_file_ns(struct kobject *kobj,
4462 + const struct attribute *attr,
4463 + const void *ns)
4464 +diff --git a/ipc/sem.c b/ipc/sem.c
4465 +index b2698ebdcb31..d6dd2dc9ddad 100644
4466 +--- a/ipc/sem.c
4467 ++++ b/ipc/sem.c
4468 +@@ -2041,7 +2041,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
4469 + }
4470 +
4471 + do {
4472 +- queue.status = -EINTR;
4473 ++ WRITE_ONCE(queue.status, -EINTR);
4474 + queue.sleeper = current;
4475 +
4476 + __set_current_state(TASK_INTERRUPTIBLE);
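The switch to WRITE_ONCE() pairs with the lockless readers of queue.status on the wakeup path: a plain store may be torn or re-ordered by the compiler, letting a reader observe a half-written status. The general publish/poll idiom, as a standalone sketch:

    /* Sketch: one CPU publishes a status word, another polls it locklessly.
     * WRITE_ONCE()/READ_ONCE() forbid compiler tearing and re-fetching. */
    static int op_status;

    static void publish_error(void)
    {
            WRITE_ONCE(op_status, -EINTR);
    }

    static int poll_status(void)
    {
            return READ_ONCE(op_status);
    }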
4477 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
4478 +index a66e838640ea..5c90765d37e7 100644
4479 +--- a/kernel/kprobes.c
4480 ++++ b/kernel/kprobes.c
4481 +@@ -2531,7 +2531,7 @@ static int __init debugfs_kprobe_init(void)
4482 + if (!dir)
4483 + return -ENOMEM;
4484 +
4485 +- file = debugfs_create_file("list", 0444, dir, NULL,
4486 ++ file = debugfs_create_file("list", 0400, dir, NULL,
4487 + &debugfs_kprobes_operations);
4488 + if (!file)
4489 + goto error;
4490 +@@ -2541,7 +2541,7 @@ static int __init debugfs_kprobe_init(void)
4491 + if (!file)
4492 + goto error;
4493 +
4494 +- file = debugfs_create_file("blacklist", 0444, dir, NULL,
4495 ++ file = debugfs_create_file("blacklist", 0400, dir, NULL,
4496 + &debugfs_kprobe_blacklist_ops);
4497 + if (!file)
4498 + goto error;
4499 +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
4500 +index 65cc0cb984e6..4ad35718f123 100644
4501 +--- a/kernel/locking/rtmutex.c
4502 ++++ b/kernel/locking/rtmutex.c
4503 +@@ -1466,6 +1466,29 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
4504 + rt_mutex_postunlock(&wake_q);
4505 + }
4506 +
4507 ++static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
4508 ++{
4509 ++ might_sleep();
4510 ++
4511 ++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
4512 ++ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
4513 ++}
4514 ++
4515 ++#ifdef CONFIG_DEBUG_LOCK_ALLOC
4516 ++/**
4517 ++ * rt_mutex_lock_nested - lock a rt_mutex
4518 ++ *
4519 ++ * @lock: the rt_mutex to be locked
4520 ++ * @subclass: the lockdep subclass
4521 ++ */
4522 ++void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
4523 ++{
4524 ++ __rt_mutex_lock(lock, subclass);
4525 ++}
4526 ++EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
4527 ++#endif
4528 ++
4529 ++#ifndef CONFIG_DEBUG_LOCK_ALLOC
4530 + /**
4531 + * rt_mutex_lock - lock a rt_mutex
4532 + *
4533 +@@ -1473,12 +1496,10 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
4534 + */
4535 + void __sched rt_mutex_lock(struct rt_mutex *lock)
4536 + {
4537 +- might_sleep();
4538 +-
4539 +- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
4540 +- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
4541 ++ __rt_mutex_lock(lock, 0);
4542 + }
4543 + EXPORT_SYMBOL_GPL(rt_mutex_lock);
4544 ++#endif
4545 +
4546 + /**
4547 + * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
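rt_mutex_lock_nested() gives rt_mutex the same lockdep-subclass annotation that mutex_lock_nested() provides for regular mutexes; without CONFIG_DEBUG_LOCK_ALLOC it compiles away to rt_mutex_lock(), as the header hunk above shows. A hedged sketch of a caller that legitimately takes two locks of the same class:

    static struct rt_mutex outer_lock, inner_lock;  /* same lock class */

    static void lock_both(void)
    {
            rt_mutex_lock(&outer_lock);
            /* annotate the intentional nesting so lockdep does not
             * report a false self-deadlock */
            rt_mutex_lock_nested(&inner_lock, SINGLE_DEPTH_NESTING);
            /* ... critical section ... */
            rt_mutex_unlock(&inner_lock);
            rt_mutex_unlock(&outer_lock);
    }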
4548 +diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
4549 +index 2a7d04049af4..0f1898820cba 100644
4550 +--- a/kernel/printk/internal.h
4551 ++++ b/kernel/printk/internal.h
4552 +@@ -19,11 +19,16 @@
4553 + #ifdef CONFIG_PRINTK
4554 +
4555 + #define PRINTK_SAFE_CONTEXT_MASK 0x3fffffff
4556 +-#define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000
4557 ++#define PRINTK_NMI_DIRECT_CONTEXT_MASK 0x40000000
4558 + #define PRINTK_NMI_CONTEXT_MASK 0x80000000
4559 +
4560 + extern raw_spinlock_t logbuf_lock;
4561 +
4562 ++__printf(5, 0)
4563 ++int vprintk_store(int facility, int level,
4564 ++ const char *dict, size_t dictlen,
4565 ++ const char *fmt, va_list args);
4566 ++
4567 + __printf(1, 0) int vprintk_default(const char *fmt, va_list args);
4568 + __printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
4569 + __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
4570 +@@ -54,6 +59,8 @@ void __printk_safe_exit(void);
4571 + local_irq_enable(); \
4572 + } while (0)
4573 +
4574 ++void defer_console_output(void);
4575 ++
4576 + #else
4577 +
4578 + __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
4579 +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
4580 +index 512f7c2baedd..f0223a7d9ed1 100644
4581 +--- a/kernel/printk/printk.c
4582 ++++ b/kernel/printk/printk.c
4583 +@@ -1680,28 +1680,16 @@ static size_t log_output(int facility, int level, enum log_flags lflags, const c
4584 + return log_store(facility, level, lflags, 0, dict, dictlen, text, text_len);
4585 + }
4586 +
4587 +-asmlinkage int vprintk_emit(int facility, int level,
4588 +- const char *dict, size_t dictlen,
4589 +- const char *fmt, va_list args)
4590 ++/* Must be called under logbuf_lock. */
4591 ++int vprintk_store(int facility, int level,
4592 ++ const char *dict, size_t dictlen,
4593 ++ const char *fmt, va_list args)
4594 + {
4595 + static char textbuf[LOG_LINE_MAX];
4596 + char *text = textbuf;
4597 + size_t text_len;
4598 + enum log_flags lflags = 0;
4599 +- unsigned long flags;
4600 +- int printed_len;
4601 +- bool in_sched = false;
4602 +-
4603 +- if (level == LOGLEVEL_SCHED) {
4604 +- level = LOGLEVEL_DEFAULT;
4605 +- in_sched = true;
4606 +- }
4607 +-
4608 +- boot_delay_msec(level);
4609 +- printk_delay();
4610 +
4611 +- /* This stops the holder of console_sem just where we want him */
4612 +- logbuf_lock_irqsave(flags);
4613 + /*
4614 + * The printf needs to come first; we need the syslog
4615 + * prefix which might be passed-in as a parameter.
4616 +@@ -1742,8 +1730,29 @@ asmlinkage int vprintk_emit(int facility, int level,
4617 + if (dict)
4618 + lflags |= LOG_PREFIX|LOG_NEWLINE;
4619 +
4620 +- printed_len = log_output(facility, level, lflags, dict, dictlen, text, text_len);
4621 ++ return log_output(facility, level, lflags,
4622 ++ dict, dictlen, text, text_len);
4623 ++}
4624 +
4625 ++asmlinkage int vprintk_emit(int facility, int level,
4626 ++ const char *dict, size_t dictlen,
4627 ++ const char *fmt, va_list args)
4628 ++{
4629 ++ int printed_len;
4630 ++ bool in_sched = false;
4631 ++ unsigned long flags;
4632 ++
4633 ++ if (level == LOGLEVEL_SCHED) {
4634 ++ level = LOGLEVEL_DEFAULT;
4635 ++ in_sched = true;
4636 ++ }
4637 ++
4638 ++ boot_delay_msec(level);
4639 ++ printk_delay();
4640 ++
4641 ++ /* This stops the holder of console_sem just where we want him */
4642 ++ logbuf_lock_irqsave(flags);
4643 ++ printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args);
4644 + logbuf_unlock_irqrestore(flags);
4645 +
4646 + /* If called from the scheduler, we can not call up(). */
4647 +@@ -2714,16 +2723,20 @@ void wake_up_klogd(void)
4648 + preempt_enable();
4649 + }
4650 +
4651 +-int vprintk_deferred(const char *fmt, va_list args)
4652 ++void defer_console_output(void)
4653 + {
4654 +- int r;
4655 +-
4656 +- r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
4657 +-
4658 + preempt_disable();
4659 + __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
4660 + irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
4661 + preempt_enable();
4662 ++}
4663 ++
4664 ++int vprintk_deferred(const char *fmt, va_list args)
4665 ++{
4666 ++ int r;
4667 ++
4668 ++ r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
4669 ++ defer_console_output();
4670 +
4671 + return r;
4672 + }
4673 +diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
4674 +index 64825b2df3a5..d482fd61ac67 100644
4675 +--- a/kernel/printk/printk_safe.c
4676 ++++ b/kernel/printk/printk_safe.c
4677 +@@ -311,24 +311,33 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
4678 +
4679 + void printk_nmi_enter(void)
4680 + {
4681 +- /*
4682 +- * The size of the extra per-CPU buffer is limited. Use it only when
4683 +- * the main one is locked. If this CPU is not in the safe context,
4684 +- * the lock must be taken on another CPU and we could wait for it.
4685 +- */
4686 +- if ((this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) &&
4687 +- raw_spin_is_locked(&logbuf_lock)) {
4688 +- this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
4689 +- } else {
4690 +- this_cpu_or(printk_context, PRINTK_NMI_DEFERRED_CONTEXT_MASK);
4691 +- }
4692 ++ this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
4693 + }
4694 +
4695 + void printk_nmi_exit(void)
4696 + {
4697 +- this_cpu_and(printk_context,
4698 +- ~(PRINTK_NMI_CONTEXT_MASK |
4699 +- PRINTK_NMI_DEFERRED_CONTEXT_MASK));
4700 ++ this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
4701 ++}
4702 ++
4703 ++/*
4704 ++ * Marks a code that might produce many messages in NMI context
4705 ++ * and the risk of losing them is more critical than eventual
4706 ++ * reordering.
4707 ++ *
4708 ++ * It has effect only when called in NMI context. Then printk()
4709 ++ * will try to store the messages into the main logbuf directly
4710 ++ * and use the per-CPU buffers only as a fallback when the lock
4711 ++ * is not available.
4712 ++ */
4713 ++void printk_nmi_direct_enter(void)
4714 ++{
4715 ++ if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
4716 ++ this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
4717 ++}
4718 ++
4719 ++void printk_nmi_direct_exit(void)
4720 ++{
4721 ++ this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
4722 + }
4723 +
4724 + #else
4725 +@@ -366,6 +375,20 @@ void __printk_safe_exit(void)
4726 +
4727 + __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
4728 + {
4729 ++ /*
4730 ++ * Try to use the main logbuf even in NMI. But avoid calling console
4731 ++ * drivers that might have their own locks.
4732 ++ */
4733 ++ if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
4734 ++ raw_spin_trylock(&logbuf_lock)) {
4735 ++ int len;
4736 ++
4737 ++ len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
4738 ++ raw_spin_unlock(&logbuf_lock);
4739 ++ defer_console_output();
4740 ++ return len;
4741 ++ }
4742 ++
4743 + /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
4744 + if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
4745 + return vprintk_nmi(fmt, args);
4746 +@@ -374,13 +397,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
4747 + if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
4748 + return vprintk_safe(fmt, args);
4749 +
4750 +- /*
4751 +- * Use the main logbuf when logbuf_lock is available in NMI.
4752 +- * But avoid calling console drivers that might have their own locks.
4753 +- */
4754 +- if (this_cpu_read(printk_context) & PRINTK_NMI_DEFERRED_CONTEXT_MASK)
4755 +- return vprintk_deferred(fmt, args);
4756 +-
4757 + /* No obstacles. */
4758 + return vprintk_default(fmt, args);
4759 + }
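Net effect of the printk changes: instead of guessing per message whether the main logbuf can be used from NMI, a caller that is about to flood the log opens an explicit window with printk_nmi_direct_enter()/exit(); inside it, vprintk_func() trylocks logbuf_lock and stores messages directly, falling back to the small per-CPU buffer only when the lock is contended. Sketched usage (have_lines()/next_line() are hypothetical stand-ins; ftrace_dump() below is the real caller in this patch):

    unsigned long flags;

    local_irq_save(flags);
    printk_nmi_direct_enter();              /* effective only in NMI */
    while (have_lines())
            printk("%s\n", next_line());    /* direct to logbuf when free */
    printk_nmi_direct_exit();
    local_irq_restore(flags);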
4760 +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
4761 +index bba2217652ff..cb9a5b8532fa 100644
4762 +--- a/kernel/sched/rt.c
4763 ++++ b/kernel/sched/rt.c
4764 +@@ -837,6 +837,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
4765 + * can be time-consuming. Try to avoid it when possible.
4766 + */
4767 + raw_spin_lock(&rt_rq->rt_runtime_lock);
4768 ++ if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
4769 ++ rt_rq->rt_runtime = rt_b->rt_runtime;
4770 + skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
4771 + raw_spin_unlock(&rt_rq->rt_runtime_lock);
4772 + if (skip)
4773 +diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
4774 +index e190d1ef3a23..067cb83f37ea 100644
4775 +--- a/kernel/stop_machine.c
4776 ++++ b/kernel/stop_machine.c
4777 +@@ -81,6 +81,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
4778 + unsigned long flags;
4779 + bool enabled;
4780 +
4781 ++ preempt_disable();
4782 + raw_spin_lock_irqsave(&stopper->lock, flags);
4783 + enabled = stopper->enabled;
4784 + if (enabled)
4785 +@@ -90,6 +91,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
4786 + raw_spin_unlock_irqrestore(&stopper->lock, flags);
4787 +
4788 + wake_up_q(&wakeq);
4789 ++ preempt_enable();
4790 +
4791 + return enabled;
4792 + }
4793 +@@ -236,13 +238,24 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
4794 + struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
4795 + DEFINE_WAKE_Q(wakeq);
4796 + int err;
4797 ++
4798 + retry:
4799 ++ /*
4800 ++ * The waking up of stopper threads has to happen in the same
4801 ++ * scheduling context as the queueing. Otherwise, there is a
4802 ++ * possibility of one of the above stoppers being woken up by another
4803 ++ * CPU, and preempting us. This will cause us to not wake up the other
4804 ++ * stopper forever.
4805 ++ */
4806 ++ preempt_disable();
4807 + raw_spin_lock_irq(&stopper1->lock);
4808 + raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
4809 +
4810 +- err = -ENOENT;
4811 +- if (!stopper1->enabled || !stopper2->enabled)
4812 ++ if (!stopper1->enabled || !stopper2->enabled) {
4813 ++ err = -ENOENT;
4814 + goto unlock;
4815 ++ }
4816 ++
4817 + /*
4818 + * Ensure that if we race with __stop_cpus() the stoppers won't get
4819 + * queued up in reverse order leading to system deadlock.
4820 +@@ -253,36 +266,30 @@ retry:
4821 + * It can be falsely true but it is safe to spin until it is cleared,
4822 + * queue_stop_cpus_work() does everything under preempt_disable().
4823 + */
4824 +- err = -EDEADLK;
4825 +- if (unlikely(stop_cpus_in_progress))
4826 +- goto unlock;
4827 ++ if (unlikely(stop_cpus_in_progress)) {
4828 ++ err = -EDEADLK;
4829 ++ goto unlock;
4830 ++ }
4831 +
4832 + err = 0;
4833 + __cpu_stop_queue_work(stopper1, work1, &wakeq);
4834 + __cpu_stop_queue_work(stopper2, work2, &wakeq);
4835 +- /*
4836 +- * The waking up of stopper threads has to happen
4837 +- * in the same scheduling context as the queueing.
4838 +- * Otherwise, there is a possibility of one of the
4839 +- * above stoppers being woken up by another CPU,
4840 +- * and preempting us. This will cause us to n ot
4841 +- * wake up the other stopper forever.
4842 +- */
4843 +- preempt_disable();
4844 ++
4845 + unlock:
4846 + raw_spin_unlock(&stopper2->lock);
4847 + raw_spin_unlock_irq(&stopper1->lock);
4848 +
4849 + if (unlikely(err == -EDEADLK)) {
4850 ++ preempt_enable();
4851 ++
4852 + while (stop_cpus_in_progress)
4853 + cpu_relax();
4854 ++
4855 + goto retry;
4856 + }
4857 +
4858 +- if (!err) {
4859 +- wake_up_q(&wakeq);
4860 +- preempt_enable();
4861 +- }
4862 ++ wake_up_q(&wakeq);
4863 ++ preempt_enable();
4864 +
4865 + return err;
4866 + }
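The invariant the stop_machine hunks restore: whoever queues stopper work must stay non-preemptible until the wakeups are issued, otherwise one freshly woken stopper can preempt the queueing CPU and then spin forever waiting for its twin, which never gets woken. The fixed sequence, reduced to a skeleton (names as in the diff, not compilable on its own):

    DEFINE_WAKE_Q(wakeq);

    preempt_disable();                      /* queue and wake in one ... */
    raw_spin_lock_irq(&stopper1->lock);
    raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
    __cpu_stop_queue_work(stopper1, work1, &wakeq);
    __cpu_stop_queue_work(stopper2, work2, &wakeq);
    raw_spin_unlock(&stopper2->lock);
    raw_spin_unlock_irq(&stopper1->lock);
    wake_up_q(&wakeq);
    preempt_enable();                       /* ... scheduling context    */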
4867 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4868 +index fbc75c84076e..b7302c37c064 100644
4869 +--- a/kernel/trace/trace.c
4870 ++++ b/kernel/trace/trace.c
4871 +@@ -8187,6 +8187,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
4872 + tracing_off();
4873 +
4874 + local_irq_save(flags);
4875 ++ printk_nmi_direct_enter();
4876 +
4877 + /* Simulate the iterator */
4878 + trace_init_global_iter(&iter);
4879 +@@ -8266,7 +8267,8 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
4880 + for_each_tracing_cpu(cpu) {
4881 + atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
4882 + }
4883 +- atomic_dec(&dump_running);
4884 ++ atomic_dec(&dump_running);
4885 ++ printk_nmi_direct_exit();
4886 + local_irq_restore(flags);
4887 + }
4888 + EXPORT_SYMBOL_GPL(ftrace_dump);
4889 +diff --git a/kernel/watchdog.c b/kernel/watchdog.c
4890 +index c8e06703e44c..087994b23f8b 100644
4891 +--- a/kernel/watchdog.c
4892 ++++ b/kernel/watchdog.c
4893 +@@ -265,7 +265,7 @@ static void __touch_watchdog(void)
4894 + * entering idle state. This should only be used for scheduler events.
4895 + * Use touch_softlockup_watchdog() for everything else.
4896 + */
4897 +-void touch_softlockup_watchdog_sched(void)
4898 ++notrace void touch_softlockup_watchdog_sched(void)
4899 + {
4900 + /*
4901 + * Preemption can be enabled. It doesn't matter which CPU's timestamp
4902 +@@ -274,7 +274,7 @@ void touch_softlockup_watchdog_sched(void)
4903 + raw_cpu_write(watchdog_touch_ts, 0);
4904 + }
4905 +
4906 +-void touch_softlockup_watchdog(void)
4907 ++notrace void touch_softlockup_watchdog(void)
4908 + {
4909 + touch_softlockup_watchdog_sched();
4910 + wq_watchdog_touch(raw_smp_processor_id());
4911 +diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
4912 +index e449a23e9d59..4ece6028007a 100644
4913 +--- a/kernel/watchdog_hld.c
4914 ++++ b/kernel/watchdog_hld.c
4915 +@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask;
4916 + static unsigned long hardlockup_allcpu_dumped;
4917 + static atomic_t watchdog_cpus = ATOMIC_INIT(0);
4918 +
4919 +-void arch_touch_nmi_watchdog(void)
4920 ++notrace void arch_touch_nmi_watchdog(void)
4921 + {
4922 + /*
4923 + * Using __raw here because some code paths have
4924 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
4925 +index d8a7f8939c81..08bc551976b2 100644
4926 +--- a/kernel/workqueue.c
4927 ++++ b/kernel/workqueue.c
4928 +@@ -5484,7 +5484,7 @@ static void wq_watchdog_timer_fn(unsigned long data)
4929 + mod_timer(&wq_watchdog_timer, jiffies + thresh);
4930 + }
4931 +
4932 +-void wq_watchdog_touch(int cpu)
4933 ++notrace void wq_watchdog_touch(int cpu)
4934 + {
4935 + if (cpu >= 0)
4936 + per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
4937 +diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
4938 +index 46e4c749e4eb..70b1f9d830cd 100644
4939 +--- a/lib/nmi_backtrace.c
4940 ++++ b/lib/nmi_backtrace.c
4941 +@@ -87,11 +87,9 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
4942 +
4943 + bool nmi_cpu_backtrace(struct pt_regs *regs)
4944 + {
4945 +- static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
4946 + int cpu = smp_processor_id();
4947 +
4948 + if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
4949 +- arch_spin_lock(&lock);
4950 + if (regs && cpu_in_idle(instruction_pointer(regs))) {
4951 + pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
4952 + cpu, instruction_pointer(regs));
4953 +@@ -102,7 +100,6 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
4954 + else
4955 + dump_stack();
4956 + }
4957 +- arch_spin_unlock(&lock);
4958 + cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
4959 + return true;
4960 + }
4961 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4962 +index db69d938e9ed..6a9a7e1066ef 100644
4963 +--- a/mm/memcontrol.c
4964 ++++ b/mm/memcontrol.c
4965 +@@ -4110,6 +4110,14 @@ static struct cftype mem_cgroup_legacy_files[] = {
4966 +
4967 + static DEFINE_IDR(mem_cgroup_idr);
4968 +
4969 ++static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
4970 ++{
4971 ++ if (memcg->id.id > 0) {
4972 ++ idr_remove(&mem_cgroup_idr, memcg->id.id);
4973 ++ memcg->id.id = 0;
4974 ++ }
4975 ++}
4976 ++
4977 + static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4978 + {
4979 + VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
4980 +@@ -4120,8 +4128,7 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4981 + {
4982 + VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
4983 + if (atomic_sub_and_test(n, &memcg->id.ref)) {
4984 +- idr_remove(&mem_cgroup_idr, memcg->id.id);
4985 +- memcg->id.id = 0;
4986 ++ mem_cgroup_id_remove(memcg);
4987 +
4988 + /* Memcg ID pins CSS */
4989 + css_put(&memcg->css);
4990 +@@ -4258,8 +4265,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
4991 + idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
4992 + return memcg;
4993 + fail:
4994 +- if (memcg->id.id > 0)
4995 +- idr_remove(&mem_cgroup_idr, memcg->id.id);
4996 ++ mem_cgroup_id_remove(memcg);
4997 + __mem_cgroup_free(memcg);
4998 + return NULL;
4999 + }
5000 +@@ -4318,6 +4324,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5001 +
5002 + return &memcg->css;
5003 + fail:
5004 ++ mem_cgroup_id_remove(memcg);
5005 + mem_cgroup_free(memcg);
5006 + return ERR_PTR(-ENOMEM);
5007 + }
5008 +diff --git a/mm/memory.c b/mm/memory.c
5009 +index 5539b1975091..c9657f013a4d 100644
5010 +--- a/mm/memory.c
5011 ++++ b/mm/memory.c
5012 +@@ -246,9 +246,6 @@ static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
5013 +
5014 + tlb_flush(tlb);
5015 + mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
5016 +-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
5017 +- tlb_table_flush(tlb);
5018 +-#endif
5019 + __tlb_reset_range(tlb);
5020 + }
5021 +
5022 +@@ -256,6 +253,9 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
5023 + {
5024 + struct mmu_gather_batch *batch;
5025 +
5026 ++#ifdef CONFIG_HAVE_RCU_TABLE_FREE
5027 ++ tlb_table_flush(tlb);
5028 ++#endif
5029 + for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
5030 + free_pages_and_swap_cache(batch->pages, batch->nr);
5031 + batch->nr = 0;
5032 +@@ -331,6 +331,21 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
5033 + * See the comment near struct mmu_table_batch.
5034 + */
5035 +
5036 ++/*
5037 ++ * If we want tlb_remove_table() to imply TLB invalidates.
5038 ++ */
5039 ++static inline void tlb_table_invalidate(struct mmu_gather *tlb)
5040 ++{
5041 ++#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
5042 ++ /*
5043 ++ * Invalidate page-table caches used by hardware walkers. Then we still
5044 ++ * need to RCU-sched wait while freeing the pages because software
5045 ++ * walkers can still be in-flight.
5046 ++ */
5047 ++ tlb_flush_mmu_tlbonly(tlb);
5048 ++#endif
5049 ++}
5050 ++
5051 + static void tlb_remove_table_smp_sync(void *arg)
5052 + {
5053 + /* Simply deliver the interrupt */
5054 +@@ -367,6 +382,7 @@ void tlb_table_flush(struct mmu_gather *tlb)
5055 + struct mmu_table_batch **batch = &tlb->batch;
5056 +
5057 + if (*batch) {
5058 ++ tlb_table_invalidate(tlb);
5059 + call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
5060 + *batch = NULL;
5061 + }
5062 +@@ -388,11 +404,13 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
5063 + if (*batch == NULL) {
5064 + *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
5065 + if (*batch == NULL) {
5066 ++ tlb_table_invalidate(tlb);
5067 + tlb_remove_table_one(table);
5068 + return;
5069 + }
5070 + (*batch)->nr = 0;
5071 + }
5072 ++
5073 + (*batch)->tables[(*batch)->nr++] = table;
5074 + if ((*batch)->nr == MAX_TABLE_BATCH)
5075 + tlb_table_flush(tlb);
5076 +@@ -1417,11 +1435,9 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
5077 + do {
5078 + next = pmd_addr_end(addr, end);
5079 + if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
5080 +- if (next - addr != HPAGE_PMD_SIZE) {
5081 +- VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
5082 +- !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
5083 ++ if (next - addr != HPAGE_PMD_SIZE)
5084 + __split_huge_pmd(vma, pmd, addr, false, NULL);
5085 +- } else if (zap_huge_pmd(tlb, vma, pmd, addr))
5086 ++ else if (zap_huge_pmd(tlb, vma, pmd, addr))
5087 + goto next;
5088 + /* fall through */
5089 + }
5090 +@@ -4350,6 +4366,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5091 + return -EINVAL;
5092 +
5093 + maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
5094 ++ if (!maddr)
5095 ++ return -ENOMEM;
5096 ++
5097 + if (write)
5098 + memcpy_toio(maddr + offset, buf, len);
5099 + else
5100 +diff --git a/mm/zswap.c b/mm/zswap.c
5101 +index 597008a44f70..ebb0bc88c5f7 100644
5102 +--- a/mm/zswap.c
5103 ++++ b/mm/zswap.c
5104 +@@ -989,6 +989,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
5105 + ret = -ENOMEM;
5106 + goto reject;
5107 + }
5108 ++
5109 ++ /* A second zswap_is_full() check after
5110 ++ * zswap_shrink() to make sure it's now
5111 ++ * under the max_pool_percent
5112 ++ */
5113 ++ if (zswap_is_full()) {
5114 ++ ret = -ENOMEM;
5115 ++ goto reject;
5116 ++ }
5117 + }
5118 +
5119 + /* allocate entry */
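The second zswap_is_full() test is needed because zswap_shrink() writes back only one entry per call: it can succeed while the pool is still above max_pool_percent, and the old code would then admit the new page anyway. Condensed store-path logic after the change:

    if (zswap_is_full()) {
            zswap_pool_limit_hit++;
            if (zswap_shrink()) {           /* writeback failed outright */
                    ret = -ENOMEM;
                    goto reject;
            }
            if (zswap_is_full()) {          /* shrink freed too little */
                    ret = -ENOMEM;
                    goto reject;
            }
    }
    /* proceed to allocate the entry */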
5120 +diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
5121 +index 2d38b6e34203..98b62a7990aa 100644
5122 +--- a/net/caif/caif_dev.c
5123 ++++ b/net/caif/caif_dev.c
5124 +@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
5125 + caifd = caif_get(skb->dev);
5126 +
5127 + WARN_ON(caifd == NULL);
5128 +- if (caifd == NULL)
5129 ++ if (!caifd) {
5130 ++ rcu_read_unlock();
5131 + return;
5132 ++ }
5133 +
5134 + caifd_hold(caifd);
5135 + rcu_read_unlock();
5136 +diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
5137 +index 1307731ddfe4..832d69649cb6 100644
5138 +--- a/net/core/lwt_bpf.c
5139 ++++ b/net/core/lwt_bpf.c
5140 +@@ -217,7 +217,7 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
5141 + if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
5142 + return -EINVAL;
5143 +
5144 +- prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL);
5145 ++ prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
5146 + if (!prog->name)
5147 + return -ENOMEM;
5148 +
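The GFP_KERNEL to GFP_ATOMIC switch here is evidently because this parse path can be entered in a context that must not sleep (for example under RCU read-side protection); a sleeping allocation there triggers a "sleeping function called from invalid context" splat. The rule in miniature (sketch, context assumed):

    rcu_read_lock();
    /* may not sleep here: GFP_ATOMIC, never GFP_KERNEL */
    prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
    if (!prog->name)
            err = -ENOMEM;
    rcu_read_unlock();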
5149 +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
5150 +index 89910e2c10f4..f112fef79216 100644
5151 +--- a/net/ipv6/esp6.c
5152 ++++ b/net/ipv6/esp6.c
5153 +@@ -651,8 +651,10 @@ skip_cow:
5154 +
5155 + sg_init_table(sg, nfrags);
5156 + ret = skb_to_sgvec(skb, sg, 0, skb->len);
5157 +- if (unlikely(ret < 0))
5158 ++ if (unlikely(ret < 0)) {
5159 ++ kfree(tmp);
5160 + goto out;
5161 ++ }
5162 +
5163 + skb->ip_summed = CHECKSUM_NONE;
5164 +
5165 +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
5166 +index 0e0ab90a4334..b9e638cc955f 100644
5167 +--- a/net/ipv6/ip6_vti.c
5168 ++++ b/net/ipv6/ip6_vti.c
5169 +@@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
5170 + goto tx_err_dst_release;
5171 + }
5172 +
5173 +- skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
5174 +- skb_dst_set(skb, dst);
5175 +- skb->dev = skb_dst(skb)->dev;
5176 +-
5177 + mtu = dst_mtu(dst);
5178 + if (!skb->ignore_df && skb->len > mtu) {
5179 + skb_dst_update_pmtu(skb, mtu);
5180 +@@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
5181 + htonl(mtu));
5182 + }
5183 +
5184 +- return -EMSGSIZE;
5185 ++ err = -EMSGSIZE;
5186 ++ goto tx_err_dst_release;
5187 + }
5188 +
5189 ++ skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
5190 ++ skb_dst_set(skb, dst);
5191 ++ skb->dev = skb_dst(skb)->dev;
5192 ++
5193 + err = dst_output(t->net, skb->sk, skb);
5194 + if (net_xmit_eval(err) == 0) {
5195 + struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
5196 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
5197 +index 6aef6793d052..81f120466c38 100644
5198 +--- a/net/mac80211/util.c
5199 ++++ b/net/mac80211/util.c
5200 +@@ -2068,7 +2068,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
5201 + if (!sta->uploaded)
5202 + continue;
5203 +
5204 +- if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
5205 ++ if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
5206 ++ sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
5207 + continue;
5208 +
5209 + for (state = IEEE80211_STA_NOTEXIST;
5210 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5211 +index 9a945024a0b6..742aacb317e5 100644
5212 +--- a/net/netfilter/nf_tables_api.c
5213 ++++ b/net/netfilter/nf_tables_api.c
5214 +@@ -1480,7 +1480,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
5215 + struct nft_base_chain *basechain;
5216 + struct nft_stats *stats = NULL;
5217 + struct nft_chain_hook hook;
5218 +- const struct nlattr *name;
5219 + struct nf_hook_ops *ops;
5220 + struct nft_trans *trans;
5221 + int err, i;
5222 +@@ -1531,12 +1530,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
5223 + return PTR_ERR(stats);
5224 + }
5225 +
5226 ++ err = -ENOMEM;
5227 + trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
5228 + sizeof(struct nft_trans_chain));
5229 +- if (trans == NULL) {
5230 +- free_percpu(stats);
5231 +- return -ENOMEM;
5232 +- }
5233 ++ if (trans == NULL)
5234 ++ goto err;
5235 +
5236 + nft_trans_chain_stats(trans) = stats;
5237 + nft_trans_chain_update(trans) = true;
5238 +@@ -1546,19 +1544,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
5239 + else
5240 + nft_trans_chain_policy(trans) = -1;
5241 +
5242 +- name = nla[NFTA_CHAIN_NAME];
5243 +- if (nla[NFTA_CHAIN_HANDLE] && name) {
5244 +- nft_trans_chain_name(trans) =
5245 +- nla_strdup(name, GFP_KERNEL);
5246 +- if (!nft_trans_chain_name(trans)) {
5247 +- kfree(trans);
5248 +- free_percpu(stats);
5249 +- return -ENOMEM;
5250 ++ if (nla[NFTA_CHAIN_HANDLE] &&
5251 ++ nla[NFTA_CHAIN_NAME]) {
5252 ++ struct nft_trans *tmp;
5253 ++ char *name;
5254 ++
5255 ++ err = -ENOMEM;
5256 ++ name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
5257 ++ if (!name)
5258 ++ goto err;
5259 ++
5260 ++ err = -EEXIST;
5261 ++ list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) {
5262 ++ if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
5263 ++ tmp->ctx.table == table &&
5264 ++ nft_trans_chain_update(tmp) &&
5265 ++ nft_trans_chain_name(tmp) &&
5266 ++ strcmp(name, nft_trans_chain_name(tmp)) == 0) {
5267 ++ kfree(name);
5268 ++ goto err;
5269 ++ }
5270 + }
5271 ++
5272 ++ nft_trans_chain_name(trans) = name;
5273 + }
5274 + list_add_tail(&trans->list, &ctx->net->nft.commit_list);
5275 +
5276 + return 0;
5277 ++err:
5278 ++ free_percpu(stats);
5279 ++ kfree(trans);
5280 ++ return err;
5281 + }
5282 +
5283 + static int nf_tables_newchain(struct net *net, struct sock *nlsk,
5284 +@@ -5043,6 +5059,9 @@ static void nf_tables_commit_release(struct nft_trans *trans)
5285 + case NFT_MSG_DELTABLE:
5286 + nf_tables_table_destroy(&trans->ctx);
5287 + break;
5288 ++ case NFT_MSG_NEWCHAIN:
5289 ++ kfree(nft_trans_chain_name(trans));
5290 ++ break;
5291 + case NFT_MSG_DELCHAIN:
5292 + nf_tables_chain_destroy(trans->ctx.chain);
5293 + break;
5294 +@@ -5100,13 +5119,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
5295 + nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
5296 + break;
5297 + case NFT_MSG_NEWCHAIN:
5298 +- if (nft_trans_chain_update(trans))
5299 ++ if (nft_trans_chain_update(trans)) {
5300 + nft_chain_commit_update(trans);
5301 +- else
5302 ++ nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
5303 ++ /* trans destroyed after rcu grace period */
5304 ++ } else {
5305 + nft_clear(net, trans->ctx.chain);
5306 +-
5307 +- nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
5308 +- nft_trans_destroy(trans);
5309 ++ nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
5310 ++ nft_trans_destroy(trans);
5311 ++ }
5312 + break;
5313 + case NFT_MSG_DELCHAIN:
5314 + list_del_rcu(&trans->ctx.chain->list);
5315 +@@ -5246,7 +5267,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
5316 + case NFT_MSG_NEWCHAIN:
5317 + if (nft_trans_chain_update(trans)) {
5318 + free_percpu(nft_trans_chain_stats(trans));
5319 +-
5320 ++ kfree(nft_trans_chain_name(trans));
5321 + nft_trans_destroy(trans);
5322 + } else {
5323 + trans->ctx.table->use--;
5324 +diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
5325 +index 9c0d5a7ce5f9..33aa2ac3a62e 100644
5326 +--- a/net/netfilter/nft_set_hash.c
5327 ++++ b/net/netfilter/nft_set_hash.c
5328 +@@ -359,6 +359,7 @@ static void nft_rhash_destroy(const struct nft_set *set)
5329 + struct nft_rhash *priv = nft_set_priv(set);
5330 +
5331 + cancel_delayed_work_sync(&priv->gc_work);
5332 ++ rcu_barrier();
5333 + rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
5334 + (void *)set);
5335 + }
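cancel_delayed_work_sync() only guarantees the GC worker is no longer running; callbacks it already handed to call_rcu() may still be pending, and they touch the rhashtable. rcu_barrier() waits for every queued RCU callback to finish before the table is torn down. The teardown idiom in general form (sketch with generic names):

    /* earlier, possibly many times, from the GC worker: */
    call_rcu(&elem->rcu, free_elem_cb);

    /* teardown: */
    cancel_delayed_work_sync(&gc_work);  /* no new callbacks get queued */
    rcu_barrier();                       /* all queued callbacks have run */
    rhashtable_destroy(&ht);             /* backing store now unreferenced */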
5336 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5337 +index 4cd351b74e48..753f3e73c498 100644
5338 +--- a/net/wireless/nl80211.c
5339 ++++ b/net/wireless/nl80211.c
5340 +@@ -4186,6 +4186,7 @@ static int parse_station_flags(struct genl_info *info,
5341 + params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
5342 + BIT(NL80211_STA_FLAG_MFP) |
5343 + BIT(NL80211_STA_FLAG_AUTHORIZED);
5344 ++ break;
5345 + default:
5346 + return -EINVAL;
5347 + }
5348 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
5349 +index 9c57d6a5816c..a6c0027cadb5 100644
5350 +--- a/net/xfrm/xfrm_policy.c
5351 ++++ b/net/xfrm/xfrm_policy.c
5352 +@@ -2285,6 +2285,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
5353 + if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
5354 + return make_blackhole(net, dst_orig->ops->family, dst_orig);
5355 +
5356 ++ if (IS_ERR(dst))
5357 ++ dst_release(dst_orig);
5358 ++
5359 + return dst;
5360 + }
5361 + EXPORT_SYMBOL(xfrm_lookup_route);
5362 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
5363 +index dde40f995ac0..5554d28a32eb 100644
5364 +--- a/net/xfrm/xfrm_user.c
5365 ++++ b/net/xfrm/xfrm_user.c
5366 +@@ -1021,10 +1021,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
5367 + {
5368 + struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
5369 +
5370 +- if (nlsk)
5371 +- return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
5372 +- else
5373 +- return -1;
5374 ++ if (!nlsk) {
5375 ++ kfree_skb(skb);
5376 ++ return -EPIPE;
5377 ++ }
5378 ++
5379 ++ return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
5380 + }
5381 +
5382 + static inline size_t xfrm_spdinfo_msgsize(void)
5383 +diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
5384 +index ffd1dfaa1cc1..f46750053377 100644
5385 +--- a/scripts/gcc-plugins/gcc-common.h
5386 ++++ b/scripts/gcc-plugins/gcc-common.h
5387 +@@ -97,6 +97,10 @@
5388 + #include "predict.h"
5389 + #include "ipa-utils.h"
5390 +
5391 ++#if BUILDING_GCC_VERSION >= 8000
5392 ++#include "stringpool.h"
5393 ++#endif
5394 ++
5395 + #if BUILDING_GCC_VERSION >= 4009
5396 + #include "attribs.h"
5397 + #include "varasm.h"
5398 +diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
5399 +index 65264960910d..cbe1d6c4b1a5 100644
5400 +--- a/scripts/gcc-plugins/latent_entropy_plugin.c
5401 ++++ b/scripts/gcc-plugins/latent_entropy_plugin.c
5402 +@@ -255,21 +255,14 @@ static tree handle_latent_entropy_attribute(tree *node, tree name,
5403 + return NULL_TREE;
5404 + }
5405 +
5406 +-static struct attribute_spec latent_entropy_attr = {
5407 +- .name = "latent_entropy",
5408 +- .min_length = 0,
5409 +- .max_length = 0,
5410 +- .decl_required = true,
5411 +- .type_required = false,
5412 +- .function_type_required = false,
5413 +- .handler = handle_latent_entropy_attribute,
5414 +-#if BUILDING_GCC_VERSION >= 4007
5415 +- .affects_type_identity = false
5416 +-#endif
5417 +-};
5418 ++static struct attribute_spec latent_entropy_attr = { };
5419 +
5420 + static void register_attributes(void *event_data __unused, void *data __unused)
5421 + {
5422 ++ latent_entropy_attr.name = "latent_entropy";
5423 ++ latent_entropy_attr.decl_required = true;
5424 ++ latent_entropy_attr.handler = handle_latent_entropy_attribute;
5425 ++
5426 + register_attribute(&latent_entropy_attr);
5427 + }
5428 +
5429 +diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
5430 +index 0073af326449..c4a345c3715b 100644
5431 +--- a/scripts/gcc-plugins/randomize_layout_plugin.c
5432 ++++ b/scripts/gcc-plugins/randomize_layout_plugin.c
5433 +@@ -580,68 +580,35 @@ static void finish_type(void *event_data, void *data)
5434 + return;
5435 + }
5436 +
5437 +-static struct attribute_spec randomize_layout_attr = {
5438 +- .name = "randomize_layout",
5439 +- // related to args
5440 +- .min_length = 0,
5441 +- .max_length = 0,
5442 +- .decl_required = false,
5443 +- // need type declaration
5444 +- .type_required = true,
5445 +- .function_type_required = false,
5446 +- .handler = handle_randomize_layout_attr,
5447 +-#if BUILDING_GCC_VERSION >= 4007
5448 +- .affects_type_identity = true
5449 +-#endif
5450 +-};
5451 ++static struct attribute_spec randomize_layout_attr = { };
5452 ++static struct attribute_spec no_randomize_layout_attr = { };
5453 ++static struct attribute_spec randomize_considered_attr = { };
5454 ++static struct attribute_spec randomize_performed_attr = { };
5455 +
5456 +-static struct attribute_spec no_randomize_layout_attr = {
5457 +- .name = "no_randomize_layout",
5458 +- // related to args
5459 +- .min_length = 0,
5460 +- .max_length = 0,
5461 +- .decl_required = false,
5462 +- // need type declaration
5463 +- .type_required = true,
5464 +- .function_type_required = false,
5465 +- .handler = handle_randomize_layout_attr,
5466 ++static void register_attributes(void *event_data, void *data)
5467 ++{
5468 ++ randomize_layout_attr.name = "randomize_layout";
5469 ++ randomize_layout_attr.type_required = true;
5470 ++ randomize_layout_attr.handler = handle_randomize_layout_attr;
5471 + #if BUILDING_GCC_VERSION >= 4007
5472 +- .affects_type_identity = true
5473 ++ randomize_layout_attr.affects_type_identity = true;
5474 + #endif
5475 +-};
5476 +
5477 +-static struct attribute_spec randomize_considered_attr = {
5478 +- .name = "randomize_considered",
5479 +- // related to args
5480 +- .min_length = 0,
5481 +- .max_length = 0,
5482 +- .decl_required = false,
5483 +- // need type declaration
5484 +- .type_required = true,
5485 +- .function_type_required = false,
5486 +- .handler = handle_randomize_considered_attr,
5487 ++ no_randomize_layout_attr.name = "no_randomize_layout";
5488 ++ no_randomize_layout_attr.type_required = true;
5489 ++ no_randomize_layout_attr.handler = handle_randomize_layout_attr;
5490 + #if BUILDING_GCC_VERSION >= 4007
5491 +- .affects_type_identity = false
5492 ++ no_randomize_layout_attr.affects_type_identity = true;
5493 + #endif
5494 +-};
5495 +
5496 +-static struct attribute_spec randomize_performed_attr = {
5497 +- .name = "randomize_performed",
5498 +- // related to args
5499 +- .min_length = 0,
5500 +- .max_length = 0,
5501 +- .decl_required = false,
5502 +- // need type declaration
5503 +- .type_required = true,
5504 +- .function_type_required = false,
5505 +- .handler = handle_randomize_performed_attr,
5506 +-#if BUILDING_GCC_VERSION >= 4007
5507 +- .affects_type_identity = false
5508 +-#endif
5509 +-};
5510 ++ randomize_considered_attr.name = "randomize_considered";
5511 ++ randomize_considered_attr.type_required = true;
5512 ++ randomize_considered_attr.handler = handle_randomize_considered_attr;
5513 ++
5514 ++ randomize_performed_attr.name = "randomize_performed";
5515 ++ randomize_performed_attr.type_required = true;
5516 ++ randomize_performed_attr.handler = handle_randomize_performed_attr;
5517 +
5518 +-static void register_attributes(void *event_data, void *data)
5519 +-{
5520 + register_attribute(&randomize_layout_attr);
5521 + register_attribute(&no_randomize_layout_attr);
5522 + register_attribute(&randomize_considered_attr);
5523 +diff --git a/scripts/gcc-plugins/structleak_plugin.c b/scripts/gcc-plugins/structleak_plugin.c
5524 +index 3f8dd4868178..10292f791e99 100644
5525 +--- a/scripts/gcc-plugins/structleak_plugin.c
5526 ++++ b/scripts/gcc-plugins/structleak_plugin.c
5527 +@@ -57,21 +57,16 @@ static tree handle_user_attribute(tree *node, tree name, tree args, int flags, b
5528 + return NULL_TREE;
5529 + }
5530 +
5531 +-static struct attribute_spec user_attr = {
5532 +- .name = "user",
5533 +- .min_length = 0,
5534 +- .max_length = 0,
5535 +- .decl_required = false,
5536 +- .type_required = false,
5537 +- .function_type_required = false,
5538 +- .handler = handle_user_attribute,
5539 +-#if BUILDING_GCC_VERSION >= 4007
5540 +- .affects_type_identity = true
5541 +-#endif
5542 +-};
5543 ++static struct attribute_spec user_attr = { };
5544 +
5545 + static void register_attributes(void *event_data, void *data)
5546 + {
5547 ++ user_attr.name = "user";
5548 ++ user_attr.handler = handle_user_attribute;
5549 ++#if BUILDING_GCC_VERSION >= 4007
5550 ++ user_attr.affects_type_identity = true;
5551 ++#endif
5552 ++
5553 + register_attribute(&user_attr);
5554 + }
5555 +
5556 +diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c
5557 +index 77e7dcf969d0..d70fcd4a1adf 100644
5558 +--- a/sound/soc/sirf/sirf-usp.c
5559 ++++ b/sound/soc/sirf/sirf-usp.c
5560 +@@ -370,10 +370,9 @@ static int sirf_usp_pcm_probe(struct platform_device *pdev)
5561 + platform_set_drvdata(pdev, usp);
5562 +
5563 + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5564 +- base = devm_ioremap(&pdev->dev, mem_res->start,
5565 +- resource_size(mem_res));
5566 +- if (base == NULL)
5567 +- return -ENOMEM;
5568 ++ base = devm_ioremap_resource(&pdev->dev, mem_res);
5569 ++ if (IS_ERR(base))
5570 ++ return PTR_ERR(base);
5571 + usp->regmap = devm_regmap_init_mmio(&pdev->dev, base,
5572 + &sirf_usp_regmap_config);
5573 + if (IS_ERR(usp->regmap))
5574 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
5575 +index 3d0dab8282ad..6fc85199ac73 100644
5576 +--- a/sound/soc/soc-pcm.c
5577 ++++ b/sound/soc/soc-pcm.c
5578 +@@ -1607,6 +1607,14 @@ static u64 dpcm_runtime_base_format(struct snd_pcm_substream *substream)
5579 + int i;
5580 +
5581 + for (i = 0; i < be->num_codecs; i++) {
5582 ++ /*
5583 ++ * Skip CODECs which don't support the current stream
5584 ++ * type. See soc_pcm_init_runtime_hw() for more details
5585 ++ */
5586 ++ if (!snd_soc_dai_stream_valid(be->codec_dais[i],
5587 ++ stream))
5588 ++ continue;
5589 ++
5590 + codec_dai_drv = be->codec_dais[i]->driver;
5591 + if (stream == SNDRV_PCM_STREAM_PLAYBACK)
5592 + codec_stream = &codec_dai_drv->playback;
5593 +diff --git a/sound/soc/zte/zx-tdm.c b/sound/soc/zte/zx-tdm.c
5594 +index dc955272f58b..389272eeba9a 100644
5595 +--- a/sound/soc/zte/zx-tdm.c
5596 ++++ b/sound/soc/zte/zx-tdm.c
5597 +@@ -144,8 +144,8 @@ static void zx_tdm_rx_dma_en(struct zx_tdm_info *tdm, bool on)
5598 + #define ZX_TDM_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000)
5599 +
5600 + #define ZX_TDM_FMTBIT \
5601 +- (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_MU_LAW | \
5602 +- SNDRV_PCM_FORMAT_A_LAW)
5603 ++ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_MU_LAW | \
5604 ++ SNDRV_PCM_FMTBIT_A_LAW)
5605 +
5606 + static int zx_tdm_dai_probe(struct snd_soc_dai *dai)
5607 + {
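The zx-tdm fix corrects a classic ALSA confusion: SNDRV_PCM_FORMAT_* constants are enum indexes, while the formats field is a bitmask built from SNDRV_PCM_FMTBIT_* values, where FMTBIT_x == (1ULL << FORMAT_x). OR-ing raw indexes into the mask therefore advertised a few unrelated low-numbered formats instead of mu-law/A-law:

    /* wrong: mixes one mask bit with two enum indexes */
    (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_MU_LAW | SNDRV_PCM_FORMAT_A_LAW)

    /* right: all three are (1ULL << index) mask bits */
    (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW)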
5608 +diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
5609 +index bd9c6b31a504..1512086c8cb8 100644
5610 +--- a/tools/power/x86/turbostat/turbostat.c
5611 ++++ b/tools/power/x86/turbostat/turbostat.c
5612 +@@ -1038,9 +1038,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_
5613 + if (!printed || !summary_only)
5614 + print_header("\t");
5615 +
5616 +- if (topo.num_cpus > 1)
5617 +- format_counters(&average.threads, &average.cores,
5618 +- &average.packages);
5619 ++ format_counters(&average.threads, &average.cores, &average.packages);
5620 +
5621 + printed = 1;
5622 +
5623 +@@ -4031,7 +4029,9 @@ void process_cpuid()
5624 + family = (fms >> 8) & 0xf;
5625 + model = (fms >> 4) & 0xf;
5626 + stepping = fms & 0xf;
5627 +- if (family == 6 || family == 0xf)
5628 ++ if (family == 0xf)
5629 ++ family += (fms >> 20) & 0xff;
5630 ++ if (family >= 6)
5631 + model += ((fms >> 16) & 0xf) << 4;
5632 +
5633 + if (!quiet) {
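The CPUID rework decodes the extended family field (added to the base family when it reads 0xf) and applies the extended model bits to any family >= 6 rather than only families 6 and 0xf. Worked example, assuming the Kaby Lake-R signature fms = 0x806ea:

    stepping =  fms        & 0xf;           /* = 0xa  */
    model    = (fms >>  4) & 0xf;           /* = 0xe  */
    family   = (fms >>  8) & 0xf;           /* = 6    */
    /* family != 0xf, so no extended-family add-on;
     * family >= 6, so the extended model applies: */
    model   += ((fms >> 16) & 0xf) << 4;    /* = 0x8e */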
5634 +diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
5635 +new file mode 100644
5636 +index 000000000000..3b1f45e13a2e
5637 +--- /dev/null
5638 ++++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
5639 +@@ -0,0 +1,28 @@
5640 ++#!/bin/sh
5641 ++# description: Snapshot and tracing setting
5642 ++# flags: instance
5643 ++
5644 ++[ ! -f snapshot ] && exit_unsupported
5645 ++
5646 ++echo "Set tracing off"
5647 ++echo 0 > tracing_on
5648 ++
5649 ++echo "Allocate and take a snapshot"
5650 ++echo 1 > snapshot
5651 ++
5652 ++# Since trace buffer is empty, snapshot is also empty, but allocated
5653 ++grep -q "Snapshot is allocated" snapshot
5654 ++
5655 ++echo "Ensure keep tracing off"
5656 ++test `cat tracing_on` -eq 0
5657 ++
5658 ++echo "Set tracing on"
5659 ++echo 1 > tracing_on
5660 ++
5661 ++echo "Take a snapshot again"
5662 ++echo 1 > snapshot
5663 ++
5664 ++echo "Ensure keep tracing on"
5665 ++test `cat tracing_on` -eq 1
5666 ++
5667 ++exit 0
5668 +diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
5669 +index 95dd14648ba5..0f395dfb7774 100644
5670 +--- a/tools/usb/ffs-test.c
5671 ++++ b/tools/usb/ffs-test.c
5672 +@@ -44,12 +44,25 @@
5673 +
5674 + /******************** Little Endian Handling ********************************/
5675 +
5676 +-#define cpu_to_le16(x) htole16(x)
5677 +-#define cpu_to_le32(x) htole32(x)
5678 ++/*
5679 ++ * cpu_to_le16/32 are used when initializing structures, a context where a
5680 ++ * function call is not allowed. To solve this, we code cpu_to_le16/32 in a way
5681 ++ * that allows them to be used when initializing structures.
5682 ++ */
5683 ++
5684 ++#if __BYTE_ORDER == __LITTLE_ENDIAN
5685 ++#define cpu_to_le16(x) (x)
5686 ++#define cpu_to_le32(x) (x)
5687 ++#else
5688 ++#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
5689 ++#define cpu_to_le32(x) \
5690 ++ ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
5691 ++ (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24))
5692 ++#endif
5693 ++
5694 + #define le32_to_cpu(x) le32toh(x)
5695 + #define le16_to_cpu(x) le16toh(x)
5696 +
5697 +-
5698 + /******************** Messages and Errors ***********************************/
5699 +
5700 + static const char argv0[] = "ffs-test";
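The open-coded byte swaps replace htole16()/htole32() because those may expand to function calls depending on the libc, and ffs-test uses cpu_to_le16/32 inside static initializers, which require constant expressions; the patch's own comment says as much. For illustration (values hypothetical):

    /* legal at file scope now: the macros fold to integer constants */
    static const struct {
            __le32 magic;
            __le16 length;
    } header = {
            .magic  = cpu_to_le32(0x46465331),
            .length = cpu_to_le16(sizeof(header)),
    };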
5701 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
5702 +index b69798a7880e..ec275b8472a9 100644
5703 +--- a/virt/kvm/arm/mmu.c
5704 ++++ b/virt/kvm/arm/mmu.c
5705 +@@ -901,19 +901,35 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
5706 + pmd = stage2_get_pmd(kvm, cache, addr);
5707 + VM_BUG_ON(!pmd);
5708 +
5709 +- /*
5710 +- * Mapping in huge pages should only happen through a fault. If a
5711 +- * page is merged into a transparent huge page, the individual
5712 +- * subpages of that huge page should be unmapped through MMU
5713 +- * notifiers before we get here.
5714 +- *
5715 +- * Merging of CompoundPages is not supported; they should become
5716 +- * splitting first, unmapped, merged, and mapped back in on-demand.
5717 +- */
5718 +- VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
5719 +-
5720 + old_pmd = *pmd;
5721 + if (pmd_present(old_pmd)) {
5722 ++ /*
5723 ++ * Multiple vcpus faulting on the same PMD entry, can
5724 ++ * lead to them sequentially updating the PMD with the
5725 ++ * same value. Following the break-before-make
5726 ++ * (pmd_clear() followed by tlb_flush()) process can
5727 ++ * hinder forward progress due to refaults generated
5728 ++ * on missing translations.
5729 ++ *
5730 ++ * Skip updating the page table if the entry is
5731 ++ * unchanged.
5732 ++ */
5733 ++ if (pmd_val(old_pmd) == pmd_val(*new_pmd))
5734 ++ return 0;
5735 ++
5736 ++ /*
5737 ++ * Mapping in huge pages should only happen through a
5738 ++ * fault. If a page is merged into a transparent huge
5739 ++ * page, the individual subpages of that huge page
5740 ++ * should be unmapped through MMU notifiers before we
5741 ++ * get here.
5742 ++ *
5743 ++ * Merging of CompoundPages is not supported; they
5744 ++ * should become splitting first, unmapped, merged,
5745 ++ * and mapped back in on-demand.
5746 ++ */
5747 ++ VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
5748 ++
5749 + pmd_clear(pmd);
5750 + kvm_tlb_flush_vmid_ipa(kvm, addr);
5751 + } else {
5752 +@@ -969,6 +985,10 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
5753 + /* Create 2nd stage page table mapping - Level 3 */
5754 + old_pte = *pte;
5755 + if (pte_present(old_pte)) {
5756 ++ /* Skip page table update if there is no change */
5757 ++ if (pte_val(old_pte) == pte_val(*new_pte))
5758 ++ return 0;
5759 ++
5760 + kvm_set_pte(pte, __pte(0));
5761 + kvm_tlb_flush_vmid_ipa(kvm, addr);
5762 + } else {
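Both stage-2 hunks implement one rule: with break-before-make, rewriting an entry that already holds the wanted translation buys nothing and costs refaults on every vcpu racing through the same fault, so identical updates are now skipped before the clear-and-flush. The common shape, sketched for the PTE case with names from the diff:

    pte_t old_pte = *pte;

    if (pte_present(old_pte)) {
            if (pte_val(old_pte) == pte_val(*new_pte))
                    return 0;                    /* unchanged: skip */
            kvm_set_pte(pte, __pte(0));          /* break...        */
            kvm_tlb_flush_vmid_ipa(kvm, addr);   /* ...flush...     */
    }
    kvm_set_pte(pte, *new_pte);                  /* ...make         */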