Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Thu, 30 Nov 2017 12:15:09
Message-Id: 1512043715.333a8fe029b7693b974251030f8b07f2a07a8776.alicef@gentoo
commit: 333a8fe029b7693b974251030f8b07f2a07a8776
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Nov 30 12:08:35 2017 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Nov 30 12:08:35 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=333a8fe0

linux kernel 4.14.3

0000_README | 4 +
1002_linux-4.14.3.patch | 8034 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 8038 insertions(+)

diff --git a/0000_README b/0000_README
index 1324e53..9aaf65a 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch: 1001_linux-4.14.2.patch
From: http://www.kernel.org
Desc: Linux 4.14.2

+Patch: 1002_linux-4.14.3.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.3
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-4.14.3.patch b/1002_linux-4.14.3.patch
new file mode 100644
index 0000000..269ad50
--- /dev/null
+++ b/1002_linux-4.14.3.patch
@@ -0,0 +1,8034 @@
36 +diff --git a/Makefile b/Makefile
37 +index 75d89dc2b94a..ede4de0d8634 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,7 +1,7 @@
41 + # SPDX-License-Identifier: GPL-2.0
42 + VERSION = 4
43 + PATCHLEVEL = 14
44 +-SUBLEVEL = 2
45 ++SUBLEVEL = 3
46 + EXTRAVERSION =
47 + NAME = Petit Gorille
48 +
49 +diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
50 +index 35ff45470dbf..fc3b44028cfb 100644
51 +--- a/arch/arm/mm/dump.c
52 ++++ b/arch/arm/mm/dump.c
53 +@@ -129,8 +129,8 @@ static const struct prot_bits section_bits[] = {
54 + .val = PMD_SECT_USER,
55 + .set = "USR",
56 + }, {
57 +- .mask = L_PMD_SECT_RDONLY,
58 +- .val = L_PMD_SECT_RDONLY,
59 ++ .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
60 ++ .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
61 + .set = "ro",
62 + .clear = "RW",
63 + #elif __LINUX_ARM_ARCH__ >= 6
64 +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
65 +index ad80548325fe..0f6d1537f330 100644
66 +--- a/arch/arm/mm/init.c
67 ++++ b/arch/arm/mm/init.c
68 +@@ -639,8 +639,8 @@ static struct section_perm ro_perms[] = {
69 + .start = (unsigned long)_stext,
70 + .end = (unsigned long)__init_begin,
71 + #ifdef CONFIG_ARM_LPAE
72 +- .mask = ~L_PMD_SECT_RDONLY,
73 +- .prot = L_PMD_SECT_RDONLY,
74 ++ .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
75 ++ .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
76 + #else
77 + .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
78 + .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
79 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
80 +index d8dd3298b15c..fb8d76a17bc5 100644
81 +--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
82 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
83 +@@ -49,6 +49,14 @@
84 +
85 + / {
86 + compatible = "amlogic,meson-gxl";
87 ++
88 ++ reserved-memory {
89 ++ /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
90 ++ secmon_reserved_alt: secmon@05000000 {
91 ++ reg = <0x0 0x05000000 0x0 0x300000>;
92 ++ no-map;
93 ++ };
94 ++ };
95 + };
96 +
97 + &ethmac {
98 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
99 +index b46e54c2399b..c9530b5b5ca8 100644
100 +--- a/arch/arm64/include/asm/pgtable.h
101 ++++ b/arch/arm64/include/asm/pgtable.h
102 +@@ -98,6 +98,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
103 + ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
104 + #define pte_valid_young(pte) \
105 + ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
106 ++#define pte_valid_user(pte) \
107 ++ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
108 +
109 + /*
110 + * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
111 +@@ -107,6 +109,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
112 + #define pte_accessible(mm, pte) \
113 + (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
114 +
115 ++/*
116 ++ * p??_access_permitted() is true for valid user mappings (subject to the
117 ++ * write permission check) other than user execute-only which do not have the
118 ++ * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
119 ++ */
120 ++#define pte_access_permitted(pte, write) \
121 ++ (pte_valid_user(pte) && (!(write) || pte_write(pte)))
122 ++#define pmd_access_permitted(pmd, write) \
123 ++ (pte_access_permitted(pmd_pte(pmd), (write)))
124 ++#define pud_access_permitted(pud, write) \
125 ++ (pte_access_permitted(pud_pte(pud), (write)))
126 ++
127 + static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
128 + {
129 + pte_val(pte) &= ~pgprot_val(prot);
130 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
131 +index 5d3284d20678..c3d798b44030 100644
132 +--- a/arch/mips/Kconfig
133 ++++ b/arch/mips/Kconfig
134 +@@ -65,7 +65,7 @@ config MIPS
135 + select HAVE_PERF_EVENTS
136 + select HAVE_REGS_AND_STACK_ACCESS_API
137 + select HAVE_SYSCALL_TRACEPOINTS
138 +- select HAVE_VIRT_CPU_ACCOUNTING_GEN
139 ++ select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
140 + select IRQ_FORCED_THREADING
141 + select MODULES_USE_ELF_RELA if MODULES && 64BIT
142 + select MODULES_USE_ELF_REL if MODULES
143 +diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
144 +index d4f2407a42c6..8307a8a02667 100644
145 +--- a/arch/mips/bcm47xx/leds.c
146 ++++ b/arch/mips/bcm47xx/leds.c
147 +@@ -331,7 +331,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
148 + /* Verified on: WRT54GS V1.0 */
149 + static const struct gpio_led
150 + bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = {
151 +- BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
152 ++ BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
153 + BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
154 + BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
155 + };
156 +diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
157 +index 9e09cc4556b3..398994312361 100644
158 +--- a/arch/mips/boot/dts/brcm/Makefile
159 ++++ b/arch/mips/boot/dts/brcm/Makefile
160 +@@ -23,7 +23,6 @@ dtb-$(CONFIG_DT_NONE) += \
161 + bcm63268-comtrend-vr-3032u.dtb \
162 + bcm93384wvg.dtb \
163 + bcm93384wvg_viper.dtb \
164 +- bcm96358nb4ser.dtb \
165 + bcm96368mvwg.dtb \
166 + bcm9ejtagprb.dtb \
167 + bcm97125cbmb.dtb \
168 +diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
169 +index 83054f79f72a..feb069cbf44e 100644
170 +--- a/arch/mips/include/asm/asmmacro.h
171 ++++ b/arch/mips/include/asm/asmmacro.h
172 +@@ -19,6 +19,9 @@
173 + #include <asm/asmmacro-64.h>
174 + #endif
175 +
176 ++/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
177 ++#undef fp
178 ++
179 + /*
180 + * Helper macros for generating raw instruction encodings.
181 + */
182 +@@ -105,6 +108,7 @@
183 + .macro fpu_save_16odd thread
184 + .set push
185 + .set mips64r2
186 ++ .set fp=64
187 + SET_HARDFLOAT
188 + sdc1 $f1, THREAD_FPR1(\thread)
189 + sdc1 $f3, THREAD_FPR3(\thread)
190 +@@ -126,8 +130,8 @@
191 + .endm
192 +
193 + .macro fpu_save_double thread status tmp
194 +-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
195 +- defined(CONFIG_CPU_MIPS32_R6)
196 ++#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
197 ++ defined(CONFIG_CPU_MIPSR6)
198 + sll \tmp, \status, 5
199 + bgez \tmp, 10f
200 + fpu_save_16odd \thread
201 +@@ -163,6 +167,7 @@
202 + .macro fpu_restore_16odd thread
203 + .set push
204 + .set mips64r2
205 ++ .set fp=64
206 + SET_HARDFLOAT
207 + ldc1 $f1, THREAD_FPR1(\thread)
208 + ldc1 $f3, THREAD_FPR3(\thread)
209 +@@ -184,8 +189,8 @@
210 + .endm
211 +
212 + .macro fpu_restore_double thread status tmp
213 +-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
214 +- defined(CONFIG_CPU_MIPS32_R6)
215 ++#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
216 ++ defined(CONFIG_CPU_MIPSR6)
217 + sll \tmp, \status, 5
218 + bgez \tmp, 10f # 16 register mode?
219 +
220 +@@ -234,9 +239,6 @@
221 + .endm
222 +
223 + #ifdef TOOLCHAIN_SUPPORTS_MSA
224 +-/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
225 +-#undef fp
226 +-
227 + .macro _cfcmsa rd, cs
228 + .set push
229 + .set mips32r2
230 +diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
231 +index 7e25c5cc353a..89e9fb7976fe 100644
232 +--- a/arch/mips/include/asm/cmpxchg.h
233 ++++ b/arch/mips/include/asm/cmpxchg.h
234 +@@ -204,8 +204,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
235 + #else
236 + #include <asm-generic/cmpxchg-local.h>
237 + #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
238 ++#ifndef CONFIG_SMP
239 + #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
240 + #endif
241 ++#endif
242 +
243 + #undef __scbeqz
244 +
245 +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
246 +index 1395654cfc8d..5a09c2901a76 100644
247 +--- a/arch/mips/kernel/ptrace.c
248 ++++ b/arch/mips/kernel/ptrace.c
249 +@@ -618,6 +618,19 @@ static const struct user_regset_view user_mips64_view = {
250 + .n = ARRAY_SIZE(mips64_regsets),
251 + };
252 +
253 ++#ifdef CONFIG_MIPS32_N32
254 ++
255 ++static const struct user_regset_view user_mipsn32_view = {
256 ++ .name = "mipsn32",
257 ++ .e_flags = EF_MIPS_ABI2,
258 ++ .e_machine = ELF_ARCH,
259 ++ .ei_osabi = ELF_OSABI,
260 ++ .regsets = mips64_regsets,
261 ++ .n = ARRAY_SIZE(mips64_regsets),
262 ++};
263 ++
264 ++#endif /* CONFIG_MIPS32_N32 */
265 ++
266 + #endif /* CONFIG_64BIT */
267 +
268 + const struct user_regset_view *task_user_regset_view(struct task_struct *task)
269 +@@ -628,6 +641,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
270 + #ifdef CONFIG_MIPS32_O32
271 + if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
272 + return &user_mips_view;
273 ++#endif
274 ++#ifdef CONFIG_MIPS32_N32
275 ++ if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
276 ++ return &user_mipsn32_view;
277 + #endif
278 + return &user_mips64_view;
279 + #endif
280 +diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
281 +index 0a83b1708b3c..8e3a6020c613 100644
282 +--- a/arch/mips/kernel/r4k_fpu.S
283 ++++ b/arch/mips/kernel/r4k_fpu.S
284 +@@ -40,8 +40,8 @@
285 + */
286 + LEAF(_save_fp)
287 + EXPORT_SYMBOL(_save_fp)
288 +-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
289 +- defined(CONFIG_CPU_MIPS32_R6)
290 ++#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
291 ++ defined(CONFIG_CPU_MIPSR6)
292 + mfc0 t0, CP0_STATUS
293 + #endif
294 + fpu_save_double a0 t0 t1 # clobbers t1
295 +@@ -52,8 +52,8 @@ EXPORT_SYMBOL(_save_fp)
296 + * Restore a thread's fp context.
297 + */
298 + LEAF(_restore_fp)
299 +-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
300 +- defined(CONFIG_CPU_MIPS32_R6)
301 ++#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
302 ++ defined(CONFIG_CPU_MIPSR6)
303 + mfc0 t0, CP0_STATUS
304 + #endif
305 + fpu_restore_double a0 t0 t1 # clobbers t1
306 +@@ -246,11 +246,11 @@ LEAF(_save_fp_context)
307 + cfc1 t1, fcr31
308 + .set pop
309 +
310 +-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
311 +- defined(CONFIG_CPU_MIPS32_R6)
312 ++#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
313 ++ defined(CONFIG_CPU_MIPSR6)
314 + .set push
315 + SET_HARDFLOAT
316 +-#ifdef CONFIG_CPU_MIPS32_R2
317 ++#ifdef CONFIG_CPU_MIPSR2
318 + .set mips32r2
319 + .set fp=64
320 + mfc0 t0, CP0_STATUS
321 +@@ -314,11 +314,11 @@ LEAF(_save_fp_context)
322 + LEAF(_restore_fp_context)
323 + EX lw t1, 0(a1)
324 +
325 +-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
326 +- defined(CONFIG_CPU_MIPS32_R6)
327 ++#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
328 ++ defined(CONFIG_CPU_MIPSR6)
329 + .set push
330 + SET_HARDFLOAT
331 +-#ifdef CONFIG_CPU_MIPS32_R2
332 ++#ifdef CONFIG_CPU_MIPSR2
333 + .set mips32r2
334 + .set fp=64
335 + mfc0 t0, CP0_STATUS
336 +diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
337 +index 16d9ef5a78c5..6f57212f5659 100644
338 +--- a/arch/mips/math-emu/cp1emu.c
339 ++++ b/arch/mips/math-emu/cp1emu.c
340 +@@ -1795,7 +1795,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
341 + SPFROMREG(fs, MIPSInst_FS(ir));
342 + SPFROMREG(fd, MIPSInst_FD(ir));
343 + rv.s = ieee754sp_maddf(fd, fs, ft);
344 +- break;
345 ++ goto copcsr;
346 + }
347 +
348 + case fmsubf_op: {
349 +@@ -1809,7 +1809,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
350 + SPFROMREG(fs, MIPSInst_FS(ir));
351 + SPFROMREG(fd, MIPSInst_FD(ir));
352 + rv.s = ieee754sp_msubf(fd, fs, ft);
353 +- break;
354 ++ goto copcsr;
355 + }
356 +
357 + case frint_op: {
358 +@@ -1834,7 +1834,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
359 + SPFROMREG(fs, MIPSInst_FS(ir));
360 + rv.w = ieee754sp_2008class(fs);
361 + rfmt = w_fmt;
362 +- break;
363 ++ goto copcsr;
364 + }
365 +
366 + case fmin_op: {
367 +@@ -1847,7 +1847,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
368 + SPFROMREG(ft, MIPSInst_FT(ir));
369 + SPFROMREG(fs, MIPSInst_FS(ir));
370 + rv.s = ieee754sp_fmin(fs, ft);
371 +- break;
372 ++ goto copcsr;
373 + }
374 +
375 + case fmina_op: {
376 +@@ -1860,7 +1860,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
377 + SPFROMREG(ft, MIPSInst_FT(ir));
378 + SPFROMREG(fs, MIPSInst_FS(ir));
379 + rv.s = ieee754sp_fmina(fs, ft);
380 +- break;
381 ++ goto copcsr;
382 + }
383 +
384 + case fmax_op: {
385 +@@ -1873,7 +1873,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
386 + SPFROMREG(ft, MIPSInst_FT(ir));
387 + SPFROMREG(fs, MIPSInst_FS(ir));
388 + rv.s = ieee754sp_fmax(fs, ft);
389 +- break;
390 ++ goto copcsr;
391 + }
392 +
393 + case fmaxa_op: {
394 +@@ -1886,7 +1886,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
395 + SPFROMREG(ft, MIPSInst_FT(ir));
396 + SPFROMREG(fs, MIPSInst_FS(ir));
397 + rv.s = ieee754sp_fmaxa(fs, ft);
398 +- break;
399 ++ goto copcsr;
400 + }
401 +
402 + case fabs_op:
403 +@@ -2165,7 +2165,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
404 + DPFROMREG(fs, MIPSInst_FS(ir));
405 + DPFROMREG(fd, MIPSInst_FD(ir));
406 + rv.d = ieee754dp_maddf(fd, fs, ft);
407 +- break;
408 ++ goto copcsr;
409 + }
410 +
411 + case fmsubf_op: {
412 +@@ -2179,7 +2179,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
413 + DPFROMREG(fs, MIPSInst_FS(ir));
414 + DPFROMREG(fd, MIPSInst_FD(ir));
415 + rv.d = ieee754dp_msubf(fd, fs, ft);
416 +- break;
417 ++ goto copcsr;
418 + }
419 +
420 + case frint_op: {
421 +@@ -2204,7 +2204,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
422 + DPFROMREG(fs, MIPSInst_FS(ir));
423 + rv.l = ieee754dp_2008class(fs);
424 + rfmt = l_fmt;
425 +- break;
426 ++ goto copcsr;
427 + }
428 +
429 + case fmin_op: {
430 +@@ -2217,7 +2217,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
431 + DPFROMREG(ft, MIPSInst_FT(ir));
432 + DPFROMREG(fs, MIPSInst_FS(ir));
433 + rv.d = ieee754dp_fmin(fs, ft);
434 +- break;
435 ++ goto copcsr;
436 + }
437 +
438 + case fmina_op: {
439 +@@ -2230,7 +2230,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
440 + DPFROMREG(ft, MIPSInst_FT(ir));
441 + DPFROMREG(fs, MIPSInst_FS(ir));
442 + rv.d = ieee754dp_fmina(fs, ft);
443 +- break;
444 ++ goto copcsr;
445 + }
446 +
447 + case fmax_op: {
448 +@@ -2243,7 +2243,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
449 + DPFROMREG(ft, MIPSInst_FT(ir));
450 + DPFROMREG(fs, MIPSInst_FS(ir));
451 + rv.d = ieee754dp_fmax(fs, ft);
452 +- break;
453 ++ goto copcsr;
454 + }
455 +
456 + case fmaxa_op: {
457 +@@ -2256,7 +2256,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
458 + DPFROMREG(ft, MIPSInst_FT(ir));
459 + DPFROMREG(fs, MIPSInst_FS(ir));
460 + rv.d = ieee754dp_fmaxa(fs, ft);
461 +- break;
462 ++ goto copcsr;
463 + }
464 +
465 + case fabs_op:
466 +diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
467 +index 90fba9bf98da..27ac00c36bc0 100644
468 +--- a/arch/mips/pci/pci-mt7620.c
469 ++++ b/arch/mips/pci/pci-mt7620.c
470 +@@ -121,7 +121,7 @@ static int wait_pciephy_busy(void)
471 + else
472 + break;
473 + if (retry++ > WAITRETRY_MAX) {
474 +- printk(KERN_WARN "PCIE-PHY retry failed.\n");
475 ++ pr_warn("PCIE-PHY retry failed.\n");
476 + return -1;
477 + }
478 + }
479 +diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
480 +index 9be8b08ae46b..41b71c4352c2 100644
481 +--- a/arch/mips/ralink/mt7620.c
482 ++++ b/arch/mips/ralink/mt7620.c
483 +@@ -145,8 +145,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
484 + FUNC("i2c", 0, 4, 2),
485 + };
486 +
487 +-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
488 +-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
489 ++static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
490 ++static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
491 + static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
492 + static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
493 +
494 +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
495 +index 41e60a9c7db2..e775f80ae28c 100644
496 +--- a/arch/parisc/kernel/syscall.S
497 ++++ b/arch/parisc/kernel/syscall.S
498 +@@ -690,15 +690,15 @@ cas_action:
499 + /* ELF32 Process entry path */
500 + lws_compare_and_swap_2:
501 + #ifdef CONFIG_64BIT
502 +- /* Clip the input registers */
503 ++ /* Clip the input registers. We don't need to clip %r23 as we
504 ++ only use it for word operations */
505 + depdi 0, 31, 32, %r26
506 + depdi 0, 31, 32, %r25
507 + depdi 0, 31, 32, %r24
508 +- depdi 0, 31, 32, %r23
509 + #endif
510 +
511 + /* Check the validity of the size pointer */
512 +- subi,>>= 4, %r23, %r0
513 ++ subi,>>= 3, %r23, %r0
514 + b,n lws_exit_nosys
515 +
516 + /* Jump to the functions which will load the old and new values into
517 +diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
518 +index 1c80bd292e48..06598142d755 100644
519 +--- a/arch/powerpc/kernel/exceptions-64s.S
520 ++++ b/arch/powerpc/kernel/exceptions-64s.S
521 +@@ -542,7 +542,7 @@ EXC_COMMON_BEGIN(instruction_access_common)
522 + RECONCILE_IRQ_STATE(r10, r11)
523 + ld r12,_MSR(r1)
524 + ld r3,_NIP(r1)
525 +- andis. r4,r12,DSISR_BAD_FAULT_64S@h
526 ++ andis. r4,r12,DSISR_SRR1_MATCH_64S@h
527 + li r5,0x400
528 + std r3,_DAR(r1)
529 + std r4,_DSISR(r1)
530 +diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
531 +index e9436c5e1e09..3d7539b90010 100644
532 +--- a/arch/powerpc/kernel/signal.c
533 ++++ b/arch/powerpc/kernel/signal.c
534 +@@ -103,7 +103,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
535 + static void do_signal(struct task_struct *tsk)
536 + {
537 + sigset_t *oldset = sigmask_to_save();
538 +- struct ksignal ksig;
539 ++ struct ksignal ksig = { .sig = 0 };
540 + int ret;
541 + int is32 = is_32bit_task();
542 +
543 +diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
544 +index 90644db9d38e..8e0cf8f186df 100644
545 +--- a/arch/powerpc/kvm/book3s_hv_builtin.c
546 ++++ b/arch/powerpc/kvm/book3s_hv_builtin.c
547 +@@ -529,6 +529,8 @@ static inline bool is_rm(void)
548 +
549 + unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
550 + {
551 ++ if (!kvmppc_xics_enabled(vcpu))
552 ++ return H_TOO_HARD;
553 + if (xive_enabled()) {
554 + if (is_rm())
555 + return xive_rm_h_xirr(vcpu);
556 +@@ -541,6 +543,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
557 +
558 + unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
559 + {
560 ++ if (!kvmppc_xics_enabled(vcpu))
561 ++ return H_TOO_HARD;
562 + vcpu->arch.gpr[5] = get_tb();
563 + if (xive_enabled()) {
564 + if (is_rm())
565 +@@ -554,6 +558,8 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
566 +
567 + unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
568 + {
569 ++ if (!kvmppc_xics_enabled(vcpu))
570 ++ return H_TOO_HARD;
571 + if (xive_enabled()) {
572 + if (is_rm())
573 + return xive_rm_h_ipoll(vcpu, server);
574 +@@ -567,6 +573,8 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
575 + int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
576 + unsigned long mfrr)
577 + {
578 ++ if (!kvmppc_xics_enabled(vcpu))
579 ++ return H_TOO_HARD;
580 + if (xive_enabled()) {
581 + if (is_rm())
582 + return xive_rm_h_ipi(vcpu, server, mfrr);
583 +@@ -579,6 +587,8 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
584 +
585 + int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
586 + {
587 ++ if (!kvmppc_xics_enabled(vcpu))
588 ++ return H_TOO_HARD;
589 + if (xive_enabled()) {
590 + if (is_rm())
591 + return xive_rm_h_cppr(vcpu, cppr);
592 +@@ -591,6 +601,8 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
593 +
594 + int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
595 + {
596 ++ if (!kvmppc_xics_enabled(vcpu))
597 ++ return H_TOO_HARD;
598 + if (xive_enabled()) {
599 + if (is_rm())
600 + return xive_rm_h_eoi(vcpu, xirr);
601 +diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
602 +index c9de03e0c1f1..d469224c4ada 100644
603 +--- a/arch/powerpc/lib/code-patching.c
604 ++++ b/arch/powerpc/lib/code-patching.c
605 +@@ -21,6 +21,7 @@
606 + #include <asm/tlbflush.h>
607 + #include <asm/page.h>
608 + #include <asm/code-patching.h>
609 ++#include <asm/setup.h>
610 +
611 + static int __patch_instruction(unsigned int *addr, unsigned int instr)
612 + {
613 +@@ -146,11 +147,8 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
614 + * During early early boot patch_instruction is called
615 + * when text_poke_area is not ready, but we still need
616 + * to allow patching. We just do the plain old patching
617 +- * We use slab_is_available and per cpu read * via this_cpu_read
618 +- * of text_poke_area. Per-CPU areas might not be up early
619 +- * this can create problems with just using this_cpu_read()
620 + */
621 +- if (!slab_is_available() || !this_cpu_read(text_poke_area))
622 ++ if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
623 + return __patch_instruction(addr, instr);
624 +
625 + local_irq_save(flags);
626 +diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
627 +index 558e9d3891bf..bd022d16745c 100644
628 +--- a/arch/powerpc/mm/hugetlbpage-radix.c
629 ++++ b/arch/powerpc/mm/hugetlbpage-radix.c
630 +@@ -49,17 +49,28 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
631 + struct mm_struct *mm = current->mm;
632 + struct vm_area_struct *vma;
633 + struct hstate *h = hstate_file(file);
634 ++ int fixed = (flags & MAP_FIXED);
635 ++ unsigned long high_limit;
636 + struct vm_unmapped_area_info info;
637 +
638 +- if (unlikely(addr > mm->context.addr_limit && addr < TASK_SIZE))
639 +- mm->context.addr_limit = TASK_SIZE;
640 ++ high_limit = DEFAULT_MAP_WINDOW;
641 ++ if (addr >= high_limit || (fixed && (addr + len > high_limit)))
642 ++ high_limit = TASK_SIZE;
643 +
644 + if (len & ~huge_page_mask(h))
645 + return -EINVAL;
646 +- if (len > mm->task_size)
647 ++ if (len > high_limit)
648 + return -ENOMEM;
649 ++ if (fixed) {
650 ++ if (addr > high_limit - len)
651 ++ return -ENOMEM;
652 ++ }
653 +
654 +- if (flags & MAP_FIXED) {
655 ++ if (unlikely(addr > mm->context.addr_limit &&
656 ++ mm->context.addr_limit != TASK_SIZE))
657 ++ mm->context.addr_limit = TASK_SIZE;
658 ++
659 ++ if (fixed) {
660 + if (prepare_hugepage_range(file, addr, len))
661 + return -EINVAL;
662 + return addr;
663 +@@ -68,7 +79,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
664 + if (addr) {
665 + addr = ALIGN(addr, huge_page_size(h));
666 + vma = find_vma(mm, addr);
667 +- if (mm->task_size - len >= addr &&
668 ++ if (high_limit - len >= addr &&
669 + (!vma || addr + len <= vm_start_gap(vma)))
670 + return addr;
671 + }
672 +@@ -79,12 +90,9 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
673 + info.flags = VM_UNMAPPED_AREA_TOPDOWN;
674 + info.length = len;
675 + info.low_limit = PAGE_SIZE;
676 +- info.high_limit = current->mm->mmap_base;
677 ++ info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
678 + info.align_mask = PAGE_MASK & ~huge_page_mask(h);
679 + info.align_offset = 0;
680 +
681 +- if (addr > DEFAULT_MAP_WINDOW)
682 +- info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
683 +-
684 + return vm_unmapped_area(&info);
685 + }
686 +diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
687 +index 5d78b193fec4..6d476a7b5611 100644
688 +--- a/arch/powerpc/mm/mmap.c
689 ++++ b/arch/powerpc/mm/mmap.c
690 +@@ -106,22 +106,32 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
691 + {
692 + struct mm_struct *mm = current->mm;
693 + struct vm_area_struct *vma;
694 ++ int fixed = (flags & MAP_FIXED);
695 ++ unsigned long high_limit;
696 + struct vm_unmapped_area_info info;
697 +
698 ++ high_limit = DEFAULT_MAP_WINDOW;
699 ++ if (addr >= high_limit || (fixed && (addr + len > high_limit)))
700 ++ high_limit = TASK_SIZE;
701 ++
702 ++ if (len > high_limit)
703 ++ return -ENOMEM;
704 ++ if (fixed) {
705 ++ if (addr > high_limit - len)
706 ++ return -ENOMEM;
707 ++ }
708 ++
709 + if (unlikely(addr > mm->context.addr_limit &&
710 + mm->context.addr_limit != TASK_SIZE))
711 + mm->context.addr_limit = TASK_SIZE;
712 +
713 +- if (len > mm->task_size - mmap_min_addr)
714 +- return -ENOMEM;
715 +-
716 +- if (flags & MAP_FIXED)
717 ++ if (fixed)
718 + return addr;
719 +
720 + if (addr) {
721 + addr = PAGE_ALIGN(addr);
722 + vma = find_vma(mm, addr);
723 +- if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
724 ++ if (high_limit - len >= addr && addr >= mmap_min_addr &&
725 + (!vma || addr + len <= vm_start_gap(vma)))
726 + return addr;
727 + }
728 +@@ -129,13 +139,9 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
729 + info.flags = 0;
730 + info.length = len;
731 + info.low_limit = mm->mmap_base;
732 ++ info.high_limit = high_limit;
733 + info.align_mask = 0;
734 +
735 +- if (unlikely(addr > DEFAULT_MAP_WINDOW))
736 +- info.high_limit = mm->context.addr_limit;
737 +- else
738 +- info.high_limit = DEFAULT_MAP_WINDOW;
739 +-
740 + return vm_unmapped_area(&info);
741 + }
742 +
743 +@@ -149,37 +155,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
744 + struct vm_area_struct *vma;
745 + struct mm_struct *mm = current->mm;
746 + unsigned long addr = addr0;
747 ++ int fixed = (flags & MAP_FIXED);
748 ++ unsigned long high_limit;
749 + struct vm_unmapped_area_info info;
750 +
751 ++ high_limit = DEFAULT_MAP_WINDOW;
752 ++ if (addr >= high_limit || (fixed && (addr + len > high_limit)))
753 ++ high_limit = TASK_SIZE;
754 ++
755 ++ if (len > high_limit)
756 ++ return -ENOMEM;
757 ++ if (fixed) {
758 ++ if (addr > high_limit - len)
759 ++ return -ENOMEM;
760 ++ }
761 ++
762 + if (unlikely(addr > mm->context.addr_limit &&
763 + mm->context.addr_limit != TASK_SIZE))
764 + mm->context.addr_limit = TASK_SIZE;
765 +
766 +- /* requested length too big for entire address space */
767 +- if (len > mm->task_size - mmap_min_addr)
768 +- return -ENOMEM;
769 +-
770 +- if (flags & MAP_FIXED)
771 ++ if (fixed)
772 + return addr;
773 +
774 +- /* requesting a specific address */
775 + if (addr) {
776 + addr = PAGE_ALIGN(addr);
777 + vma = find_vma(mm, addr);
778 +- if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
779 +- (!vma || addr + len <= vm_start_gap(vma)))
780 ++ if (high_limit - len >= addr && addr >= mmap_min_addr &&
781 ++ (!vma || addr + len <= vm_start_gap(vma)))
782 + return addr;
783 + }
784 +
785 + info.flags = VM_UNMAPPED_AREA_TOPDOWN;
786 + info.length = len;
787 + info.low_limit = max(PAGE_SIZE, mmap_min_addr);
788 +- info.high_limit = mm->mmap_base;
789 ++ info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
790 + info.align_mask = 0;
791 +
792 +- if (addr > DEFAULT_MAP_WINDOW)
793 +- info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
794 +-
795 + addr = vm_unmapped_area(&info);
796 + if (!(addr & ~PAGE_MASK))
797 + return addr;
798 +diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
799 +index 05e15386d4cb..b94fb62e60fd 100644
800 +--- a/arch/powerpc/mm/mmu_context_book3s64.c
801 ++++ b/arch/powerpc/mm/mmu_context_book3s64.c
802 +@@ -93,11 +93,11 @@ static int hash__init_new_context(struct mm_struct *mm)
803 + return index;
804 +
805 + /*
806 +- * We do switch_slb() early in fork, even before we setup the
807 +- * mm->context.addr_limit. Default to max task size so that we copy the
808 +- * default values to paca which will help us to handle slb miss early.
809 ++ * In the case of exec, use the default limit,
810 ++ * otherwise inherit it from the mm we are duplicating.
811 + */
812 +- mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
813 ++ if (!mm->context.addr_limit)
814 ++ mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
815 +
816 + /*
817 + * The old code would re-promote on fork, we don't do that when using
818 +diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
819 +index 39c252b54d16..cfbbee941a76 100644
820 +--- a/arch/powerpc/mm/pgtable-radix.c
821 ++++ b/arch/powerpc/mm/pgtable-radix.c
822 +@@ -169,6 +169,16 @@ void radix__mark_rodata_ro(void)
823 + {
824 + unsigned long start, end;
825 +
826 ++ /*
827 ++ * mark_rodata_ro() will mark itself as !writable at some point.
828 ++ * Due to DD1 workaround in radix__pte_update(), we'll end up with
829 ++ * an invalid pte and the system will crash quite severly.
830 ++ */
831 ++ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
832 ++ pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
833 ++ return;
834 ++ }
835 ++
836 + start = (unsigned long)_stext;
837 + end = (unsigned long)__init_begin;
838 +
839 +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
840 +index 45f6740dd407..a4f93699194b 100644
841 +--- a/arch/powerpc/mm/slice.c
842 ++++ b/arch/powerpc/mm/slice.c
843 +@@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
844 + {
845 + struct vm_area_struct *vma;
846 +
847 +- if ((mm->task_size - len) < addr)
848 ++ if ((mm->context.addr_limit - len) < addr)
849 + return 0;
850 + vma = find_vma(mm, addr);
851 + return (!vma || (addr + len) <= vm_start_gap(vma));
852 +@@ -133,7 +133,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
853 + if (!slice_low_has_vma(mm, i))
854 + ret->low_slices |= 1u << i;
855 +
856 +- if (mm->task_size <= SLICE_LOW_TOP)
857 ++ if (mm->context.addr_limit <= SLICE_LOW_TOP)
858 + return;
859 +
860 + for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
861 +@@ -412,25 +412,31 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
862 + struct slice_mask compat_mask;
863 + int fixed = (flags & MAP_FIXED);
864 + int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
865 ++ unsigned long page_size = 1UL << pshift;
866 + struct mm_struct *mm = current->mm;
867 + unsigned long newaddr;
868 + unsigned long high_limit;
869 +
870 +- /*
871 +- * Check if we need to expland slice area.
872 +- */
873 +- if (unlikely(addr > mm->context.addr_limit &&
874 +- mm->context.addr_limit != TASK_SIZE)) {
875 +- mm->context.addr_limit = TASK_SIZE;
876 ++ high_limit = DEFAULT_MAP_WINDOW;
877 ++ if (addr >= high_limit || (fixed && (addr + len > high_limit)))
878 ++ high_limit = TASK_SIZE;
879 ++
880 ++ if (len > high_limit)
881 ++ return -ENOMEM;
882 ++ if (len & (page_size - 1))
883 ++ return -EINVAL;
884 ++ if (fixed) {
885 ++ if (addr & (page_size - 1))
886 ++ return -EINVAL;
887 ++ if (addr > high_limit - len)
888 ++ return -ENOMEM;
889 ++ }
890 ++
891 ++ if (high_limit > mm->context.addr_limit) {
892 ++ mm->context.addr_limit = high_limit;
893 + on_each_cpu(slice_flush_segments, mm, 1);
894 + }
895 +- /*
896 +- * This mmap request can allocate upt to 512TB
897 +- */
898 +- if (addr > DEFAULT_MAP_WINDOW)
899 +- high_limit = mm->context.addr_limit;
900 +- else
901 +- high_limit = DEFAULT_MAP_WINDOW;
902 ++
903 + /*
904 + * init different masks
905 + */
906 +@@ -446,27 +452,19 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
907 +
908 + /* Sanity checks */
909 + BUG_ON(mm->task_size == 0);
910 ++ BUG_ON(mm->context.addr_limit == 0);
911 + VM_BUG_ON(radix_enabled());
912 +
913 + slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
914 + slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
915 + addr, len, flags, topdown);
916 +
917 +- if (len > mm->task_size)
918 +- return -ENOMEM;
919 +- if (len & ((1ul << pshift) - 1))
920 +- return -EINVAL;
921 +- if (fixed && (addr & ((1ul << pshift) - 1)))
922 +- return -EINVAL;
923 +- if (fixed && addr > (mm->task_size - len))
924 +- return -ENOMEM;
925 +-
926 + /* If hint, make sure it matches our alignment restrictions */
927 + if (!fixed && addr) {
928 +- addr = _ALIGN_UP(addr, 1ul << pshift);
929 ++ addr = _ALIGN_UP(addr, page_size);
930 + slice_dbg(" aligned addr=%lx\n", addr);
931 + /* Ignore hint if it's too large or overlaps a VMA */
932 +- if (addr > mm->task_size - len ||
933 ++ if (addr > high_limit - len ||
934 + !slice_area_is_free(mm, addr, len))
935 + addr = 0;
936 + }
937 +diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
938 +index 36344117c680..cf64e16f92c2 100644
939 +--- a/arch/powerpc/perf/imc-pmu.c
940 ++++ b/arch/powerpc/perf/imc-pmu.c
941 +@@ -467,7 +467,7 @@ static int nest_imc_event_init(struct perf_event *event)
942 + * Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
943 + * Get the base memory addresss for this cpu.
944 + */
945 +- chip_id = topology_physical_package_id(event->cpu);
946 ++ chip_id = cpu_to_chip_id(event->cpu);
947 + pcni = pmu->mem_info;
948 + do {
949 + if (pcni->id == chip_id) {
950 +@@ -524,19 +524,19 @@ static int nest_imc_event_init(struct perf_event *event)
951 + */
952 + static int core_imc_mem_init(int cpu, int size)
953 + {
954 +- int phys_id, rc = 0, core_id = (cpu / threads_per_core);
955 ++ int nid, rc = 0, core_id = (cpu / threads_per_core);
956 + struct imc_mem_info *mem_info;
957 +
958 + /*
959 + * alloc_pages_node() will allocate memory for core in the
960 + * local node only.
961 + */
962 +- phys_id = topology_physical_package_id(cpu);
963 ++ nid = cpu_to_node(cpu);
964 + mem_info = &core_imc_pmu->mem_info[core_id];
965 + mem_info->id = core_id;
966 +
967 + /* We need only vbase for core counters */
968 +- mem_info->vbase = page_address(alloc_pages_node(phys_id,
969 ++ mem_info->vbase = page_address(alloc_pages_node(nid,
970 + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
971 + __GFP_NOWARN, get_order(size)));
972 + if (!mem_info->vbase)
973 +@@ -797,14 +797,14 @@ static int core_imc_event_init(struct perf_event *event)
974 + static int thread_imc_mem_alloc(int cpu_id, int size)
975 + {
976 + u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
977 +- int phys_id = topology_physical_package_id(cpu_id);
978 ++ int nid = cpu_to_node(cpu_id);
979 +
980 + if (!local_mem) {
981 + /*
982 + * This case could happen only once at start, since we dont
983 + * free the memory in cpu offline path.
984 + */
985 +- local_mem = page_address(alloc_pages_node(phys_id,
986 ++ local_mem = page_address(alloc_pages_node(nid,
987 + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
988 + __GFP_NOWARN, get_order(size)));
989 + if (!local_mem)
990 +diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
991 +index c21fe1d57c00..ec7b476c1ac5 100644
992 +--- a/arch/s390/include/asm/switch_to.h
993 ++++ b/arch/s390/include/asm/switch_to.h
994 +@@ -37,8 +37,8 @@ static inline void restore_access_regs(unsigned int *acrs)
995 + save_ri_cb(prev->thread.ri_cb); \
996 + save_gs_cb(prev->thread.gs_cb); \
997 + } \
998 ++ update_cr_regs(next); \
999 + if (next->mm) { \
1000 +- update_cr_regs(next); \
1001 + set_cpu_flag(CIF_FPU); \
1002 + restore_access_regs(&next->thread.acrs[0]); \
1003 + restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
1004 +diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
1005 +index f7e82302a71e..2394557653d5 100644
1006 +--- a/arch/s390/kernel/dis.c
1007 ++++ b/arch/s390/kernel/dis.c
1008 +@@ -1548,6 +1548,7 @@ static struct s390_insn opcode_e7[] = {
1009 + { "vfsq", 0xce, INSTR_VRR_VV000MM },
1010 + { "vfs", 0xe2, INSTR_VRR_VVV00MM },
1011 + { "vftci", 0x4a, INSTR_VRI_VVIMM },
1012 ++ { "", 0, INSTR_INVALID }
1013 + };
1014 +
1015 + static struct s390_insn opcode_eb[] = {
1016 +@@ -1953,7 +1954,7 @@ void show_code(struct pt_regs *regs)
1017 + {
1018 + char *mode = user_mode(regs) ? "User" : "Krnl";
1019 + unsigned char code[64];
1020 +- char buffer[64], *ptr;
1021 ++ char buffer[128], *ptr;
1022 + mm_segment_t old_fs;
1023 + unsigned long addr;
1024 + int start, end, opsize, hops, i;
1025 +@@ -2016,7 +2017,7 @@ void show_code(struct pt_regs *regs)
1026 + start += opsize;
1027 + pr_cont("%s", buffer);
1028 + ptr = buffer;
1029 +- ptr += sprintf(ptr, "\n ");
1030 ++ ptr += sprintf(ptr, "\n\t ");
1031 + hops++;
1032 + }
1033 + pr_cont("\n");
1034 +diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
1035 +index b945448b9eae..f7b280f0ab16 100644
1036 +--- a/arch/s390/kernel/early.c
1037 ++++ b/arch/s390/kernel/early.c
1038 +@@ -375,8 +375,10 @@ static __init void detect_machine_facilities(void)
1039 + S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
1040 + if (test_facility(40))
1041 + S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
1042 +- if (test_facility(50) && test_facility(73))
1043 ++ if (test_facility(50) && test_facility(73)) {
1044 + S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
1045 ++ __ctl_set_bit(0, 55);
1046 ++ }
1047 + if (test_facility(51))
1048 + S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
1049 + if (test_facility(129)) {
1050 +diff --git a/arch/s390/kernel/guarded_storage.c b/arch/s390/kernel/guarded_storage.c
1051 +index bff39b66c9ff..9ee794e14f33 100644
1052 +--- a/arch/s390/kernel/guarded_storage.c
1053 ++++ b/arch/s390/kernel/guarded_storage.c
1054 +@@ -14,9 +14,11 @@
1055 +
1056 + void exit_thread_gs(void)
1057 + {
1058 ++ preempt_disable();
1059 + kfree(current->thread.gs_cb);
1060 + kfree(current->thread.gs_bc_cb);
1061 + current->thread.gs_cb = current->thread.gs_bc_cb = NULL;
1062 ++ preempt_enable();
1063 + }
1064 +
1065 + static int gs_enable(void)
1066 +diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
1067 +index b0ba2c26b45e..d6f7782e75c9 100644
1068 +--- a/arch/s390/kernel/machine_kexec.c
1069 ++++ b/arch/s390/kernel/machine_kexec.c
1070 +@@ -269,6 +269,7 @@ static void __do_machine_kexec(void *data)
1071 + s390_reset_system();
1072 + data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
1073 +
1074 ++ __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
1075 + /* Call the moving routine */
1076 + (*data_mover)(&image->head, image->start);
1077 +
1078 +diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
1079 +index a4a84fb08046..203b7cd7c348 100644
1080 +--- a/arch/s390/kernel/process.c
1081 ++++ b/arch/s390/kernel/process.c
1082 +@@ -100,6 +100,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
1083 + memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
1084 + memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
1085 + clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
1086 ++ p->thread.per_flags = 0;
1087 + /* Initialize per thread user and system timer values */
1088 + p->thread.user_timer = 0;
1089 + p->thread.guest_timer = 0;
1090 +diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
1091 +index ca37e5d5b40c..9c2c96da23d0 100644
1092 +--- a/arch/s390/kernel/relocate_kernel.S
1093 ++++ b/arch/s390/kernel/relocate_kernel.S
1094 +@@ -29,7 +29,6 @@
1095 + ENTRY(relocate_kernel)
1096 + basr %r13,0 # base address
1097 + .base:
1098 +- stnsm sys_msk-.base(%r13),0xfb # disable DAT
1099 + stctg %c0,%c15,ctlregs-.base(%r13)
1100 + stmg %r0,%r15,gprregs-.base(%r13)
1101 + lghi %r0,3
1102 +@@ -103,8 +102,6 @@ ENTRY(relocate_kernel)
1103 + .align 8
1104 + load_psw:
1105 + .long 0x00080000,0x80000000
1106 +- sys_msk:
1107 +- .quad 0
1108 + ctlregs:
1109 + .rept 16
1110 + .quad 0
1111 +diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
1112 +index 32aefb215e59..d85c64821a6b 100644
1113 +--- a/arch/s390/kernel/runtime_instr.c
1114 ++++ b/arch/s390/kernel/runtime_instr.c
1115 +@@ -50,11 +50,13 @@ void exit_thread_runtime_instr(void)
1116 + {
1117 + struct task_struct *task = current;
1118 +
1119 ++ preempt_disable();
1120 + if (!task->thread.ri_cb)
1121 + return;
1122 + disable_runtime_instr();
1123 + kfree(task->thread.ri_cb);
1124 + task->thread.ri_cb = NULL;
1125 ++ preempt_enable();
1126 + }
1127 +
1128 + SYSCALL_DEFINE1(s390_runtime_instr, int, command)
1129 +@@ -65,9 +67,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
1130 + return -EOPNOTSUPP;
1131 +
1132 + if (command == S390_RUNTIME_INSTR_STOP) {
1133 +- preempt_disable();
1134 + exit_thread_runtime_instr();
1135 +- preempt_enable();
1136 + return 0;
1137 + }
1138 +
1139 +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
1140 +index bcfc5668dcb2..518d9286b3d1 100644
1141 +--- a/arch/x86/entry/entry_64.S
1142 ++++ b/arch/x86/entry/entry_64.S
1143 +@@ -51,15 +51,19 @@ ENTRY(native_usergs_sysret64)
1144 + END(native_usergs_sysret64)
1145 + #endif /* CONFIG_PARAVIRT */
1146 +
1147 +-.macro TRACE_IRQS_IRETQ
1148 ++.macro TRACE_IRQS_FLAGS flags:req
1149 + #ifdef CONFIG_TRACE_IRQFLAGS
1150 +- bt $9, EFLAGS(%rsp) /* interrupts off? */
1151 ++ bt $9, \flags /* interrupts off? */
1152 + jnc 1f
1153 + TRACE_IRQS_ON
1154 + 1:
1155 + #endif
1156 + .endm
1157 +
1158 ++.macro TRACE_IRQS_IRETQ
1159 ++ TRACE_IRQS_FLAGS EFLAGS(%rsp)
1160 ++.endm
1161 ++
1162 + /*
1163 + * When dynamic function tracer is enabled it will add a breakpoint
1164 + * to all locations that it is about to modify, sync CPUs, update
1165 +@@ -148,8 +152,6 @@ ENTRY(entry_SYSCALL_64)
1166 + movq %rsp, PER_CPU_VAR(rsp_scratch)
1167 + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
1168 +
1169 +- TRACE_IRQS_OFF
1170 +-
1171 + /* Construct struct pt_regs on stack */
1172 + pushq $__USER_DS /* pt_regs->ss */
1173 + pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
1174 +@@ -170,6 +172,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
1175 + sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
1176 + UNWIND_HINT_REGS extra=0
1177 +
1178 ++ TRACE_IRQS_OFF
1179 ++
1180 + /*
1181 + * If we need to do entry work or if we guess we'll need to do
1182 + * exit work, go straight to the slow path.
1183 +@@ -923,11 +927,13 @@ ENTRY(native_load_gs_index)
1184 + FRAME_BEGIN
1185 + pushfq
1186 + DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
1187 ++ TRACE_IRQS_OFF
1188 + SWAPGS
1189 + .Lgs_change:
1190 + movl %edi, %gs
1191 + 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
1192 + SWAPGS
1193 ++ TRACE_IRQS_FLAGS (%rsp)
1194 + popfq
1195 + FRAME_END
1196 + ret
1197 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
1198 +index 9fb9a1f1e47b..f94855000d4e 100644
1199 +--- a/arch/x86/events/intel/core.c
1200 ++++ b/arch/x86/events/intel/core.c
1201 +@@ -3730,6 +3730,19 @@ EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
1202 + EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
1203 +
1204 + static struct attribute *hsw_events_attrs[] = {
1205 ++ EVENT_PTR(mem_ld_hsw),
1206 ++ EVENT_PTR(mem_st_hsw),
1207 ++ EVENT_PTR(td_slots_issued),
1208 ++ EVENT_PTR(td_slots_retired),
1209 ++ EVENT_PTR(td_fetch_bubbles),
1210 ++ EVENT_PTR(td_total_slots),
1211 ++ EVENT_PTR(td_total_slots_scale),
1212 ++ EVENT_PTR(td_recovery_bubbles),
1213 ++ EVENT_PTR(td_recovery_bubbles_scale),
1214 ++ NULL
1215 ++};
1216 ++
1217 ++static struct attribute *hsw_tsx_events_attrs[] = {
1218 + EVENT_PTR(tx_start),
1219 + EVENT_PTR(tx_commit),
1220 + EVENT_PTR(tx_abort),
1221 +@@ -3742,18 +3755,16 @@ static struct attribute *hsw_events_attrs[] = {
1222 + EVENT_PTR(el_conflict),
1223 + EVENT_PTR(cycles_t),
1224 + EVENT_PTR(cycles_ct),
1225 +- EVENT_PTR(mem_ld_hsw),
1226 +- EVENT_PTR(mem_st_hsw),
1227 +- EVENT_PTR(td_slots_issued),
1228 +- EVENT_PTR(td_slots_retired),
1229 +- EVENT_PTR(td_fetch_bubbles),
1230 +- EVENT_PTR(td_total_slots),
1231 +- EVENT_PTR(td_total_slots_scale),
1232 +- EVENT_PTR(td_recovery_bubbles),
1233 +- EVENT_PTR(td_recovery_bubbles_scale),
1234 + NULL
1235 + };
1236 +
1237 ++static __init struct attribute **get_hsw_events_attrs(void)
1238 ++{
1239 ++ return boot_cpu_has(X86_FEATURE_RTM) ?
1240 ++ merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) :
1241 ++ hsw_events_attrs;
1242 ++}
1243 ++
1244 + static ssize_t freeze_on_smi_show(struct device *cdev,
1245 + struct device_attribute *attr,
1246 + char *buf)
1247 +@@ -4182,7 +4193,7 @@ __init int intel_pmu_init(void)
1248 +
1249 + x86_pmu.hw_config = hsw_hw_config;
1250 + x86_pmu.get_event_constraints = hsw_get_event_constraints;
1251 +- x86_pmu.cpu_events = hsw_events_attrs;
1252 ++ x86_pmu.cpu_events = get_hsw_events_attrs();
1253 + x86_pmu.lbr_double_abort = true;
1254 + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
1255 + hsw_format_attr : nhm_format_attr;
1256 +@@ -4221,7 +4232,7 @@ __init int intel_pmu_init(void)
1257 +
1258 + x86_pmu.hw_config = hsw_hw_config;
1259 + x86_pmu.get_event_constraints = hsw_get_event_constraints;
1260 +- x86_pmu.cpu_events = hsw_events_attrs;
1261 ++ x86_pmu.cpu_events = get_hsw_events_attrs();
1262 + x86_pmu.limit_period = bdw_limit_period;
1263 + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
1264 + hsw_format_attr : nhm_format_attr;
1265 +@@ -4279,7 +4290,7 @@ __init int intel_pmu_init(void)
1266 + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
1267 + hsw_format_attr : nhm_format_attr;
1268 + extra_attr = merge_attr(extra_attr, skl_format_attr);
1269 +- x86_pmu.cpu_events = hsw_events_attrs;
1270 ++ x86_pmu.cpu_events = get_hsw_events_attrs();
1271 + intel_pmu_pebs_data_source_skl(
1272 + boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
1273 + pr_cont("Skylake events, ");
1274 +diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
1275 +index 410c5dadcee3..3a4b12809ab5 100644
1276 +--- a/arch/x86/kernel/mpparse.c
1277 ++++ b/arch/x86/kernel/mpparse.c
1278 +@@ -431,6 +431,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
1279 + }
1280 +
1281 + static unsigned long mpf_base;
1282 ++static bool mpf_found;
1283 +
1284 + static unsigned long __init get_mpc_size(unsigned long physptr)
1285 + {
1286 +@@ -504,7 +505,7 @@ void __init default_get_smp_config(unsigned int early)
1287 + if (!smp_found_config)
1288 + return;
1289 +
1290 +- if (!mpf_base)
1291 ++ if (!mpf_found)
1292 + return;
1293 +
1294 + if (acpi_lapic && early)
1295 +@@ -593,6 +594,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
1296 + smp_found_config = 1;
1297 + #endif
1298 + mpf_base = base;
1299 ++ mpf_found = true;
1300 +
1301 + pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
1302 + base, base + sizeof(*mpf) - 1, mpf);
1303 +@@ -858,7 +860,7 @@ static int __init update_mp_table(void)
1304 + if (!enable_update_mptable)
1305 + return 0;
1306 +
1307 +- if (!mpf_base)
1308 ++ if (!mpf_found)
1309 + return 0;
1310 +
1311 + mpf = early_memremap(mpf_base, sizeof(*mpf));
1312 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1313 +index 0e68f0b3cbf7..ca209a4a7834 100644
1314 +--- a/arch/x86/kvm/svm.c
1315 ++++ b/arch/x86/kvm/svm.c
1316 +@@ -3657,6 +3657,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1317 + u32 ecx = msr->index;
1318 + u64 data = msr->data;
1319 + switch (ecx) {
1320 ++ case MSR_IA32_CR_PAT:
1321 ++ if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
1322 ++ return 1;
1323 ++ vcpu->arch.pat = data;
1324 ++ svm->vmcb->save.g_pat = data;
1325 ++ mark_dirty(svm->vmcb, VMCB_NPT);
1326 ++ break;
1327 + case MSR_IA32_TSC:
1328 + kvm_write_tsc(vcpu, msr);
1329 + break;
1330 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1331 +index a6f4f095f8f4..21cad7068cbf 100644
1332 +--- a/arch/x86/kvm/vmx.c
1333 ++++ b/arch/x86/kvm/vmx.c
1334 +@@ -202,6 +202,10 @@ struct loaded_vmcs {
1335 + bool nmi_known_unmasked;
1336 + unsigned long vmcs_host_cr3; /* May not match real cr3 */
1337 + unsigned long vmcs_host_cr4; /* May not match real cr4 */
1338 ++ /* Support for vnmi-less CPUs */
1339 ++ int soft_vnmi_blocked;
1340 ++ ktime_t entry_time;
1341 ++ s64 vnmi_blocked_time;
1342 + struct list_head loaded_vmcss_on_cpu_link;
1343 + };
1344 +
1345 +@@ -1286,6 +1290,11 @@ static inline bool cpu_has_vmx_invpcid(void)
1346 + SECONDARY_EXEC_ENABLE_INVPCID;
1347 + }
1348 +
1349 ++static inline bool cpu_has_virtual_nmis(void)
1350 ++{
1351 ++ return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1352 ++}
1353 ++
1354 + static inline bool cpu_has_vmx_wbinvd_exit(void)
1355 + {
1356 + return vmcs_config.cpu_based_2nd_exec_ctrl &
1357 +@@ -1343,11 +1352,6 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1358 + (vmcs12->secondary_vm_exec_control & bit);
1359 + }
1360 +
1361 +-static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1362 +-{
1363 +- return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1364 +-}
1365 +-
1366 + static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1367 + {
1368 + return vmcs12->pin_based_vm_exec_control &
1369 +@@ -3699,9 +3703,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
1370 + &_vmexit_control) < 0)
1371 + return -EIO;
1372 +
1373 +- min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
1374 +- PIN_BASED_VIRTUAL_NMIS;
1375 +- opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER;
1376 ++ min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
1377 ++ opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
1378 ++ PIN_BASED_VMX_PREEMPTION_TIMER;
1379 + if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
1380 + &_pin_based_exec_control) < 0)
1381 + return -EIO;
1382 +@@ -5667,7 +5671,8 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
1383 +
1384 + static void enable_nmi_window(struct kvm_vcpu *vcpu)
1385 + {
1386 +- if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
1387 ++ if (!cpu_has_virtual_nmis() ||
1388 ++ vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
1389 + enable_irq_window(vcpu);
1390 + return;
1391 + }
1392 +@@ -5707,6 +5712,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
1393 + {
1394 + struct vcpu_vmx *vmx = to_vmx(vcpu);
1395 +
1396 ++ if (!cpu_has_virtual_nmis()) {
1397 ++ /*
1398 ++ * Tracking the NMI-blocked state in software is built upon
1399 ++ * finding the next open IRQ window. This, in turn, depends on
1400 ++ * well-behaving guests: They have to keep IRQs disabled at
1401 ++ * least as long as the NMI handler runs. Otherwise we may
1402 ++ * cause NMI nesting, maybe breaking the guest. But as this is
1403 ++ * highly unlikely, we can live with the residual risk.
1404 ++ */
1405 ++ vmx->loaded_vmcs->soft_vnmi_blocked = 1;
1406 ++ vmx->loaded_vmcs->vnmi_blocked_time = 0;
1407 ++ }
1408 ++
1409 + ++vcpu->stat.nmi_injections;
1410 + vmx->loaded_vmcs->nmi_known_unmasked = false;
1411 +
1412 +@@ -5725,6 +5743,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
1413 + struct vcpu_vmx *vmx = to_vmx(vcpu);
1414 + bool masked;
1415 +
1416 ++ if (!cpu_has_virtual_nmis())
1417 ++ return vmx->loaded_vmcs->soft_vnmi_blocked;
1418 + if (vmx->loaded_vmcs->nmi_known_unmasked)
1419 + return false;
1420 + masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
1421 +@@ -5736,13 +5756,20 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
1422 + {
1423 + struct vcpu_vmx *vmx = to_vmx(vcpu);
1424 +
1425 +- vmx->loaded_vmcs->nmi_known_unmasked = !masked;
1426 +- if (masked)
1427 +- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
1428 +- GUEST_INTR_STATE_NMI);
1429 +- else
1430 +- vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
1431 +- GUEST_INTR_STATE_NMI);
1432 ++ if (!cpu_has_virtual_nmis()) {
1433 ++ if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
1434 ++ vmx->loaded_vmcs->soft_vnmi_blocked = masked;
1435 ++ vmx->loaded_vmcs->vnmi_blocked_time = 0;
1436 ++ }
1437 ++ } else {
1438 ++ vmx->loaded_vmcs->nmi_known_unmasked = !masked;
1439 ++ if (masked)
1440 ++ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
1441 ++ GUEST_INTR_STATE_NMI);
1442 ++ else
1443 ++ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
1444 ++ GUEST_INTR_STATE_NMI);
1445 ++ }
1446 + }
1447 +
1448 + static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
1449 +@@ -5750,6 +5777,10 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
1450 + if (to_vmx(vcpu)->nested.nested_run_pending)
1451 + return 0;
1452 +
1453 ++ if (!cpu_has_virtual_nmis() &&
1454 ++ to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
1455 ++ return 0;
1456 ++
1457 + return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
1458 + (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
1459 + | GUEST_INTR_STATE_NMI));
1460 +@@ -6478,6 +6509,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
1461 + * AAK134, BY25.
1462 + */
1463 + if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
1464 ++ cpu_has_virtual_nmis() &&
1465 + (exit_qualification & INTR_INFO_UNBLOCK_NMI))
1466 + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
1467 +
1468 +@@ -6961,7 +6993,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
1469 + }
1470 +
1471 + /* Create a new VMCS */
1472 +- item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
1473 ++ item = kzalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
1474 + if (!item)
1475 + return NULL;
1476 + item->vmcs02.vmcs = alloc_vmcs();
1477 +@@ -7978,6 +8010,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
1478 + * "blocked by NMI" bit has to be set before next VM entry.
1479 + */
1480 + if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
1481 ++ cpu_has_virtual_nmis() &&
1482 + (exit_qualification & INTR_INFO_UNBLOCK_NMI))
1483 + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
1484 + GUEST_INTR_STATE_NMI);
1485 +@@ -8822,6 +8855,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
1486 + return 0;
1487 + }
1488 +
1489 ++ if (unlikely(!cpu_has_virtual_nmis() &&
1490 ++ vmx->loaded_vmcs->soft_vnmi_blocked)) {
1491 ++ if (vmx_interrupt_allowed(vcpu)) {
1492 ++ vmx->loaded_vmcs->soft_vnmi_blocked = 0;
1493 ++ } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
1494 ++ vcpu->arch.nmi_pending) {
1495 ++ /*
1496 ++ * This CPU don't support us in finding the end of an
1497 ++ * NMI-blocked window if the guest runs with IRQs
1498 ++ * disabled. So we pull the trigger after 1 s of
1499 ++ * futile waiting, but inform the user about this.
1500 ++ */
1501 ++ printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
1502 ++ "state on VCPU %d after 1 s timeout\n",
1503 ++ __func__, vcpu->vcpu_id);
1504 ++ vmx->loaded_vmcs->soft_vnmi_blocked = 0;
1505 ++ }
1506 ++ }
1507 ++
1508 + if (exit_reason < kvm_vmx_max_exit_handlers
1509 + && kvm_vmx_exit_handlers[exit_reason])
1510 + return kvm_vmx_exit_handlers[exit_reason](vcpu);
1511 +@@ -9104,33 +9156,38 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
1512 +
1513 + idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
1514 +
1515 +- if (vmx->loaded_vmcs->nmi_known_unmasked)
1516 +- return;
1517 +- /*
1518 +- * Can't use vmx->exit_intr_info since we're not sure what
1519 +- * the exit reason is.
1520 +- */
1521 +- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1522 +- unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
1523 +- vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
1524 +- /*
1525 +- * SDM 3: 27.7.1.2 (September 2008)
1526 +- * Re-set bit "block by NMI" before VM entry if vmexit caused by
1527 +- * a guest IRET fault.
1528 +- * SDM 3: 23.2.2 (September 2008)
1529 +- * Bit 12 is undefined in any of the following cases:
1530 +- * If the VM exit sets the valid bit in the IDT-vectoring
1531 +- * information field.
1532 +- * If the VM exit is due to a double fault.
1533 +- */
1534 +- if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
1535 +- vector != DF_VECTOR && !idtv_info_valid)
1536 +- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
1537 +- GUEST_INTR_STATE_NMI);
1538 +- else
1539 +- vmx->loaded_vmcs->nmi_known_unmasked =
1540 +- !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
1541 +- & GUEST_INTR_STATE_NMI);
1542 ++ if (cpu_has_virtual_nmis()) {
1543 ++ if (vmx->loaded_vmcs->nmi_known_unmasked)
1544 ++ return;
1545 ++ /*
1546 ++ * Can't use vmx->exit_intr_info since we're not sure what
1547 ++ * the exit reason is.
1548 ++ */
1549 ++ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1550 ++ unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
1551 ++ vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
1552 ++ /*
1553 ++ * SDM 3: 27.7.1.2 (September 2008)
1554 ++ * Re-set bit "block by NMI" before VM entry if vmexit caused by
1555 ++ * a guest IRET fault.
1556 ++ * SDM 3: 23.2.2 (September 2008)
1557 ++ * Bit 12 is undefined in any of the following cases:
1558 ++ * If the VM exit sets the valid bit in the IDT-vectoring
1559 ++ * information field.
1560 ++ * If the VM exit is due to a double fault.
1561 ++ */
1562 ++ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
1563 ++ vector != DF_VECTOR && !idtv_info_valid)
1564 ++ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
1565 ++ GUEST_INTR_STATE_NMI);
1566 ++ else
1567 ++ vmx->loaded_vmcs->nmi_known_unmasked =
1568 ++ !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
1569 ++ & GUEST_INTR_STATE_NMI);
1570 ++ } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
1571 ++ vmx->loaded_vmcs->vnmi_blocked_time +=
1572 ++ ktime_to_ns(ktime_sub(ktime_get(),
1573 ++ vmx->loaded_vmcs->entry_time));
1574 + }
1575 +
1576 + static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
1577 +@@ -9247,6 +9304,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
1578 + struct vcpu_vmx *vmx = to_vmx(vcpu);
1579 + unsigned long debugctlmsr, cr3, cr4;
1580 +
1581 ++ /* Record the guest's net vcpu time for enforced NMI injections. */
1582 ++ if (unlikely(!cpu_has_virtual_nmis() &&
1583 ++ vmx->loaded_vmcs->soft_vnmi_blocked))
1584 ++ vmx->loaded_vmcs->entry_time = ktime_get();
1585 ++
1586 + /* Don't enter VMX if guest state is invalid, let the exit handler
1587 + start emulation until we arrive back to a valid state */
1588 + if (vmx->emulation_required)
1589 +@@ -11325,6 +11387,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
1590 + vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
1591 + vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
1592 + vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
1593 ++ vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
1594 ++ vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
1595 +
1596 + /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
1597 + if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
1598 +diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
1599 +index 12e377184ee4..c4d55919fac1 100644
1600 +--- a/arch/x86/lib/x86-opcode-map.txt
1601 ++++ b/arch/x86/lib/x86-opcode-map.txt
1602 +@@ -896,7 +896,7 @@ EndTable
1603 +
1604 + GrpTable: Grp3_1
1605 + 0: TEST Eb,Ib
1606 +-1:
1607 ++1: TEST Eb,Ib
1608 + 2: NOT Eb
1609 + 3: NEG Eb
1610 + 4: MUL AL,Eb
1611 +diff --git a/block/blk-core.c b/block/blk-core.c
1612 +index 048be4aa6024..33ee583cfe45 100644
1613 +--- a/block/blk-core.c
1614 ++++ b/block/blk-core.c
1615 +@@ -333,6 +333,7 @@ EXPORT_SYMBOL(blk_stop_queue);
1616 + void blk_sync_queue(struct request_queue *q)
1617 + {
1618 + del_timer_sync(&q->timeout);
1619 ++ cancel_work_sync(&q->timeout_work);
1620 +
1621 + if (q->mq_ops) {
1622 + struct blk_mq_hw_ctx *hctx;
1623 +@@ -844,6 +845,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1624 + setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
1625 + laptop_mode_timer_fn, (unsigned long) q);
1626 + setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
1627 ++ INIT_WORK(&q->timeout_work, NULL);
1628 + INIT_LIST_HEAD(&q->queue_head);
1629 + INIT_LIST_HEAD(&q->timeout_list);
1630 + INIT_LIST_HEAD(&q->icq_list);
1631 +diff --git a/block/blk-timeout.c b/block/blk-timeout.c
1632 +index 17ec83bb0900..6427be7ac363 100644
1633 +--- a/block/blk-timeout.c
1634 ++++ b/block/blk-timeout.c
1635 +@@ -134,8 +134,6 @@ void blk_timeout_work(struct work_struct *work)
1636 + struct request *rq, *tmp;
1637 + int next_set = 0;
1638 +
1639 +- if (blk_queue_enter(q, true))
1640 +- return;
1641 + spin_lock_irqsave(q->queue_lock, flags);
1642 +
1643 + list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
1644 +@@ -145,7 +143,6 @@ void blk_timeout_work(struct work_struct *work)
1645 + mod_timer(&q->timeout, round_jiffies_up(next));
1646 +
1647 + spin_unlock_irqrestore(q->queue_lock, flags);
1648 +- blk_queue_exit(q);
1649 + }
1650 +
1651 + /**
1652 +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
1653 +index fbcc73f7a099..18af71057b44 100644
1654 +--- a/drivers/acpi/device_pm.c
1655 ++++ b/drivers/acpi/device_pm.c
1656 +@@ -387,6 +387,7 @@ EXPORT_SYMBOL(acpi_bus_power_manageable);
1657 +
1658 + #ifdef CONFIG_PM
1659 + static DEFINE_MUTEX(acpi_pm_notifier_lock);
1660 ++static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
1661 +
1662 + void acpi_pm_wakeup_event(struct device *dev)
1663 + {
1664 +@@ -443,24 +444,25 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
1665 + if (!dev && !func)
1666 + return AE_BAD_PARAMETER;
1667 +
1668 +- mutex_lock(&acpi_pm_notifier_lock);
1669 ++ mutex_lock(&acpi_pm_notifier_install_lock);
1670 +
1671 + if (adev->wakeup.flags.notifier_present)
1672 + goto out;
1673 +
1674 +- adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
1675 +- adev->wakeup.context.dev = dev;
1676 +- adev->wakeup.context.func = func;
1677 +-
1678 + status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
1679 + acpi_pm_notify_handler, NULL);
1680 + if (ACPI_FAILURE(status))
1681 + goto out;
1682 +
1683 ++ mutex_lock(&acpi_pm_notifier_lock);
1684 ++ adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
1685 ++ adev->wakeup.context.dev = dev;
1686 ++ adev->wakeup.context.func = func;
1687 + adev->wakeup.flags.notifier_present = true;
1688 ++ mutex_unlock(&acpi_pm_notifier_lock);
1689 +
1690 + out:
1691 +- mutex_unlock(&acpi_pm_notifier_lock);
1692 ++ mutex_unlock(&acpi_pm_notifier_install_lock);
1693 + return status;
1694 + }
1695 +
1696 +@@ -472,7 +474,7 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
1697 + {
1698 + acpi_status status = AE_BAD_PARAMETER;
1699 +
1700 +- mutex_lock(&acpi_pm_notifier_lock);
1701 ++ mutex_lock(&acpi_pm_notifier_install_lock);
1702 +
1703 + if (!adev->wakeup.flags.notifier_present)
1704 + goto out;
1705 +@@ -483,14 +485,15 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
1706 + if (ACPI_FAILURE(status))
1707 + goto out;
1708 +
1709 ++ mutex_lock(&acpi_pm_notifier_lock);
1710 + adev->wakeup.context.func = NULL;
1711 + adev->wakeup.context.dev = NULL;
1712 + wakeup_source_unregister(adev->wakeup.ws);
1713 +-
1714 + adev->wakeup.flags.notifier_present = false;
1715 ++ mutex_unlock(&acpi_pm_notifier_lock);
1716 +
1717 + out:
1718 +- mutex_unlock(&acpi_pm_notifier_lock);
1719 ++ mutex_unlock(&acpi_pm_notifier_install_lock);
1720 + return status;
1721 + }
1722 +
1723 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1724 +index 236b14324780..82b3ce5e937e 100644
1725 +--- a/drivers/acpi/ec.c
1726 ++++ b/drivers/acpi/ec.c
1727 +@@ -486,8 +486,11 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
1728 + {
1729 + if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
1730 + ec_log_drv("event unblocked");
1731 +- if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
1732 +- advance_transaction(ec);
1733 ++ /*
1734 ++ * Unconditionally invoke this once after enabling the event
1735 ++ * handling mechanism to detect the pending events.
1736 ++ */
1737 ++ advance_transaction(ec);
1738 + }
1739 +
1740 + static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
1741 +@@ -1456,11 +1459,10 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
1742 + if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
1743 + ec->reference_count >= 1)
1744 + acpi_ec_enable_gpe(ec, true);
1745 +-
1746 +- /* EC is fully operational, allow queries */
1747 +- acpi_ec_enable_event(ec);
1748 + }
1749 + }
1750 ++ /* EC is fully operational, allow queries */
1751 ++ acpi_ec_enable_event(ec);
1752 +
1753 + return 0;
1754 + }
1755 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
1756 +index e4effef0c83f..ea20e0eb4d5a 100644
1757 +--- a/drivers/ata/libata-eh.c
1758 ++++ b/drivers/ata/libata-eh.c
1759 +@@ -2264,8 +2264,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
1760 + if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
1761 + eflags |= ATA_EFLAG_DUBIOUS_XFER;
1762 + ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
1763 ++ trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
1764 + }
1765 +- trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
1766 + DPRINTK("EXIT\n");
1767 + }
1768 +
1769 +diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
1770 +index 0b718886479b..87509cb69f79 100644
1771 +--- a/drivers/base/power/opp/of.c
1772 ++++ b/drivers/base/power/opp/of.c
1773 +@@ -397,6 +397,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
1774 + dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1775 + ret);
1776 + _dev_pm_opp_remove_table(opp_table, dev, false);
1777 ++ of_node_put(np);
1778 + goto put_opp_table;
1779 + }
1780 + }
1781 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1782 +index 9adfb5445f8d..5f2a4240a204 100644
1783 +--- a/drivers/block/nbd.c
1784 ++++ b/drivers/block/nbd.c
1785 +@@ -288,15 +288,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
1786 + cmd->status = BLK_STS_TIMEOUT;
1787 + return BLK_EH_HANDLED;
1788 + }
1789 +-
1790 +- /* If we are waiting on our dead timer then we could get timeout
1791 +- * callbacks for our request. For this we just want to reset the timer
1792 +- * and let the queue side take care of everything.
1793 +- */
1794 +- if (!completion_done(&cmd->send_complete)) {
1795 +- nbd_config_put(nbd);
1796 +- return BLK_EH_RESET_TIMER;
1797 +- }
1798 + config = nbd->config;
1799 +
1800 + if (config->num_connections > 1) {
1801 +@@ -723,9 +714,9 @@ static int wait_for_reconnect(struct nbd_device *nbd)
1802 + return 0;
1803 + if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
1804 + return 0;
1805 +- wait_event_interruptible_timeout(config->conn_wait,
1806 +- atomic_read(&config->live_connections),
1807 +- config->dead_conn_timeout);
1808 ++ wait_event_timeout(config->conn_wait,
1809 ++ atomic_read(&config->live_connections),
1810 ++ config->dead_conn_timeout);
1811 + return atomic_read(&config->live_connections);
1812 + }
1813 +
1814 +@@ -740,6 +731,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1815 + if (!refcount_inc_not_zero(&nbd->config_refs)) {
1816 + dev_err_ratelimited(disk_to_dev(nbd->disk),
1817 + "Socks array is empty\n");
1818 ++ blk_mq_start_request(req);
1819 + return -EINVAL;
1820 + }
1821 + config = nbd->config;
1822 +@@ -748,6 +740,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1823 + dev_err_ratelimited(disk_to_dev(nbd->disk),
1824 + "Attempted send on invalid socket\n");
1825 + nbd_config_put(nbd);
1826 ++ blk_mq_start_request(req);
1827 + return -EINVAL;
1828 + }
1829 + cmd->status = BLK_STS_OK;
1830 +@@ -771,6 +764,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1831 + */
1832 + sock_shutdown(nbd);
1833 + nbd_config_put(nbd);
1834 ++ blk_mq_start_request(req);
1835 + return -EIO;
1836 + }
1837 + goto again;
1838 +@@ -781,6 +775,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1839 + * here so that it gets put _after_ the request that is already on the
1840 + * dispatch list.
1841 + */
1842 ++ blk_mq_start_request(req);
1843 + if (unlikely(nsock->pending && nsock->pending != req)) {
1844 + blk_mq_requeue_request(req, true);
1845 + ret = 0;
1846 +@@ -793,10 +788,10 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1847 + ret = nbd_send_cmd(nbd, cmd, index);
1848 + if (ret == -EAGAIN) {
1849 + dev_err_ratelimited(disk_to_dev(nbd->disk),
1850 +- "Request send failed trying another connection\n");
1851 ++ "Request send failed, requeueing\n");
1852 + nbd_mark_nsock_dead(nbd, nsock, 1);
1853 +- mutex_unlock(&nsock->tx_lock);
1854 +- goto again;
1855 ++ blk_mq_requeue_request(req, true);
1856 ++ ret = 0;
1857 + }
1858 + out:
1859 + mutex_unlock(&nsock->tx_lock);
1860 +@@ -820,7 +815,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
1861 + * done sending everything over the wire.
1862 + */
1863 + init_completion(&cmd->send_complete);
1864 +- blk_mq_start_request(bd->rq);
1865 +
1866 + /* We can be called directly from the user space process, which means we
1867 + * could possibly have signals pending so our sendmsg will fail. In
1868 +diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
1869 +index d00c4fdae924..bd810d01538a 100644
1870 +--- a/drivers/bluetooth/btqcomsmd.c
1871 ++++ b/drivers/bluetooth/btqcomsmd.c
1872 +@@ -26,6 +26,7 @@
1873 + struct btqcomsmd {
1874 + struct hci_dev *hdev;
1875 +
1876 ++ bdaddr_t bdaddr;
1877 + struct rpmsg_endpoint *acl_channel;
1878 + struct rpmsg_endpoint *cmd_channel;
1879 + };
1880 +@@ -100,6 +101,38 @@ static int btqcomsmd_close(struct hci_dev *hdev)
1881 + return 0;
1882 + }
1883 +
1884 ++static int btqcomsmd_setup(struct hci_dev *hdev)
1885 ++{
1886 ++ struct btqcomsmd *btq = hci_get_drvdata(hdev);
1887 ++ struct sk_buff *skb;
1888 ++ int err;
1889 ++
1890 ++ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
1891 ++ if (IS_ERR(skb))
1892 ++ return PTR_ERR(skb);
1893 ++ kfree_skb(skb);
1894 ++
1895 ++ /* Devices do not have persistent storage for BD address. If no
1896 ++ * BD address has been retrieved during probe, mark the device
1897 ++ * as having an invalid BD address.
1898 ++ */
1899 ++ if (!bacmp(&btq->bdaddr, BDADDR_ANY)) {
1900 ++ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
1901 ++ return 0;
1902 ++ }
1903 ++
1904 ++ /* When setting a configured BD address fails, mark the device
1905 ++ * as having an invalid BD address.
1906 ++ */
1907 ++ err = qca_set_bdaddr_rome(hdev, &btq->bdaddr);
1908 ++ if (err) {
1909 ++ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
1910 ++ return 0;
1911 ++ }
1912 ++
1913 ++ return 0;
1914 ++}
1915 ++
1916 + static int btqcomsmd_probe(struct platform_device *pdev)
1917 + {
1918 + struct btqcomsmd *btq;
1919 +@@ -135,6 +168,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
1920 + hdev->open = btqcomsmd_open;
1921 + hdev->close = btqcomsmd_close;
1922 + hdev->send = btqcomsmd_send;
1923 ++ hdev->setup = btqcomsmd_setup;
1924 + hdev->set_bdaddr = qca_set_bdaddr_rome;
1925 +
1926 + ret = hci_register_dev(hdev);
1927 +diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
1928 +index 13eb04f72389..148815470431 100644
1929 +--- a/drivers/clk/ti/clk-dra7-atl.c
1930 ++++ b/drivers/clk/ti/clk-dra7-atl.c
1931 +@@ -274,8 +274,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
1932 +
1933 + /* Get configuration for the ATL instances */
1934 + snprintf(prop, sizeof(prop), "atl%u", i);
1935 +- of_node_get(node);
1936 +- cfg_node = of_find_node_by_name(node, prop);
1937 ++ cfg_node = of_get_child_by_name(node, prop);
1938 + if (cfg_node) {
1939 + ret = of_property_read_u32(cfg_node, "bws",
1940 + &cdesc->bws);
1941 +diff --git a/drivers/dax/super.c b/drivers/dax/super.c
1942 +index 557b93703532..c4cd034a3820 100644
1943 +--- a/drivers/dax/super.c
1944 ++++ b/drivers/dax/super.c
1945 +@@ -344,6 +344,9 @@ static struct inode *dax_alloc_inode(struct super_block *sb)
1946 + struct inode *inode;
1947 +
1948 + dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
1949 ++ if (!dax_dev)
1950 ++ return NULL;
1951 ++
1952 + inode = &dax_dev->inode;
1953 + inode->i_rdev = 0;
1954 + return inode;
1955 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
1956 +index 4c4b46586af2..2af79e4f3235 100644
1957 +--- a/drivers/infiniband/core/cm.c
1958 ++++ b/drivers/infiniband/core/cm.c
1959 +@@ -1575,7 +1575,7 @@ static void cm_format_req_event(struct cm_work *work,
1960 + param->bth_pkey = cm_get_bth_pkey(work);
1961 + param->port = cm_id_priv->av.port->port_num;
1962 + param->primary_path = &work->path[0];
1963 +- if (req_msg->alt_local_lid)
1964 ++ if (cm_req_has_alt_path(req_msg))
1965 + param->alternate_path = &work->path[1];
1966 + else
1967 + param->alternate_path = NULL;
1968 +@@ -1856,7 +1856,8 @@ static int cm_req_handler(struct cm_work *work)
1969 + cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
1970 +
1971 + memset(&work->path[0], 0, sizeof(work->path[0]));
1972 +- memset(&work->path[1], 0, sizeof(work->path[1]));
1973 ++ if (cm_req_has_alt_path(req_msg))
1974 ++ memset(&work->path[1], 0, sizeof(work->path[1]));
1975 + grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
1976 + ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
1977 + work->port->port_num,
1978 +@@ -3817,14 +3818,16 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
1979 + struct cm_port *port = mad_agent->context;
1980 + struct cm_work *work;
1981 + enum ib_cm_event_type event;
1982 ++ bool alt_path = false;
1983 + u16 attr_id;
1984 + int paths = 0;
1985 + int going_down = 0;
1986 +
1987 + switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
1988 + case CM_REQ_ATTR_ID:
1989 +- paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
1990 +- alt_local_lid != 0);
1991 ++ alt_path = cm_req_has_alt_path((struct cm_req_msg *)
1992 ++ mad_recv_wc->recv_buf.mad);
1993 ++ paths = 1 + (alt_path != 0);
1994 + event = IB_CM_REQ_RECEIVED;
1995 + break;
1996 + case CM_MRA_ATTR_ID:
1997 +diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
1998 +index f8f53bb90837..cb91245e9163 100644
1999 +--- a/drivers/infiniband/core/mad.c
2000 ++++ b/drivers/infiniband/core/mad.c
2001 +@@ -1974,14 +1974,15 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
2002 + unsigned long flags;
2003 + int ret;
2004 +
2005 ++ INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
2006 + ret = ib_mad_enforce_security(mad_agent_priv,
2007 + mad_recv_wc->wc->pkey_index);
2008 + if (ret) {
2009 + ib_free_recv_mad(mad_recv_wc);
2010 + deref_mad_agent(mad_agent_priv);
2011 ++ return;
2012 + }
2013 +
2014 +- INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
2015 + list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
2016 + if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2017 + mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
2018 +diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
2019 +index 88bdafb297f5..28607bb42d87 100644
2020 +--- a/drivers/infiniband/core/security.c
2021 ++++ b/drivers/infiniband/core/security.c
2022 +@@ -87,16 +87,14 @@ static int enforce_qp_pkey_security(u16 pkey,
2023 + if (ret)
2024 + return ret;
2025 +
2026 +- if (qp_sec->qp == qp_sec->qp->real_qp) {
2027 +- list_for_each_entry(shared_qp_sec,
2028 +- &qp_sec->shared_qp_list,
2029 +- shared_qp_list) {
2030 +- ret = security_ib_pkey_access(shared_qp_sec->security,
2031 +- subnet_prefix,
2032 +- pkey);
2033 +- if (ret)
2034 +- return ret;
2035 +- }
2036 ++ list_for_each_entry(shared_qp_sec,
2037 ++ &qp_sec->shared_qp_list,
2038 ++ shared_qp_list) {
2039 ++ ret = security_ib_pkey_access(shared_qp_sec->security,
2040 ++ subnet_prefix,
2041 ++ pkey);
2042 ++ if (ret)
2043 ++ return ret;
2044 + }
2045 + return 0;
2046 + }
2047 +@@ -560,15 +558,22 @@ int ib_security_modify_qp(struct ib_qp *qp,
2048 + int ret = 0;
2049 + struct ib_ports_pkeys *tmp_pps;
2050 + struct ib_ports_pkeys *new_pps;
2051 +- bool special_qp = (qp->qp_type == IB_QPT_SMI ||
2052 +- qp->qp_type == IB_QPT_GSI ||
2053 +- qp->qp_type >= IB_QPT_RESERVED1);
2054 ++ struct ib_qp *real_qp = qp->real_qp;
2055 ++ bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
2056 ++ real_qp->qp_type == IB_QPT_GSI ||
2057 ++ real_qp->qp_type >= IB_QPT_RESERVED1);
2058 + bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
2059 + (qp_attr_mask & IB_QP_ALT_PATH));
2060 +
2061 ++ /* The port/pkey settings are maintained only for the real QP. Open
2062 ++ * handles on the real QP will be in the shared_qp_list. When
2063 ++ * enforcing security on the real QP all the shared QPs will be
2064 ++ * checked as well.
2065 ++ */
2066 ++
2067 + if (pps_change && !special_qp) {
2068 +- mutex_lock(&qp->qp_sec->mutex);
2069 +- new_pps = get_new_pps(qp,
2070 ++ mutex_lock(&real_qp->qp_sec->mutex);
2071 ++ new_pps = get_new_pps(real_qp,
2072 + qp_attr,
2073 + qp_attr_mask);
2074 +
2075 +@@ -586,14 +591,14 @@ int ib_security_modify_qp(struct ib_qp *qp,
2076 +
2077 + if (!ret)
2078 + ret = check_qp_port_pkey_settings(new_pps,
2079 +- qp->qp_sec);
2080 ++ real_qp->qp_sec);
2081 + }
2082 +
2083 + if (!ret)
2084 +- ret = qp->device->modify_qp(qp->real_qp,
2085 +- qp_attr,
2086 +- qp_attr_mask,
2087 +- udata);
2088 ++ ret = real_qp->device->modify_qp(real_qp,
2089 ++ qp_attr,
2090 ++ qp_attr_mask,
2091 ++ udata);
2092 +
2093 + if (pps_change && !special_qp) {
2094 + /* Clean up the lists and free the appropriate
2095 +@@ -602,8 +607,8 @@ int ib_security_modify_qp(struct ib_qp *qp,
2096 + if (ret) {
2097 + tmp_pps = new_pps;
2098 + } else {
2099 +- tmp_pps = qp->qp_sec->ports_pkeys;
2100 +- qp->qp_sec->ports_pkeys = new_pps;
2101 ++ tmp_pps = real_qp->qp_sec->ports_pkeys;
2102 ++ real_qp->qp_sec->ports_pkeys = new_pps;
2103 + }
2104 +
2105 + if (tmp_pps) {
2106 +@@ -611,7 +616,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
2107 + port_pkey_list_remove(&tmp_pps->alt);
2108 + }
2109 + kfree(tmp_pps);
2110 +- mutex_unlock(&qp->qp_sec->mutex);
2111 ++ mutex_unlock(&real_qp->qp_sec->mutex);
2112 + }
2113 + return ret;
2114 + }
2115 +diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
2116 +index 0be42787759f..312444386f54 100644
2117 +--- a/drivers/infiniband/hw/hfi1/chip.c
2118 ++++ b/drivers/infiniband/hw/hfi1/chip.c
2119 +@@ -13074,7 +13074,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
2120 + first_sdma = last_general;
2121 + last_sdma = first_sdma + dd->num_sdma;
2122 + first_rx = last_sdma;
2123 +- last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
2124 ++ last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
2125 +
2126 + /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
2127 + dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
2128 +@@ -13294,8 +13294,9 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
2129 + * slow source, SDMACleanupDone)
2130 + * N interrupts - one per used SDMA engine
2131 + * M interrupt - one per kernel receive context
2132 ++ * V interrupt - one for each VNIC context
2133 + */
2134 +- total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
2135 ++ total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
2136 +
2137 + /* ask for MSI-X interrupts */
2138 + request = request_msix(dd, total);
2139 +@@ -13356,10 +13357,12 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
2140 + * in array of contexts
2141 + * freectxts - number of free user contexts
2142 + * num_send_contexts - number of PIO send contexts being used
2143 ++ * num_vnic_contexts - number of contexts reserved for VNIC
2144 + */
2145 + static int set_up_context_variables(struct hfi1_devdata *dd)
2146 + {
2147 + unsigned long num_kernel_contexts;
2148 ++ u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
2149 + int total_contexts;
2150 + int ret;
2151 + unsigned ngroups;
2152 +@@ -13393,6 +13396,14 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
2153 + num_kernel_contexts);
2154 + num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
2155 + }
2156 ++
2157 ++ /* Accommodate VNIC contexts if possible */
2158 ++ if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
2159 ++ dd_dev_err(dd, "No receive contexts available for VNIC\n");
2160 ++ num_vnic_contexts = 0;
2161 ++ }
2162 ++ total_contexts = num_kernel_contexts + num_vnic_contexts;
2163 ++
2164 + /*
2165 + * User contexts:
2166 + * - default to 1 user context per real (non-HT) CPU core if
2167 +@@ -13402,19 +13413,16 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
2168 + num_user_contexts =
2169 + cpumask_weight(&node_affinity.real_cpu_mask);
2170 +
2171 +- total_contexts = num_kernel_contexts + num_user_contexts;
2172 +-
2173 + /*
2174 + * Adjust the counts given a global max.
2175 + */
2176 +- if (total_contexts > dd->chip_rcv_contexts) {
2177 ++ if (total_contexts + num_user_contexts > dd->chip_rcv_contexts) {
2178 + dd_dev_err(dd,
2179 + "Reducing # user receive contexts to: %d, from %d\n",
2180 +- (int)(dd->chip_rcv_contexts - num_kernel_contexts),
2181 ++ (int)(dd->chip_rcv_contexts - total_contexts),
2182 + (int)num_user_contexts);
2183 +- num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
2184 + /* recalculate */
2185 +- total_contexts = num_kernel_contexts + num_user_contexts;
2186 ++ num_user_contexts = dd->chip_rcv_contexts - total_contexts;
2187 + }
2188 +
2189 + /* each user context requires an entry in the RMT */
2190 +@@ -13427,25 +13435,24 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
2191 + user_rmt_reduced);
2192 + /* recalculate */
2193 + num_user_contexts = user_rmt_reduced;
2194 +- total_contexts = num_kernel_contexts + num_user_contexts;
2195 + }
2196 +
2197 +- /* Accommodate VNIC contexts */
2198 +- if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
2199 +- total_contexts += HFI1_NUM_VNIC_CTXT;
2200 ++ total_contexts += num_user_contexts;
2201 +
2202 + /* the first N are kernel contexts, the rest are user/vnic contexts */
2203 + dd->num_rcv_contexts = total_contexts;
2204 + dd->n_krcv_queues = num_kernel_contexts;
2205 + dd->first_dyn_alloc_ctxt = num_kernel_contexts;
2206 ++ dd->num_vnic_contexts = num_vnic_contexts;
2207 + dd->num_user_contexts = num_user_contexts;
2208 + dd->freectxts = num_user_contexts;
2209 + dd_dev_info(dd,
2210 +- "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
2211 ++ "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
2212 + (int)dd->chip_rcv_contexts,
2213 + (int)dd->num_rcv_contexts,
2214 + (int)dd->n_krcv_queues,
2215 +- (int)dd->num_rcv_contexts - dd->n_krcv_queues);
2216 ++ dd->num_vnic_contexts,
2217 ++ dd->num_user_contexts);
2218 +
2219 + /*
2220 + * Receive array allocation:
2221 +diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
2222 +index 3ac9c307a285..6ff44dc606eb 100644
2223 +--- a/drivers/infiniband/hw/hfi1/hfi.h
2224 ++++ b/drivers/infiniband/hw/hfi1/hfi.h
2225 +@@ -1047,6 +1047,8 @@ struct hfi1_devdata {
2226 + u64 z_send_schedule;
2227 +
2228 + u64 __percpu *send_schedule;
2229 ++ /* number of reserved contexts for VNIC usage */
2230 ++ u16 num_vnic_contexts;
2231 + /* number of receive contexts in use by the driver */
2232 + u32 num_rcv_contexts;
2233 + /* number of pio send contexts in use by the driver */
2234 +diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
2235 +index 6d2702ef34ac..25e867393463 100644
2236 +--- a/drivers/infiniband/hw/hfi1/sysfs.c
2237 ++++ b/drivers/infiniband/hw/hfi1/sysfs.c
2238 +@@ -543,7 +543,7 @@ static ssize_t show_nctxts(struct device *device,
2239 + * give a more accurate picture of total contexts available.
2240 + */
2241 + return scnprintf(buf, PAGE_SIZE, "%u\n",
2242 +- min(dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt,
2243 ++ min(dd->num_user_contexts,
2244 + (u32)dd->sc_sizes[SC_USER].count));
2245 + }
2246 +
2247 +diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
2248 +index f419cbb05928..1a17708be46a 100644
2249 +--- a/drivers/infiniband/hw/hfi1/vnic_main.c
2250 ++++ b/drivers/infiniband/hw/hfi1/vnic_main.c
2251 +@@ -840,6 +840,9 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
2252 + struct rdma_netdev *rn;
2253 + int i, size, rc;
2254 +
2255 ++ if (!dd->num_vnic_contexts)
2256 ++ return ERR_PTR(-ENOMEM);
2257 ++
2258 + if (!port_num || (port_num > dd->num_pports))
2259 + return ERR_PTR(-EINVAL);
2260 +
2261 +@@ -848,7 +851,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
2262 +
2263 + size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
2264 + netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
2265 +- dd->chip_sdma_engines, HFI1_NUM_VNIC_CTXT);
2266 ++ dd->chip_sdma_engines, dd->num_vnic_contexts);
2267 + if (!netdev)
2268 + return ERR_PTR(-ENOMEM);
2269 +
2270 +@@ -856,7 +859,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
2271 + vinfo = opa_vnic_dev_priv(netdev);
2272 + vinfo->dd = dd;
2273 + vinfo->num_tx_q = dd->chip_sdma_engines;
2274 +- vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT;
2275 ++ vinfo->num_rx_q = dd->num_vnic_contexts;
2276 + vinfo->netdev = netdev;
2277 + rn->free_rdma_netdev = hfi1_vnic_free_rn;
2278 + rn->set_id = hfi1_vnic_set_vesw_id;
2279 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
2280 +index fa5ccdb3bb2a..60d7b493ed2d 100644
2281 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
2282 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
2283 +@@ -665,12 +665,19 @@ static void srp_path_rec_completion(int status,
2284 + static int srp_lookup_path(struct srp_rdma_ch *ch)
2285 + {
2286 + struct srp_target_port *target = ch->target;
2287 +- int ret;
2288 ++ int ret = -ENODEV;
2289 +
2290 + ch->path.numb_path = 1;
2291 +
2292 + init_completion(&ch->done);
2293 +
2294 ++ /*
2295 ++ * Avoid that the SCSI host can be removed by srp_remove_target()
2296 ++ * before srp_path_rec_completion() is called.
2297 ++ */
2298 ++ if (!scsi_host_get(target->scsi_host))
2299 ++ goto out;
2300 ++
2301 + ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
2302 + target->srp_host->srp_dev->dev,
2303 + target->srp_host->port,
2304 +@@ -684,18 +691,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
2305 + GFP_KERNEL,
2306 + srp_path_rec_completion,
2307 + ch, &ch->path_query);
2308 +- if (ch->path_query_id < 0)
2309 +- return ch->path_query_id;
2310 ++ ret = ch->path_query_id;
2311 ++ if (ret < 0)
2312 ++ goto put;
2313 +
2314 + ret = wait_for_completion_interruptible(&ch->done);
2315 + if (ret < 0)
2316 +- return ret;
2317 ++ goto put;
2318 +
2319 +- if (ch->status < 0)
2320 ++ ret = ch->status;
2321 ++ if (ret < 0)
2322 + shost_printk(KERN_WARNING, target->scsi_host,
2323 + PFX "Path record query failed\n");
2324 +
2325 +- return ch->status;
2326 ++put:
2327 ++ scsi_host_put(target->scsi_host);
2328 ++
2329 ++out:
2330 ++ return ret;
2331 + }
2332 +
2333 + static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
2334 +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
2335 +index 9e8e9220f816..95178b4e3565 100644
2336 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
2337 ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
2338 +@@ -2777,7 +2777,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
2339 + {
2340 + const char *p;
2341 + unsigned len, count, leading_zero_bytes;
2342 +- int ret, rc;
2343 ++ int ret;
2344 +
2345 + p = name;
2346 + if (strncasecmp(p, "0x", 2) == 0)
2347 +@@ -2789,10 +2789,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
2348 + count = min(len / 2, 16U);
2349 + leading_zero_bytes = 16 - count;
2350 + memset(i_port_id, 0, leading_zero_bytes);
2351 +- rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
2352 +- if (rc < 0)
2353 +- pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
2354 +- ret = 0;
2355 ++ ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
2356 ++ if (ret < 0)
2357 ++ pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
2358 + out:
2359 + return ret;
2360 + }
2361 +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
2362 +index b5df99c6f680..3b35271114ee 100644
2363 +--- a/drivers/irqchip/irq-gic-v3.c
2364 ++++ b/drivers/irqchip/irq-gic-v3.c
2365 +@@ -1071,18 +1071,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
2366 + int nr_parts;
2367 + struct partition_affinity *parts;
2368 +
2369 +- parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
2370 ++ parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
2371 + if (!parts_node)
2372 + return;
2373 +
2374 + nr_parts = of_get_child_count(parts_node);
2375 +
2376 + if (!nr_parts)
2377 +- return;
2378 ++ goto out_put_node;
2379 +
2380 + parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
2381 + if (WARN_ON(!parts))
2382 +- return;
2383 ++ goto out_put_node;
2384 +
2385 + for_each_child_of_node(parts_node, child_part) {
2386 + struct partition_affinity *part;
2387 +@@ -1149,6 +1149,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
2388 +
2389 + gic_data.ppi_descs[i] = desc;
2390 + }
2391 ++
2392 ++out_put_node:
2393 ++ of_node_put(parts_node);
2394 + }
2395 +
2396 + static void __init gic_of_setup_kvm_info(struct device_node *node)
2397 +diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
2398 +index ae6146311934..f052a3eb2098 100644
2399 +--- a/drivers/mailbox/bcm-flexrm-mailbox.c
2400 ++++ b/drivers/mailbox/bcm-flexrm-mailbox.c
2401 +@@ -1365,8 +1365,8 @@ static void flexrm_shutdown(struct mbox_chan *chan)
2402 + /* Disable/inactivate ring */
2403 + writel_relaxed(0x0, ring->regs + RING_CONTROL);
2404 +
2405 +- /* Flush ring with timeout of 1s */
2406 +- timeout = 1000;
2407 ++ /* Set ring flush state */
2408 ++ timeout = 1000; /* timeout of 1s */
2409 + writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
2410 + ring->regs + RING_CONTROL);
2411 + do {
2412 +@@ -1374,7 +1374,23 @@ static void flexrm_shutdown(struct mbox_chan *chan)
2413 + FLUSH_DONE_MASK)
2414 + break;
2415 + mdelay(1);
2416 +- } while (timeout--);
2417 ++ } while (--timeout);
2418 ++ if (!timeout)
2419 ++ dev_err(ring->mbox->dev,
2420 ++ "setting ring%d flush state timedout\n", ring->num);
2421 ++
2422 ++ /* Clear ring flush state */
2423 ++ timeout = 1000; /* timeout of 1s */
2424 ++ writel_relaxed(0x0, ring + RING_CONTROL);
2425 ++ do {
2426 ++ if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
2427 ++ FLUSH_DONE_MASK))
2428 ++ break;
2429 ++ mdelay(1);
2430 ++ } while (--timeout);
2431 ++ if (!timeout)
2432 ++ dev_err(ring->mbox->dev,
2433 ++ "clearing ring%d flush state timedout\n", ring->num);
2434 +
2435 + /* Abort all in-flight requests */
2436 + for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
2437 +diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
2438 +index 08035634795c..c9934139d609 100644
2439 +--- a/drivers/md/bcache/alloc.c
2440 ++++ b/drivers/md/bcache/alloc.c
2441 +@@ -407,7 +407,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
2442 +
2443 + finish_wait(&ca->set->bucket_wait, &w);
2444 + out:
2445 +- wake_up_process(ca->alloc_thread);
2446 ++ if (ca->alloc_thread)
2447 ++ wake_up_process(ca->alloc_thread);
2448 +
2449 + trace_bcache_alloc(ca, reserve);
2450 +
2451 +diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
2452 +index d2121637b4ab..cae57b5be817 100644
2453 +--- a/drivers/md/bitmap.c
2454 ++++ b/drivers/md/bitmap.c
2455 +@@ -625,7 +625,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
2456 + err = read_sb_page(bitmap->mddev,
2457 + offset,
2458 + sb_page,
2459 +- 0, PAGE_SIZE);
2460 ++ 0, sizeof(bitmap_super_t));
2461 + }
2462 + if (err)
2463 + return err;
2464 +@@ -2123,7 +2123,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2465 + if (store.sb_page && bitmap->storage.sb_page)
2466 + memcpy(page_address(store.sb_page),
2467 + page_address(bitmap->storage.sb_page),
2468 +- PAGE_SIZE);
2469 ++ sizeof(bitmap_super_t));
2470 + bitmap_file_unmap(&bitmap->storage);
2471 + bitmap->storage = store;
2472 +
2473 +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
2474 +index d216a8f7bc22..8e3adcb46851 100644
2475 +--- a/drivers/md/dm-bufio.c
2476 ++++ b/drivers/md/dm-bufio.c
2477 +@@ -974,7 +974,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
2478 + buffers = c->minimum_buffers;
2479 +
2480 + *limit_buffers = buffers;
2481 +- *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
2482 ++ *threshold_buffers = mult_frac(buffers,
2483 ++ DM_BUFIO_WRITEBACK_PERCENT, 100);
2484 + }
2485 +
2486 + /*
2487 +@@ -1910,19 +1911,15 @@ static int __init dm_bufio_init(void)
2488 + memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
2489 + memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
2490 +
2491 +- mem = (__u64)((totalram_pages - totalhigh_pages) *
2492 +- DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
2493 ++ mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
2494 ++ DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2495 +
2496 + if (mem > ULONG_MAX)
2497 + mem = ULONG_MAX;
2498 +
2499 + #ifdef CONFIG_MMU
2500 +- /*
2501 +- * Get the size of vmalloc space the same way as VMALLOC_TOTAL
2502 +- * in fs/proc/internal.h
2503 +- */
2504 +- if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
2505 +- mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
2506 ++ if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2507 ++ mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2508 + #endif
2509 +
2510 + dm_bufio_default_cache_size = mem;
2511 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
2512 +index 8785134c9f1f..0b7edfd0b454 100644
2513 +--- a/drivers/md/dm-cache-target.c
2514 ++++ b/drivers/md/dm-cache-target.c
2515 +@@ -1201,6 +1201,18 @@ static void background_work_end(struct cache *cache)
2516 +
2517 + /*----------------------------------------------------------------*/
2518 +
2519 ++static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
2520 ++{
2521 ++ return (bio_data_dir(bio) == WRITE) &&
2522 ++ (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
2523 ++}
2524 ++
2525 ++static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
2526 ++{
2527 ++ return writeback_mode(&cache->features) &&
2528 ++ (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
2529 ++}
2530 ++
2531 + static void quiesce(struct dm_cache_migration *mg,
2532 + void (*continuation)(struct work_struct *))
2533 + {
2534 +@@ -1474,12 +1486,50 @@ static void mg_upgrade_lock(struct work_struct *ws)
2535 + }
2536 + }
2537 +
2538 ++static void mg_full_copy(struct work_struct *ws)
2539 ++{
2540 ++ struct dm_cache_migration *mg = ws_to_mg(ws);
2541 ++ struct cache *cache = mg->cache;
2542 ++ struct policy_work *op = mg->op;
2543 ++ bool is_policy_promote = (op->op == POLICY_PROMOTE);
2544 ++
2545 ++ if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
2546 ++ is_discarded_oblock(cache, op->oblock)) {
2547 ++ mg_upgrade_lock(ws);
2548 ++ return;
2549 ++ }
2550 ++
2551 ++ init_continuation(&mg->k, mg_upgrade_lock);
2552 ++
2553 ++ if (copy(mg, is_policy_promote)) {
2554 ++ DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
2555 ++ mg->k.input = BLK_STS_IOERR;
2556 ++ mg_complete(mg, false);
2557 ++ }
2558 ++}
2559 ++
2560 + static void mg_copy(struct work_struct *ws)
2561 + {
2562 +- int r;
2563 + struct dm_cache_migration *mg = ws_to_mg(ws);
2564 +
2565 + if (mg->overwrite_bio) {
2566 ++ /*
2567 ++ * No exclusive lock was held when we last checked if the bio
2568 ++ * was optimisable. So we have to check again in case things
2569 ++ * have changed (eg, the block may no longer be discarded).
2570 ++ */
2571 ++ if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
2572 ++ /*
2573 ++ * Fallback to a real full copy after doing some tidying up.
2574 ++ */
2575 ++ bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
2576 ++ BUG_ON(rb); /* An exclussive lock must _not_ be held for this block */
2577 ++ mg->overwrite_bio = NULL;
2578 ++ inc_io_migrations(mg->cache);
2579 ++ mg_full_copy(ws);
2580 ++ return;
2581 ++ }
2582 ++
2583 + /*
2584 + * It's safe to do this here, even though it's new data
2585 + * because all IO has been locked out of the block.
2586 +@@ -1489,26 +1539,8 @@ static void mg_copy(struct work_struct *ws)
2587 + */
2588 + overwrite(mg, mg_update_metadata_after_copy);
2589 +
2590 +- } else {
2591 +- struct cache *cache = mg->cache;
2592 +- struct policy_work *op = mg->op;
2593 +- bool is_policy_promote = (op->op == POLICY_PROMOTE);
2594 +-
2595 +- if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
2596 +- is_discarded_oblock(cache, op->oblock)) {
2597 +- mg_upgrade_lock(ws);
2598 +- return;
2599 +- }
2600 +-
2601 +- init_continuation(&mg->k, mg_upgrade_lock);
2602 +-
2603 +- r = copy(mg, is_policy_promote);
2604 +- if (r) {
2605 +- DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
2606 +- mg->k.input = BLK_STS_IOERR;
2607 +- mg_complete(mg, false);
2608 +- }
2609 +- }
2610 ++ } else
2611 ++ mg_full_copy(ws);
2612 + }
2613 +
2614 + static int mg_lock_writes(struct dm_cache_migration *mg)
2615 +@@ -1748,18 +1780,6 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
2616 +
2617 + /*----------------------------------------------------------------*/
2618 +
2619 +-static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
2620 +-{
2621 +- return (bio_data_dir(bio) == WRITE) &&
2622 +- (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
2623 +-}
2624 +-
2625 +-static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
2626 +-{
2627 +- return writeback_mode(&cache->features) &&
2628 +- (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
2629 +-}
2630 +-
2631 + static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
2632 + bool *commit_needed)
2633 + {
2634 +diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
2635 +index 203144762f36..6a14f945783c 100644
2636 +--- a/drivers/md/dm-core.h
2637 ++++ b/drivers/md/dm-core.h
2638 +@@ -29,7 +29,6 @@ struct dm_kobject_holder {
2639 + * DM targets must _not_ deference a mapped_device to directly access its members!
2640 + */
2641 + struct mapped_device {
2642 +- struct srcu_struct io_barrier;
2643 + struct mutex suspend_lock;
2644 +
2645 + /*
2646 +@@ -127,6 +126,8 @@ struct mapped_device {
2647 + struct blk_mq_tag_set *tag_set;
2648 + bool use_blk_mq:1;
2649 + bool init_tio_pdu:1;
2650 ++
2651 ++ struct srcu_struct io_barrier;
2652 + };
2653 +
2654 + void dm_init_md_queue(struct mapped_device *md);
2655 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
2656 +index 96ab46512e1f..9fc12f556534 100644
2657 +--- a/drivers/md/dm-crypt.c
2658 ++++ b/drivers/md/dm-crypt.c
2659 +@@ -1075,7 +1075,7 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
2660 + BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
2661 +
2662 + /* Reject unexpected unaligned bio. */
2663 +- if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
2664 ++ if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
2665 + return -EIO;
2666 +
2667 + dmreq = dmreq_of_req(cc, req);
2668 +@@ -1168,7 +1168,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
2669 + int r = 0;
2670 +
2671 + /* Reject unexpected unaligned bio. */
2672 +- if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
2673 ++ if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
2674 + return -EIO;
2675 +
2676 + dmreq = dmreq_of_req(cc, req);
2677 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
2678 +index 096fe9b66c50..5e6737a44468 100644
2679 +--- a/drivers/md/dm-integrity.c
2680 ++++ b/drivers/md/dm-integrity.c
2681 +@@ -1376,7 +1376,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
2682 + struct bvec_iter iter;
2683 + struct bio_vec bv;
2684 + bio_for_each_segment(bv, bio, iter) {
2685 +- if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
2686 ++ if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
2687 + DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
2688 + bv.bv_offset, bv.bv_len, ic->sectors_per_block);
2689 + return DM_MAPIO_KILL;
2690 +diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
2691 +index 11f273d2f018..e8094d8fbe0d 100644
2692 +--- a/drivers/md/dm-mpath.c
2693 ++++ b/drivers/md/dm-mpath.c
2694 +@@ -499,8 +499,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
2695 + if (IS_ERR(clone)) {
2696 + /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
2697 + bool queue_dying = blk_queue_dying(q);
2698 +- DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
2699 +- PTR_ERR(clone), queue_dying ? " (path offline)" : "");
2700 + if (queue_dying) {
2701 + atomic_inc(&m->pg_init_in_progress);
2702 + activate_or_offline_path(pgpath);
2703 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
2704 +index ef7b8f201f73..4287fc9f3527 100644
2705 +--- a/drivers/md/dm-table.c
2706 ++++ b/drivers/md/dm-table.c
2707 +@@ -1758,13 +1758,12 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
2708 + return true;
2709 + }
2710 +
2711 +-
2712 +-static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
2713 +- sector_t start, sector_t len, void *data)
2714 ++static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
2715 ++ sector_t start, sector_t len, void *data)
2716 + {
2717 + struct request_queue *q = bdev_get_queue(dev->bdev);
2718 +
2719 +- return q && blk_queue_discard(q);
2720 ++ return q && !blk_queue_discard(q);
2721 + }
2722 +
2723 + static bool dm_table_supports_discards(struct dm_table *t)
2724 +@@ -1772,28 +1771,24 @@ static bool dm_table_supports_discards(struct dm_table *t)
2725 + struct dm_target *ti;
2726 + unsigned i;
2727 +
2728 +- /*
2729 +- * Unless any target used by the table set discards_supported,
2730 +- * require at least one underlying device to support discards.
2731 +- * t->devices includes internal dm devices such as mirror logs
2732 +- * so we need to use iterate_devices here, which targets
2733 +- * supporting discard selectively must provide.
2734 +- */
2735 + for (i = 0; i < dm_table_get_num_targets(t); i++) {
2736 + ti = dm_table_get_target(t, i);
2737 +
2738 + if (!ti->num_discard_bios)
2739 +- continue;
2740 +-
2741 +- if (ti->discards_supported)
2742 +- return true;
2743 ++ return false;
2744 +
2745 +- if (ti->type->iterate_devices &&
2746 +- ti->type->iterate_devices(ti, device_discard_capable, NULL))
2747 +- return true;
2748 ++ /*
2749 ++ * Either the target provides discard support (as implied by setting
2750 ++ * 'discards_supported') or it relies on _all_ data devices having
2751 ++ * discard support.
2752 ++ */
2753 ++ if (!ti->discards_supported &&
2754 ++ (!ti->type->iterate_devices ||
2755 ++ ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
2756 ++ return false;
2757 + }
2758 +
2759 +- return false;
2760 ++ return true;
2761 + }
2762 +
2763 + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
2764 +diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
2765 +index b87c1741da4b..6d7bda6f8190 100644
2766 +--- a/drivers/md/dm-zoned-target.c
2767 ++++ b/drivers/md/dm-zoned-target.c
2768 +@@ -660,6 +660,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
2769 + struct dmz_target *dmz = ti->private;
2770 + struct request_queue *q;
2771 + struct dmz_dev *dev;
2772 ++ sector_t aligned_capacity;
2773 + int ret;
2774 +
2775 + /* Get the target device */
2776 +@@ -685,15 +686,17 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
2777 + goto err;
2778 + }
2779 +
2780 ++ q = bdev_get_queue(dev->bdev);
2781 + dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
2782 +- if (ti->begin || (ti->len != dev->capacity)) {
2783 ++ aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
2784 ++ if (ti->begin ||
2785 ++ ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
2786 + ti->error = "Partial mapping not supported";
2787 + ret = -EINVAL;
2788 + goto err;
2789 + }
2790 +
2791 +- q = bdev_get_queue(dev->bdev);
2792 +- dev->zone_nr_sectors = q->limits.chunk_sectors;
2793 ++ dev->zone_nr_sectors = blk_queue_zone_sectors(q);
2794 + dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
2795 +
2796 + dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
2797 +@@ -929,8 +932,10 @@ static int dmz_iterate_devices(struct dm_target *ti,
2798 + iterate_devices_callout_fn fn, void *data)
2799 + {
2800 + struct dmz_target *dmz = ti->private;
2801 ++ struct dmz_dev *dev = dmz->dev;
2802 ++ sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
2803 +
2804 +- return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data);
2805 ++ return fn(ti, dmz->ddev, 0, capacity, data);
2806 + }
2807 +
2808 + static struct target_type dmz_type = {
2809 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
2810 +index 4be85324f44d..804419635cc7 100644
2811 +--- a/drivers/md/dm.c
2812 ++++ b/drivers/md/dm.c
2813 +@@ -1695,7 +1695,7 @@ static struct mapped_device *alloc_dev(int minor)
2814 + struct mapped_device *md;
2815 + void *old_md;
2816 +
2817 +- md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
2818 ++ md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
2819 + if (!md) {
2820 + DMWARN("unable to allocate device, out of memory.");
2821 + return NULL;
2822 +@@ -1795,7 +1795,7 @@ static struct mapped_device *alloc_dev(int minor)
2823 + bad_minor:
2824 + module_put(THIS_MODULE);
2825 + bad_module_get:
2826 +- kfree(md);
2827 ++ kvfree(md);
2828 + return NULL;
2829 + }
2830 +
2831 +@@ -1814,7 +1814,7 @@ static void free_dev(struct mapped_device *md)
2832 + free_minor(minor);
2833 +
2834 + module_put(THIS_MODULE);
2835 +- kfree(md);
2836 ++ kvfree(md);
2837 + }
2838 +
2839 + static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2840 +@@ -2709,11 +2709,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2841 +
2842 + md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2843 +
2844 +- if (test_bit(DMF_FREEING, &md->flags) ||
2845 +- dm_deleting_md(md))
2846 +- return NULL;
2847 +-
2848 ++ spin_lock(&_minor_lock);
2849 ++ if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2850 ++ md = NULL;
2851 ++ goto out;
2852 ++ }
2853 + dm_get(md);
2854 ++out:
2855 ++ spin_unlock(&_minor_lock);
2856 ++
2857 + return md;
2858 + }
2859 +
2860 +diff --git a/drivers/md/md.c b/drivers/md/md.c
2861 +index 0ff1bbf6c90e..e019cf8c0d13 100644
2862 +--- a/drivers/md/md.c
2863 ++++ b/drivers/md/md.c
2864 +@@ -8039,7 +8039,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
2865 + if (did_change)
2866 + sysfs_notify_dirent_safe(mddev->sysfs_state);
2867 + wait_event(mddev->sb_wait,
2868 +- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended);
2869 ++ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
2870 ++ mddev->suspended);
2871 + if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2872 + percpu_ref_put(&mddev->writes_pending);
2873 + return false;
2874 +@@ -8110,7 +8111,6 @@ void md_allow_write(struct mddev *mddev)
2875 + sysfs_notify_dirent_safe(mddev->sysfs_state);
2876 + /* wait for the dirty state to be recorded in the metadata */
2877 + wait_event(mddev->sb_wait,
2878 +- !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) &&
2879 + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
2880 + } else
2881 + spin_unlock(&mddev->lock);
2882 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2883 +index f3f3e40dc9d8..e4e8f9e565b7 100644
2884 +--- a/drivers/md/raid1.c
2885 ++++ b/drivers/md/raid1.c
2886 +@@ -990,14 +990,6 @@ static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
2887 + _wait_barrier(conf, idx);
2888 + }
2889 +
2890 +-static void wait_all_barriers(struct r1conf *conf)
2891 +-{
2892 +- int idx;
2893 +-
2894 +- for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
2895 +- _wait_barrier(conf, idx);
2896 +-}
2897 +-
2898 + static void _allow_barrier(struct r1conf *conf, int idx)
2899 + {
2900 + atomic_dec(&conf->nr_pending[idx]);
2901 +@@ -1011,14 +1003,6 @@ static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
2902 + _allow_barrier(conf, idx);
2903 + }
2904 +
2905 +-static void allow_all_barriers(struct r1conf *conf)
2906 +-{
2907 +- int idx;
2908 +-
2909 +- for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
2910 +- _allow_barrier(conf, idx);
2911 +-}
2912 +-
2913 + /* conf->resync_lock should be held */
2914 + static int get_unqueued_pending(struct r1conf *conf)
2915 + {
2916 +@@ -1654,8 +1638,12 @@ static void print_conf(struct r1conf *conf)
2917 +
2918 + static void close_sync(struct r1conf *conf)
2919 + {
2920 +- wait_all_barriers(conf);
2921 +- allow_all_barriers(conf);
2922 ++ int idx;
2923 ++
2924 ++ for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
2925 ++ _wait_barrier(conf, idx);
2926 ++ _allow_barrier(conf, idx);
2927 ++ }
2928 +
2929 + mempool_destroy(conf->r1buf_pool);
2930 + conf->r1buf_pool = NULL;
2931 +diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
2932 +index cba092bcb76d..a0fe80df0cbd 100644
2933 +--- a/drivers/media/platform/qcom/venus/core.h
2934 ++++ b/drivers/media/platform/qcom/venus/core.h
2935 +@@ -194,7 +194,6 @@ struct venus_buffer {
2936 + * @fh: a holder of v4l file handle structure
2937 + * @streamon_cap: stream on flag for capture queue
2938 + * @streamon_out: stream on flag for output queue
2939 +- * @cmd_stop: a flag to signal encoder/decoder commands
2940 + * @width: current capture width
2941 + * @height: current capture height
2942 + * @out_width: current output width
2943 +@@ -258,7 +257,6 @@ struct venus_inst {
2944 + } controls;
2945 + struct v4l2_fh fh;
2946 + unsigned int streamon_cap, streamon_out;
2947 +- bool cmd_stop;
2948 + u32 width;
2949 + u32 height;
2950 + u32 out_width;
2951 +diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
2952 +index 9b2a401a4891..0ce9559a2924 100644
2953 +--- a/drivers/media/platform/qcom/venus/helpers.c
2954 ++++ b/drivers/media/platform/qcom/venus/helpers.c
2955 +@@ -623,13 +623,6 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
2956 +
2957 + mutex_lock(&inst->lock);
2958 +
2959 +- if (inst->cmd_stop) {
2960 +- vbuf->flags |= V4L2_BUF_FLAG_LAST;
2961 +- v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
2962 +- inst->cmd_stop = false;
2963 +- goto unlock;
2964 +- }
2965 +-
2966 + v4l2_m2m_buf_queue(m2m_ctx, vbuf);
2967 +
2968 + if (!(inst->streamon_out & inst->streamon_cap))
2969 +diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
2970 +index c09490876516..ba29fd4d4984 100644
2971 +--- a/drivers/media/platform/qcom/venus/hfi.c
2972 ++++ b/drivers/media/platform/qcom/venus/hfi.c
2973 +@@ -484,6 +484,7 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
2974 +
2975 + return -EINVAL;
2976 + }
2977 ++EXPORT_SYMBOL_GPL(hfi_session_process_buf);
2978 +
2979 + irqreturn_t hfi_isr_thread(int irq, void *dev_id)
2980 + {
2981 +diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
2982 +index 1caae8feaa36..734ce11b0ed0 100644
2983 +--- a/drivers/media/platform/qcom/venus/hfi_venus.c
2984 ++++ b/drivers/media/platform/qcom/venus/hfi_venus.c
2985 +@@ -344,7 +344,7 @@ static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
2986 + desc->attrs = DMA_ATTR_WRITE_COMBINE;
2987 + desc->size = ALIGN(size, SZ_4K);
2988 +
2989 +- desc->kva = dma_alloc_attrs(dev, size, &desc->da, GFP_KERNEL,
2990 ++ desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
2991 + desc->attrs);
2992 + if (!desc->kva)
2993 + return -ENOMEM;
2994 +@@ -710,10 +710,8 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev)
2995 + if (ret)
2996 + return ret;
2997 +
2998 +- hdev->ifaceq_table.kva = desc.kva;
2999 +- hdev->ifaceq_table.da = desc.da;
3000 +- hdev->ifaceq_table.size = IFACEQ_TABLE_SIZE;
3001 +- offset = hdev->ifaceq_table.size;
3002 ++ hdev->ifaceq_table = desc;
3003 ++ offset = IFACEQ_TABLE_SIZE;
3004 +
3005 + for (i = 0; i < IFACEQ_NUM; i++) {
3006 + queue = &hdev->queues[i];
3007 +@@ -755,9 +753,7 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev)
3008 + if (ret) {
3009 + hdev->sfr.da = 0;
3010 + } else {
3011 +- hdev->sfr.da = desc.da;
3012 +- hdev->sfr.kva = desc.kva;
3013 +- hdev->sfr.size = ALIGNED_SFR_SIZE;
3014 ++ hdev->sfr = desc;
3015 + sfr = hdev->sfr.kva;
3016 + sfr->buf_size = ALIGNED_SFR_SIZE;
3017 + }
3018 +diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
3019 +index da611a5eb670..c9e9576bb08a 100644
3020 +--- a/drivers/media/platform/qcom/venus/vdec.c
3021 ++++ b/drivers/media/platform/qcom/venus/vdec.c
3022 +@@ -469,8 +469,14 @@ static int vdec_subscribe_event(struct v4l2_fh *fh,
3023 + static int
3024 + vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
3025 + {
3026 +- if (cmd->cmd != V4L2_DEC_CMD_STOP)
3027 ++ switch (cmd->cmd) {
3028 ++ case V4L2_DEC_CMD_STOP:
3029 ++ if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
3030 ++ return -EINVAL;
3031 ++ break;
3032 ++ default:
3033 + return -EINVAL;
3034 ++ }
3035 +
3036 + return 0;
3037 + }
3038 +@@ -479,6 +485,7 @@ static int
3039 + vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
3040 + {
3041 + struct venus_inst *inst = to_inst(file);
3042 ++ struct hfi_frame_data fdata = {0};
3043 + int ret;
3044 +
3045 + ret = vdec_try_decoder_cmd(file, fh, cmd);
3046 +@@ -486,12 +493,23 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
3047 + return ret;
3048 +
3049 + mutex_lock(&inst->lock);
3050 +- inst->cmd_stop = true;
3051 +- mutex_unlock(&inst->lock);
3052 +
3053 +- hfi_session_flush(inst);
3054 ++ /*
3055 ++ * Implement V4L2_DEC_CMD_STOP by enqueue an empty buffer on decoder
3056 ++ * input to signal EOS.
3057 ++ */
3058 ++ if (!(inst->streamon_out & inst->streamon_cap))
3059 ++ goto unlock;
3060 ++
3061 ++ fdata.buffer_type = HFI_BUFFER_INPUT;
3062 ++ fdata.flags |= HFI_BUFFERFLAG_EOS;
3063 ++ fdata.device_addr = 0xdeadbeef;
3064 +
3065 +- return 0;
3066 ++ ret = hfi_session_process_buf(inst, &fdata);
3067 ++
3068 ++unlock:
3069 ++ mutex_unlock(&inst->lock);
3070 ++ return ret;
3071 + }
3072 +
3073 + static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
3074 +@@ -718,7 +736,6 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
3075 + inst->reconfig = false;
3076 + inst->sequence_cap = 0;
3077 + inst->sequence_out = 0;
3078 +- inst->cmd_stop = false;
3079 +
3080 + ret = vdec_init_session(inst);
3081 + if (ret)
3082 +@@ -807,11 +824,6 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
3083 + vb->timestamp = timestamp_us * NSEC_PER_USEC;
3084 + vbuf->sequence = inst->sequence_cap++;
3085 +
3086 +- if (inst->cmd_stop) {
3087 +- vbuf->flags |= V4L2_BUF_FLAG_LAST;
3088 +- inst->cmd_stop = false;
3089 +- }
3090 +-
3091 + if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
3092 + const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
3093 +
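In the vdec_decoder_cmd() hunk above, V4L2_DEC_CMD_STOP no longer sets a cmd_stop flag and flushes the session; when both queues are streaming, an empty input buffer marked with HFI_BUFFERFLAG_EOS (and a dummy device address) is queued so the firmware drains and flags the last capture buffer itself. A small sketch of assembling such a descriptor, with simplified types; the flag value is an assumption for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the HFI frame-data descriptor and its flags. */
#define HFI_BUFFER_INPUT     0x1
#define HFI_BUFFERFLAG_EOS   0x200     /* assumption: illustrative value */

struct hfi_frame_data {
        uint32_t buffer_type;
        uint32_t flags;
        uint32_t device_addr;
        uint32_t filled_len;
};

/* Build an empty EOS input frame, mirroring the shape of the patched code. */
static void fill_eos_frame(struct hfi_frame_data *fdata)
{
        fdata->buffer_type = HFI_BUFFER_INPUT;
        fdata->flags |= HFI_BUFFERFLAG_EOS;
        fdata->device_addr = 0xdeadbeef;   /* dummy address, never dereferenced */
        fdata->filled_len = 0;             /* no payload: EOS marker only */
}

int main(void)
{
        struct hfi_frame_data fdata = { 0 };

        fill_eos_frame(&fdata);
        printf("EOS frame: type=%u flags=0x%x addr=0x%x len=%u\n",
               (unsigned)fdata.buffer_type, (unsigned)fdata.flags,
               (unsigned)fdata.device_addr, (unsigned)fdata.filled_len);
        return 0;
}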
3094 +diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
3095 +index 6f123a387cf9..3fcf0e9b7b29 100644
3096 +--- a/drivers/media/platform/qcom/venus/venc.c
3097 ++++ b/drivers/media/platform/qcom/venus/venc.c
3098 +@@ -963,13 +963,12 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
3099 + if (!vbuf)
3100 + return;
3101 +
3102 +- vb = &vbuf->vb2_buf;
3103 +- vb->planes[0].bytesused = bytesused;
3104 +- vb->planes[0].data_offset = data_offset;
3105 +-
3106 + vbuf->flags = flags;
3107 +
3108 + if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
3109 ++ vb = &vbuf->vb2_buf;
3110 ++ vb2_set_plane_payload(vb, 0, bytesused + data_offset);
3111 ++ vb->planes[0].data_offset = data_offset;
3112 + vb->timestamp = timestamp_us * NSEC_PER_USEC;
3113 + vbuf->sequence = inst->sequence_cap++;
3114 + } else {
3115 +diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
3116 +index d2223c04e9ad..4c8f456238bc 100644
3117 +--- a/drivers/media/rc/ir-lirc-codec.c
3118 ++++ b/drivers/media/rc/ir-lirc-codec.c
3119 +@@ -298,11 +298,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
3120 + if (!dev->max_timeout)
3121 + return -ENOTTY;
3122 +
3123 ++ /* Check for multiply overflow */
3124 ++ if (val > U32_MAX / 1000)
3125 ++ return -EINVAL;
3126 ++
3127 + tmp = val * 1000;
3128 +
3129 +- if (tmp < dev->min_timeout ||
3130 +- tmp > dev->max_timeout)
3131 +- return -EINVAL;
3132 ++ if (tmp < dev->min_timeout || tmp > dev->max_timeout)
3133 ++ return -EINVAL;
3134 +
3135 + if (dev->s_timeout)
3136 + ret = dev->s_timeout(dev, tmp);
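The ir-lirc-codec hunk above guards the millisecond-to-microsecond conversion: if the user-supplied value exceeds U32_MAX / 1000, the multiplication by 1000 would wrap, so it is rejected before scaling. The same check in standalone form:

#include <stdint.h>
#include <stdio.h>

/* Scale a millisecond timeout to microseconds, refusing values that
 * would overflow a 32-bit result (the same guard the patch adds). */
static int timeout_ms_to_us(uint32_t val_ms, uint32_t *out_us)
{
        if (val_ms > UINT32_MAX / 1000)
                return -1;             /* would wrap: reject, like -EINVAL */

        *out_us = val_ms * 1000;
        return 0;
}

int main(void)
{
        uint32_t us;

        if (timeout_ms_to_us(500, &us) == 0)
                printf("500 ms -> %u us\n", (unsigned)us);
        if (timeout_ms_to_us(5000000, &us) != 0)
                printf("5000000 ms rejected: would overflow\n");
        return 0;
}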
3137 +diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
3138 +index 817c18f2ddd1..a95d09acc22a 100644
3139 +--- a/drivers/media/rc/ir-nec-decoder.c
3140 ++++ b/drivers/media/rc/ir-nec-decoder.c
3141 +@@ -87,8 +87,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
3142 + data->state = STATE_BIT_PULSE;
3143 + return 0;
3144 + } else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) {
3145 +- rc_repeat(dev);
3146 +- IR_dprintk(1, "Repeat last key\n");
3147 + data->state = STATE_TRAILER_PULSE;
3148 + return 0;
3149 + }
3150 +@@ -151,19 +149,26 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
3151 + if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2))
3152 + break;
3153 +
3154 +- address = bitrev8((data->bits >> 24) & 0xff);
3155 +- not_address = bitrev8((data->bits >> 16) & 0xff);
3156 +- command = bitrev8((data->bits >> 8) & 0xff);
3157 +- not_command = bitrev8((data->bits >> 0) & 0xff);
3158 ++ if (data->count == NEC_NBITS) {
3159 ++ address = bitrev8((data->bits >> 24) & 0xff);
3160 ++ not_address = bitrev8((data->bits >> 16) & 0xff);
3161 ++ command = bitrev8((data->bits >> 8) & 0xff);
3162 ++ not_command = bitrev8((data->bits >> 0) & 0xff);
3163 ++
3164 ++ scancode = ir_nec_bytes_to_scancode(address,
3165 ++ not_address,
3166 ++ command,
3167 ++ not_command,
3168 ++ &rc_proto);
3169 +
3170 +- scancode = ir_nec_bytes_to_scancode(address, not_address,
3171 +- command, not_command,
3172 +- &rc_proto);
3173 ++ if (data->is_nec_x)
3174 ++ data->necx_repeat = true;
3175 +
3176 +- if (data->is_nec_x)
3177 +- data->necx_repeat = true;
3178 ++ rc_keydown(dev, rc_proto, scancode, 0);
3179 ++ } else {
3180 ++ rc_repeat(dev);
3181 ++ }
3182 +
3183 +- rc_keydown(dev, rc_proto, scancode, 0);
3184 + data->state = STATE_INACTIVE;
3185 + return 0;
3186 + }
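The ir-nec-decoder hunk above defers the repeat decision to the trailer state: only a frame that collected all NEC_NBITS bits produces a new scancode via rc_keydown(), while a short repeat burst just triggers rc_repeat(). A compact sketch of that branch, with stubbed callbacks and an arbitrary example scancode:

#include <stdint.h>
#include <stdio.h>

#define NEC_NBITS 32

static void rc_keydown(uint32_t scancode) { printf("keydown 0x%08x\n", (unsigned)scancode); }
static void rc_repeat(void)               { printf("repeat last key\n"); }

/* At the trailer, decide whether we saw a full frame or a repeat burst. */
static void nec_trailer(unsigned int count, uint32_t bits)
{
        if (count == NEC_NBITS)
                rc_keydown(bits);      /* full frame: report the new scancode */
        else
                rc_repeat();           /* short burst: repeat the previous key */
}

int main(void)
{
        nec_trailer(NEC_NBITS, 0x20df10ef); /* example value, not a real mapping */
        nec_trailer(0, 0);                  /* repeat code carries no bits */
        return 0;
}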
3187 +diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c
3188 +index 5a28ce3a1d49..38dbc128340d 100644
3189 +--- a/drivers/media/usb/as102/as102_fw.c
3190 ++++ b/drivers/media/usb/as102/as102_fw.c
3191 +@@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
3192 + unsigned char *cmd,
3193 + const struct firmware *firmware) {
3194 +
3195 +- struct as10x_fw_pkt_t fw_pkt;
3196 ++ struct as10x_fw_pkt_t *fw_pkt;
3197 + int total_read_bytes = 0, errno = 0;
3198 + unsigned char addr_has_changed = 0;
3199 +
3200 ++ fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL);
3201 ++ if (!fw_pkt)
3202 ++ return -ENOMEM;
3203 ++
3204 ++
3205 + for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
3206 + int read_bytes = 0, data_len = 0;
3207 +
3208 + /* parse intel hex line */
3209 + read_bytes = parse_hex_line(
3210 + (u8 *) (firmware->data + total_read_bytes),
3211 +- fw_pkt.raw.address,
3212 +- fw_pkt.raw.data,
3213 ++ fw_pkt->raw.address,
3214 ++ fw_pkt->raw.data,
3215 + &data_len,
3216 + &addr_has_changed);
3217 +
3218 +@@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
3219 + /* detect the end of file */
3220 + total_read_bytes += read_bytes;
3221 + if (total_read_bytes == firmware->size) {
3222 +- fw_pkt.u.request[0] = 0x00;
3223 +- fw_pkt.u.request[1] = 0x03;
3224 ++ fw_pkt->u.request[0] = 0x00;
3225 ++ fw_pkt->u.request[1] = 0x03;
3226 +
3227 + /* send EOF command */
3228 + errno = bus_adap->ops->upload_fw_pkt(bus_adap,
3229 + (uint8_t *)
3230 +- &fw_pkt, 2, 0);
3231 ++ fw_pkt, 2, 0);
3232 + if (errno < 0)
3233 + goto error;
3234 + } else {
3235 + if (!addr_has_changed) {
3236 + /* prepare command to send */
3237 +- fw_pkt.u.request[0] = 0x00;
3238 +- fw_pkt.u.request[1] = 0x01;
3239 ++ fw_pkt->u.request[0] = 0x00;
3240 ++ fw_pkt->u.request[1] = 0x01;
3241 +
3242 +- data_len += sizeof(fw_pkt.u.request);
3243 +- data_len += sizeof(fw_pkt.raw.address);
3244 ++ data_len += sizeof(fw_pkt->u.request);
3245 ++ data_len += sizeof(fw_pkt->raw.address);
3246 +
3247 + /* send cmd to device */
3248 + errno = bus_adap->ops->upload_fw_pkt(bus_adap,
3249 + (uint8_t *)
3250 +- &fw_pkt,
3251 ++ fw_pkt,
3252 + data_len,
3253 + 0);
3254 + if (errno < 0)
3255 +@@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
3256 + }
3257 + }
3258 + error:
3259 ++ kfree(fw_pkt);
3260 + return (errno == 0) ? total_read_bytes : errno;
3261 + }
3262 +
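The as102_fw.c hunk above moves the firmware packet off the kernel stack: struct as10x_fw_pkt_t is large, so it is now kmalloc()ed and freed on the common exit path. A userspace analogue of the pattern, assuming an arbitrary large payload size:

#include <stdlib.h>
#include <string.h>

/* Assumed large packet: on a kernel stack this would be risky, so it is
 * heap-allocated instead (kmalloc/kfree in the driver, malloc/free here). */
struct fw_pkt {
        unsigned char address[4];
        unsigned char data[2048];
};

static int upload_firmware(const unsigned char *image, size_t len)
{
        struct fw_pkt *pkt;
        int ret = 0;

        pkt = malloc(sizeof(*pkt));
        if (!pkt)
                return -1;             /* -ENOMEM in the driver */

        /* ... parse "image" into pkt->address / pkt->data and send it ... */
        memcpy(pkt->data, image, len < sizeof(pkt->data) ? len : sizeof(pkt->data));

        free(pkt);                     /* common exit path frees the packet */
        return ret;
}

int main(void)
{
        unsigned char image[16] = { 0 };

        return upload_firmware(image, sizeof(image)) ? 1 : 0;
}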
3263 +diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
3264 +index e0daa9b6c2a0..9b742d569fb5 100644
3265 +--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
3266 ++++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
3267 +@@ -1684,7 +1684,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
3268 + nr = dev->devno;
3269 +
3270 + assoc_desc = udev->actconfig->intf_assoc[0];
3271 +- if (assoc_desc->bFirstInterface != ifnum) {
3272 ++ if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) {
3273 + dev_err(d, "Not found matching IAD interface\n");
3274 + retval = -ENODEV;
3275 + goto err_if;
3276 +diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
3277 +index dd1db678718c..8033d6f73501 100644
3278 +--- a/drivers/media/v4l2-core/v4l2-ctrls.c
3279 ++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
3280 +@@ -1227,6 +1227,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
3281 + }
3282 + EXPORT_SYMBOL(v4l2_ctrl_fill);
3283 +
3284 ++static u32 user_flags(const struct v4l2_ctrl *ctrl)
3285 ++{
3286 ++ u32 flags = ctrl->flags;
3287 ++
3288 ++ if (ctrl->is_ptr)
3289 ++ flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
3290 ++
3291 ++ return flags;
3292 ++}
3293 ++
3294 + static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
3295 + {
3296 + memset(ev->reserved, 0, sizeof(ev->reserved));
3297 +@@ -1234,7 +1244,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
3298 + ev->id = ctrl->id;
3299 + ev->u.ctrl.changes = changes;
3300 + ev->u.ctrl.type = ctrl->type;
3301 +- ev->u.ctrl.flags = ctrl->flags;
3302 ++ ev->u.ctrl.flags = user_flags(ctrl);
3303 + if (ctrl->is_ptr)
3304 + ev->u.ctrl.value64 = 0;
3305 + else
3306 +@@ -2577,10 +2587,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
3307 + else
3308 + qc->id = ctrl->id;
3309 + strlcpy(qc->name, ctrl->name, sizeof(qc->name));
3310 +- qc->flags = ctrl->flags;
3311 ++ qc->flags = user_flags(ctrl);
3312 + qc->type = ctrl->type;
3313 +- if (ctrl->is_ptr)
3314 +- qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
3315 + qc->elem_size = ctrl->elem_size;
3316 + qc->elems = ctrl->elems;
3317 + qc->nr_of_dims = ctrl->nr_of_dims;
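The v4l2-ctrls hunk above factors the "add V4L2_CTRL_FLAG_HAS_PAYLOAD for pointer controls" logic into one user_flags() helper, so the control-event path and v4l2_query_ext_ctrl() report the same flags. A minimal sketch of the helper pattern, with an illustrative flag value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTRL_FLAG_HAS_PAYLOAD  0x0100  /* assumption: illustrative bit value */

struct ctrl {
        uint32_t flags;
        bool is_ptr;
};

/* One place computes the user-visible flags; every reporting path uses it. */
static uint32_t user_flags(const struct ctrl *ctrl)
{
        uint32_t flags = ctrl->flags;

        if (ctrl->is_ptr)
                flags |= CTRL_FLAG_HAS_PAYLOAD;

        return flags;
}

int main(void)
{
        struct ctrl c = { .flags = 0x1, .is_ptr = true };

        printf("event flags: 0x%x\n", (unsigned)user_flags(&c)); /* fill_event() path */
        printf("query flags: 0x%x\n", (unsigned)user_flags(&c)); /* query_ext_ctrl path */
        return 0;
}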
3318 +diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
3319 +index 450ae36645aa..cf1120abbf52 100644
3320 +--- a/drivers/mfd/lpc_ich.c
3321 ++++ b/drivers/mfd/lpc_ich.c
3322 +@@ -522,6 +522,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
3323 + .name = "Avoton SoC",
3324 + .iTCO_version = 3,
3325 + .gpio_version = AVOTON_GPIO,
3326 ++ .spi_type = INTEL_SPI_BYT,
3327 + },
3328 + [LPC_BAYTRAIL] = {
3329 + .name = "Bay Trail SoC",
3330 +diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
3331 +index 84b16133554b..0806f72102c0 100644
3332 +--- a/drivers/mtd/devices/docg3.c
3333 ++++ b/drivers/mtd/devices/docg3.c
3334 +@@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor)
3335 + struct dentry *root = floor->dbg.dfs_dir;
3336 + struct docg3 *docg3 = floor->priv;
3337 +
3338 +- if (IS_ERR_OR_NULL(root))
3339 ++ if (IS_ERR_OR_NULL(root)) {
3340 ++ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
3341 ++ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
3342 ++ dev_warn(floor->dev.parent,
3343 ++ "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
3344 + return;
3345 ++ }
3346 +
3347 + debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3,
3348 + &flashcontrol_fops);
3349 +diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
3350 +index f25eca79f4e5..68c9d98a3347 100644
3351 +--- a/drivers/mtd/nand/atmel/nand-controller.c
3352 ++++ b/drivers/mtd/nand/atmel/nand-controller.c
3353 +@@ -2547,6 +2547,7 @@ static struct platform_driver atmel_nand_controller_driver = {
3354 + .driver = {
3355 + .name = "atmel-nand-controller",
3356 + .of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
3357 ++ .pm = &atmel_nand_controller_pm_ops,
3358 + },
3359 + .probe = atmel_nand_controller_probe,
3360 + .remove = atmel_nand_controller_remove,
3361 +diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
3362 +index 7f3b065b6b8f..c51d214d169e 100644
3363 +--- a/drivers/mtd/nand/mtk_ecc.c
3364 ++++ b/drivers/mtd/nand/mtk_ecc.c
3365 +@@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
3366 + op = ECC_DECODE;
3367 + dec = readw(ecc->regs + ECC_DECDONE);
3368 + if (dec & ecc->sectors) {
3369 ++ /*
3370 ++ * Clear decode IRQ status once again to ensure that
3371 ++ * there will be no extra IRQ.
3372 ++ */
3373 ++ readw(ecc->regs + ECC_DECIRQ_STA);
3374 + ecc->sectors = 0;
3375 + complete(&ecc->done);
3376 + } else {
3377 +@@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
3378 + }
3379 + }
3380 +
3381 +- writel(0, ecc->regs + ECC_IRQ_REG(op));
3382 +-
3383 + return IRQ_HANDLED;
3384 + }
3385 +
3386 +@@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
3387 +
3388 + /* disable it */
3389 + mtk_ecc_wait_idle(ecc, op);
3390 ++ if (op == ECC_DECODE)
3391 ++ /*
3392 ++ * Clear decode IRQ status in case there is a timeout to wait
3393 ++ * decode IRQ.
3394 ++ */
3395 ++ readw(ecc->regs + ECC_DECIRQ_STA);
3396 + writew(0, ecc->regs + ECC_IRQ_REG(op));
3397 + writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
3398 +
3399 +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
3400 +index 12edaae17d81..3f1d806e590a 100644
3401 +--- a/drivers/mtd/nand/nand_base.c
3402 ++++ b/drivers/mtd/nand/nand_base.c
3403 +@@ -1246,6 +1246,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
3404 +
3405 + return 0;
3406 + }
3407 ++EXPORT_SYMBOL_GPL(nand_reset);
3408 +
3409 + /**
3410 + * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
3411 +@@ -2799,15 +2800,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
3412 + size_t *retlen, const uint8_t *buf)
3413 + {
3414 + struct nand_chip *chip = mtd_to_nand(mtd);
3415 ++ int chipnr = (int)(to >> chip->chip_shift);
3416 + struct mtd_oob_ops ops;
3417 + int ret;
3418 +
3419 +- /* Wait for the device to get ready */
3420 +- panic_nand_wait(mtd, chip, 400);
3421 +-
3422 + /* Grab the device */
3423 + panic_nand_get_device(chip, mtd, FL_WRITING);
3424 +
3425 ++ chip->select_chip(mtd, chipnr);
3426 ++
3427 ++ /* Wait for the device to get ready */
3428 ++ panic_nand_wait(mtd, chip, 400);
3429 ++
3430 + memset(&ops, 0, sizeof(ops));
3431 + ops.len = len;
3432 + ops.datbuf = (uint8_t *)buf;
3433 +diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
3434 +index 246b4393118e..44322a363ba5 100644
3435 +--- a/drivers/mtd/nand/nandsim.c
3436 ++++ b/drivers/mtd/nand/nandsim.c
3437 +@@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev)
3438 + struct dentry *root = nsmtd->dbg.dfs_dir;
3439 + struct dentry *dent;
3440 +
3441 +- if (!IS_ENABLED(CONFIG_DEBUG_FS))
3442 ++ /*
3443 ++ * Just skip debugfs initialization when the debugfs directory is
3444 ++ * missing.
3445 ++ */
3446 ++ if (IS_ERR_OR_NULL(root)) {
3447 ++ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
3448 ++ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
3449 ++ NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
3450 + return 0;
3451 +-
3452 +- if (IS_ERR_OR_NULL(root))
3453 +- return -1;
3454 ++ }
3455 +
3456 + dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
3457 + root, dev, &dfs_fops);
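Both the docg3 and nandsim hunks above stop treating a missing debugfs directory as an error: if the MTD master's dfs_dir is absent (for instance because CONFIG_MTD_PARTITIONED_MASTER is off), they warn once and skip the debugfs files instead of failing. The control flow, reduced to a sketch:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the MTD master's debugfs directory pointer. */
static void *dfs_dir;                  /* NULL here models "no debugfs dir" */

static int create_debugfs_entries(bool debugfs_enabled, bool partitioned_master)
{
        if (!dfs_dir) {
                if (debugfs_enabled && !partitioned_master)
                        fprintf(stderr,
                                "warning: enable MTD_PARTITIONED_MASTER to expose debugfs files\n");
                return 0;              /* not an error: just skip the files */
        }

        /* ... debugfs_create_file(...) calls would go here ... */
        return 0;
}

int main(void)
{
        return create_debugfs_entries(true, false);
}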
3458 +diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
3459 +index 54540c8fa1a2..9f98f74ff221 100644
3460 +--- a/drivers/mtd/nand/omap2.c
3461 ++++ b/drivers/mtd/nand/omap2.c
3462 +@@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
3463 + 0x97, 0x79, 0xe5, 0x24, 0xb5};
3464 +
3465 + /**
3466 +- * omap_calculate_ecc_bch - Generate bytes of ECC bytes
3467 ++ * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
3468 + * @mtd: MTD device structure
3469 + * @dat: The pointer to data on which ecc is computed
3470 + * @ecc_code: The ecc_code buffer
3471 ++ * @i: The sector number (for a multi sector page)
3472 + *
3473 +- * Support calculating of BCH4/8 ecc vectors for the page
3474 ++ * Support calculating of BCH4/8/16 ECC vectors for one sector
3475 ++ * within a page. Sector number is in @i.
3476 + */
3477 +-static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
3478 +- const u_char *dat, u_char *ecc_calc)
3479 ++static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
3480 ++ const u_char *dat, u_char *ecc_calc, int i)
3481 + {
3482 + struct omap_nand_info *info = mtd_to_omap(mtd);
3483 + int eccbytes = info->nand.ecc.bytes;
3484 + struct gpmc_nand_regs *gpmc_regs = &info->reg;
3485 + u8 *ecc_code;
3486 +- unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
3487 ++ unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
3488 + u32 val;
3489 +- int i, j;
3490 ++ int j;
3491 ++
3492 ++ ecc_code = ecc_calc;
3493 ++ switch (info->ecc_opt) {
3494 ++ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
3495 ++ case OMAP_ECC_BCH8_CODE_HW:
3496 ++ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
3497 ++ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
3498 ++ bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
3499 ++ bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
3500 ++ *ecc_code++ = (bch_val4 & 0xFF);
3501 ++ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
3502 ++ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
3503 ++ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
3504 ++ *ecc_code++ = (bch_val3 & 0xFF);
3505 ++ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
3506 ++ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
3507 ++ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
3508 ++ *ecc_code++ = (bch_val2 & 0xFF);
3509 ++ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
3510 ++ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
3511 ++ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
3512 ++ *ecc_code++ = (bch_val1 & 0xFF);
3513 ++ break;
3514 ++ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
3515 ++ case OMAP_ECC_BCH4_CODE_HW:
3516 ++ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
3517 ++ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
3518 ++ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
3519 ++ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
3520 ++ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
3521 ++ ((bch_val1 >> 28) & 0xF);
3522 ++ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
3523 ++ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
3524 ++ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
3525 ++ *ecc_code++ = ((bch_val1 & 0xF) << 4);
3526 ++ break;
3527 ++ case OMAP_ECC_BCH16_CODE_HW:
3528 ++ val = readl(gpmc_regs->gpmc_bch_result6[i]);
3529 ++ ecc_code[0] = ((val >> 8) & 0xFF);
3530 ++ ecc_code[1] = ((val >> 0) & 0xFF);
3531 ++ val = readl(gpmc_regs->gpmc_bch_result5[i]);
3532 ++ ecc_code[2] = ((val >> 24) & 0xFF);
3533 ++ ecc_code[3] = ((val >> 16) & 0xFF);
3534 ++ ecc_code[4] = ((val >> 8) & 0xFF);
3535 ++ ecc_code[5] = ((val >> 0) & 0xFF);
3536 ++ val = readl(gpmc_regs->gpmc_bch_result4[i]);
3537 ++ ecc_code[6] = ((val >> 24) & 0xFF);
3538 ++ ecc_code[7] = ((val >> 16) & 0xFF);
3539 ++ ecc_code[8] = ((val >> 8) & 0xFF);
3540 ++ ecc_code[9] = ((val >> 0) & 0xFF);
3541 ++ val = readl(gpmc_regs->gpmc_bch_result3[i]);
3542 ++ ecc_code[10] = ((val >> 24) & 0xFF);
3543 ++ ecc_code[11] = ((val >> 16) & 0xFF);
3544 ++ ecc_code[12] = ((val >> 8) & 0xFF);
3545 ++ ecc_code[13] = ((val >> 0) & 0xFF);
3546 ++ val = readl(gpmc_regs->gpmc_bch_result2[i]);
3547 ++ ecc_code[14] = ((val >> 24) & 0xFF);
3548 ++ ecc_code[15] = ((val >> 16) & 0xFF);
3549 ++ ecc_code[16] = ((val >> 8) & 0xFF);
3550 ++ ecc_code[17] = ((val >> 0) & 0xFF);
3551 ++ val = readl(gpmc_regs->gpmc_bch_result1[i]);
3552 ++ ecc_code[18] = ((val >> 24) & 0xFF);
3553 ++ ecc_code[19] = ((val >> 16) & 0xFF);
3554 ++ ecc_code[20] = ((val >> 8) & 0xFF);
3555 ++ ecc_code[21] = ((val >> 0) & 0xFF);
3556 ++ val = readl(gpmc_regs->gpmc_bch_result0[i]);
3557 ++ ecc_code[22] = ((val >> 24) & 0xFF);
3558 ++ ecc_code[23] = ((val >> 16) & 0xFF);
3559 ++ ecc_code[24] = ((val >> 8) & 0xFF);
3560 ++ ecc_code[25] = ((val >> 0) & 0xFF);
3561 ++ break;
3562 ++ default:
3563 ++ return -EINVAL;
3564 ++ }
3565 ++
3566 ++ /* ECC scheme specific syndrome customizations */
3567 ++ switch (info->ecc_opt) {
3568 ++ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
3569 ++ /* Add constant polynomial to remainder, so that
3570 ++ * ECC of blank pages results in 0x0 on reading back
3571 ++ */
3572 ++ for (j = 0; j < eccbytes; j++)
3573 ++ ecc_calc[j] ^= bch4_polynomial[j];
3574 ++ break;
3575 ++ case OMAP_ECC_BCH4_CODE_HW:
3576 ++ /* Set 8th ECC byte as 0x0 for ROM compatibility */
3577 ++ ecc_calc[eccbytes - 1] = 0x0;
3578 ++ break;
3579 ++ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
3580 ++ /* Add constant polynomial to remainder, so that
3581 ++ * ECC of blank pages results in 0x0 on reading back
3582 ++ */
3583 ++ for (j = 0; j < eccbytes; j++)
3584 ++ ecc_calc[j] ^= bch8_polynomial[j];
3585 ++ break;
3586 ++ case OMAP_ECC_BCH8_CODE_HW:
3587 ++ /* Set 14th ECC byte as 0x0 for ROM compatibility */
3588 ++ ecc_calc[eccbytes - 1] = 0x0;
3589 ++ break;
3590 ++ case OMAP_ECC_BCH16_CODE_HW:
3591 ++ break;
3592 ++ default:
3593 ++ return -EINVAL;
3594 ++ }
3595 ++
3596 ++ return 0;
3597 ++}
3598 ++
3599 ++/**
3600 ++ * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
3601 ++ * @mtd: MTD device structure
3602 ++ * @dat: The pointer to data on which ecc is computed
3603 ++ * @ecc_code: The ecc_code buffer
3604 ++ *
3605 ++ * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used
3606 ++ * when SW based correction is required as ECC is required for one sector
3607 ++ * at a time.
3608 ++ */
3609 ++static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
3610 ++ const u_char *dat, u_char *ecc_calc)
3611 ++{
3612 ++ return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
3613 ++}
3614 ++
3615 ++/**
3616 ++ * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
3617 ++ * @mtd: MTD device structure
3618 ++ * @dat: The pointer to data on which ecc is computed
3619 ++ * @ecc_code: The ecc_code buffer
3620 ++ *
3621 ++ * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go.
3622 ++ */
3623 ++static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
3624 ++ const u_char *dat, u_char *ecc_calc)
3625 ++{
3626 ++ struct omap_nand_info *info = mtd_to_omap(mtd);
3627 ++ int eccbytes = info->nand.ecc.bytes;
3628 ++ unsigned long nsectors;
3629 ++ int i, ret;
3630 +
3631 + nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
3632 + for (i = 0; i < nsectors; i++) {
3633 +- ecc_code = ecc_calc;
3634 +- switch (info->ecc_opt) {
3635 +- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
3636 +- case OMAP_ECC_BCH8_CODE_HW:
3637 +- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
3638 +- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
3639 +- bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
3640 +- bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
3641 +- *ecc_code++ = (bch_val4 & 0xFF);
3642 +- *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
3643 +- *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
3644 +- *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
3645 +- *ecc_code++ = (bch_val3 & 0xFF);
3646 +- *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
3647 +- *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
3648 +- *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
3649 +- *ecc_code++ = (bch_val2 & 0xFF);
3650 +- *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
3651 +- *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
3652 +- *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
3653 +- *ecc_code++ = (bch_val1 & 0xFF);
3654 +- break;
3655 +- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
3656 +- case OMAP_ECC_BCH4_CODE_HW:
3657 +- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
3658 +- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
3659 +- *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
3660 +- *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
3661 +- *ecc_code++ = ((bch_val2 & 0xF) << 4) |
3662 +- ((bch_val1 >> 28) & 0xF);
3663 +- *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
3664 +- *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
3665 +- *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
3666 +- *ecc_code++ = ((bch_val1 & 0xF) << 4);
3667 +- break;
3668 +- case OMAP_ECC_BCH16_CODE_HW:
3669 +- val = readl(gpmc_regs->gpmc_bch_result6[i]);
3670 +- ecc_code[0] = ((val >> 8) & 0xFF);
3671 +- ecc_code[1] = ((val >> 0) & 0xFF);
3672 +- val = readl(gpmc_regs->gpmc_bch_result5[i]);
3673 +- ecc_code[2] = ((val >> 24) & 0xFF);
3674 +- ecc_code[3] = ((val >> 16) & 0xFF);
3675 +- ecc_code[4] = ((val >> 8) & 0xFF);
3676 +- ecc_code[5] = ((val >> 0) & 0xFF);
3677 +- val = readl(gpmc_regs->gpmc_bch_result4[i]);
3678 +- ecc_code[6] = ((val >> 24) & 0xFF);
3679 +- ecc_code[7] = ((val >> 16) & 0xFF);
3680 +- ecc_code[8] = ((val >> 8) & 0xFF);
3681 +- ecc_code[9] = ((val >> 0) & 0xFF);
3682 +- val = readl(gpmc_regs->gpmc_bch_result3[i]);
3683 +- ecc_code[10] = ((val >> 24) & 0xFF);
3684 +- ecc_code[11] = ((val >> 16) & 0xFF);
3685 +- ecc_code[12] = ((val >> 8) & 0xFF);
3686 +- ecc_code[13] = ((val >> 0) & 0xFF);
3687 +- val = readl(gpmc_regs->gpmc_bch_result2[i]);
3688 +- ecc_code[14] = ((val >> 24) & 0xFF);
3689 +- ecc_code[15] = ((val >> 16) & 0xFF);
3690 +- ecc_code[16] = ((val >> 8) & 0xFF);
3691 +- ecc_code[17] = ((val >> 0) & 0xFF);
3692 +- val = readl(gpmc_regs->gpmc_bch_result1[i]);
3693 +- ecc_code[18] = ((val >> 24) & 0xFF);
3694 +- ecc_code[19] = ((val >> 16) & 0xFF);
3695 +- ecc_code[20] = ((val >> 8) & 0xFF);
3696 +- ecc_code[21] = ((val >> 0) & 0xFF);
3697 +- val = readl(gpmc_regs->gpmc_bch_result0[i]);
3698 +- ecc_code[22] = ((val >> 24) & 0xFF);
3699 +- ecc_code[23] = ((val >> 16) & 0xFF);
3700 +- ecc_code[24] = ((val >> 8) & 0xFF);
3701 +- ecc_code[25] = ((val >> 0) & 0xFF);
3702 +- break;
3703 +- default:
3704 +- return -EINVAL;
3705 +- }
3706 +-
3707 +- /* ECC scheme specific syndrome customizations */
3708 +- switch (info->ecc_opt) {
3709 +- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
3710 +- /* Add constant polynomial to remainder, so that
3711 +- * ECC of blank pages results in 0x0 on reading back */
3712 +- for (j = 0; j < eccbytes; j++)
3713 +- ecc_calc[j] ^= bch4_polynomial[j];
3714 +- break;
3715 +- case OMAP_ECC_BCH4_CODE_HW:
3716 +- /* Set 8th ECC byte as 0x0 for ROM compatibility */
3717 +- ecc_calc[eccbytes - 1] = 0x0;
3718 +- break;
3719 +- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
3720 +- /* Add constant polynomial to remainder, so that
3721 +- * ECC of blank pages results in 0x0 on reading back */
3722 +- for (j = 0; j < eccbytes; j++)
3723 +- ecc_calc[j] ^= bch8_polynomial[j];
3724 +- break;
3725 +- case OMAP_ECC_BCH8_CODE_HW:
3726 +- /* Set 14th ECC byte as 0x0 for ROM compatibility */
3727 +- ecc_calc[eccbytes - 1] = 0x0;
3728 +- break;
3729 +- case OMAP_ECC_BCH16_CODE_HW:
3730 +- break;
3731 +- default:
3732 +- return -EINVAL;
3733 +- }
3734 ++ ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
3735 ++ if (ret)
3736 ++ return ret;
3737 +
3738 +- ecc_calc += eccbytes;
3739 ++ ecc_calc += eccbytes;
3740 + }
3741 +
3742 + return 0;
3743 +@@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
3744 + chip->write_buf(mtd, buf, mtd->writesize);
3745 +
3746 + /* Update ecc vector from GPMC result registers */
3747 +- chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
3748 ++ omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
3749 +
3750 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3751 + chip->ecc.total);
3752 +@@ -1508,6 +1551,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
3753 + return 0;
3754 + }
3755 +
3756 ++/**
3757 ++ * omap_write_subpage_bch - BCH hardware ECC based subpage write
3758 ++ * @mtd: mtd info structure
3759 ++ * @chip: nand chip info structure
3760 ++ * @offset: column address of subpage within the page
3761 ++ * @data_len: data length
3762 ++ * @buf: data buffer
3763 ++ * @oob_required: must write chip->oob_poi to OOB
3764 ++ * @page: page number to write
3765 ++ *
3766 ++ * OMAP optimized subpage write method.
3767 ++ */
3768 ++static int omap_write_subpage_bch(struct mtd_info *mtd,
3769 ++ struct nand_chip *chip, u32 offset,
3770 ++ u32 data_len, const u8 *buf,
3771 ++ int oob_required, int page)
3772 ++{
3773 ++ u8 *ecc_calc = chip->buffers->ecccalc;
3774 ++ int ecc_size = chip->ecc.size;
3775 ++ int ecc_bytes = chip->ecc.bytes;
3776 ++ int ecc_steps = chip->ecc.steps;
3777 ++ u32 start_step = offset / ecc_size;
3778 ++ u32 end_step = (offset + data_len - 1) / ecc_size;
3779 ++ int step, ret = 0;
3780 ++
3781 ++ /*
3782 ++ * Write entire page at one go as it would be optimal
3783 ++ * as ECC is calculated by hardware.
3784 ++ * ECC is calculated for all subpages but we choose
3785 ++ * only what we want.
3786 ++ */
3787 ++
3788 ++ /* Enable GPMC ECC engine */
3789 ++ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
3790 ++
3791 ++ /* Write data */
3792 ++ chip->write_buf(mtd, buf, mtd->writesize);
3793 ++
3794 ++ for (step = 0; step < ecc_steps; step++) {
3795 ++ /* mask ECC of un-touched subpages by padding 0xFF */
3796 ++ if (step < start_step || step > end_step)
3797 ++ memset(ecc_calc, 0xff, ecc_bytes);
3798 ++ else
3799 ++ ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
3800 ++
3801 ++ if (ret)
3802 ++ return ret;
3803 ++
3804 ++ buf += ecc_size;
3805 ++ ecc_calc += ecc_bytes;
3806 ++ }
3807 ++
3808 ++ /* copy calculated ECC for whole page to chip->buffer->oob */
3809 ++ /* this include masked-value(0xFF) for unwritten subpages */
3810 ++ ecc_calc = chip->buffers->ecccalc;
3811 ++ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3812 ++ chip->ecc.total);
3813 ++ if (ret)
3814 ++ return ret;
3815 ++
3816 ++ /* write OOB buffer to NAND device */
3817 ++ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
3818 ++
3819 ++ return 0;
3820 ++}
3821 ++
3822 + /**
3823 + * omap_read_page_bch - BCH ecc based page read function for entire page
3824 + * @mtd: mtd info structure
3825 +@@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
3826 + chip->ecc.total);
3827 +
3828 + /* Calculate ecc bytes */
3829 +- chip->ecc.calculate(mtd, buf, ecc_calc);
3830 ++ omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
3831 +
3832 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3833 + chip->ecc.total);
3834 +@@ -2044,7 +2153,7 @@ static int omap_nand_probe(struct platform_device *pdev)
3835 + nand_chip->ecc.strength = 4;
3836 + nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
3837 + nand_chip->ecc.correct = nand_bch_correct_data;
3838 +- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
3839 ++ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
3840 + mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
3841 + /* Reserve one byte for the OMAP marker */
3842 + oobbytes_per_step = nand_chip->ecc.bytes + 1;
3843 +@@ -2066,9 +2175,9 @@ static int omap_nand_probe(struct platform_device *pdev)
3844 + nand_chip->ecc.strength = 4;
3845 + nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
3846 + nand_chip->ecc.correct = omap_elm_correct_data;
3847 +- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
3848 + nand_chip->ecc.read_page = omap_read_page_bch;
3849 + nand_chip->ecc.write_page = omap_write_page_bch;
3850 ++ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
3851 + mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
3852 + oobbytes_per_step = nand_chip->ecc.bytes;
3853 +
3854 +@@ -2087,7 +2196,7 @@ static int omap_nand_probe(struct platform_device *pdev)
3855 + nand_chip->ecc.strength = 8;
3856 + nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
3857 + nand_chip->ecc.correct = nand_bch_correct_data;
3858 +- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
3859 ++ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
3860 + mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
3861 + /* Reserve one byte for the OMAP marker */
3862 + oobbytes_per_step = nand_chip->ecc.bytes + 1;
3863 +@@ -2109,9 +2218,9 @@ static int omap_nand_probe(struct platform_device *pdev)
3864 + nand_chip->ecc.strength = 8;
3865 + nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
3866 + nand_chip->ecc.correct = omap_elm_correct_data;
3867 +- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
3868 + nand_chip->ecc.read_page = omap_read_page_bch;
3869 + nand_chip->ecc.write_page = omap_write_page_bch;
3870 ++ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
3871 + mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
3872 + oobbytes_per_step = nand_chip->ecc.bytes;
3873 +
3874 +@@ -2131,9 +2240,9 @@ static int omap_nand_probe(struct platform_device *pdev)
3875 + nand_chip->ecc.strength = 16;
3876 + nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
3877 + nand_chip->ecc.correct = omap_elm_correct_data;
3878 +- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
3879 + nand_chip->ecc.read_page = omap_read_page_bch;
3880 + nand_chip->ecc.write_page = omap_write_page_bch;
3881 ++ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
3882 + mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
3883 + oobbytes_per_step = nand_chip->ecc.bytes;
3884 +
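The omap2 NAND hunks above split ECC generation into a per-sector helper and add omap_write_subpage_bch(): the whole page is written, but ECC for sectors outside the requested subpage is masked with 0xFF so only the touched sectors carry real ECC in OOB. A sketch of that masking loop, with a stubbed per-sector ECC function and made-up geometry constants:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ECC_SIZE   512                 /* bytes covered per ECC step */
#define ECC_BYTES  14                  /* ECC bytes per step (BCH8-like) */
#define ECC_STEPS  4                   /* sectors per page in this sketch */

/* Stand-in for _omap_calculate_ecc_bch(): fake ECC for one sector. */
static void calc_sector_ecc(const uint8_t *sector, uint8_t *ecc)
{
        memset(ecc, 0x5a, ECC_BYTES);  /* placeholder value, not real BCH */
        (void)sector;
}

/* Mask untouched sectors' ECC with 0xFF, compute it only for the subpage. */
static void build_subpage_ecc(const uint8_t *page, uint32_t offset,
                              uint32_t data_len, uint8_t *ecc_out)
{
        uint32_t start = offset / ECC_SIZE;
        uint32_t end = (offset + data_len - 1) / ECC_SIZE;

        for (uint32_t step = 0; step < ECC_STEPS; step++) {
                if (step < start || step > end)
                        memset(ecc_out, 0xff, ECC_BYTES);
                else
                        calc_sector_ecc(page + step * ECC_SIZE, ecc_out);
                ecc_out += ECC_BYTES;
        }
}

int main(void)
{
        uint8_t page[ECC_SIZE * ECC_STEPS] = { 0 };
        uint8_t ecc[ECC_BYTES * ECC_STEPS];

        build_subpage_ecc(page, ECC_SIZE, ECC_SIZE, ecc); /* write sector 1 only */
        printf("sector 0 ecc byte: 0x%02x, sector 1 ecc byte: 0x%02x\n",
               (unsigned)ecc[0], (unsigned)ecc[ECC_BYTES]);
        return 0;
}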
3885 +diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
3886 +index 8a596bfeddff..7802ac3ba934 100644
3887 +--- a/drivers/mtd/spi-nor/intel-spi.c
3888 ++++ b/drivers/mtd/spi-nor/intel-spi.c
3889 +@@ -422,7 +422,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
3890 + if (ret < 0)
3891 + return ret;
3892 +
3893 +- val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
3894 ++ val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
3895 + val |= ret << SSFSTS_CTL_COP_SHIFT;
3896 + val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
3897 + val |= SSFSTS_CTL_SCGO;
3898 +@@ -432,7 +432,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
3899 + if (ret)
3900 + return ret;
3901 +
3902 +- status = readl(ispi->base + SSFSTS_CTL);
3903 ++ status = readl(ispi->sregs + SSFSTS_CTL);
3904 + if (status & SSFSTS_CTL_FCERR)
3905 + return -EIO;
3906 + else if (status & SSFSTS_CTL_AEL)
3907 +diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
3908 +index 0641c0098738..afb7ebe20b24 100644
3909 +--- a/drivers/net/ethernet/intel/e1000e/defines.h
3910 ++++ b/drivers/net/ethernet/intel/e1000e/defines.h
3911 +@@ -398,6 +398,7 @@
3912 + #define E1000_ICR_LSC 0x00000004 /* Link Status Change */
3913 + #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
3914 + #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
3915 ++#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */
3916 + #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
3917 + #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
3918 + /* If this bit asserted, the driver should claim the interrupt */
3919 +diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
3920 +index b322011ec282..f457c5703d0c 100644
3921 +--- a/drivers/net/ethernet/intel/e1000e/mac.c
3922 ++++ b/drivers/net/ethernet/intel/e1000e/mac.c
3923 +@@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
3924 + * Checks to see of the link status of the hardware has changed. If a
3925 + * change in link status has been detected, then we read the PHY registers
3926 + * to get the current speed/duplex if link exists.
3927 ++ *
3928 ++ * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
3929 ++ * up).
3930 + **/
3931 + s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
3932 + {
3933 +@@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
3934 + * Change or Rx Sequence Error interrupt.
3935 + */
3936 + if (!mac->get_link_status)
3937 +- return 0;
3938 ++ return 1;
3939 +
3940 + /* First we want to see if the MII Status Register reports
3941 + * link. If so, then we want to get the current speed/duplex
3942 +@@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
3943 + * different link partner.
3944 + */
3945 + ret_val = e1000e_config_fc_after_link_up(hw);
3946 +- if (ret_val)
3947 ++ if (ret_val) {
3948 + e_dbg("Error configuring flow control\n");
3949 ++ return ret_val;
3950 ++ }
3951 +
3952 +- return ret_val;
3953 ++ return 1;
3954 + }
3955 +
3956 + /**
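The mac.c hunk above turns e1000e_check_for_copper_link() into a tri-state result, as its new kernel-doc states: negative error code, 0 for link down, 1 for link up; the netdev.c caller later in this patch switches to "link_active = ret_val > 0". A tiny sketch of consuming such a return value:

#include <stdbool.h>
#include <stdio.h>

/* Tri-state link check: <0 error, 0 down, 1 up (the patched convention). */
static int check_for_link(bool phy_reports_up)
{
        /* A real driver could also fail and return a negative error here. */
        return phy_reports_up ? 1 : 0;
}

int main(void)
{
        int ret = check_for_link(true);
        bool link_active = ret > 0;    /* the caller-side idiom from the patch */

        if (ret < 0)
                printf("link check failed: %d\n", ret);
        else
                printf("link %s\n", link_active ? "up" : "down");
        return 0;
}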
3957 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
3958 +index 327dfe5bedc0..c38b00c90f48 100644
3959 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
3960 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
3961 +@@ -1910,14 +1910,30 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
3962 + struct net_device *netdev = data;
3963 + struct e1000_adapter *adapter = netdev_priv(netdev);
3964 + struct e1000_hw *hw = &adapter->hw;
3965 ++ u32 icr;
3966 ++ bool enable = true;
3967 ++
3968 ++ icr = er32(ICR);
3969 ++ if (icr & E1000_ICR_RXO) {
3970 ++ ew32(ICR, E1000_ICR_RXO);
3971 ++ enable = false;
3972 ++ /* napi poll will re-enable Other, make sure it runs */
3973 ++ if (napi_schedule_prep(&adapter->napi)) {
3974 ++ adapter->total_rx_bytes = 0;
3975 ++ adapter->total_rx_packets = 0;
3976 ++ __napi_schedule(&adapter->napi);
3977 ++ }
3978 ++ }
3979 ++ if (icr & E1000_ICR_LSC) {
3980 ++ ew32(ICR, E1000_ICR_LSC);
3981 ++ hw->mac.get_link_status = true;
3982 ++ /* guard against interrupt when we're going down */
3983 ++ if (!test_bit(__E1000_DOWN, &adapter->state))
3984 ++ mod_timer(&adapter->watchdog_timer, jiffies + 1);
3985 ++ }
3986 +
3987 +- hw->mac.get_link_status = true;
3988 +-
3989 +- /* guard against interrupt when we're going down */
3990 +- if (!test_bit(__E1000_DOWN, &adapter->state)) {
3991 +- mod_timer(&adapter->watchdog_timer, jiffies + 1);
3992 ++ if (enable && !test_bit(__E1000_DOWN, &adapter->state))
3993 + ew32(IMS, E1000_IMS_OTHER);
3994 +- }
3995 +
3996 + return IRQ_HANDLED;
3997 + }
3998 +@@ -2687,7 +2703,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
3999 + napi_complete_done(napi, work_done);
4000 + if (!test_bit(__E1000_DOWN, &adapter->state)) {
4001 + if (adapter->msix_entries)
4002 +- ew32(IMS, adapter->rx_ring->ims_val);
4003 ++ ew32(IMS, adapter->rx_ring->ims_val |
4004 ++ E1000_IMS_OTHER);
4005 + else
4006 + e1000_irq_enable(adapter);
4007 + }
4008 +@@ -3004,8 +3021,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
4009 +
4010 + hw->mac.ops.config_collision_dist(hw);
4011 +
4012 +- /* SPT and CNP Si errata workaround to avoid data corruption */
4013 +- if (hw->mac.type >= e1000_pch_spt) {
4014 ++ /* SPT and KBL Si errata workaround to avoid data corruption */
4015 ++ if (hw->mac.type == e1000_pch_spt) {
4016 + u32 reg_val;
4017 +
4018 + reg_val = er32(IOSFPC);
4019 +@@ -3013,7 +3030,9 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
4020 + ew32(IOSFPC, reg_val);
4021 +
4022 + reg_val = er32(TARC(0));
4023 +- reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ;
4024 ++ /* SPT and KBL Si errata workaround to avoid Tx hang */
4025 ++ reg_val &= ~BIT(28);
4026 ++ reg_val |= BIT(29);
4027 + ew32(TARC(0), reg_val);
4028 + }
4029 + }
4030 +@@ -4204,7 +4223,7 @@ static void e1000e_trigger_lsc(struct e1000_adapter *adapter)
4031 + struct e1000_hw *hw = &adapter->hw;
4032 +
4033 + if (adapter->msix_entries)
4034 +- ew32(ICS, E1000_ICS_OTHER);
4035 ++ ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER);
4036 + else
4037 + ew32(ICS, E1000_ICS_LSC);
4038 + }
4039 +@@ -5081,7 +5100,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
4040 + case e1000_media_type_copper:
4041 + if (hw->mac.get_link_status) {
4042 + ret_val = hw->mac.ops.check_for_link(hw);
4043 +- link_active = !hw->mac.get_link_status;
4044 ++ link_active = ret_val > 0;
4045 + } else {
4046 + link_active = true;
4047 + }
4048 +@@ -5099,7 +5118,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
4049 + break;
4050 + }
4051 +
4052 +- if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4053 ++ if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4054 + (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4055 + /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
4056 + e_info("Gigabit has been disabled, downgrading speed\n");
4057 +diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
4058 +index d78d47b41a71..86ff0969efb6 100644
4059 +--- a/drivers/net/ethernet/intel/e1000e/phy.c
4060 ++++ b/drivers/net/ethernet/intel/e1000e/phy.c
4061 +@@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
4062 + s32 ret_val = 0;
4063 + u16 i, phy_status;
4064 +
4065 ++ *success = false;
4066 + for (i = 0; i < iterations; i++) {
4067 + /* Some PHYs require the MII_BMSR register to be read
4068 + * twice due to the link bit being sticky. No harm doing
4069 +@@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
4070 + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
4071 + if (ret_val)
4072 + break;
4073 +- if (phy_status & BMSR_LSTATUS)
4074 ++ if (phy_status & BMSR_LSTATUS) {
4075 ++ *success = true;
4076 + break;
4077 ++ }
4078 + if (usec_interval >= 1000)
4079 + msleep(usec_interval / 1000);
4080 + else
4081 + udelay(usec_interval);
4082 + }
4083 +
4084 +- *success = (i < iterations);
4085 +-
4086 + return ret_val;
4087 + }
4088 +
4089 +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
4090 +index 9dffaba85ae6..103c0a742d03 100644
4091 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
4092 ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
4093 +@@ -1229,7 +1229,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
4094 + break;
4095 +
4096 + /* prevent any other reads prior to eop_desc */
4097 +- read_barrier_depends();
4098 ++ smp_rmb();
4099 +
4100 + /* if DD is not set pending work has not been completed */
4101 + if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
4102 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
4103 +index 6498da8806cb..ea20aacd5e1d 100644
4104 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
4105 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
4106 +@@ -3760,7 +3760,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4107 + break;
4108 +
4109 + /* prevent any other reads prior to eop_desc */
4110 +- read_barrier_depends();
4111 ++ smp_rmb();
4112 +
4113 + /* if the descriptor isn't done, no work yet to do */
4114 + if (!(eop_desc->cmd_type_offset_bsz &
4115 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
4116 +index 120c68f78951..3c07ff171ddc 100644
4117 +--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
4118 ++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
4119 +@@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
4120 + break;
4121 +
4122 + /* prevent any other reads prior to eop_desc */
4123 +- read_barrier_depends();
4124 ++ smp_rmb();
4125 +
4126 + i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
4127 + /* we have caught up to head, no work left to do */
4128 +diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
4129 +index c32c62462c84..07a4e6e13925 100644
4130 +--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
4131 ++++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
4132 +@@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
4133 + break;
4134 +
4135 + /* prevent any other reads prior to eop_desc */
4136 +- read_barrier_depends();
4137 ++ smp_rmb();
4138 +
4139 + i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
4140 + /* if the descriptor isn't done, no work yet to do */
4141 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
4142 +index ea69af267d63..b0031c5ff767 100644
4143 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
4144 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
4145 +@@ -6970,7 +6970,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
4146 + break;
4147 +
4148 + /* prevent any other reads prior to eop_desc */
4149 +- read_barrier_depends();
4150 ++ smp_rmb();
4151 +
4152 + /* if DD is not set pending work has not been completed */
4153 + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
4154 +diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
4155 +index 1ed556911b14..6f5888bd9194 100644
4156 +--- a/drivers/net/ethernet/intel/igbvf/netdev.c
4157 ++++ b/drivers/net/ethernet/intel/igbvf/netdev.c
4158 +@@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
4159 + break;
4160 +
4161 + /* prevent any other reads prior to eop_desc */
4162 +- read_barrier_depends();
4163 ++ smp_rmb();
4164 +
4165 + /* if DD is not set pending work has not been completed */
4166 + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
4167 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4168 +index 6d5f31e94358..879a9c4cef59 100644
4169 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4170 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4171 +@@ -1192,7 +1192,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
4172 + break;
4173 +
4174 + /* prevent any other reads prior to eop_desc */
4175 +- read_barrier_depends();
4176 ++ smp_rmb();
4177 +
4178 + /* if DD is not set pending work has not been completed */
4179 + if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
4180 +diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
4181 +index 032f8ac06357..90ecc4b06462 100644
4182 +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
4183 ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
4184 +@@ -326,7 +326,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
4185 + break;
4186 +
4187 + /* prevent any other reads prior to eop_desc */
4188 +- read_barrier_depends();
4189 ++ smp_rmb();
4190 +
4191 + /* if DD is not set pending work has not been completed */
4192 + if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
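The series of hunks above replaces read_barrier_depends() with smp_rmb() in the Intel Ethernet Tx-clean loops: once the descriptor's done bit is observed, a read barrier must order that check before the rest of the descriptor is read. A userspace analogue using C11 acquire semantics, which gives the same "check the done flag, then safely read the payload" ordering:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct tx_desc {
        uint32_t payload;
        atomic_uint status;            /* bit 0 models the DD / DONE flag */
};

#define DESC_DONE 0x1u

/* Consumer side: only read the payload once the done bit is visible. */
static int clean_desc(struct tx_desc *desc, uint32_t *out)
{
        /* The acquire load plays the role of "read flag, then smp_rmb()". */
        if (!(atomic_load_explicit(&desc->status, memory_order_acquire) & DESC_DONE))
                return 0;              /* not done yet, nothing to clean */

        *out = desc->payload;          /* safe: ordered after the flag read */
        return 1;
}

int main(void)
{
        struct tx_desc d = { .payload = 42 };
        uint32_t v;

        atomic_store_explicit(&d.status, DESC_DONE, memory_order_release);
        if (clean_desc(&d, &v))
                printf("descriptor complete, payload=%u\n", (unsigned)v);
        return 0;
}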
4193 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
4194 +index 64a04975bcf8..bc93b69cfd1e 100644
4195 +--- a/drivers/net/ethernet/marvell/mvneta.c
4196 ++++ b/drivers/net/ethernet/marvell/mvneta.c
4197 +@@ -816,11 +816,14 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
4198 + {
4199 + u32 val;
4200 +
4201 +- /* Only 255 descriptors can be added at once ; Assume caller
4202 +- * process TX desriptors in quanta less than 256
4203 +- */
4204 +- val = pend_desc + txq->pending;
4205 +- mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
4206 ++ pend_desc += txq->pending;
4207 ++
4208 ++ /* Only 255 Tx descriptors can be added at once */
4209 ++ do {
4210 ++ val = min(pend_desc, 255);
4211 ++ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
4212 ++ pend_desc -= val;
4213 ++ } while (pend_desc > 0);
4214 + txq->pending = 0;
4215 + }
4216 +
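The mvneta hunk above drops the assumption that callers never add more than 255 descriptors at once: the pending count is flushed to MVNETA_TXQ_UPDATE_REG in chunks of at most 255. The chunking loop on its own, as a runnable sketch:

#include <stdio.h>

/* Hardware stand-in: the register only accepts counts of 1..255 per write. */
static void write_pend_reg(unsigned int val)
{
        printf("write %u pending descriptors\n", val);
}

/* Flush an arbitrary pending count in chunks the register can accept. */
static void flush_pending(unsigned int pend_desc)
{
        while (pend_desc > 0) {
                unsigned int val = pend_desc < 255 ? pend_desc : 255;

                write_pend_reg(val);
                pend_desc -= val;
        }
}

int main(void)
{
        flush_pending(612);            /* 255 + 255 + 102 */
        return 0;
}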
4217 +diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
4218 +index e8b5ff42f5a8..c8e7b54a538a 100644
4219 +--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
4220 ++++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
4221 +@@ -72,18 +72,21 @@
4222 + #define IWL9000_SMEM_OFFSET 0x400000
4223 + #define IWL9000_SMEM_LEN 0x68000
4224 +
4225 +-#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
4226 ++#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
4227 ++#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
4228 + #define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
4229 + #define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
4230 + #define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
4231 +-#define IWL9000_MODULE_FIRMWARE(api) \
4232 +- IWL9000_FW_PRE "-" __stringify(api) ".ucode"
4233 ++#define IWL9000A_MODULE_FIRMWARE(api) \
4234 ++ IWL9000A_FW_PRE __stringify(api) ".ucode"
4235 ++#define IWL9000B_MODULE_FIRMWARE(api) \
4236 ++ IWL9000B_FW_PRE __stringify(api) ".ucode"
4237 + #define IWL9000RFB_MODULE_FIRMWARE(api) \
4238 +- IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode"
4239 ++ IWL9000RFB_FW_PRE __stringify(api) ".ucode"
4240 + #define IWL9260A_MODULE_FIRMWARE(api) \
4241 +- IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
4242 ++ IWL9260A_FW_PRE __stringify(api) ".ucode"
4243 + #define IWL9260B_MODULE_FIRMWARE(api) \
4244 +- IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
4245 ++ IWL9260B_FW_PRE __stringify(api) ".ucode"
4246 +
4247 + #define NVM_HW_SECTION_NUM_FAMILY_9000 10
4248 +
4249 +@@ -193,7 +196,48 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
4250 + .nvm_ver = IWL9000_NVM_VERSION,
4251 + .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
4252 + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
4253 ++};
4254 ++
4255 ++const struct iwl_cfg iwl9460_2ac_cfg_soc = {
4256 ++ .name = "Intel(R) Dual Band Wireless AC 9460",
4257 ++ .fw_name_pre = IWL9000A_FW_PRE,
4258 ++ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
4259 ++ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
4260 ++ IWL_DEVICE_9000,
4261 ++ .ht_params = &iwl9000_ht_params,
4262 ++ .nvm_ver = IWL9000_NVM_VERSION,
4263 ++ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
4264 ++ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
4265 + .integrated = true,
4266 ++ .soc_latency = 5000,
4267 ++};
4268 ++
4269 ++const struct iwl_cfg iwl9461_2ac_cfg_soc = {
4270 ++ .name = "Intel(R) Dual Band Wireless AC 9461",
4271 ++ .fw_name_pre = IWL9000A_FW_PRE,
4272 ++ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
4273 ++ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
4274 ++ IWL_DEVICE_9000,
4275 ++ .ht_params = &iwl9000_ht_params,
4276 ++ .nvm_ver = IWL9000_NVM_VERSION,
4277 ++ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
4278 ++ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
4279 ++ .integrated = true,
4280 ++ .soc_latency = 5000,
4281 ++};
4282 ++
4283 ++const struct iwl_cfg iwl9462_2ac_cfg_soc = {
4284 ++ .name = "Intel(R) Dual Band Wireless AC 9462",
4285 ++ .fw_name_pre = IWL9000A_FW_PRE,
4286 ++ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
4287 ++ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
4288 ++ IWL_DEVICE_9000,
4289 ++ .ht_params = &iwl9000_ht_params,
4290 ++ .nvm_ver = IWL9000_NVM_VERSION,
4291 ++ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
4292 ++ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
4293 ++ .integrated = true,
4294 ++ .soc_latency = 5000,
4295 + };
4296 +
4297 + const struct iwl_cfg iwl9560_2ac_cfg = {
4298 +@@ -205,10 +249,23 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
4299 + .nvm_ver = IWL9000_NVM_VERSION,
4300 + .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
4301 + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
4302 +- .integrated = true,
4303 + };
4304 +
4305 +-MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
4306 ++const struct iwl_cfg iwl9560_2ac_cfg_soc = {
4307 ++ .name = "Intel(R) Dual Band Wireless AC 9560",
4308 ++ .fw_name_pre = IWL9000A_FW_PRE,
4309 ++ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
4310 ++ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
4311 ++ IWL_DEVICE_9000,
4312 ++ .ht_params = &iwl9000_ht_params,
4313 ++ .nvm_ver = IWL9000_NVM_VERSION,
4314 ++ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
4315 ++ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
4316 ++ .integrated = true,
4317 ++ .soc_latency = 5000,
4318 ++};
4319 ++MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
4320 ++MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
4321 + MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
4322 + MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
4323 + MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
4324 +diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
4325 +index a440140ed8dd..7eade165b747 100644
4326 +--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
4327 ++++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
4328 +@@ -80,15 +80,15 @@
4329 + #define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
4330 +
4331 + #define IWL_A000_HR_MODULE_FIRMWARE(api) \
4332 +- IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
4333 ++ IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
4334 + #define IWL_A000_JF_MODULE_FIRMWARE(api) \
4335 +- IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"
4336 ++ IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
4337 + #define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
4338 +- IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode"
4339 ++ IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
4340 + #define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
4341 +- IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode"
4342 ++ IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
4343 + #define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
4344 +- IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode"
4345 ++ IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
4346 +
4347 + #define NVM_HW_SECTION_NUM_FAMILY_A000 10
4348 +
4349 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
4350 +index 5a40092febfb..3bfc657f6b42 100644
4351 +--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
4352 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
4353 +@@ -531,6 +531,8 @@ struct iwl_scan_config_v1 {
4354 + } __packed; /* SCAN_CONFIG_DB_CMD_API_S */
4355 +
4356 + #define SCAN_TWO_LMACS 2
4357 ++#define SCAN_LB_LMAC_IDX 0
4358 ++#define SCAN_HB_LMAC_IDX 1
4359 +
4360 + struct iwl_scan_config {
4361 + __le32 flags;
4362 +@@ -578,6 +580,7 @@ enum iwl_umac_scan_general_flags {
4363 + IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
4364 + IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
4365 + IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
4366 ++ IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13),
4367 + };
4368 +
4369 + /**
4370 +@@ -631,12 +634,17 @@ struct iwl_scan_req_umac_tail {
4371 + * @uid: scan id, &enum iwl_umac_scan_uid_offsets
4372 + * @ooc_priority: out of channel priority - &enum iwl_scan_priority
4373 + * @general_flags: &enum iwl_umac_scan_general_flags
4374 +- * @reserved2: for future use and alignment
4375 + * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
4376 + * @extended_dwell: dwell time for channels 1, 6 and 11
4377 + * @active_dwell: dwell time for active scan
4378 + * @passive_dwell: dwell time for passive scan
4379 + * @fragmented_dwell: dwell time for fragmented passive scan
4380 ++ * @adwell_default_n_aps: for adaptive dwell the default number of APs
4381 ++ * per channel
4382 ++ * @adwell_default_n_aps_social: for adaptive dwell the default
4383 ++ * number of APs per social (1,6,11) channel
4384 ++ * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
4385 ++ * to total scan time
4386 + * @max_out_time: max out of serving channel time, per LMAC - for CDB there
4387 + * are 2 LMACs
4388 + * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
4389 +@@ -644,6 +652,8 @@ struct iwl_scan_req_umac_tail {
4390 + * @channel_flags: &enum iwl_scan_channel_flags
4391 + * @n_channels: num of channels in scan request
4392 + * @reserved: for future use and alignment
4393 ++ * @reserved2: for future use and alignment
4394 ++ * @reserved3: for future use and alignment
4395 + * @data: &struct iwl_scan_channel_cfg_umac and
4396 + * &struct iwl_scan_req_umac_tail
4397 + */
4398 +@@ -651,41 +661,64 @@ struct iwl_scan_req_umac {
4399 + __le32 flags;
4400 + __le32 uid;
4401 + __le32 ooc_priority;
4402 +- /* SCAN_GENERAL_PARAMS_API_S_VER_4 */
4403 + __le16 general_flags;
4404 +- u8 reserved2;
4405 ++ u8 reserved;
4406 + u8 scan_start_mac_id;
4407 +- u8 extended_dwell;
4408 +- u8 active_dwell;
4409 +- u8 passive_dwell;
4410 +- u8 fragmented_dwell;
4411 + union {
4412 + struct {
4413 ++ u8 extended_dwell;
4414 ++ u8 active_dwell;
4415 ++ u8 passive_dwell;
4416 ++ u8 fragmented_dwell;
4417 + __le32 max_out_time;
4418 + __le32 suspend_time;
4419 + __le32 scan_priority;
4420 +- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
4421 ++ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
4422 + u8 channel_flags;
4423 + u8 n_channels;
4424 +- __le16 reserved;
4425 ++ __le16 reserved2;
4426 + u8 data[];
4427 + } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
4428 + struct {
4429 ++ u8 extended_dwell;
4430 ++ u8 active_dwell;
4431 ++ u8 passive_dwell;
4432 ++ u8 fragmented_dwell;
4433 + __le32 max_out_time[SCAN_TWO_LMACS];
4434 + __le32 suspend_time[SCAN_TWO_LMACS];
4435 + __le32 scan_priority;
4436 +- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
4437 ++ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
4438 + u8 channel_flags;
4439 + u8 n_channels;
4440 +- __le16 reserved;
4441 ++ __le16 reserved2;
4442 + u8 data[];
4443 + } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
4444 ++ struct {
4445 ++ u8 active_dwell;
4446 ++ u8 passive_dwell;
4447 ++ u8 fragmented_dwell;
4448 ++ u8 adwell_default_n_aps;
4449 ++ u8 adwell_default_n_aps_social;
4450 ++ u8 reserved3;
4451 ++ __le16 adwell_max_budget;
4452 ++ __le32 max_out_time[SCAN_TWO_LMACS];
4453 ++ __le32 suspend_time[SCAN_TWO_LMACS];
4454 ++ __le32 scan_priority;
4455 ++ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
4456 ++ u8 channel_flags;
4457 ++ u8 n_channels;
4458 ++ __le16 reserved2;
4459 ++ u8 data[];
4460 ++ } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
4461 + };
4462 + } __packed;
4463 +
4464 +-#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
4465 ++#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac)
4466 ++#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \
4467 ++ 2 * sizeof(u8) - sizeof(__le16))
4468 + #define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
4469 +- 2 * sizeof(__le32))
4470 ++ 2 * sizeof(__le32) - 2 * sizeof(u8) - \
4471 ++ sizeof(__le16))
4472 +
4473 + /**
4474 + * struct iwl_umac_scan_abort
4475 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
4476 +index 279248cd9cfb..e988e4c371c4 100644
4477 +--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
4478 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
4479 +@@ -262,6 +262,7 @@ enum iwl_ucode_tlv_api {
4480 + IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
4481 + IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
4482 + /* API Set 1 */
4483 ++ IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
4484 + IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
4485 + IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
4486 + IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37,
4487 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
4488 +index 71cb1ecde0f7..e226179c32fa 100644
4489 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
4490 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
4491 +@@ -364,6 +364,7 @@ struct iwl_cfg {
4492 + u32 dccm2_len;
4493 + u32 smem_offset;
4494 + u32 smem_len;
4495 ++ u32 soc_latency;
4496 + u16 nvm_ver;
4497 + u16 nvm_calib_ver;
4498 + u16 rx_with_siso_diversity:1,
4499 +@@ -471,6 +472,10 @@ extern const struct iwl_cfg iwl9260_2ac_cfg;
4500 + extern const struct iwl_cfg iwl9270_2ac_cfg;
4501 + extern const struct iwl_cfg iwl9460_2ac_cfg;
4502 + extern const struct iwl_cfg iwl9560_2ac_cfg;
4503 ++extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
4504 ++extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
4505 ++extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
4506 ++extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
4507 + extern const struct iwl_cfg iwla000_2ac_cfg_hr;
4508 + extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
4509 + extern const struct iwl_cfg iwla000_2ac_cfg_jf;
4510 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
4511 +index 949e63418299..8dcdb522b846 100644
4512 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
4513 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
4514 +@@ -1124,6 +1124,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
4515 + IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
4516 + }
4517 +
4518 ++static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
4519 ++{
4520 ++ return fw_has_api(&mvm->fw->ucode_capa,
4521 ++ IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
4522 ++}
4523 ++
4524 + static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
4525 + {
4526 + /* For now we only use this mode to differentiate between
4527 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
4528 +index 774122fed454..e4fd476e9ccb 100644
4529 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
4530 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
4531 +@@ -130,6 +130,19 @@ struct iwl_mvm_scan_params {
4532 + u32 measurement_dwell;
4533 + };
4534 +
4535 ++static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
4536 ++{
4537 ++ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
4538 ++
4539 ++ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
4540 ++ return (void *)&cmd->v7.data;
4541 ++
4542 ++ if (iwl_mvm_has_new_tx_api(mvm))
4543 ++ return (void *)&cmd->v6.data;
4544 ++
4545 ++ return (void *)&cmd->v1.data;
4546 ++}
4547 ++
4548 + static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
4549 + {
4550 + if (mvm->scan_rx_ant != ANT_NONE)
4551 +@@ -1075,25 +1088,57 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
4552 + {
4553 + struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
4554 +
4555 ++ if (iwl_mvm_is_regular_scan(params))
4556 ++ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
4557 ++ else
4558 ++ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
4559 ++
4560 ++ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
4561 ++ if (params->measurement_dwell) {
4562 ++ cmd->v7.active_dwell = params->measurement_dwell;
4563 ++ cmd->v7.passive_dwell = params->measurement_dwell;
4564 ++ } else {
4565 ++ cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE;
4566 ++ cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
4567 ++ }
4568 ++ cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
4569 ++
4570 ++ cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
4571 ++ cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
4572 ++ cpu_to_le32(timing->max_out_time);
4573 ++ cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
4574 ++ cpu_to_le32(timing->suspend_time);
4575 ++ if (iwl_mvm_is_cdb_supported(mvm)) {
4576 ++ cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
4577 ++ cpu_to_le32(timing->max_out_time);
4578 ++ cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
4579 ++ cpu_to_le32(timing->suspend_time);
4580 ++ }
4581 ++
4582 ++ return;
4583 ++ }
4584 ++
4585 + if (params->measurement_dwell) {
4586 +- cmd->active_dwell = params->measurement_dwell;
4587 +- cmd->passive_dwell = params->measurement_dwell;
4588 +- cmd->extended_dwell = params->measurement_dwell;
4589 ++ cmd->v1.active_dwell = params->measurement_dwell;
4590 ++ cmd->v1.passive_dwell = params->measurement_dwell;
4591 ++ cmd->v1.extended_dwell = params->measurement_dwell;
4592 + } else {
4593 +- cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
4594 +- cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
4595 +- cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
4596 ++ cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE;
4597 ++ cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
4598 ++ cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
4599 + }
4600 +- cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
4601 ++ cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
4602 +
4603 + if (iwl_mvm_has_new_tx_api(mvm)) {
4604 + cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
4605 +- cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
4606 +- cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
4607 ++ cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
4608 ++ cpu_to_le32(timing->max_out_time);
4609 ++ cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
4610 ++ cpu_to_le32(timing->suspend_time);
4611 + if (iwl_mvm_is_cdb_supported(mvm)) {
4612 +- cmd->v6.max_out_time[1] =
4613 ++ cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
4614 + cpu_to_le32(timing->max_out_time);
4615 +- cmd->v6.suspend_time[1] =
4616 ++ cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
4617 + cpu_to_le32(timing->suspend_time);
4618 + }
4619 + } else {
4620 +@@ -1102,11 +1147,6 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
4621 + cmd->v1.scan_priority =
4622 + cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
4623 + }
4624 +-
4625 +- if (iwl_mvm_is_regular_scan(params))
4626 +- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
4627 +- else
4628 +- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
4629 + }
4630 +
4631 + static void
4632 +@@ -1178,8 +1218,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
4633 + int type)
4634 + {
4635 + struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
4636 +- void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
4637 +- (void *)&cmd->v6.data : (void *)&cmd->v1.data;
4638 ++ void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
4639 + struct iwl_scan_req_umac_tail *sec_part = cmd_data +
4640 + sizeof(struct iwl_scan_channel_cfg_umac) *
4641 + mvm->fw->ucode_capa.n_scan_channels;
4642 +@@ -1216,7 +1255,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
4643 + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
4644 + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
4645 +
4646 +- if (iwl_mvm_has_new_tx_api(mvm)) {
4647 ++ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
4648 ++ cmd->v7.channel_flags = channel_flags;
4649 ++ cmd->v7.n_channels = params->n_channels;
4650 ++ } else if (iwl_mvm_has_new_tx_api(mvm)) {
4651 + cmd->v6.channel_flags = channel_flags;
4652 + cmd->v6.n_channels = params->n_channels;
4653 + } else {
4654 +@@ -1661,8 +1703,10 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
4655 + {
4656 + int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
4657 +
4658 +- if (iwl_mvm_has_new_tx_api(mvm))
4659 +- base_size = IWL_SCAN_REQ_UMAC_SIZE;
4660 ++ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
4661 ++ base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
4662 ++ else if (iwl_mvm_has_new_tx_api(mvm))
4663 ++ base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
4664 +
4665 + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
4666 + return base_size +
4667 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4668 +index 858765fed8f8..548e1928430d 100644
4669 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4670 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4671 +@@ -465,6 +465,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4672 + {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
4673 + {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
4674 + {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
4675 ++ {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
4676 ++ {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
4677 + {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
4678 + {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
4679 + {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
4680 +@@ -483,6 +485,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4681 + {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
4682 + {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
4683 + {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
4684 ++ {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)},
4685 + {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
4686 + {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
4687 + {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
4688 +@@ -508,67 +511,143 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4689 + {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
4690 + {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
4691 + {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
4692 ++ {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)},
4693 ++ {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
4694 +
4695 + /* 9000 Series */
4696 +- {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
4697 +- {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
4698 +- {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
4699 + {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
4700 + {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
4701 + {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
4702 +- {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
4703 +- {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
4704 +- {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
4705 +- {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
4706 +- {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
4707 +- {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
4708 +- {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
4709 +- {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
4710 +- {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
4711 +- {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
4712 +- {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
4713 +- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
4714 +- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
4715 +- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
4716 +- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
4717 +- {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
4718 +- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
4719 +- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
4720 +- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
4721 ++ {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
4722 ++ {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
4723 ++ {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
4724 ++ {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)},
4725 + {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
4726 +- {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
4727 + {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
4728 +- {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
4729 +- {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
4730 +- {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
4731 + {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
4732 +- {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
4733 +- {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
4734 +- {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
4735 +- {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
4736 +- {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
4737 +- {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
4738 ++ {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
4739 ++ {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
4740 ++ {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
4741 + {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
4742 + {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
4743 + {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
4744 + {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
4745 +- {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
4746 +- {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
4747 +- {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
4748 ++ {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
4749 ++ {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
4750 ++ {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
4751 ++ {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
4752 ++ {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
4753 + {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
4754 +- {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
4755 +- {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)},
4756 +- {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)},
4757 +- {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)},
4758 +- {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
4759 +- {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)},
4760 +- {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)},
4761 +- {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)},
4762 +- {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)},
4763 +- {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)},
4764 +- {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)},
4765 +- {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)},
4766 +- {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
4767 ++ {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
4768 ++ {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
4769 ++ {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
4770 ++ {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
4771 ++ {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
4772 ++ {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
4773 ++ {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
4774 ++ {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
4775 ++ {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
4776 ++ {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
4777 ++ {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
4778 ++ {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
4779 ++ {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
4780 ++ {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
4781 ++ {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
4782 ++ {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
4783 ++ {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
4784 ++ {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
4785 ++ {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
4786 ++ {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
4787 ++ {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
4788 ++ {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
4789 ++ {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
4790 ++ {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
4791 ++ {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
4792 ++ {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
4793 ++ {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
4794 ++ {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
4795 ++ {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
4796 ++ {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
4797 ++ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
4798 ++ {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
4799 ++ {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
4800 ++ {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
4801 ++ {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
4802 ++ {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
4803 ++ {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
4804 ++ {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
4805 ++ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)},
4806 ++ {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)},
4807 ++ {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)},
4808 ++ {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)},
4809 ++ {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)},
4810 ++ {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)},
4811 ++ {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)},
4812 ++ {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)},
4813 ++ {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)},
4814 ++ {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)},
4815 ++ {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)},
4816 ++ {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)},
4817 ++ {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)},
4818 ++ {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)},
4819 ++ {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)},
4820 ++ {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)},
4821 ++ {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)},
4822 ++ {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)},
4823 ++ {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)},
4824 ++ {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
4825 ++ {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
4826 ++ {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
4827 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
4828 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
4829 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
4830 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
4831 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
4832 ++ {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
4833 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
4834 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
4835 ++ {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
4836 ++ {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
4837 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)},
4838 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)},
4839 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)},
4840 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)},
4841 ++ {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)},
4842 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)},
4843 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)},
4844 ++ {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
4845 ++ {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
4846 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)},
4847 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)},
4848 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)},
4849 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
4850 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
4851 ++ {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
4852 ++ {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
4853 ++ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
4854 ++ {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
4855 ++ {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
4856 ++ {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
4857 ++ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
4858 ++ {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
4859 ++ {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
4860 ++ {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
4861 ++ {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
4862 ++ {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
4863 ++ {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
4864 ++ {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)},
4865 ++ {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)},
4866 ++ {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)},
4867 ++ {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)},
4868 ++ {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)},
4869 ++ {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)},
4870 ++ {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
4871 ++ {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
4872 ++ {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
4873 ++ {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
4874 ++ {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
4875 ++ {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
4876 ++ {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
4877 +
4878 + /* a000 Series */
4879 + {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
4880 +@@ -576,8 +655,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4881 + {IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ax_cfg_hr)},
4882 + {IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)},
4883 + {IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)},
4884 +- {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ax_cfg_hr)},
4885 ++ {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)},
4886 ++ {IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)},
4887 + {IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)},
4888 ++ {IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)},
4889 ++ {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
4890 ++ {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
4891 ++ {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
4892 ++
4893 + #endif /* CONFIG_IWLMVM */
4894 +
4895 + {0}
4896 +diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
4897 +index d5a3bf91a03e..ab6d39e12069 100644
4898 +--- a/drivers/net/wireless/intersil/p54/main.c
4899 ++++ b/drivers/net/wireless/intersil/p54/main.c
4900 +@@ -852,12 +852,11 @@ void p54_unregister_common(struct ieee80211_hw *dev)
4901 + {
4902 + struct p54_common *priv = dev->priv;
4903 +
4904 +-#ifdef CONFIG_P54_LEDS
4905 +- p54_unregister_leds(priv);
4906 +-#endif /* CONFIG_P54_LEDS */
4907 +-
4908 + if (priv->registered) {
4909 + priv->registered = false;
4910 ++#ifdef CONFIG_P54_LEDS
4911 ++ p54_unregister_leds(priv);
4912 ++#endif /* CONFIG_P54_LEDS */
4913 + ieee80211_unregister_hw(dev);
4914 + }
4915 +
4916 +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
4917 +index e2f4f5778267..086aad22743d 100644
4918 +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
4919 ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
4920 +@@ -57,7 +57,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
4921 + if (status >= 0)
4922 + return 0;
4923 +
4924 +- if (status == -ENODEV) {
4925 ++ if (status == -ENODEV || status == -ENOENT) {
4926 + /* Device has disappeared. */
4927 + clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
4928 + break;
4929 +@@ -321,7 +321,7 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
4930 +
4931 + status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
4932 + if (status) {
4933 +- if (status == -ENODEV)
4934 ++ if (status == -ENODEV || status == -ENOENT)
4935 + clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
4936 + set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
4937 + rt2x00lib_dmadone(entry);
4938 +@@ -410,7 +410,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
4939 +
4940 + status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
4941 + if (status) {
4942 +- if (status == -ENODEV)
4943 ++ if (status == -ENODEV || status == -ENOENT)
4944 + clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
4945 + set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
4946 + rt2x00lib_dmadone(entry);
4947 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
4948 +index 7eae27f8e173..f9563ae301ad 100644
4949 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
4950 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
4951 +@@ -682,7 +682,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
4952 + struct rtl_priv *rtlpriv = rtl_priv(hw);
4953 + struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
4954 + struct sk_buff *skb = NULL;
4955 +-
4956 ++ bool rtstatus;
4957 + u32 totalpacketlen;
4958 + u8 u1rsvdpageloc[5] = { 0 };
4959 + bool b_dlok = false;
4960 +@@ -768,7 +768,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
4961 + skb = dev_alloc_skb(totalpacketlen);
4962 + skb_put_data(skb, &reserved_page_packet, totalpacketlen);
4963 +
4964 +- b_dlok = true;
4965 ++ rtstatus = rtl_cmd_send_packet(hw, skb);
4966 ++ if (rtstatus)
4967 ++ b_dlok = true;
4968 +
4969 + if (b_dlok) {
4970 + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
4971 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
4972 +index 1d431d4bf6d2..9ac1511de7ba 100644
4973 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
4974 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
4975 +@@ -1372,6 +1372,7 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
4976 +
4977 + ppsc->wakeup_reason = 0;
4978 +
4979 ++ do_gettimeofday(&ts);
4980 + rtlhal->last_suspend_sec = ts.tv_sec;
4981 +
4982 + switch (fw_reason) {
4983 +diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
4984 +index e0f0e3ce1a32..98466d762c8f 100644
4985 +--- a/drivers/nvdimm/dimm.c
4986 ++++ b/drivers/nvdimm/dimm.c
4987 +@@ -68,6 +68,7 @@ static int nvdimm_probe(struct device *dev)
4988 + rc = nd_label_reserve_dpa(ndd);
4989 + if (ndd->ns_current >= 0)
4990 + nvdimm_set_aliasing(dev);
4991 ++ nvdimm_clear_locked(dev);
4992 + nvdimm_bus_unlock(dev);
4993 +
4994 + if (rc)
4995 +diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
4996 +index f0d1b7e5de01..5f1385b96b13 100644
4997 +--- a/drivers/nvdimm/dimm_devs.c
4998 ++++ b/drivers/nvdimm/dimm_devs.c
4999 +@@ -200,6 +200,13 @@ void nvdimm_set_locked(struct device *dev)
5000 + set_bit(NDD_LOCKED, &nvdimm->flags);
5001 + }
5002 +
5003 ++void nvdimm_clear_locked(struct device *dev)
5004 ++{
5005 ++ struct nvdimm *nvdimm = to_nvdimm(dev);
5006 ++
5007 ++ clear_bit(NDD_LOCKED, &nvdimm->flags);
5008 ++}
5009 ++
5010 + static void nvdimm_release(struct device *dev)
5011 + {
5012 + struct nvdimm *nvdimm = to_nvdimm(dev);
5013 +diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
5014 +index 9c5f108910e3..de66c02f6140 100644
5015 +--- a/drivers/nvdimm/label.c
5016 ++++ b/drivers/nvdimm/label.c
5017 +@@ -1050,7 +1050,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
5018 + nsindex = to_namespace_index(ndd, 0);
5019 + memset(nsindex, 0, ndd->nsarea.config_size);
5020 + for (i = 0; i < 2; i++) {
5021 +- int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT);
5022 ++ int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
5023 +
5024 + if (rc)
5025 + return rc;
5026 +diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
5027 +index 3e4d1e7998da..0af988739a06 100644
5028 +--- a/drivers/nvdimm/namespace_devs.c
5029 ++++ b/drivers/nvdimm/namespace_devs.c
5030 +@@ -1620,7 +1620,7 @@ static umode_t namespace_visible(struct kobject *kobj,
5031 + if (a == &dev_attr_resource.attr) {
5032 + if (is_namespace_blk(dev))
5033 + return 0;
5034 +- return a->mode;
5035 ++ return 0400;
5036 + }
5037 +
5038 + if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
5039 +diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
5040 +index 9c758a91372b..156be00e1f76 100644
5041 +--- a/drivers/nvdimm/nd.h
5042 ++++ b/drivers/nvdimm/nd.h
5043 +@@ -254,6 +254,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
5044 + unsigned int len);
5045 + void nvdimm_set_aliasing(struct device *dev);
5046 + void nvdimm_set_locked(struct device *dev);
5047 ++void nvdimm_clear_locked(struct device *dev);
5048 + struct nd_btt *to_nd_btt(struct device *dev);
5049 +
5050 + struct nd_gen_sb {
5051 +diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
5052 +index 9576c444f0ab..65cc171c721d 100644
5053 +--- a/drivers/nvdimm/pfn_devs.c
5054 ++++ b/drivers/nvdimm/pfn_devs.c
5055 +@@ -282,8 +282,16 @@ static struct attribute *nd_pfn_attributes[] = {
5056 + NULL,
5057 + };
5058 +
5059 ++static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n)
5060 ++{
5061 ++ if (a == &dev_attr_resource.attr)
5062 ++ return 0400;
5063 ++ return a->mode;
5064 ++}
5065 ++
5066 + struct attribute_group nd_pfn_attribute_group = {
5067 + .attrs = nd_pfn_attributes,
5068 ++ .is_visible = pfn_visible,
5069 + };
5070 +
5071 + static const struct attribute_group *nd_pfn_attribute_groups[] = {
5072 +diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
5073 +index 829d760f651c..abaf38c61220 100644
5074 +--- a/drivers/nvdimm/region_devs.c
5075 ++++ b/drivers/nvdimm/region_devs.c
5076 +@@ -562,8 +562,12 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
5077 + if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
5078 + return 0;
5079 +
5080 +- if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr)
5081 +- return 0;
5082 ++ if (a == &dev_attr_resource.attr) {
5083 ++ if (is_nd_pmem(dev))
5084 ++ return 0400;
5085 ++ else
5086 ++ return 0;
5087 ++ }
5088 +
5089 + if (a == &dev_attr_deep_flush.attr) {
5090 + int has_flush = nvdimm_has_flush(nd_region);
5091 +diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
5092 +index 0fe3ea164ee5..04dac6a42c9f 100644
5093 +--- a/drivers/pci/host/pci-hyperv.c
5094 ++++ b/drivers/pci/host/pci-hyperv.c
5095 +@@ -879,7 +879,7 @@ static void hv_irq_unmask(struct irq_data *data)
5096 + int cpu;
5097 + u64 res;
5098 +
5099 +- dest = irq_data_get_affinity_mask(data);
5100 ++ dest = irq_data_get_effective_affinity_mask(data);
5101 + pdev = msi_desc_to_pci_dev(msi_desc);
5102 + pbus = pdev->bus;
5103 + hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
5104 +@@ -1042,6 +1042,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
5105 + struct hv_pci_dev *hpdev;
5106 + struct pci_bus *pbus;
5107 + struct pci_dev *pdev;
5108 ++ struct cpumask *dest;
5109 + struct compose_comp_ctxt comp;
5110 + struct tran_int_desc *int_desc;
5111 + struct {
5112 +@@ -1056,6 +1057,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
5113 + int ret;
5114 +
5115 + pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
5116 ++ dest = irq_data_get_effective_affinity_mask(data);
5117 + pbus = pdev->bus;
5118 + hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
5119 + hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
5120 +@@ -1081,14 +1083,14 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
5121 + switch (pci_protocol_version) {
5122 + case PCI_PROTOCOL_VERSION_1_1:
5123 + size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
5124 +- irq_data_get_affinity_mask(data),
5125 ++ dest,
5126 + hpdev->desc.win_slot.slot,
5127 + cfg->vector);
5128 + break;
5129 +
5130 + case PCI_PROTOCOL_VERSION_1_2:
5131 + size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
5132 +- irq_data_get_affinity_mask(data),
5133 ++ dest,
5134 + hpdev->desc.win_slot.slot,
5135 + cfg->vector);
5136 + break;
5137 +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
5138 +index 83e4a892b14b..cae54f8320be 100644
5139 +--- a/drivers/pci/pcie/aspm.c
5140 ++++ b/drivers/pci/pcie/aspm.c
5141 +@@ -453,7 +453,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
5142 +
5143 + /* Choose the greater of the two T_cmn_mode_rstr_time */
5144 + val1 = (upreg->l1ss_cap >> 8) & 0xFF;
5145 +- val2 = (upreg->l1ss_cap >> 8) & 0xFF;
5146 ++ val2 = (dwreg->l1ss_cap >> 8) & 0xFF;
5147 + if (val1 > val2)
5148 + link->l1ss.ctl1 |= val1 << 8;
5149 + else
5150 +@@ -658,7 +658,7 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
5151 + 0xFF00, link->l1ss.ctl1);
5152 +
5153 + /* Program LTR L1.2 threshold in both ports */
5154 +- pci_clear_and_set_dword(parent, dw_cap_ptr + PCI_L1SS_CTL1,
5155 ++ pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
5156 + 0xE3FF0000, link->l1ss.ctl1);
5157 + pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
5158 + 0xE3FF0000, link->l1ss.ctl1);
5159 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
5160 +index 911b3b65c8b2..f66f9375177c 100644
5161 +--- a/drivers/pci/quirks.c
5162 ++++ b/drivers/pci/quirks.c
5163 +@@ -4212,17 +4212,32 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
5164 + #endif
5165 + }
5166 +
5167 ++static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
5168 ++{
5169 ++ /*
5170 ++ * Effectively selects all downstream ports for whole ThunderX 1
5171 ++ * family by 0xf800 mask (which represents 8 SoCs), while the lower
5172 ++ * bits of device ID are used to indicate which subdevice is used
5173 ++ * within the SoC.
5174 ++ */
5175 ++ return (pci_is_pcie(dev) &&
5176 ++ (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
5177 ++ ((dev->device & 0xf800) == 0xa000));
5178 ++}
5179 ++
5180 + static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
5181 + {
5182 + /*
5183 +- * Cavium devices matching this quirk do not perform peer-to-peer
5184 +- * with other functions, allowing masking out these bits as if they
5185 +- * were unimplemented in the ACS capability.
5186 ++ * Cavium root ports don't advertise an ACS capability. However,
5187 ++ * the RTL internally implements similar protection as if ACS had
5188 ++ * Request Redirection, Completion Redirection, Source Validation,
5189 ++ * and Upstream Forwarding features enabled. Assert that the
5190 ++ * hardware implements and enables equivalent ACS functionality for
5191 ++ * these flags.
5192 + */
5193 +- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
5194 +- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
5195 ++ acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
5196 +
5197 +- if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff)))
5198 ++ if (!pci_quirk_cavium_acs_match(dev))
5199 + return -ENOTTY;
5200 +
5201 + return acs_flags ? 0 : 1;
5202 +diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
5203 +index c17677f494af..dc6519b2c53a 100644
5204 +--- a/drivers/scsi/lpfc/lpfc_attr.c
5205 ++++ b/drivers/scsi/lpfc/lpfc_attr.c
5206 +@@ -3134,7 +3134,8 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
5207 + struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
5208 + struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
5209 +
5210 +- return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max);
5211 ++ return snprintf(buf, PAGE_SIZE, "%d\n",
5212 ++ pring ? pring->txq_max : 0);
5213 + }
5214 +
5215 + static DEVICE_ATTR(txq_hw, S_IRUGO,
5216 +@@ -3147,7 +3148,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
5217 + struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
5218 + struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
5219 +
5220 +- return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max);
5221 ++ return snprintf(buf, PAGE_SIZE, "%d\n",
5222 ++ pring ? pring->txcmplq_max : 0);
5223 + }
5224 +
5225 + static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
5226 +diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
5227 +index fe9e1c079c20..d89816222b23 100644
5228 +--- a/drivers/scsi/lpfc/lpfc_bsg.c
5229 ++++ b/drivers/scsi/lpfc/lpfc_bsg.c
5230 +@@ -2911,7 +2911,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
5231 + }
5232 + }
5233 +
5234 +- if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
5235 ++ if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
5236 + ret_val = -ENOMEM;
5237 + goto err_post_rxbufs_exit;
5238 + }
5239 +@@ -5421,6 +5421,8 @@ lpfc_bsg_timeout(struct bsg_job *job)
5240 + struct lpfc_iocbq *check_iocb, *next_iocb;
5241 +
5242 + pring = lpfc_phba_elsring(phba);
5243 ++ if (unlikely(!pring))
5244 ++ return -EIO;
5245 +
5246 + /* if job's driver data is NULL, the command completed or is in the
5247 + * the process of completing. In this case, return status to request
5248 +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
5249 +index 468a66371de9..3ebf6ccba6e6 100644
5250 +--- a/drivers/scsi/lpfc/lpfc_els.c
5251 ++++ b/drivers/scsi/lpfc/lpfc_els.c
5252 +@@ -7430,6 +7430,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
5253 + timeout = (uint32_t)(phba->fc_ratov << 1);
5254 +
5255 + pring = lpfc_phba_elsring(phba);
5256 ++ if (unlikely(!pring))
5257 ++ return;
5258 +
5259 + if ((phba->pport->load_flag & FC_UNLOADING))
5260 + return;
5261 +@@ -9310,6 +9312,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
5262 +
5263 + pring = lpfc_phba_elsring(phba);
5264 +
5265 ++ if (unlikely(!pring))
5266 ++ return;
5267 ++
5268 + spin_lock_irq(&phba->hbalock);
5269 + list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
5270 + list) {
5271 +@@ -9416,7 +9421,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
5272 + rxid, 1);
5273 +
5274 + /* Check if TXQ queue needs to be serviced */
5275 +- if (!(list_empty(&pring->txq)))
5276 ++ if (pring && !list_empty(&pring->txq))
5277 + lpfc_worker_wake_up(phba);
5278 + return;
5279 + }
5280 +diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
5281 +index 20808349a80e..499df9d17339 100644
5282 +--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
5283 ++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
5284 +@@ -3324,7 +3324,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5285 +
5286 + /* Unblock ELS traffic */
5287 + pring = lpfc_phba_elsring(phba);
5288 +- pring->flag &= ~LPFC_STOP_IOCB_EVENT;
5289 ++ if (pring)
5290 ++ pring->flag &= ~LPFC_STOP_IOCB_EVENT;
5291 +
5292 + /* Check for error */
5293 + if (mb->mbxStatus) {
5294 +@@ -5430,6 +5431,8 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5295 +
5296 + psli = &phba->sli;
5297 + pring = lpfc_phba_elsring(phba);
5298 ++ if (unlikely(!pring))
5299 ++ return;
5300 +
5301 + /* Error matching iocb on txq or txcmplq
5302 + * First check the txq.
5303 +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
5304 +index 100bc4c8798d..6acf1bb1d320 100644
5305 +--- a/drivers/scsi/lpfc/lpfc_init.c
5306 ++++ b/drivers/scsi/lpfc/lpfc_init.c
5307 +@@ -11404,6 +11404,13 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
5308 + /* Remove FC host and then SCSI host with the physical port */
5309 + fc_remove_host(shost);
5310 + scsi_remove_host(shost);
5311 ++ /*
5312 ++ * Bring down the SLI Layer. This step disables all interrupts,
5313 ++ * clears the rings, discards all mailbox commands, and resets
5314 ++ * the HBA FCoE function.
5315 ++ */
5316 ++ lpfc_debugfs_terminate(vport);
5317 ++ lpfc_sli4_hba_unset(phba);
5318 +
5319 + /* Perform ndlp cleanup on the physical port. The nvme and nvmet
5320 + * localports are destroyed after to cleanup all transport memory.
5321 +@@ -11412,14 +11419,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
5322 + lpfc_nvmet_destroy_targetport(phba);
5323 + lpfc_nvme_destroy_localport(vport);
5324 +
5325 +- /*
5326 +- * Bring down the SLI Layer. This step disables all interrupts,
5327 +- * clears the rings, discards all mailbox commands, and resets
5328 +- * the HBA FCoE function.
5329 +- */
5330 +- lpfc_debugfs_terminate(vport);
5331 +- lpfc_sli4_hba_unset(phba);
5332 +
5333 ++ lpfc_stop_hba_timers(phba);
5334 + spin_lock_irq(&phba->hbalock);
5335 + list_del_init(&vport->listentry);
5336 + spin_unlock_irq(&phba->hbalock);
5337 +diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
5338 +index f3ad7cac355d..b6957d944b9a 100644
5339 +--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
5340 ++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
5341 +@@ -216,7 +216,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5342 + pring = lpfc_phba_elsring(phba);
5343 +
5344 + /* In case of error recovery path, we might have a NULL pring here */
5345 +- if (!pring)
5346 ++ if (unlikely(!pring))
5347 + return;
5348 +
5349 + /* Abort outstanding I/O on NPort <nlp_DID> */
5350 +diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
5351 +index 0b7c1a49e203..3c5b054a56ac 100644
5352 +--- a/drivers/scsi/lpfc/lpfc_nvmet.c
5353 ++++ b/drivers/scsi/lpfc/lpfc_nvmet.c
5354 +@@ -1138,9 +1138,14 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
5355 + #endif
5356 + if (error) {
5357 + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
5358 +- "6025 Cannot register NVME targetport "
5359 +- "x%x\n", error);
5360 ++ "6025 Cannot register NVME targetport x%x: "
5361 ++ "portnm %llx nodenm %llx segs %d qs %d\n",
5362 ++ error,
5363 ++ pinfo.port_name, pinfo.node_name,
5364 ++ lpfc_tgttemplate.max_sgl_segments,
5365 ++ lpfc_tgttemplate.max_hw_queues);
5366 + phba->targetport = NULL;
5367 ++ phba->nvmet_support = 0;
5368 +
5369 + lpfc_nvmet_cleanup_io_context(phba);
5370 +
5371 +@@ -1152,9 +1157,11 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
5372 + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
5373 + "6026 Registered NVME "
5374 + "targetport: %p, private %p "
5375 +- "portnm %llx nodenm %llx\n",
5376 ++ "portnm %llx nodenm %llx segs %d qs %d\n",
5377 + phba->targetport, tgtp,
5378 +- pinfo.port_name, pinfo.node_name);
5379 ++ pinfo.port_name, pinfo.node_name,
5380 ++ lpfc_tgttemplate.max_sgl_segments,
5381 ++ lpfc_tgttemplate.max_hw_queues);
5382 +
5383 + atomic_set(&tgtp->rcv_ls_req_in, 0);
5384 + atomic_set(&tgtp->rcv_ls_req_out, 0);
5385 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
5386 +index 8b119f87b51d..455f3ce9fda9 100644
5387 +--- a/drivers/scsi/lpfc/lpfc_sli.c
5388 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
5389 +@@ -9396,10 +9396,13 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
5390 + * for abort iocb hba_wqidx should already
5391 + * be setup based on what work queue we used.
5392 + */
5393 +- if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
5394 ++ if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
5395 + piocb->hba_wqidx =
5396 + lpfc_sli4_scmd_to_wqidx_distr(phba,
5397 + piocb->context1);
5398 ++ piocb->hba_wqidx = piocb->hba_wqidx %
5399 ++ phba->cfg_fcp_io_channel;
5400 ++ }
5401 + return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
5402 + } else {
5403 + if (unlikely(!phba->sli4_hba.oas_wq))
5404 +@@ -10632,6 +10635,14 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5405 + (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
5406 + return 0;
5407 +
5408 ++ if (!pring) {
5409 ++ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
5410 ++ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
5411 ++ else
5412 ++ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
5413 ++ goto abort_iotag_exit;
5414 ++ }
5415 ++
5416 + /*
5417 + * If we're unloading, don't abort iocb on the ELS ring, but change
5418 + * the callback so that nothing happens when it finishes.
5419 +@@ -12500,6 +12511,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
5420 + unsigned long iflags;
5421 +
5422 + pring = lpfc_phba_elsring(phba);
5423 ++ if (unlikely(!pring))
5424 ++ return NULL;
5425 +
5426 + wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
5427 + spin_lock_irqsave(&pring->ring_lock, iflags);
5428 +@@ -12507,19 +12520,21 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
5429 + /* Look up the ELS command IOCB and create pseudo response IOCB */
5430 + cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
5431 + bf_get(lpfc_wcqe_c_request_tag, wcqe));
5432 +- /* Put the iocb back on the txcmplq */
5433 +- lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
5434 +- spin_unlock_irqrestore(&pring->ring_lock, iflags);
5435 +-
5436 + if (unlikely(!cmdiocbq)) {
5437 ++ spin_unlock_irqrestore(&pring->ring_lock, iflags);
5438 + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5439 + "0386 ELS complete with no corresponding "
5440 +- "cmdiocb: iotag (%d)\n",
5441 +- bf_get(lpfc_wcqe_c_request_tag, wcqe));
5442 ++ "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
5443 ++ wcqe->word0, wcqe->total_data_placed,
5444 ++ wcqe->parameter, wcqe->word3);
5445 + lpfc_sli_release_iocbq(phba, irspiocbq);
5446 + return NULL;
5447 + }
5448 +
5449 ++ /* Put the iocb back on the txcmplq */
5450 ++ lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
5451 ++ spin_unlock_irqrestore(&pring->ring_lock, iflags);
5452 ++
5453 + /* Fake the irspiocbq and copy necessary response information */
5454 + lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
5455 +
5456 +@@ -17137,7 +17152,8 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
5457 + if (pcmd && pcmd->virt)
5458 + dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
5459 + kfree(pcmd);
5460 +- lpfc_sli_release_iocbq(phba, iocbq);
5461 ++ if (iocbq)
5462 ++ lpfc_sli_release_iocbq(phba, iocbq);
5463 + lpfc_in_buf_free(phba, &dmabuf->dbuf);
5464 + }
5465 +
5466 +@@ -18691,6 +18707,8 @@ lpfc_drain_txq(struct lpfc_hba *phba)
5467 + uint32_t txq_cnt = 0;
5468 +
5469 + pring = lpfc_phba_elsring(phba);
5470 ++ if (unlikely(!pring))
5471 ++ return 0;
5472 +
5473 + spin_lock_irqsave(&pring->ring_lock, iflags);
5474 + list_for_each_entry(piocbq, &pring->txq, list) {
5475 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
5476 +index dce42a416876..6eaaa326e508 100644
5477 +--- a/drivers/scsi/qla2xxx/qla_os.c
5478 ++++ b/drivers/scsi/qla2xxx/qla_os.c
5479 +@@ -388,7 +388,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
5480 + INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list);
5481 + ha->base_qpair->enable_class_2 = ql2xenableclass2;
5482 + /* init qpair to this cpu. Will adjust at run time. */
5483 +- qla_cpu_update(rsp->qpair, smp_processor_id());
5484 ++ qla_cpu_update(rsp->qpair, raw_smp_processor_id());
5485 + ha->base_qpair->pdev = ha->pdev;
5486 +
5487 + if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
5488 +diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
5489 +index 8aa54779aac1..2eb61d54bbb4 100644
5490 +--- a/drivers/scsi/sd_zbc.c
5491 ++++ b/drivers/scsi/sd_zbc.c
5492 +@@ -375,15 +375,15 @@ static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
5493 + if (sdkp->device->type != TYPE_ZBC) {
5494 + /* Host-aware */
5495 + sdkp->urswrz = 1;
5496 +- sdkp->zones_optimal_open = get_unaligned_be64(&buf[8]);
5497 +- sdkp->zones_optimal_nonseq = get_unaligned_be64(&buf[12]);
5498 ++ sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
5499 ++ sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
5500 + sdkp->zones_max_open = 0;
5501 + } else {
5502 + /* Host-managed */
5503 + sdkp->urswrz = buf[4] & 1;
5504 + sdkp->zones_optimal_open = 0;
5505 + sdkp->zones_optimal_nonseq = 0;
5506 +- sdkp->zones_max_open = get_unaligned_be64(&buf[16]);
5507 ++ sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
5508 + }
5509 +
5510 + return 0;
5511 +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
5512 +index 5001261f5d69..d9ba4ee2c62b 100644
5513 +--- a/drivers/target/iscsi/iscsi_target.c
5514 ++++ b/drivers/target/iscsi/iscsi_target.c
5515 +@@ -1960,7 +1960,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
5516 + struct iscsi_tmr_req *tmr_req;
5517 + struct iscsi_tm *hdr;
5518 + int out_of_order_cmdsn = 0, ret;
5519 +- bool sess_ref = false;
5520 + u8 function, tcm_function = TMR_UNKNOWN;
5521 +
5522 + hdr = (struct iscsi_tm *) buf;
5523 +@@ -1993,22 +1992,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
5524 +
5525 + cmd->data_direction = DMA_NONE;
5526 + cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
5527 +- if (!cmd->tmr_req)
5528 ++ if (!cmd->tmr_req) {
5529 + return iscsit_add_reject_cmd(cmd,
5530 + ISCSI_REASON_BOOKMARK_NO_RESOURCES,
5531 + buf);
5532 ++ }
5533 ++
5534 ++ transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
5535 ++ conn->sess->se_sess, 0, DMA_NONE,
5536 ++ TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
5537 ++
5538 ++ target_get_sess_cmd(&cmd->se_cmd, true);
5539 +
5540 + /*
5541 + * TASK_REASSIGN for ERL=2 / connection stays inside of
5542 + * LIO-Target $FABRIC_MOD
5543 + */
5544 + if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
5545 +- transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
5546 +- conn->sess->se_sess, 0, DMA_NONE,
5547 +- TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
5548 +-
5549 +- target_get_sess_cmd(&cmd->se_cmd, true);
5550 +- sess_ref = true;
5551 + tcm_function = iscsit_convert_tmf(function);
5552 + if (tcm_function == TMR_UNKNOWN) {
5553 + pr_err("Unknown iSCSI TMR Function:"
5554 +@@ -2099,12 +2099,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
5555 +
5556 + if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
5557 + int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
5558 +- if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
5559 ++ if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
5560 + out_of_order_cmdsn = 1;
5561 +- else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
5562 ++ } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
5563 ++ target_put_sess_cmd(&cmd->se_cmd);
5564 + return 0;
5565 +- else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
5566 ++ } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
5567 + return -1;
5568 ++ }
5569 + }
5570 + iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
5571 +
5572 +@@ -2124,12 +2126,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
5573 + * For connection recovery, this is also the default action for
5574 + * TMR TASK_REASSIGN.
5575 + */
5576 +- if (sess_ref) {
5577 +- pr_debug("Handle TMR, using sess_ref=true check\n");
5578 +- target_put_sess_cmd(&cmd->se_cmd);
5579 +- }
5580 +-
5581 + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
5582 ++ target_put_sess_cmd(&cmd->se_cmd);
5583 + return 0;
5584 + }
5585 + EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
5586 +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
5587 +index dd2cd8048582..9f25c9c6f67d 100644
5588 +--- a/drivers/target/target_core_pr.c
5589 ++++ b/drivers/target/target_core_pr.c
5590 +@@ -4011,6 +4011,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
5591 + * Set the ADDITIONAL DESCRIPTOR LENGTH
5592 + */
5593 + put_unaligned_be32(desc_len, &buf[off]);
5594 ++ off += 4;
5595 + /*
5596 + * Size of full desctipor header minus TransportID
5597 + * containing $FABRIC_MOD specific) initiator device/port
5598 +diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
5599 +index e22847bd79b9..9c7bc1ca341a 100644
5600 +--- a/drivers/target/target_core_tmr.c
5601 ++++ b/drivers/target/target_core_tmr.c
5602 +@@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
5603 + spin_unlock(&se_cmd->t_state_lock);
5604 + return false;
5605 + }
5606 ++ if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
5607 ++ if (se_cmd->scsi_status) {
5608 ++ pr_debug("Attempted to abort io tag: %llu early failure"
5609 ++ " status: 0x%02x\n", se_cmd->tag,
5610 ++ se_cmd->scsi_status);
5611 ++ spin_unlock(&se_cmd->t_state_lock);
5612 ++ return false;
5613 ++ }
5614 ++ }
5615 + if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
5616 + pr_debug("Attempted to abort io tag: %llu already shutdown,"
5617 + " skipping\n", se_cmd->tag);
5618 +@@ -217,7 +226,8 @@ static void core_tmr_drain_tmr_list(
5619 + * LUN_RESET tmr..
5620 + */
5621 + spin_lock_irqsave(&dev->se_tmr_lock, flags);
5622 +- list_del_init(&tmr->tmr_list);
5623 ++ if (tmr)
5624 ++ list_del_init(&tmr->tmr_list);
5625 + list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
5626 + cmd = tmr_p->task_cmd;
5627 + if (!cmd) {
5628 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
5629 +index 836d552b0385..e6d51135d105 100644
5630 +--- a/drivers/target/target_core_transport.c
5631 ++++ b/drivers/target/target_core_transport.c
5632 +@@ -1730,9 +1730,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
5633 + {
5634 + int ret = 0, post_ret = 0;
5635 +
5636 +- if (transport_check_aborted_status(cmd, 1))
5637 +- return;
5638 +-
5639 + pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
5640 + sense_reason);
5641 + target_show_cmd("-----[ ", cmd);
5642 +@@ -1741,6 +1738,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
5643 + * For SAM Task Attribute emulation for failed struct se_cmd
5644 + */
5645 + transport_complete_task_attr(cmd);
5646 ++
5647 + /*
5648 + * Handle special case for COMPARE_AND_WRITE failure, where the
5649 + * callback is expected to drop the per device ->caw_sem.
5650 +@@ -1749,6 +1747,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
5651 + cmd->transport_complete_callback)
5652 + cmd->transport_complete_callback(cmd, false, &post_ret);
5653 +
5654 ++ if (transport_check_aborted_status(cmd, 1))
5655 ++ return;
5656 ++
5657 + switch (sense_reason) {
5658 + case TCM_NON_EXISTENT_LUN:
5659 + case TCM_UNSUPPORTED_SCSI_OPCODE:
5660 +@@ -1973,6 +1974,7 @@ void target_execute_cmd(struct se_cmd *cmd)
5661 + }
5662 +
5663 + cmd->t_state = TRANSPORT_PROCESSING;
5664 ++ cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
5665 + cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
5666 + spin_unlock_irq(&cmd->t_state_lock);
5667 +
5668 +@@ -2010,6 +2012,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
5669 + list_del(&cmd->se_delayed_node);
5670 + spin_unlock(&dev->delayed_cmd_lock);
5671 +
5672 ++ cmd->transport_state |= CMD_T_SENT;
5673 ++
5674 + __target_execute_cmd(cmd, true);
5675 +
5676 + if (cmd->sam_task_attr == TCM_ORDERED_TAG)
5677 +@@ -2045,6 +2049,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
5678 + pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
5679 + dev->dev_cur_ordered_id);
5680 + }
5681 ++ cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
5682 ++
5683 + restart:
5684 + target_restart_delayed_cmds(dev);
5685 + }
5686 +@@ -2570,7 +2576,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
5687 +
5688 + static void transport_write_pending_qf(struct se_cmd *cmd)
5689 + {
5690 ++ unsigned long flags;
5691 + int ret;
5692 ++ bool stop;
5693 ++
5694 ++ spin_lock_irqsave(&cmd->t_state_lock, flags);
5695 ++ stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
5696 ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5697 ++
5698 ++ if (stop) {
5699 ++ pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
5700 ++ __func__, __LINE__, cmd->tag);
5701 ++ complete_all(&cmd->t_transport_stop_comp);
5702 ++ return;
5703 ++ }
5704 +
5705 + ret = cmd->se_tfo->write_pending(cmd);
5706 + if (ret) {
5707 +@@ -2664,6 +2683,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
5708 + ret = -ESHUTDOWN;
5709 + goto out;
5710 + }
5711 ++ se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
5712 + list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
5713 + out:
5714 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
5715 +diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
5716 +index c68fb3a8ea1c..97db76afced2 100644
5717 +--- a/drivers/tty/serdev/core.c
5718 ++++ b/drivers/tty/serdev/core.c
5719 +@@ -65,21 +65,32 @@ static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env)
5720 + */
5721 + int serdev_device_add(struct serdev_device *serdev)
5722 + {
5723 ++ struct serdev_controller *ctrl = serdev->ctrl;
5724 + struct device *parent = serdev->dev.parent;
5725 + int err;
5726 +
5727 + dev_set_name(&serdev->dev, "%s-%d", dev_name(parent), serdev->nr);
5728 +
5729 ++ /* Only a single slave device is currently supported. */
5730 ++ if (ctrl->serdev) {
5731 ++ dev_err(&serdev->dev, "controller busy\n");
5732 ++ return -EBUSY;
5733 ++ }
5734 ++ ctrl->serdev = serdev;
5735 ++
5736 + err = device_add(&serdev->dev);
5737 + if (err < 0) {
5738 + dev_err(&serdev->dev, "Can't add %s, status %d\n",
5739 + dev_name(&serdev->dev), err);
5740 +- goto err_device_add;
5741 ++ goto err_clear_serdev;
5742 + }
5743 +
5744 + dev_dbg(&serdev->dev, "device %s registered\n", dev_name(&serdev->dev));
5745 +
5746 +-err_device_add:
5747 ++ return 0;
5748 ++
5749 ++err_clear_serdev:
5750 ++ ctrl->serdev = NULL;
5751 + return err;
5752 + }
5753 + EXPORT_SYMBOL_GPL(serdev_device_add);
5754 +@@ -90,7 +101,10 @@ EXPORT_SYMBOL_GPL(serdev_device_add);
5755 + */
5756 + void serdev_device_remove(struct serdev_device *serdev)
5757 + {
5758 ++ struct serdev_controller *ctrl = serdev->ctrl;
5759 ++
5760 + device_unregister(&serdev->dev);
5761 ++ ctrl->serdev = NULL;
5762 + }
5763 + EXPORT_SYMBOL_GPL(serdev_device_remove);
5764 +
5765 +@@ -295,7 +309,6 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *ctrl)
5766 + return NULL;
5767 +
5768 + serdev->ctrl = ctrl;
5769 +- ctrl->serdev = serdev;
5770 + device_initialize(&serdev->dev);
5771 + serdev->dev.parent = &ctrl->dev;
5772 + serdev->dev.bus = &serdev_bus_type;
5773 +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
5774 +index 046f6d280af5..e47c5bc3ddca 100644
5775 +--- a/drivers/vhost/scsi.c
5776 ++++ b/drivers/vhost/scsi.c
5777 +@@ -688,6 +688,7 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
5778 + struct scatterlist *sg, int sg_count)
5779 + {
5780 + size_t off = iter->iov_offset;
5781 ++ struct scatterlist *p = sg;
5782 + int i, ret;
5783 +
5784 + for (i = 0; i < iter->nr_segs; i++) {
5785 +@@ -696,8 +697,8 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
5786 +
5787 + ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
5788 + if (ret < 0) {
5789 +- for (i = 0; i < sg_count; i++) {
5790 +- struct page *page = sg_page(&sg[i]);
5791 ++ while (p < sg) {
5792 ++ struct page *page = sg_page(p++);
5793 + if (page)
5794 + put_page(page);
5795 + }
5796 +diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
5797 +index 2a5de610dd8f..bdabb2765d1b 100644
5798 +--- a/fs/9p/vfs_inode.c
5799 ++++ b/fs/9p/vfs_inode.c
5800 +@@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data)
5801 +
5802 + if (v9inode->qid.type != st->qid.type)
5803 + return 0;
5804 ++
5805 ++ if (v9inode->qid.path != st->qid.path)
5806 ++ return 0;
5807 + return 1;
5808 + }
5809 +
5810 +diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
5811 +index 70f9887c59a9..7f6ae21a27b3 100644
5812 +--- a/fs/9p/vfs_inode_dotl.c
5813 ++++ b/fs/9p/vfs_inode_dotl.c
5814 +@@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data)
5815 +
5816 + if (v9inode->qid.type != st->qid.type)
5817 + return 0;
5818 ++
5819 ++ if (v9inode->qid.path != st->qid.path)
5820 ++ return 0;
5821 + return 1;
5822 + }
5823 +
5824 +diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
5825 +index 4ac49d038bf3..8fc41705c7cd 100644
5826 +--- a/fs/autofs4/waitq.c
5827 ++++ b/fs/autofs4/waitq.c
5828 +@@ -81,7 +81,8 @@ static int autofs4_write(struct autofs_sb_info *sbi,
5829 + spin_unlock_irqrestore(&current->sighand->siglock, flags);
5830 + }
5831 +
5832 +- return (bytes > 0);
5833 ++ /* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
5834 ++ return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
5835 + }
5836 +
5837 + static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
5838 +@@ -95,6 +96,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
5839 + } pkt;
5840 + struct file *pipe = NULL;
5841 + size_t pktsz;
5842 ++ int ret;
5843 +
5844 + pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
5845 + (unsigned long) wq->wait_queue_token,
5846 +@@ -169,7 +171,18 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
5847 + mutex_unlock(&sbi->wq_mutex);
5848 +
5849 +- if (autofs4_write(sbi, pipe, &pkt, pktsz))
5850 ++ switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
5851 ++ case 0:
5852 ++ break;
5853 ++ case -ENOMEM:
5854 ++ case -ERESTARTSYS:
5855 ++ /* Just fail this one */
5856 ++ autofs4_wait_release(sbi, wq->wait_queue_token, ret);
5857 ++ break;
5858 ++ default:
5859 + autofs4_catatonic_mode(sbi);
5860 ++ break;
5861 ++ }
5862 + fput(pipe);
5863 + }
5864 +
5865 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
5866 +index e2d7e86b51d1..08698105fa4a 100644
5867 +--- a/fs/btrfs/extent-tree.c
5868 ++++ b/fs/btrfs/extent-tree.c
5869 +@@ -4919,6 +4919,13 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
5870 + }
5871 + }
5872 +
5873 ++struct reserve_ticket {
5874 ++ u64 bytes;
5875 ++ int error;
5876 ++ struct list_head list;
5877 ++ wait_queue_head_t wait;
5878 ++};
5879 ++
5880 + /**
5881 + * maybe_commit_transaction - possibly commit the transaction if its ok to
5882 + * @root - the root we're allocating for
5883 +@@ -4930,18 +4937,29 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
5884 + * will return -ENOSPC.
5885 + */
5886 + static int may_commit_transaction(struct btrfs_fs_info *fs_info,
5887 +- struct btrfs_space_info *space_info,
5888 +- u64 bytes, int force)
5889 ++ struct btrfs_space_info *space_info)
5890 + {
5891 ++ struct reserve_ticket *ticket = NULL;
5892 + struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
5893 + struct btrfs_trans_handle *trans;
5894 ++ u64 bytes;
5895 +
5896 + trans = (struct btrfs_trans_handle *)current->journal_info;
5897 + if (trans)
5898 + return -EAGAIN;
5899 +
5900 +- if (force)
5901 +- goto commit;
5902 ++ spin_lock(&space_info->lock);
5903 ++ if (!list_empty(&space_info->priority_tickets))
5904 ++ ticket = list_first_entry(&space_info->priority_tickets,
5905 ++ struct reserve_ticket, list);
5906 ++ else if (!list_empty(&space_info->tickets))
5907 ++ ticket = list_first_entry(&space_info->tickets,
5908 ++ struct reserve_ticket, list);
5909 ++ bytes = (ticket) ? ticket->bytes : 0;
5910 ++ spin_unlock(&space_info->lock);
5911 ++
5912 ++ if (!bytes)
5913 ++ return 0;
5914 +
5915 + /* See if there is enough pinned space to make this reservation */
5916 + if (percpu_counter_compare(&space_info->total_bytes_pinned,
5917 +@@ -4956,8 +4974,12 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
5918 + return -ENOSPC;
5919 +
5920 + spin_lock(&delayed_rsv->lock);
5921 ++ if (delayed_rsv->size > bytes)
5922 ++ bytes = 0;
5923 ++ else
5924 ++ bytes -= delayed_rsv->size;
5925 + if (percpu_counter_compare(&space_info->total_bytes_pinned,
5926 +- bytes - delayed_rsv->size) < 0) {
5927 ++ bytes) < 0) {
5928 + spin_unlock(&delayed_rsv->lock);
5929 + return -ENOSPC;
5930 + }
5931 +@@ -4971,13 +4993,6 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
5932 + return btrfs_commit_transaction(trans);
5933 + }
5934 +
5935 +-struct reserve_ticket {
5936 +- u64 bytes;
5937 +- int error;
5938 +- struct list_head list;
5939 +- wait_queue_head_t wait;
5940 +-};
5941 +-
5942 + /*
5943 + * Try to flush some data based on policy set by @state. This is only advisory
5944 + * and may fail for various reasons. The caller is supposed to examine the
5945 +@@ -5027,8 +5042,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
5946 + ret = 0;
5947 + break;
5948 + case COMMIT_TRANS:
5949 +- ret = may_commit_transaction(fs_info, space_info,
5950 +- num_bytes, 0);
5951 ++ ret = may_commit_transaction(fs_info, space_info);
5952 + break;
5953 + default:
5954 + ret = -ENOSPC;
5955 +diff --git a/fs/buffer.c b/fs/buffer.c
5956 +index 170df856bdb9..b96f3b98a6ef 100644
5957 +--- a/fs/buffer.c
5958 ++++ b/fs/buffer.c
5959 +@@ -3055,8 +3055,16 @@ void guard_bio_eod(int op, struct bio *bio)
5960 + sector_t maxsector;
5961 + struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
5962 + unsigned truncated_bytes;
5963 ++ struct hd_struct *part;
5964 ++
5965 ++ rcu_read_lock();
5966 ++ part = __disk_get_part(bio->bi_disk, bio->bi_partno);
5967 ++ if (part)
5968 ++ maxsector = part_nr_sects_read(part);
5969 ++ else
5970 ++ maxsector = get_capacity(bio->bi_disk);
5971 ++ rcu_read_unlock();
5972 +
5973 +- maxsector = get_capacity(bio->bi_disk);
5974 + if (!maxsector)
5975 + return;
5976 +
5977 +diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
5978 +index c7835df7e7b8..d262a93d9b31 100644
5979 +--- a/fs/crypto/crypto.c
5980 ++++ b/fs/crypto/crypto.c
5981 +@@ -410,11 +410,8 @@ int fscrypt_initialize(unsigned int cop_flags)
5982 + {
5983 + int i, res = -ENOMEM;
5984 +
5985 +- /*
5986 +- * No need to allocate a bounce page pool if there already is one or
5987 +- * this FS won't use it.
5988 +- */
5989 +- if (cop_flags & FS_CFLG_OWN_PAGES || fscrypt_bounce_page_pool)
5990 ++ /* No need to allocate a bounce page pool if this FS won't use it. */
5991 ++ if (cop_flags & FS_CFLG_OWN_PAGES)
5992 + return 0;
5993 +
5994 + mutex_lock(&fscrypt_init_mutex);
5995 +diff --git a/fs/dax.c b/fs/dax.c
5996 +index f001d8c72a06..191306cd8b6b 100644
5997 +--- a/fs/dax.c
5998 ++++ b/fs/dax.c
5999 +@@ -1327,7 +1327,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
6000 + * this is a reliable test.
6001 + */
6002 + pgoff = linear_page_index(vma, pmd_addr);
6003 +- max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
6004 ++ max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
6005 +
6006 + trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
6007 +
6008 +@@ -1351,13 +1351,13 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
6009 + if ((pmd_addr + PMD_SIZE) > vma->vm_end)
6010 + goto fallback;
6011 +
6012 +- if (pgoff > max_pgoff) {
6013 ++ if (pgoff >= max_pgoff) {
6014 + result = VM_FAULT_SIGBUS;
6015 + goto out;
6016 + }
6017 +
6018 + /* If the PMD would extend beyond the file size */
6019 +- if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
6020 ++ if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
6021 + goto fallback;
6022 +
6023 + /*
6024 +diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
6025 +index 286f10b0363b..4f457d5c4933 100644
6026 +--- a/fs/ecryptfs/messaging.c
6027 ++++ b/fs/ecryptfs/messaging.c
6028 +@@ -442,15 +442,16 @@ void ecryptfs_release_messaging(void)
6029 + }
6030 + if (ecryptfs_daemon_hash) {
6031 + struct ecryptfs_daemon *daemon;
6032 ++ struct hlist_node *n;
6033 + int i;
6034 +
6035 + mutex_lock(&ecryptfs_daemon_hash_mux);
6036 + for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
6037 + int rc;
6038 +
6039 +- hlist_for_each_entry(daemon,
6040 +- &ecryptfs_daemon_hash[i],
6041 +- euid_chain) {
6042 ++ hlist_for_each_entry_safe(daemon, n,
6043 ++ &ecryptfs_daemon_hash[i],
6044 ++ euid_chain) {
6045 + rc = ecryptfs_exorcise_daemon(daemon);
6046 + if (rc)
6047 + printk(KERN_ERR "%s: Error whilst "
6048 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
6049 +index 97f0fd06728d..07bca11749d4 100644
6050 +--- a/fs/ext4/extents.c
6051 ++++ b/fs/ext4/extents.c
6052 +@@ -4794,7 +4794,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
6053 + }
6054 +
6055 + if (!(mode & FALLOC_FL_KEEP_SIZE) &&
6056 +- offset + len > i_size_read(inode)) {
6057 ++ (offset + len > i_size_read(inode) ||
6058 ++ offset + len > EXT4_I(inode)->i_disksize)) {
6059 + new_size = offset + len;
6060 + ret = inode_newsize_ok(inode, new_size);
6061 + if (ret)
6062 +@@ -4965,7 +4966,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
6063 + }
6064 +
6065 + if (!(mode & FALLOC_FL_KEEP_SIZE) &&
6066 +- offset + len > i_size_read(inode)) {
6067 ++ (offset + len > i_size_read(inode) ||
6068 ++ offset + len > EXT4_I(inode)->i_disksize)) {
6069 + new_size = offset + len;
6070 + ret = inode_newsize_ok(inode, new_size);
6071 + if (ret)
6072 +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
6073 +index 28c5c3abddb3..fd9501977f1c 100644
6074 +--- a/fs/ext4/inline.c
6075 ++++ b/fs/ext4/inline.c
6076 +@@ -302,11 +302,6 @@ static int ext4_create_inline_data(handle_t *handle,
6077 + EXT4_I(inode)->i_inline_size = len + EXT4_MIN_INLINE_DATA_SIZE;
6078 + ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
6079 + ext4_set_inode_flag(inode, EXT4_INODE_INLINE_DATA);
6080 +- /*
6081 +- * Propagate changes to inode->i_flags as well - e.g. S_DAX may
6082 +- * get cleared
6083 +- */
6084 +- ext4_set_inode_flags(inode);
6085 + get_bh(is.iloc.bh);
6086 + error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
6087 +
6088 +@@ -451,11 +446,6 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
6089 + }
6090 + }
6091 + ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);
6092 +- /*
6093 +- * Propagate changes to inode->i_flags as well - e.g. S_DAX may
6094 +- * get set.
6095 +- */
6096 +- ext4_set_inode_flags(inode);
6097 +
6098 + get_bh(is.iloc.bh);
6099 + error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
6100 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
6101 +index 90afeb7293a6..38eb621edd80 100644
6102 +--- a/fs/ext4/inode.c
6103 ++++ b/fs/ext4/inode.c
6104 +@@ -5967,11 +5967,6 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
6105 + ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6106 + }
6107 + ext4_set_aops(inode);
6108 +- /*
6109 +- * Update inode->i_flags after EXT4_INODE_JOURNAL_DATA was updated.
6110 +- * E.g. S_DAX may get cleared / set.
6111 +- */
6112 +- ext4_set_inode_flags(inode);
6113 +
6114 + jbd2_journal_unlock_updates(journal);
6115 + percpu_up_write(&sbi->s_journal_flag_rwsem);
6116 +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
6117 +index 75d83471f65c..d97f40396765 100644
6118 +--- a/fs/ext4/ioctl.c
6119 ++++ b/fs/ext4/ioctl.c
6120 +@@ -291,10 +291,20 @@ static int ext4_ioctl_setflags(struct inode *inode,
6121 + if (err)
6122 + goto flags_out;
6123 +
6124 +- if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL))
6125 ++ if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
6126 ++ /*
6127 ++ * Changes to the journaling mode can cause unsafe changes to
6128 ++ * S_DAX if we are using the DAX mount option.
6129 ++ */
6130 ++ if (test_opt(inode->i_sb, DAX)) {
6131 ++ err = -EBUSY;
6132 ++ goto flags_out;
6133 ++ }
6134 ++
6135 + err = ext4_change_inode_journal_flag(inode, jflag);
6136 +- if (err)
6137 +- goto flags_out;
6138 ++ if (err)
6139 ++ goto flags_out;
6140 ++ }
6141 + if (migrate) {
6142 + if (flags & EXT4_EXTENTS_FL)
6143 + err = ext4_ext_migrate(inode);
6144 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
6145 +index b0915b734a38..f29351c66610 100644
6146 +--- a/fs/ext4/super.c
6147 ++++ b/fs/ext4/super.c
6148 +@@ -3708,6 +3708,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
6149 + }
6150 +
6151 + if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
6152 ++ if (ext4_has_feature_inline_data(sb)) {
6153 ++ ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
6154 ++ " that may contain inline data");
6155 ++ goto failed_mount;
6156 ++ }
6157 + err = bdev_dax_supported(sb, blocksize);
6158 + if (err)
6159 + goto failed_mount;
6160 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
6161 +index 517e112c8a9a..6ce467872376 100644
6162 +--- a/fs/f2fs/file.c
6163 ++++ b/fs/f2fs/file.c
6164 +@@ -683,6 +683,12 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
6165 + STATX_ATTR_NODUMP);
6166 +
6167 + generic_fillattr(inode, stat);
6168 ++
6169 ++ /* we need to show initial sectors used for inline_data/dentries */
6170 ++ if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
6171 ++ f2fs_has_inline_dentry(inode))
6172 ++ stat->blocks += (stat->size + 511) >> 9;
6173 ++
6174 + return 0;
6175 + }
6176 +
6177 +diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
6178 +index 57d4c3e2e94a..8e42b4fbefdc 100644
6179 +--- a/fs/isofs/isofs.h
6180 ++++ b/fs/isofs/isofs.h
6181 +@@ -107,7 +107,7 @@ static inline unsigned int isonum_733(char *p)
6182 + /* Ignore bigendian datum due to broken mastering programs */
6183 + return get_unaligned_le32(p);
6184 + }
6185 +-extern int iso_date(char *, int);
6186 ++extern int iso_date(u8 *, int);
6187 +
6188 + struct inode; /* To make gcc happy */
6189 +
6190 +diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
6191 +index ef03625431bb..ac5cc587d718 100644
6192 +--- a/fs/isofs/rock.h
6193 ++++ b/fs/isofs/rock.h
6194 +@@ -66,7 +66,7 @@ struct RR_PL_s {
6195 + };
6196 +
6197 + struct stamp {
6198 +- char time[7];
6199 ++ __u8 time[7]; /* actually 6 unsigned, 1 signed */
6200 + } __attribute__ ((packed));
6201 +
6202 + struct RR_TF_s {
6203 +diff --git a/fs/isofs/util.c b/fs/isofs/util.c
6204 +index 42544bf0e222..e88dba721661 100644
6205 +--- a/fs/isofs/util.c
6206 ++++ b/fs/isofs/util.c
6207 +@@ -16,7 +16,7 @@
6208 + * to GMT. Thus we should always be correct.
6209 + */
6210 +
6211 +-int iso_date(char * p, int flag)
6212 ++int iso_date(u8 *p, int flag)
6213 + {
6214 + int year, month, day, hour, minute, second, tz;
6215 + int crtime;
6216 +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
6217 +index b995bdc13976..f04ecfc7ece0 100644
6218 +--- a/fs/lockd/svc.c
6219 ++++ b/fs/lockd/svc.c
6220 +@@ -369,6 +369,7 @@ static int lockd_start_svc(struct svc_serv *serv)
6221 + printk(KERN_WARNING
6222 + "lockd_up: svc_rqst allocation failed, error=%d\n",
6223 + error);
6224 ++ lockd_unregister_notifiers();
6225 + goto out_rqst;
6226 + }
6227 +
6228 +@@ -459,13 +460,16 @@ int lockd_up(struct net *net)
6229 + }
6230 +
6231 + error = lockd_up_net(serv, net);
6232 +- if (error < 0)
6233 +- goto err_net;
6234 ++ if (error < 0) {
6235 ++ lockd_unregister_notifiers();
6236 ++ goto err_put;
6237 ++ }
6238 +
6239 + error = lockd_start_svc(serv);
6240 +- if (error < 0)
6241 +- goto err_start;
6242 +-
6243 ++ if (error < 0) {
6244 ++ lockd_down_net(serv, net);
6245 ++ goto err_put;
6246 ++ }
6247 + nlmsvc_users++;
6248 + /*
6249 + * Note: svc_serv structures have an initial use count of 1,
6250 +@@ -476,12 +480,6 @@ int lockd_up(struct net *net)
6251 + err_create:
6252 + mutex_unlock(&nlmsvc_mutex);
6253 + return error;
6254 +-
6255 +-err_start:
6256 +- lockd_down_net(serv, net);
6257 +-err_net:
6258 +- lockd_unregister_notifiers();
6259 +- goto err_put;
6260 + }
6261 + EXPORT_SYMBOL_GPL(lockd_up);
6262 +
6263 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
6264 +index 5ceaeb1f6fb6..b03b3bc05f96 100644
6265 +--- a/fs/nfs/dir.c
6266 ++++ b/fs/nfs/dir.c
6267 +@@ -1241,8 +1241,7 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
6268 + return 0;
6269 + }
6270 +
6271 +- if (nfs_mapping_need_revalidate_inode(inode))
6272 +- error = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
6273 ++ error = nfs_lookup_verify_inode(inode, flags);
6274 + dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
6275 + __func__, inode->i_ino, error ? "invalid" : "valid");
6276 + return !error;
6277 +@@ -1393,6 +1392,7 @@ static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
6278 +
6279 + const struct dentry_operations nfs4_dentry_operations = {
6280 + .d_revalidate = nfs4_lookup_revalidate,
6281 ++ .d_weak_revalidate = nfs_weak_revalidate,
6282 + .d_delete = nfs_dentry_delete,
6283 + .d_iput = nfs_dentry_iput,
6284 + .d_automount = nfs_d_automount,
6285 +diff --git a/fs/nfs/file.c b/fs/nfs/file.c
6286 +index 0214dd1e1060..81cca49a8375 100644
6287 +--- a/fs/nfs/file.c
6288 ++++ b/fs/nfs/file.c
6289 +@@ -829,23 +829,9 @@ int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
6290 + if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
6291 + is_local = 1;
6292 +
6293 +- /*
6294 +- * VFS doesn't require the open mode to match a flock() lock's type.
6295 +- * NFS, however, may simulate flock() locking with posix locking which
6296 +- * requires the open mode to match the lock type.
6297 +- */
6298 +- switch (fl->fl_type) {
6299 +- case F_UNLCK:
6300 ++ /* We're simulating flock() locks using posix locks on the server */
6301 ++ if (fl->fl_type == F_UNLCK)
6302 + return do_unlk(filp, cmd, fl, is_local);
6303 +- case F_RDLCK:
6304 +- if (!(filp->f_mode & FMODE_READ))
6305 +- return -EBADF;
6306 +- break;
6307 +- case F_WRLCK:
6308 +- if (!(filp->f_mode & FMODE_WRITE))
6309 +- return -EBADF;
6310 +- }
6311 +-
6312 + return do_setlk(filp, cmd, fl, is_local);
6313 + }
6314 + EXPORT_SYMBOL_GPL(nfs_flock);
6315 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
6316 +index f90090e8c959..2241d52710f7 100644
6317 +--- a/fs/nfs/nfs4proc.c
6318 ++++ b/fs/nfs/nfs4proc.c
6319 +@@ -254,15 +254,12 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
6320 + };
6321 +
6322 + const u32 nfs4_fs_locations_bitmap[3] = {
6323 +- FATTR4_WORD0_TYPE
6324 +- | FATTR4_WORD0_CHANGE
6325 ++ FATTR4_WORD0_CHANGE
6326 + | FATTR4_WORD0_SIZE
6327 + | FATTR4_WORD0_FSID
6328 + | FATTR4_WORD0_FILEID
6329 + | FATTR4_WORD0_FS_LOCATIONS,
6330 +- FATTR4_WORD1_MODE
6331 +- | FATTR4_WORD1_NUMLINKS
6332 +- | FATTR4_WORD1_OWNER
6333 ++ FATTR4_WORD1_OWNER
6334 + | FATTR4_WORD1_OWNER_GROUP
6335 + | FATTR4_WORD1_RAWDEV
6336 + | FATTR4_WORD1_SPACE_USED
6337 +@@ -6568,6 +6565,20 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6338 + !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6339 + return -ENOLCK;
6340 +
6341 ++ /*
6342 ++ * Don't rely on the VFS having checked the file open mode,
6343 ++ * since it won't do this for flock() locks.
6344 ++ */
6345 ++ switch (request->fl_type) {
6346 ++ case F_RDLCK:
6347 ++ if (!(filp->f_mode & FMODE_READ))
6348 ++ return -EBADF;
6349 ++ break;
6350 ++ case F_WRLCK:
6351 ++ if (!(filp->f_mode & FMODE_WRITE))
6352 ++ return -EBADF;
6353 ++ }
6354 ++
6355 + status = nfs4_set_lock_state(state, request);
6356 + if (status != 0)
6357 + return status;
6358 +@@ -6763,9 +6774,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6359 + struct page *page)
6360 + {
6361 + struct nfs_server *server = NFS_SERVER(dir);
6362 +- u32 bitmask[3] = {
6363 +- [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6364 +- };
6365 ++ u32 bitmask[3];
6366 + struct nfs4_fs_locations_arg args = {
6367 + .dir_fh = NFS_FH(dir),
6368 + .name = name,
6369 +@@ -6784,12 +6793,15 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6370 +
6371 + dprintk("%s: start\n", __func__);
6372 +
6373 ++ bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
6374 ++ bitmask[1] = nfs4_fattr_bitmap[1];
6375 ++
6376 + /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6377 + * is not supported */
6378 + if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6379 +- bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6380 ++ bitmask[0] &= ~FATTR4_WORD0_FILEID;
6381 + else
6382 +- bitmask[0] |= FATTR4_WORD0_FILEID;
6383 ++ bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
6384 +
6385 + nfs_fattr_init(&fs_locations->fattr);
6386 + fs_locations->server = server;
6387 +diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
6388 +index e7c6275519b0..71d2ca04a9f8 100644
6389 +--- a/fs/nfs/nfs4trace.h
6390 ++++ b/fs/nfs/nfs4trace.h
6391 +@@ -202,17 +202,13 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
6392 + TP_ARGS(clp, error),
6393 +
6394 + TP_STRUCT__entry(
6395 +- __string(dstaddr,
6396 +- rpc_peeraddr2str(clp->cl_rpcclient,
6397 +- RPC_DISPLAY_ADDR))
6398 ++ __string(dstaddr, clp->cl_hostname)
6399 + __field(int, error)
6400 + ),
6401 +
6402 + TP_fast_assign(
6403 + __entry->error = error;
6404 +- __assign_str(dstaddr,
6405 +- rpc_peeraddr2str(clp->cl_rpcclient,
6406 +- RPC_DISPLAY_ADDR));
6407 ++ __assign_str(dstaddr, clp->cl_hostname);
6408 + ),
6409 +
6410 + TP_printk(
6411 +@@ -1133,9 +1129,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
6412 + __field(dev_t, dev)
6413 + __field(u32, fhandle)
6414 + __field(u64, fileid)
6415 +- __string(dstaddr, clp ?
6416 +- rpc_peeraddr2str(clp->cl_rpcclient,
6417 +- RPC_DISPLAY_ADDR) : "unknown")
6418 ++ __string(dstaddr, clp ? clp->cl_hostname : "unknown")
6419 + ),
6420 +
6421 + TP_fast_assign(
6422 +@@ -1148,9 +1142,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
6423 + __entry->fileid = 0;
6424 + __entry->dev = 0;
6425 + }
6426 +- __assign_str(dstaddr, clp ?
6427 +- rpc_peeraddr2str(clp->cl_rpcclient,
6428 +- RPC_DISPLAY_ADDR) : "unknown")
6429 ++ __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
6430 + ),
6431 +
6432 + TP_printk(
6433 +@@ -1192,9 +1184,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
6434 + __field(dev_t, dev)
6435 + __field(u32, fhandle)
6436 + __field(u64, fileid)
6437 +- __string(dstaddr, clp ?
6438 +- rpc_peeraddr2str(clp->cl_rpcclient,
6439 +- RPC_DISPLAY_ADDR) : "unknown")
6440 ++ __string(dstaddr, clp ? clp->cl_hostname : "unknown")
6441 + __field(int, stateid_seq)
6442 + __field(u32, stateid_hash)
6443 + ),
6444 +@@ -1209,9 +1199,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
6445 + __entry->fileid = 0;
6446 + __entry->dev = 0;
6447 + }
6448 +- __assign_str(dstaddr, clp ?
6449 +- rpc_peeraddr2str(clp->cl_rpcclient,
6450 +- RPC_DISPLAY_ADDR) : "unknown")
6451 ++ __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
6452 + __entry->stateid_seq =
6453 + be32_to_cpu(stateid->seqid);
6454 + __entry->stateid_hash =
6455 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
6456 +index c9d24bae3025..216f67d628b3 100644
6457 +--- a/fs/nfs/super.c
6458 ++++ b/fs/nfs/super.c
6459 +@@ -1332,7 +1332,7 @@ static int nfs_parse_mount_options(char *raw,
6460 + mnt->options |= NFS_OPTION_MIGRATION;
6461 + break;
6462 + case Opt_nomigration:
6463 +- mnt->options &= NFS_OPTION_MIGRATION;
6464 ++ mnt->options &= ~NFS_OPTION_MIGRATION;
6465 + break;
6466 +
6467 + /*
6468 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
6469 +index 0c04f81aa63b..d386d569edbc 100644
6470 +--- a/fs/nfsd/nfs4state.c
6471 ++++ b/fs/nfsd/nfs4state.c
6472 +@@ -3966,7 +3966,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei
6473 + {
6474 + struct nfs4_stid *ret;
6475 +
6476 +- ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
6477 ++ ret = find_stateid_by_type(cl, s,
6478 ++ NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
6479 + if (!ret)
6480 + return NULL;
6481 + return delegstateid(ret);
6482 +@@ -3989,6 +3990,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
6483 + deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
6484 + if (deleg == NULL)
6485 + goto out;
6486 ++ if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
6487 ++ nfs4_put_stid(&deleg->dl_stid);
6488 ++ if (cl->cl_minorversion)
6489 ++ status = nfserr_deleg_revoked;
6490 ++ goto out;
6491 ++ }
6492 + flags = share_access_to_flags(open->op_share_access);
6493 + status = nfs4_check_delegmode(deleg, flags);
6494 + if (status) {
6495 +@@ -4858,6 +4865,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6496 + struct nfs4_stid **s, struct nfsd_net *nn)
6497 + {
6498 + __be32 status;
6499 ++ bool return_revoked = false;
6500 ++
6501 ++ /*
6502 ++ * only return revoked delegations if explicitly asked.
6503 ++ * otherwise we report revoked or bad_stateid status.
6504 ++ */
6505 ++ if (typemask & NFS4_REVOKED_DELEG_STID)
6506 ++ return_revoked = true;
6507 ++ else if (typemask & NFS4_DELEG_STID)
6508 ++ typemask |= NFS4_REVOKED_DELEG_STID;
6509 +
6510 + if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
6511 + return nfserr_bad_stateid;
6512 +@@ -4872,6 +4889,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6513 + *s = find_stateid_by_type(cstate->clp, stateid, typemask);
6514 + if (!*s)
6515 + return nfserr_bad_stateid;
6516 ++ if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
6517 ++ nfs4_put_stid(*s);
6518 ++ if (cstate->minorversion)
6519 ++ return nfserr_deleg_revoked;
6520 ++ return nfserr_bad_stateid;
6521 ++ }
6522 + return nfs_ok;
6523 + }
6524 +
6525 +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
6526 +index 70ded52dc1dd..50e12956c737 100644
6527 +--- a/fs/nilfs2/segment.c
6528 ++++ b/fs/nilfs2/segment.c
6529 +@@ -1958,8 +1958,6 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
6530 + err, ii->vfs_inode.i_ino);
6531 + return err;
6532 + }
6533 +- mark_buffer_dirty(ibh);
6534 +- nilfs_mdt_mark_dirty(ifile);
6535 + spin_lock(&nilfs->ns_inode_lock);
6536 + if (likely(!ii->i_bh))
6537 + ii->i_bh = ibh;
6538 +@@ -1968,6 +1966,10 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
6539 + goto retry;
6540 + }
6541 +
6542 ++ // Always redirty the buffer to avoid race condition
6543 ++ mark_buffer_dirty(ii->i_bh);
6544 ++ nilfs_mdt_mark_dirty(ifile);
6545 ++
6546 + clear_bit(NILFS_I_QUEUED, &ii->i_state);
6547 + set_bit(NILFS_I_BUSY, &ii->i_state);
6548 + list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
6549 +diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
6550 +index 09640b546363..3c7053207297 100644
6551 +--- a/fs/notify/fanotify/fanotify.c
6552 ++++ b/fs/notify/fanotify/fanotify.c
6553 +@@ -65,19 +65,8 @@ static int fanotify_get_response(struct fsnotify_group *group,
6554 +
6555 + pr_debug("%s: group=%p event=%p\n", __func__, group, event);
6556 +
6557 +- /*
6558 +- * fsnotify_prepare_user_wait() fails if we race with mark deletion.
6559 +- * Just let the operation pass in that case.
6560 +- */
6561 +- if (!fsnotify_prepare_user_wait(iter_info)) {
6562 +- event->response = FAN_ALLOW;
6563 +- goto out;
6564 +- }
6565 +-
6566 + wait_event(group->fanotify_data.access_waitq, event->response);
6567 +
6568 +- fsnotify_finish_user_wait(iter_info);
6569 +-out:
6570 + /* userspace responded, convert to something usable */
6571 + switch (event->response) {
6572 + case FAN_ALLOW:
6573 +@@ -212,9 +201,21 @@ static int fanotify_handle_event(struct fsnotify_group *group,
6574 + pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
6575 + mask);
6576 +
6577 ++#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
6578 ++ if (mask & FAN_ALL_PERM_EVENTS) {
6579 ++ /*
6580 ++ * fsnotify_prepare_user_wait() fails if we race with mark
6581 ++ * deletion. Just let the operation pass in that case.
6582 ++ */
6583 ++ if (!fsnotify_prepare_user_wait(iter_info))
6584 ++ return 0;
6585 ++ }
6586 ++#endif
6587 ++
6588 + event = fanotify_alloc_event(inode, mask, data);
6589 ++ ret = -ENOMEM;
6590 + if (unlikely(!event))
6591 +- return -ENOMEM;
6592 ++ goto finish;
6593 +
6594 + fsn_event = &event->fse;
6595 + ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
6596 +@@ -224,7 +225,8 @@ static int fanotify_handle_event(struct fsnotify_group *group,
6597 + /* Our event wasn't used in the end. Free it. */
6598 + fsnotify_destroy_event(group, fsn_event);
6599 +
6600 +- return 0;
6601 ++ ret = 0;
6602 ++ goto finish;
6603 + }
6604 +
6605 + #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
6606 +@@ -233,6 +235,11 @@ static int fanotify_handle_event(struct fsnotify_group *group,
6607 + iter_info);
6608 + fsnotify_destroy_event(group, fsn_event);
6609 + }
6610 ++finish:
6611 ++ if (mask & FAN_ALL_PERM_EVENTS)
6612 ++ fsnotify_finish_user_wait(iter_info);
6613 ++#else
6614 ++finish:
6615 + #endif
6616 + return ret;
6617 + }
6618 +diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
6619 +index 0c4583b61717..074716293829 100644
6620 +--- a/fs/notify/fsnotify.c
6621 ++++ b/fs/notify/fsnotify.c
6622 +@@ -335,6 +335,13 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
6623 + struct fsnotify_mark, obj_list);
6624 + vfsmount_group = vfsmount_mark->group;
6625 + }
6626 ++ /*
6627 ++ * Need to protect both marks against freeing so that we can
6628 ++ * continue iteration from this place, regardless of which mark
6629 ++ * we actually happen to send an event for.
6630 ++ */
6631 ++ iter_info.inode_mark = inode_mark;
6632 ++ iter_info.vfsmount_mark = vfsmount_mark;
6633 +
6634 + if (inode_group && vfsmount_group) {
6635 + int cmp = fsnotify_compare_groups(inode_group,
6636 +@@ -348,9 +355,6 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
6637 + }
6638 + }
6639 +
6640 +- iter_info.inode_mark = inode_mark;
6641 +- iter_info.vfsmount_mark = vfsmount_mark;
6642 +-
6643 + ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask,
6644 + data, data_is, cookie, file_name,
6645 + &iter_info);
6646 +diff --git a/fs/notify/mark.c b/fs/notify/mark.c
6647 +index 9991f8826734..258d99087183 100644
6648 +--- a/fs/notify/mark.c
6649 ++++ b/fs/notify/mark.c
6650 +@@ -109,16 +109,6 @@ void fsnotify_get_mark(struct fsnotify_mark *mark)
6651 + atomic_inc(&mark->refcnt);
6652 + }
6653 +
6654 +-/*
6655 +- * Get mark reference when we found the mark via lockless traversal of object
6656 +- * list. Mark can be already removed from the list by now and on its way to be
6657 +- * destroyed once SRCU period ends.
6658 +- */
6659 +-static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
6660 +-{
6661 +- return atomic_inc_not_zero(&mark->refcnt);
6662 +-}
6663 +-
6664 + static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
6665 + {
6666 + u32 new_mask = 0;
6667 +@@ -256,32 +246,60 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
6668 + FSNOTIFY_REAPER_DELAY);
6669 + }
6670 +
6671 +-bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
6672 ++/*
6673 ++ * Get mark reference when we found the mark via lockless traversal of object
6674 ++ * list. Mark can be already removed from the list by now and on its way to be
6675 ++ * destroyed once SRCU period ends.
6676 ++ *
6677 ++ * Also pin the group so it doesn't disappear under us.
6678 ++ */
6679 ++static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
6680 + {
6681 +- struct fsnotify_group *group;
6682 +-
6683 +- if (WARN_ON_ONCE(!iter_info->inode_mark && !iter_info->vfsmount_mark))
6684 +- return false;
6685 +-
6686 +- if (iter_info->inode_mark)
6687 +- group = iter_info->inode_mark->group;
6688 +- else
6689 +- group = iter_info->vfsmount_mark->group;
6690 ++ if (!mark)
6691 ++ return true;
6692 ++
6693 ++ if (atomic_inc_not_zero(&mark->refcnt)) {
6694 ++ spin_lock(&mark->lock);
6695 ++ if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
6696 ++ /* mark is attached, group is still alive then */
6697 ++ atomic_inc(&mark->group->user_waits);
6698 ++ spin_unlock(&mark->lock);
6699 ++ return true;
6700 ++ }
6701 ++ spin_unlock(&mark->lock);
6702 ++ fsnotify_put_mark(mark);
6703 ++ }
6704 ++ return false;
6705 ++}
6706 +
6707 +- /*
6708 +- * Since acquisition of mark reference is an atomic op as well, we can
6709 +- * be sure this inc is seen before any effect of refcount increment.
6710 +- */
6711 +- atomic_inc(&group->user_waits);
6712 ++/*
6713 ++ * Puts marks and wakes up group destruction if necessary.
6714 ++ *
6715 ++ * Pairs with fsnotify_get_mark_safe()
6716 ++ */
6717 ++static void fsnotify_put_mark_wake(struct fsnotify_mark *mark)
6718 ++{
6719 ++ if (mark) {
6720 ++ struct fsnotify_group *group = mark->group;
6721 +
6722 +- if (iter_info->inode_mark) {
6723 +- /* This can fail if mark is being removed */
6724 +- if (!fsnotify_get_mark_safe(iter_info->inode_mark))
6725 +- goto out_wait;
6726 ++ fsnotify_put_mark(mark);
6727 ++ /*
6728 ++ * We abuse notification_waitq on group shutdown for waiting for
6729 ++ * all marks pinned when waiting for userspace.
6730 ++ */
6731 ++ if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
6732 ++ wake_up(&group->notification_waitq);
6733 + }
6734 +- if (iter_info->vfsmount_mark) {
6735 +- if (!fsnotify_get_mark_safe(iter_info->vfsmount_mark))
6736 +- goto out_inode;
6737 ++}
6738 ++
6739 ++bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
6740 ++{
6741 ++ /* This can fail if mark is being removed */
6742 ++ if (!fsnotify_get_mark_safe(iter_info->inode_mark))
6743 ++ return false;
6744 ++ if (!fsnotify_get_mark_safe(iter_info->vfsmount_mark)) {
6745 ++ fsnotify_put_mark_wake(iter_info->inode_mark);
6746 ++ return false;
6747 + }
6748 +
6749 + /*
6750 +@@ -292,34 +310,13 @@ bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
6751 + srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx);
6752 +
6753 + return true;
6754 +-out_inode:
6755 +- if (iter_info->inode_mark)
6756 +- fsnotify_put_mark(iter_info->inode_mark);
6757 +-out_wait:
6758 +- if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
6759 +- wake_up(&group->notification_waitq);
6760 +- return false;
6761 + }
6762 +
6763 + void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
6764 + {
6765 +- struct fsnotify_group *group = NULL;
6766 +-
6767 + iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
6768 +- if (iter_info->inode_mark) {
6769 +- group = iter_info->inode_mark->group;
6770 +- fsnotify_put_mark(iter_info->inode_mark);
6771 +- }
6772 +- if (iter_info->vfsmount_mark) {
6773 +- group = iter_info->vfsmount_mark->group;
6774 +- fsnotify_put_mark(iter_info->vfsmount_mark);
6775 +- }
6776 +- /*
6777 +- * We abuse notification_waitq on group shutdown for waiting for all
6778 +- * marks pinned when waiting for userspace.
6779 +- */
6780 +- if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
6781 +- wake_up(&group->notification_waitq);
6782 ++ fsnotify_put_mark_wake(iter_info->inode_mark);
6783 ++ fsnotify_put_mark_wake(iter_info->vfsmount_mark);
6784 + }
6785 +
6786 + /*
6787 +diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
6788 +index a12dc10bf726..bc6d5c5a3443 100644
6789 +--- a/fs/overlayfs/namei.c
6790 ++++ b/fs/overlayfs/namei.c
6791 +@@ -630,7 +630,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
6792 + err = ovl_check_origin(upperdentry, roe->lowerstack,
6793 + roe->numlower, &stack, &ctr);
6794 + if (err)
6795 +- goto out;
6796 ++ goto out_put_upper;
6797 + }
6798 +
6799 + if (d.redirect) {
6800 +diff --git a/include/linux/genhd.h b/include/linux/genhd.h
6801 +index 44790523057f..5ade8f2a6987 100644
6802 +--- a/include/linux/genhd.h
6803 ++++ b/include/linux/genhd.h
6804 +@@ -243,6 +243,7 @@ static inline dev_t part_devt(struct hd_struct *part)
6805 + return part_to_dev(part)->devt;
6806 + }
6807 +
6808 ++extern struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
6809 + extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);
6810 +
6811 + static inline void disk_put_part(struct hd_struct *part)
6812 +diff --git a/include/linux/irq.h b/include/linux/irq.h
6813 +index 4536286cc4d2..0d53626405bf 100644
6814 +--- a/include/linux/irq.h
6815 ++++ b/include/linux/irq.h
6816 +@@ -211,6 +211,7 @@ struct irq_data {
6817 + * IRQD_MANAGED_SHUTDOWN - Interrupt was shutdown due to empty affinity
6818 + * mask. Applies only to affinity managed irqs.
6819 + * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
6820 ++ * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
6821 + */
6822 + enum {
6823 + IRQD_TRIGGER_MASK = 0xf,
6824 +@@ -231,6 +232,7 @@ enum {
6825 + IRQD_IRQ_STARTED = (1 << 22),
6826 + IRQD_MANAGED_SHUTDOWN = (1 << 23),
6827 + IRQD_SINGLE_TARGET = (1 << 24),
6828 ++ IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
6829 + };
6830 +
6831 + #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
6832 +@@ -260,18 +262,25 @@ static inline void irqd_mark_affinity_was_set(struct irq_data *d)
6833 + __irqd_to_state(d) |= IRQD_AFFINITY_SET;
6834 + }
6835 +
6836 ++static inline bool irqd_trigger_type_was_set(struct irq_data *d)
6837 ++{
6838 ++ return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
6839 ++}
6840 ++
6841 + static inline u32 irqd_get_trigger_type(struct irq_data *d)
6842 + {
6843 + return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
6844 + }
6845 +
6846 + /*
6847 +- * Must only be called inside irq_chip.irq_set_type() functions.
6848 ++ * Must only be called inside irq_chip.irq_set_type() functions or
6849 ++ * from the DT/ACPI setup code.
6850 + */
6851 + static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
6852 + {
6853 + __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
6854 + __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
6855 ++ __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
6856 + }
6857 +
6858 + static inline bool irqd_is_level_type(struct irq_data *d)
6859 +diff --git a/include/net/tls.h b/include/net/tls.h
6860 +index b89d397dd62f..c06db1eadac2 100644
6861 +--- a/include/net/tls.h
6862 ++++ b/include/net/tls.h
6863 +@@ -35,6 +35,10 @@
6864 + #define _TLS_OFFLOAD_H
6865 +
6866 + #include <linux/types.h>
6867 ++#include <asm/byteorder.h>
6868 ++#include <linux/socket.h>
6869 ++#include <linux/tcp.h>
6870 ++#include <net/tcp.h>
6871 +
6872 + #include <uapi/linux/tls.h>
6873 +
6874 +diff --git a/include/sound/control.h b/include/sound/control.h
6875 +index a1f1152bc687..ca13a44ae9d4 100644
6876 +--- a/include/sound/control.h
6877 ++++ b/include/sound/control.h
6878 +@@ -249,7 +249,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
6879 + void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
6880 + #define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true)
6881 + int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
6882 +- int (*func)(struct snd_kcontrol *, void *),
6883 ++ int (*func)(struct snd_kcontrol *vslave,
6884 ++ struct snd_kcontrol *slave,
6885 ++ void *arg),
6886 + void *arg);
6887 +
6888 + /*
6889 +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
6890 +index f5db145e68ec..0d924e968c94 100644
6891 +--- a/include/target/target_core_base.h
6892 ++++ b/include/target/target_core_base.h
6893 +@@ -490,6 +490,7 @@ struct se_cmd {
6894 + #define CMD_T_STOP (1 << 5)
6895 + #define CMD_T_TAS (1 << 10)
6896 + #define CMD_T_FABRIC_STOP (1 << 11)
6897 ++#define CMD_T_PRE_EXECUTE (1 << 12)
6898 + spinlock_t t_state_lock;
6899 + struct kref cmd_kref;
6900 + struct completion t_transport_stop_comp;
6901 +diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
6902 +index 25a7739514cd..3868b4752324 100644
6903 +--- a/include/trace/events/sunrpc.h
6904 ++++ b/include/trace/events/sunrpc.h
6905 +@@ -456,20 +456,22 @@ TRACE_EVENT(svc_recv,
6906 + TP_ARGS(rqst, status),
6907 +
6908 + TP_STRUCT__entry(
6909 +- __field(struct sockaddr *, addr)
6910 + __field(__be32, xid)
6911 + __field(int, status)
6912 + __field(unsigned long, flags)
6913 ++ __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
6914 + ),
6915 +
6916 + TP_fast_assign(
6917 +- __entry->addr = (struct sockaddr *)&rqst->rq_addr;
6918 + __entry->xid = status > 0 ? rqst->rq_xid : 0;
6919 + __entry->status = status;
6920 + __entry->flags = rqst->rq_flags;
6921 ++ memcpy(__get_dynamic_array(addr),
6922 ++ &rqst->rq_addr, rqst->rq_addrlen);
6923 + ),
6924 +
6925 +- TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
6926 ++ TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s",
6927 ++ (struct sockaddr *)__get_dynamic_array(addr),
6928 + be32_to_cpu(__entry->xid), __entry->status,
6929 + show_rqstp_flags(__entry->flags))
6930 + );
6931 +@@ -514,22 +516,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
6932 + TP_ARGS(rqst, status),
6933 +
6934 + TP_STRUCT__entry(
6935 +- __field(struct sockaddr *, addr)
6936 + __field(__be32, xid)
6937 +- __field(int, dropme)
6938 + __field(int, status)
6939 + __field(unsigned long, flags)
6940 ++ __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
6941 + ),
6942 +
6943 + TP_fast_assign(
6944 +- __entry->addr = (struct sockaddr *)&rqst->rq_addr;
6945 + __entry->xid = rqst->rq_xid;
6946 + __entry->status = status;
6947 + __entry->flags = rqst->rq_flags;
6948 ++ memcpy(__get_dynamic_array(addr),
6949 ++ &rqst->rq_addr, rqst->rq_addrlen);
6950 + ),
6951 +
6952 + TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
6953 +- __entry->addr, be32_to_cpu(__entry->xid),
6954 ++ (struct sockaddr *)__get_dynamic_array(addr),
6955 ++ be32_to_cpu(__entry->xid),
6956 + __entry->status, show_rqstp_flags(__entry->flags))
6957 + );
6958 +
6959 +diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h
6960 +index 9656aad8f8f7..9d4afea308a4 100644
6961 +--- a/include/uapi/linux/rxrpc.h
6962 ++++ b/include/uapi/linux/rxrpc.h
6963 +@@ -20,12 +20,12 @@
6964 + * RxRPC socket address
6965 + */
6966 + struct sockaddr_rxrpc {
6967 +- sa_family_t srx_family; /* address family */
6968 +- u16 srx_service; /* service desired */
6969 +- u16 transport_type; /* type of transport socket (SOCK_DGRAM) */
6970 +- u16 transport_len; /* length of transport address */
6971 ++ __kernel_sa_family_t srx_family; /* address family */
6972 ++ __u16 srx_service; /* service desired */
6973 ++ __u16 transport_type; /* type of transport socket (SOCK_DGRAM) */
6974 ++ __u16 transport_len; /* length of transport address */
6975 + union {
6976 +- sa_family_t family; /* transport address family */
6977 ++ __kernel_sa_family_t family; /* transport address family */
6978 + struct sockaddr_in sin; /* IPv4 transport address */
6979 + struct sockaddr_in6 sin6; /* IPv6 transport address */
6980 + } transport;
6981 +diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
6982 +index d5e0682ab837..293b2cdad88d 100644
6983 +--- a/include/uapi/linux/tls.h
6984 ++++ b/include/uapi/linux/tls.h
6985 +@@ -35,10 +35,6 @@
6986 + #define _UAPI_LINUX_TLS_H
6987 +
6988 + #include <linux/types.h>
6989 +-#include <asm/byteorder.h>
6990 +-#include <linux/socket.h>
6991 +-#include <linux/tcp.h>
6992 +-#include <net/tcp.h>
6993 +
6994 + /* TLS socket options */
6995 + #define TLS_TX 1 /* Set transmit parameters */
6996 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
6997 +index 4bff6a10ae8e..b02caa442776 100644
6998 +--- a/kernel/irq/manage.c
6999 ++++ b/kernel/irq/manage.c
7000 +@@ -1245,7 +1245,18 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
7001 + * set the trigger type must match. Also all must
7002 + * agree on ONESHOT.
7003 + */
7004 +- unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);
7005 ++ unsigned int oldtype;
7006 ++
7007 ++ /*
7008 ++ * If nobody did set the configuration before, inherit
7009 ++ * the one provided by the requester.
7010 ++ */
7011 ++ if (irqd_trigger_type_was_set(&desc->irq_data)) {
7012 ++ oldtype = irqd_get_trigger_type(&desc->irq_data);
7013 ++ } else {
7014 ++ oldtype = new->flags & IRQF_TRIGGER_MASK;
7015 ++ irqd_set_trigger_type(&desc->irq_data, oldtype);
7016 ++ }
7017 +
7018 + if (!((old->flags & new->flags) & IRQF_SHARED) ||
7019 + (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
7020 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
7021 +index d17c5da523a0..8fa7b6f9e19b 100644
7022 +--- a/kernel/sched/core.c
7023 ++++ b/kernel/sched/core.c
7024 +@@ -505,8 +505,7 @@ void resched_cpu(int cpu)
7025 + struct rq *rq = cpu_rq(cpu);
7026 + unsigned long flags;
7027 +
7028 +- if (!raw_spin_trylock_irqsave(&rq->lock, flags))
7029 +- return;
7030 ++ raw_spin_lock_irqsave(&rq->lock, flags);
7031 + resched_curr(rq);
7032 + raw_spin_unlock_irqrestore(&rq->lock, flags);
7033 + }
7034 +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
7035 +index ba0da243fdd8..2f52ec0f1539 100644
7036 +--- a/kernel/sched/cpufreq_schedutil.c
7037 ++++ b/kernel/sched/cpufreq_schedutil.c
7038 +@@ -282,8 +282,12 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
7039 + * Do not reduce the frequency if the CPU has not been idle
7040 + * recently, as the reduction is likely to be premature then.
7041 + */
7042 +- if (busy && next_f < sg_policy->next_freq)
7043 ++ if (busy && next_f < sg_policy->next_freq) {
7044 + next_f = sg_policy->next_freq;
7045 ++
7046 ++ /* Reset cached freq as next_freq has changed */
7047 ++ sg_policy->cached_raw_freq = 0;
7048 ++ }
7049 + }
7050 + sugov_update_commit(sg_policy, time, next_f);
7051 + }
7052 +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
7053 +index 3c96c80e0992..d8c43d73e078 100644
7054 +--- a/kernel/sched/rt.c
7055 ++++ b/kernel/sched/rt.c
7056 +@@ -74,10 +74,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
7057 + raw_spin_unlock(&rt_b->rt_runtime_lock);
7058 + }
7059 +
7060 +-#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
7061 +-static void push_irq_work_func(struct irq_work *work);
7062 +-#endif
7063 +-
7064 + void init_rt_rq(struct rt_rq *rt_rq)
7065 + {
7066 + struct rt_prio_array *array;
7067 +@@ -97,13 +93,6 @@ void init_rt_rq(struct rt_rq *rt_rq)
7068 + rt_rq->rt_nr_migratory = 0;
7069 + rt_rq->overloaded = 0;
7070 + plist_head_init(&rt_rq->pushable_tasks);
7071 +-
7072 +-#ifdef HAVE_RT_PUSH_IPI
7073 +- rt_rq->push_flags = 0;
7074 +- rt_rq->push_cpu = nr_cpu_ids;
7075 +- raw_spin_lock_init(&rt_rq->push_lock);
7076 +- init_irq_work(&rt_rq->push_work, push_irq_work_func);
7077 +-#endif
7078 + #endif /* CONFIG_SMP */
7079 + /* We start is dequeued state, because no RT tasks are queued */
7080 + rt_rq->rt_queued = 0;
7081 +@@ -1876,241 +1865,166 @@ static void push_rt_tasks(struct rq *rq)
7082 + }
7083 +
7084 + #ifdef HAVE_RT_PUSH_IPI
7085 ++
7086 + /*
7087 +- * The search for the next cpu always starts at rq->cpu and ends
7088 +- * when we reach rq->cpu again. It will never return rq->cpu.
7089 +- * This returns the next cpu to check, or nr_cpu_ids if the loop
7090 +- * is complete.
7091 ++ * When a high priority task schedules out from a CPU and a lower priority
7092 ++ * task is scheduled in, a check is made to see if there's any RT tasks
7093 ++ * on other CPUs that are waiting to run because a higher priority RT task
7094 ++ * is currently running on its CPU. In this case, the CPU with multiple RT
7095 ++ * tasks queued on it (overloaded) needs to be notified that a CPU has opened
7096 ++ * up that may be able to run one of its non-running queued RT tasks.
7097 ++ *
7098 ++ * All CPUs with overloaded RT tasks need to be notified as there is currently
7099 ++ * no way to know which of these CPUs have the highest priority task waiting
7100 ++ * to run. Instead of trying to take a spinlock on each of these CPUs,
7101 ++ * which has shown to cause large latency when done on machines with many
7102 ++ * CPUs, sending an IPI to the CPUs to have them push off the overloaded
7103 ++ * RT tasks waiting to run.
7104 ++ *
7105 ++ * Just sending an IPI to each of the CPUs is also an issue, as on large
7106 ++ * count CPU machines, this can cause an IPI storm on a CPU, especially
7107 ++ * if its the only CPU with multiple RT tasks queued, and a large number
7108 ++ * of CPUs scheduling a lower priority task at the same time.
7109 ++ *
7110 ++ * Each root domain has its own irq work function that can iterate over
7111 ++ * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
7112 ++ * tasks must be checked if there's one or many CPUs that are lowering
7113 ++ * their priority, there's a single irq work iterator that will try to
7114 ++ * push off RT tasks that are waiting to run.
7115 ++ *
7116 ++ * When a CPU schedules a lower priority task, it will kick off the
7117 ++ * irq work iterator that will jump to each CPU with overloaded RT tasks.
7118 ++ * As it only takes the first CPU that schedules a lower priority task
7119 ++ * to start the process, the rto_start variable is incremented and if
7120 ++ * the atomic result is one, then that CPU will try to take the rto_lock.
7121 ++ * This prevents high contention on the lock as the process handles all
7122 ++ * CPUs scheduling lower priority tasks.
7123 ++ *
7124 ++ * All CPUs that are scheduling a lower priority task will increment the
7125 ++ * rt_loop_next variable. This will make sure that the irq work iterator
7126 ++ * checks all RT overloaded CPUs whenever a CPU schedules a new lower
7127 ++ * priority task, even if the iterator is in the middle of a scan. Incrementing
7128 ++ * the rt_loop_next will cause the iterator to perform another scan.
7129 + *
7130 +- * rq->rt.push_cpu holds the last cpu returned by this function,
7131 +- * or if this is the first instance, it must hold rq->cpu.
7132 + */
7133 + static int rto_next_cpu(struct rq *rq)
7134 + {
7135 +- int prev_cpu = rq->rt.push_cpu;
7136 ++ struct root_domain *rd = rq->rd;
7137 ++ int next;
7138 + int cpu;
7139 +
7140 +- cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
7141 +-
7142 + /*
7143 +- * If the previous cpu is less than the rq's CPU, then it already
7144 +- * passed the end of the mask, and has started from the beginning.
7145 +- * We end if the next CPU is greater or equal to rq's CPU.
7146 ++ * When starting the IPI RT pushing, the rto_cpu is set to -1,
7147 ++ * rt_next_cpu() will simply return the first CPU found in
7148 ++ * the rto_mask.
7149 ++ *
7150 ++ * If rto_next_cpu() is called with rto_cpu is a valid cpu, it
7151 ++ * will return the next CPU found in the rto_mask.
7152 ++ *
7153 ++ * If there are no more CPUs left in the rto_mask, then a check is made
7154 ++ * against rto_loop and rto_loop_next. rto_loop is only updated with
7155 ++ * the rto_lock held, but any CPU may increment the rto_loop_next
7156 ++ * without any locking.
7157 + */
7158 +- if (prev_cpu < rq->cpu) {
7159 +- if (cpu >= rq->cpu)
7160 +- return nr_cpu_ids;
7161 ++ for (;;) {
7162 +
7163 +- } else if (cpu >= nr_cpu_ids) {
7164 +- /*
7165 +- * We passed the end of the mask, start at the beginning.
7166 +- * If the result is greater or equal to the rq's CPU, then
7167 +- * the loop is finished.
7168 +- */
7169 +- cpu = cpumask_first(rq->rd->rto_mask);
7170 +- if (cpu >= rq->cpu)
7171 +- return nr_cpu_ids;
7172 +- }
7173 +- rq->rt.push_cpu = cpu;
7174 ++ /* When rto_cpu is -1 this acts like cpumask_first() */
7175 ++ cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
7176 +
7177 +- /* Return cpu to let the caller know if the loop is finished or not */
7178 +- return cpu;
7179 +-}
7180 ++ rd->rto_cpu = cpu;
7181 +
7182 +-static int find_next_push_cpu(struct rq *rq)
7183 +-{
7184 +- struct rq *next_rq;
7185 +- int cpu;
7186 ++ if (cpu < nr_cpu_ids)
7187 ++ return cpu;
7188 +
7189 +- while (1) {
7190 +- cpu = rto_next_cpu(rq);
7191 +- if (cpu >= nr_cpu_ids)
7192 +- break;
7193 +- next_rq = cpu_rq(cpu);
7194 ++ rd->rto_cpu = -1;
7195 ++
7196 ++ /*
7197 ++ * ACQUIRE ensures we see the @rto_mask changes
7198 ++ * made prior to the @next value observed.
7199 ++ *
7200 ++ * Matches WMB in rt_set_overload().
7201 ++ */
7202 ++ next = atomic_read_acquire(&rd->rto_loop_next);
7203 +
7204 +- /* Make sure the next rq can push to this rq */
7205 +- if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
7206 ++ if (rd->rto_loop == next)
7207 + break;
7208 ++
7209 ++ rd->rto_loop = next;
7210 + }
7211 +
7212 +- return cpu;
7213 ++ return -1;
7214 + }
7215 +
7216 +-#define RT_PUSH_IPI_EXECUTING 1
7217 +-#define RT_PUSH_IPI_RESTART 2
7218 ++static inline bool rto_start_trylock(atomic_t *v)
7219 ++{
7220 ++ return !atomic_cmpxchg_acquire(v, 0, 1);
7221 ++}
7222 +
7223 +-/*
7224 +- * When a high priority task schedules out from a CPU and a lower priority
7225 +- * task is scheduled in, a check is made to see if there's any RT tasks
7226 +- * on other CPUs that are waiting to run because a higher priority RT task
7227 +- * is currently running on its CPU. In this case, the CPU with multiple RT
7228 +- * tasks queued on it (overloaded) needs to be notified that a CPU has opened
7229 +- * up that may be able to run one of its non-running queued RT tasks.
7230 +- *
7231 +- * On large CPU boxes, there's the case that several CPUs could schedule
7232 +- * a lower priority task at the same time, in which case it will look for
7233 +- * any overloaded CPUs that it could pull a task from. To do this, the runqueue
7234 +- * lock must be taken from that overloaded CPU. Having 10s of CPUs all fighting
7235 +- * for a single overloaded CPU's runqueue lock can produce a large latency.
7236 +- * (This has actually been observed on large boxes running cyclictest).
7237 +- * Instead of taking the runqueue lock of the overloaded CPU, each of the
7238 +- * CPUs that scheduled a lower priority task simply sends an IPI to the
7239 +- * overloaded CPU. An IPI is much cheaper than taking an runqueue lock with
7240 +- * lots of contention. The overloaded CPU will look to push its non-running
7241 +- * RT task off, and if it does, it can then ignore the other IPIs coming
7242 +- * in, and just pass those IPIs off to any other overloaded CPU.
7243 +- *
7244 +- * When a CPU schedules a lower priority task, it only sends an IPI to
7245 +- * the "next" CPU that has overloaded RT tasks. This prevents IPI storms,
7246 +- * as having 10 CPUs scheduling lower priority tasks and 10 CPUs with
7247 +- * RT overloaded tasks, would cause 100 IPIs to go out at once.
7248 +- *
7249 +- * The overloaded RT CPU, when receiving an IPI, will try to push off its
7250 +- * overloaded RT tasks and then send an IPI to the next CPU that has
7251 +- * overloaded RT tasks. This stops when all CPUs with overloaded RT tasks
7252 +- * have completed. Just because a CPU may have pushed off its own overloaded
7253 +- * RT task does not mean it should stop sending the IPI around to other
7254 +- * overloaded CPUs. There may be another RT task waiting to run on one of
7255 +- * those CPUs that are of higher priority than the one that was just
7256 +- * pushed.
7257 +- *
7258 +- * An optimization that could possibly be made is to make a CPU array similar
7259 +- * to the cpupri array mask of all running RT tasks, but for the overloaded
7260 +- * case, then the IPI could be sent to only the CPU with the highest priority
7261 +- * RT task waiting, and that CPU could send off further IPIs to the CPU with
7262 +- * the next highest waiting task. Since the overloaded case is much less likely
7263 +- * to happen, the complexity of this implementation may not be worth it.
7264 +- * Instead, just send an IPI around to all overloaded CPUs.
7265 +- *
7266 +- * The rq->rt.push_flags holds the status of the IPI that is going around.
7267 +- * A run queue can only send out a single IPI at a time. The possible flags
7268 +- * for rq->rt.push_flags are:
7269 +- *
7270 +- * (None or zero): No IPI is going around for the current rq
7271 +- * RT_PUSH_IPI_EXECUTING: An IPI for the rq is being passed around
7272 +- * RT_PUSH_IPI_RESTART: The priority of the running task for the rq
7273 +- * has changed, and the IPI should restart
7274 +- * circulating the overloaded CPUs again.
7275 +- *
7276 +- * rq->rt.push_cpu contains the CPU that is being sent the IPI. It is updated
7277 +- * before sending to the next CPU.
7278 +- *
7279 +- * Instead of having all CPUs that schedule a lower priority task send
7280 +- * an IPI to the same "first" CPU in the RT overload mask, they send it
7281 +- * to the next overloaded CPU after their own CPU. This helps distribute
7282 +- * the work when there's more than one overloaded CPU and multiple CPUs
7283 +- * scheduling in lower priority tasks.
7284 +- *
7285 +- * When a rq schedules a lower priority task than what was currently
7286 +- * running, the next CPU with overloaded RT tasks is examined first.
7287 +- * That is, if CPU 1 and 5 are overloaded, and CPU 3 schedules a lower
7288 +- * priority task, it will send an IPI first to CPU 5, then CPU 5 will
7289 +- * send to CPU 1 if it is still overloaded. CPU 1 will clear the
7290 +- * rq->rt.push_flags if RT_PUSH_IPI_RESTART is not set.
7291 +- *
7292 +- * The first CPU to notice IPI_RESTART is set, will clear that flag and then
7293 +- * send an IPI to the next overloaded CPU after the rq->cpu and not the next
7294 +- * CPU after push_cpu. That is, if CPU 1, 4 and 5 are overloaded when CPU 3
7295 +- * schedules a lower priority task, and the IPI_RESTART gets set while the
7296 +- * handling is being done on CPU 5, it will clear the flag and send it back to
7297 +- * CPU 4 instead of CPU 1.
7298 +- *
7299 +- * Note, the above logic can be disabled by turning off the sched_feature
7300 +- * RT_PUSH_IPI. Then the rq lock of the overloaded CPU will simply be
7301 +- * taken by the CPU requesting a pull and the waiting RT task will be pulled
7302 +- * by that CPU. This may be fine for machines with few CPUs.
7303 +- */
7304 +-static void tell_cpu_to_push(struct rq *rq)
7305 ++static inline void rto_start_unlock(atomic_t *v)
7306 + {
7307 +- int cpu;
7308 ++ atomic_set_release(v, 0);
7309 ++}
7310 +
7311 +- if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
7312 +- raw_spin_lock(&rq->rt.push_lock);
7313 +- /* Make sure it's still executing */
7314 +- if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
7315 +- /*
7316 +- * Tell the IPI to restart the loop as things have
7317 +- * changed since it started.
7318 +- */
7319 +- rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
7320 +- raw_spin_unlock(&rq->rt.push_lock);
7321 +- return;
7322 +- }
7323 +- raw_spin_unlock(&rq->rt.push_lock);
7324 +- }
7325 ++static void tell_cpu_to_push(struct rq *rq)
7326 ++{
7327 ++ int cpu = -1;
7328 +
7329 +- /* When here, there's no IPI going around */
7330 ++ /* Keep the loop going if the IPI is currently active */
7331 ++ atomic_inc(&rq->rd->rto_loop_next);
7332 +
7333 +- rq->rt.push_cpu = rq->cpu;
7334 +- cpu = find_next_push_cpu(rq);
7335 +- if (cpu >= nr_cpu_ids)
7336 ++ /* Only one CPU can initiate a loop at a time */
7337 ++ if (!rto_start_trylock(&rq->rd->rto_loop_start))
7338 + return;
7339 +
7340 +- rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
7341 ++ raw_spin_lock(&rq->rd->rto_lock);
7342 ++
7343 ++ /*
7344 ++ * The rto_cpu is updated under the lock; if it holds a valid CPU,
7345 ++ * then the IPI is still running and will continue due to the
7346 ++ * update to loop_next, and nothing needs to be done here.
7347 ++ * Otherwise it is finishing up and an IPI needs to be sent.
7348 ++ */
7349 ++ if (rq->rd->rto_cpu < 0)
7350 ++ cpu = rto_next_cpu(rq);
7351 +
7352 +- irq_work_queue_on(&rq->rt.push_work, cpu);
7353 ++ raw_spin_unlock(&rq->rd->rto_lock);
7354 ++
7355 ++ rto_start_unlock(&rq->rd->rto_loop_start);
7356 ++
7357 ++ if (cpu >= 0)
7358 ++ irq_work_queue_on(&rq->rd->rto_push_work, cpu);
7359 + }
7360 +
7361 + /* Called from hardirq context */
7362 +-static void try_to_push_tasks(void *arg)
7363 ++void rto_push_irq_work_func(struct irq_work *work)
7364 + {
7365 +- struct rt_rq *rt_rq = arg;
7366 +- struct rq *rq, *src_rq;
7367 +- int this_cpu;
7368 ++ struct rq *rq;
7369 + int cpu;
7370 +
7371 +- this_cpu = rt_rq->push_cpu;
7372 ++ rq = this_rq();
7373 +
7374 +- /* Paranoid check */
7375 +- BUG_ON(this_cpu != smp_processor_id());
7376 +-
7377 +- rq = cpu_rq(this_cpu);
7378 +- src_rq = rq_of_rt_rq(rt_rq);
7379 +-
7380 +-again:
7381 ++ /*
7382 ++ * We do not need to grab the lock to check for has_pushable_tasks.
7383 ++ * When it gets updated, a check is made if a push is possible.
7384 ++ */
7385 + if (has_pushable_tasks(rq)) {
7386 + raw_spin_lock(&rq->lock);
7387 +- push_rt_task(rq);
7388 ++ push_rt_tasks(rq);
7389 + raw_spin_unlock(&rq->lock);
7390 + }
7391 +
7392 +- /* Pass the IPI to the next rt overloaded queue */
7393 +- raw_spin_lock(&rt_rq->push_lock);
7394 +- /*
7395 +- * If the source queue changed since the IPI went out,
7396 +- * we need to restart the search from that CPU again.
7397 +- */
7398 +- if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
7399 +- rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
7400 +- rt_rq->push_cpu = src_rq->cpu;
7401 +- }
7402 ++ raw_spin_lock(&rq->rd->rto_lock);
7403 +
7404 +- cpu = find_next_push_cpu(src_rq);
7405 ++ /* Pass the IPI to the next rt overloaded queue */
7406 ++ cpu = rto_next_cpu(rq);
7407 +
7408 +- if (cpu >= nr_cpu_ids)
7409 +- rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
7410 +- raw_spin_unlock(&rt_rq->push_lock);
7411 ++ raw_spin_unlock(&rq->rd->rto_lock);
7412 +
7413 +- if (cpu >= nr_cpu_ids)
7414 ++ if (cpu < 0)
7415 + return;
7416 +
7417 +- /*
7418 +- * It is possible that a restart caused this CPU to be
7419 +- * chosen again. Don't bother with an IPI, just see if we
7420 +- * have more to push.
7421 +- */
7422 +- if (unlikely(cpu == rq->cpu))
7423 +- goto again;
7424 +-
7425 + /* Try the next RT overloaded CPU */
7426 +- irq_work_queue_on(&rt_rq->push_work, cpu);
7427 +-}
7428 +-
7429 +-static void push_irq_work_func(struct irq_work *work)
7430 +-{
7431 +- struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
7432 +-
7433 +- try_to_push_tasks(rt_rq);
7434 ++ irq_work_queue_on(&rq->rd->rto_push_work, cpu);
7435 + }
7436 + #endif /* HAVE_RT_PUSH_IPI */
7437 +
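The comments added above describe the handshake that terminates the IPI chain: any CPU may bump rto_loop_next, and the iterator only stops once a full pass over rto_mask completes with rto_loop already equal to rto_loop_next. Below is a minimal userspace sketch of that rule, not kernel code; the struct, the fixed 8-CPU mask and the values in main() are invented for illustration.

/* Standalone model of the rto_loop / rto_loop_next handshake; not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 8

struct rd_model {
	unsigned int rto_mask;     /* bit i set => CPU i is RT-overloaded  */
	int rto_cpu;               /* last CPU handed an IPI, -1 = idle    */
	int rto_loop;              /* loop generation seen by the iterator */
	atomic_int rto_loop_next;  /* bumped by every new push request     */
};

/* Mirror of rto_next_cpu(): next overloaded CPU, or -1 once a full pass
 * completes without rto_loop_next having moved. */
static int model_next_cpu(struct rd_model *rd)
{
	for (;;) {
		int cpu;

		/* cpumask_next(rd->rto_cpu, rto_mask) analogue */
		for (cpu = rd->rto_cpu + 1; cpu < NR_CPUS; cpu++)
			if (rd->rto_mask & (1u << cpu))
				break;

		rd->rto_cpu = cpu;
		if (cpu < NR_CPUS)
			return cpu;

		rd->rto_cpu = -1;

		int next = atomic_load_explicit(&rd->rto_loop_next,
						memory_order_acquire);
		if (rd->rto_loop == next)
			break;               /* nothing new arrived: stop  */
		rd->rto_loop = next;         /* a push came in: scan again */
	}
	return -1;
}

int main(void)
{
	struct rd_model rd = { .rto_mask = 0x2A, .rto_cpu = -1 }; /* CPUs 1,3,5 */
	int cpu;

	atomic_init(&rd.rto_loop_next, 1);   /* one push request pending */
	while ((cpu = model_next_cpu(&rd)) >= 0)
		printf("IPI would be sent to CPU %d\n", cpu);
	return 0;
}

Run standalone, this prints CPUs 1, 3 and 5 twice: the pending rto_loop_next bump is only consumed at the wrap-around, which is exactly how a push request made while the chain is already circulating is guaranteed a fresh scan.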
7438 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
7439 +index 3b448ba82225..b732e779fe7d 100644
7440 +--- a/kernel/sched/sched.h
7441 ++++ b/kernel/sched/sched.h
7442 +@@ -502,7 +502,7 @@ static inline int rt_bandwidth_enabled(void)
7443 + }
7444 +
7445 + /* RT IPI pull logic requires IRQ_WORK */
7446 +-#ifdef CONFIG_IRQ_WORK
7447 ++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
7448 + # define HAVE_RT_PUSH_IPI
7449 + #endif
7450 +
7451 +@@ -524,12 +524,6 @@ struct rt_rq {
7452 + unsigned long rt_nr_total;
7453 + int overloaded;
7454 + struct plist_head pushable_tasks;
7455 +-#ifdef HAVE_RT_PUSH_IPI
7456 +- int push_flags;
7457 +- int push_cpu;
7458 +- struct irq_work push_work;
7459 +- raw_spinlock_t push_lock;
7460 +-#endif
7461 + #endif /* CONFIG_SMP */
7462 + int rt_queued;
7463 +
7464 +@@ -638,6 +632,19 @@ struct root_domain {
7465 + struct dl_bw dl_bw;
7466 + struct cpudl cpudl;
7467 +
7468 ++#ifdef HAVE_RT_PUSH_IPI
7469 ++ /*
7470 ++ * For IPI pull requests, loop across the rto_mask.
7471 ++ */
7472 ++ struct irq_work rto_push_work;
7473 ++ raw_spinlock_t rto_lock;
7474 ++ /* These are only updated and read within rto_lock */
7475 ++ int rto_loop;
7476 ++ int rto_cpu;
7477 ++ /* These atomics are updated outside of a lock */
7478 ++ atomic_t rto_loop_next;
7479 ++ atomic_t rto_loop_start;
7480 ++#endif
7481 + /*
7482 + * The "RT overload" flag: it gets set if a CPU has more than
7483 + * one runnable RT task.
7484 +@@ -655,6 +662,9 @@ extern void init_defrootdomain(void);
7485 + extern int sched_init_domains(const struct cpumask *cpu_map);
7486 + extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
7487 +
7488 ++#ifdef HAVE_RT_PUSH_IPI
7489 ++extern void rto_push_irq_work_func(struct irq_work *work);
7490 ++#endif
7491 + #endif /* CONFIG_SMP */
7492 +
7493 + /*
7494 +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
7495 +index 6798276d29af..093f2ceba2e2 100644
7496 +--- a/kernel/sched/topology.c
7497 ++++ b/kernel/sched/topology.c
7498 +@@ -269,6 +269,12 @@ static int init_rootdomain(struct root_domain *rd)
7499 + if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
7500 + goto free_dlo_mask;
7501 +
7502 ++#ifdef HAVE_RT_PUSH_IPI
7503 ++ rd->rto_cpu = -1;
7504 ++ raw_spin_lock_init(&rd->rto_lock);
7505 ++ init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
7506 ++#endif
7507 ++
7508 + init_dl_bw(&rd->dl_bw);
7509 + if (cpudl_init(&rd->cpudl) != 0)
7510 + goto free_rto_mask;
7511 +diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
7512 +index e24388a863a7..468fb7cd1221 100644
7513 +--- a/lib/mpi/mpi-pow.c
7514 ++++ b/lib/mpi/mpi-pow.c
7515 +@@ -26,6 +26,7 @@
7516 + * however I decided to publish this code under the plain GPL.
7517 + */
7518 +
7519 ++#include <linux/sched.h>
7520 + #include <linux/string.h>
7521 + #include "mpi-internal.h"
7522 + #include "longlong.h"
7523 +@@ -256,6 +257,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
7524 + }
7525 + e <<= 1;
7526 + c--;
7527 ++ cond_resched();
7528 + }
7529 +
7530 + i--;
7531 +diff --git a/mm/z3fold.c b/mm/z3fold.c
7532 +index b2ba2ba585f3..39e19125d6a0 100644
7533 +--- a/mm/z3fold.c
7534 ++++ b/mm/z3fold.c
7535 +@@ -404,8 +404,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
7536 + WARN_ON(z3fold_page_trylock(zhdr));
7537 + else
7538 + z3fold_page_lock(zhdr);
7539 +- if (test_bit(PAGE_STALE, &page->private) ||
7540 +- !test_and_clear_bit(NEEDS_COMPACTING, &page->private)) {
7541 ++ if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
7542 + z3fold_page_unlock(zhdr);
7543 + return;
7544 + }
7545 +@@ -413,6 +412,11 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
7546 + list_del_init(&zhdr->buddy);
7547 + spin_unlock(&pool->lock);
7548 +
7549 ++ if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
7550 ++ atomic64_dec(&pool->pages_nr);
7551 ++ return;
7552 ++ }
7553 ++
7554 + z3fold_compact_page(zhdr);
7555 + unbuddied = get_cpu_ptr(pool->unbuddied);
7556 + fchunks = num_free_chunks(zhdr);
7557 +@@ -753,9 +757,11 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
7558 + list_del_init(&zhdr->buddy);
7559 + spin_unlock(&pool->lock);
7560 + zhdr->cpu = -1;
7561 ++ kref_get(&zhdr->refcount);
7562 + do_compact_page(zhdr, true);
7563 + return;
7564 + }
7565 ++ kref_get(&zhdr->refcount);
7566 + queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
7567 + z3fold_page_unlock(zhdr);
7568 + }
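The z3fold hunk above takes an extra kref_get() before handing the header to the compaction path and drops it again inside do_compact_page(), so the page cannot be released while the deferred work is still pending. A minimal userspace sketch of that get-before-queue / put-in-worker pattern follows; the object, the simulated worker call and all names are invented here. The svc_rdma backchannel hunk further down applies the same idea with get_page()/put_page().

/* Userspace sketch of the get-before-queue / put-in-worker refcount pattern. */
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	/* payload ... */
};

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {      /* kref_put() analogue */
		printf("releasing object\n");
		free(o);
	}
}

/* Deferred work: runs later, possibly after the submitter has already
 * dropped its own reference.  The extra reference taken before queueing
 * keeps the object alive until this runs. */
static void compact_work(struct obj *o)
{
	printf("compacting object\n");
	obj_put(o);                    /* drop the queue's reference */
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refcount = 1;               /* submitter's reference                */
	o->refcount++;                 /* kref_get() before queueing the work  */
	obj_put(o);                    /* submitter drops its own reference    */
	compact_work(o);               /* worker runs later and frees the obj  */
	return 0;
}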
7569 +diff --git a/net/9p/client.c b/net/9p/client.c
7570 +index 4674235b0d9b..b433aff5ff13 100644
7571 +--- a/net/9p/client.c
7572 ++++ b/net/9p/client.c
7573 +@@ -82,7 +82,7 @@ int p9_show_client_options(struct seq_file *m, struct p9_client *clnt)
7574 + {
7575 + if (clnt->msize != 8192)
7576 + seq_printf(m, ",msize=%u", clnt->msize);
7577 +- seq_printf(m, "trans=%s", clnt->trans_mod->name);
7578 ++ seq_printf(m, ",trans=%s", clnt->trans_mod->name);
7579 +
7580 + switch (clnt->proto_version) {
7581 + case p9_proto_legacy:
7582 +@@ -773,8 +773,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
7583 + }
7584 + again:
7585 + /* Wait for the response */
7586 +- err = wait_event_interruptible(*req->wq,
7587 +- req->status >= REQ_STATUS_RCVD);
7588 ++ err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
7589 +
7590 + /*
7591 + * Make sure our req is coherent with regard to updates in other
7592 +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
7593 +index 903a190319b9..985046ae4231 100644
7594 +--- a/net/9p/trans_fd.c
7595 ++++ b/net/9p/trans_fd.c
7596 +@@ -724,12 +724,12 @@ static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt)
7597 + {
7598 + if (clnt->trans_mod == &p9_tcp_trans) {
7599 + if (clnt->trans_opts.tcp.port != P9_PORT)
7600 +- seq_printf(m, "port=%u", clnt->trans_opts.tcp.port);
7601 ++ seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port);
7602 + } else if (clnt->trans_mod == &p9_fd_trans) {
7603 + if (clnt->trans_opts.fd.rfd != ~0)
7604 +- seq_printf(m, "rfd=%u", clnt->trans_opts.fd.rfd);
7605 ++ seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd);
7606 + if (clnt->trans_opts.fd.wfd != ~0)
7607 +- seq_printf(m, "wfd=%u", clnt->trans_opts.fd.wfd);
7608 ++ seq_printf(m, ",wfd=%u", clnt->trans_opts.fd.wfd);
7609 + }
7610 + return 0;
7611 + }
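The two show-options fixes above (and the matching one in client.c before them) add the leading comma each option needs when it is appended to an options line that may already contain earlier entries; without it, adjacent options run together. A tiny standalone sketch of the convention, with invented values:

/* Why each appended option carries its own leading comma. Values invented. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128] = "";
	unsigned int msize = 16384;   /* non-default, so it gets printed */
	const char *trans = "fd";
	unsigned int rfd = 3, wfd = 4;

	/* Each option starts with ',' exactly as the fixed seq_printf()s do. */
	snprintf(line + strlen(line), sizeof(line) - strlen(line), ",msize=%u", msize);
	snprintf(line + strlen(line), sizeof(line) - strlen(line), ",trans=%s", trans);
	snprintf(line + strlen(line), sizeof(line) - strlen(line), ",rfd=%u", rfd);
	snprintf(line + strlen(line), sizeof(line) - strlen(line), ",wfd=%u", wfd);

	/* -> mount options:,msize=16384,trans=fd,rfd=3,wfd=4 */
	printf("mount options:%s\n", line);
	return 0;
}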
7612 +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
7613 +index f24b25c25106..f3a4efcf1456 100644
7614 +--- a/net/9p/trans_virtio.c
7615 ++++ b/net/9p/trans_virtio.c
7616 +@@ -286,8 +286,8 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
7617 + if (err == -ENOSPC) {
7618 + chan->ring_bufs_avail = 0;
7619 + spin_unlock_irqrestore(&chan->lock, flags);
7620 +- err = wait_event_interruptible(*chan->vc_wq,
7621 +- chan->ring_bufs_avail);
7622 ++ err = wait_event_killable(*chan->vc_wq,
7623 ++ chan->ring_bufs_avail);
7624 + if (err == -ERESTARTSYS)
7625 + return err;
7626 +
7627 +@@ -327,7 +327,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
7628 + * Other zc request to finish here
7629 + */
7630 + if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
7631 +- err = wait_event_interruptible(vp_wq,
7632 ++ err = wait_event_killable(vp_wq,
7633 + (atomic_read(&vp_pinned) < chan->p9_max_pages));
7634 + if (err == -ERESTARTSYS)
7635 + return err;
7636 +@@ -471,8 +471,8 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
7637 + if (err == -ENOSPC) {
7638 + chan->ring_bufs_avail = 0;
7639 + spin_unlock_irqrestore(&chan->lock, flags);
7640 +- err = wait_event_interruptible(*chan->vc_wq,
7641 +- chan->ring_bufs_avail);
7642 ++ err = wait_event_killable(*chan->vc_wq,
7643 ++ chan->ring_bufs_avail);
7644 + if (err == -ERESTARTSYS)
7645 + goto err_out;
7646 +
7647 +@@ -489,8 +489,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
7648 + virtqueue_kick(chan->vq);
7649 + spin_unlock_irqrestore(&chan->lock, flags);
7650 + p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
7651 +- err = wait_event_interruptible(*req->wq,
7652 +- req->status >= REQ_STATUS_RCVD);
7653 ++ err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
7654 + /*
7655 + * Non kernel buffers are pinned, unpin them
7656 + */
7657 +diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
7658 +index 6ad3e043c617..325c56043007 100644
7659 +--- a/net/9p/trans_xen.c
7660 ++++ b/net/9p/trans_xen.c
7661 +@@ -156,8 +156,8 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
7662 + ring = &priv->rings[num];
7663 +
7664 + again:
7665 +- while (wait_event_interruptible(ring->wq,
7666 +- p9_xen_write_todo(ring, size)) != 0)
7667 ++ while (wait_event_killable(ring->wq,
7668 ++ p9_xen_write_todo(ring, size)) != 0)
7669 + ;
7670 +
7671 + spin_lock_irqsave(&ring->lock, flags);
7672 +diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
7673 +index 489610ac1cdd..bf9d079cbafd 100644
7674 +--- a/net/ceph/crypto.c
7675 ++++ b/net/ceph/crypto.c
7676 +@@ -37,7 +37,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
7677 + return -ENOTSUPP;
7678 + }
7679 +
7680 +- WARN_ON(!key->len);
7681 ++ if (!key->len)
7682 ++ return -EINVAL;
7683 ++
7684 + key->key = kmemdup(buf, key->len, GFP_NOIO);
7685 + if (!key->key) {
7686 + ret = -ENOMEM;
7687 +diff --git a/net/nfc/core.c b/net/nfc/core.c
7688 +index 5cf33df888c3..c699d64a0753 100644
7689 +--- a/net/nfc/core.c
7690 ++++ b/net/nfc/core.c
7691 +@@ -1106,7 +1106,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
7692 + err_free_dev:
7693 + kfree(dev);
7694 +
7695 +- return ERR_PTR(rc);
7696 ++ return NULL;
7697 + }
7698 + EXPORT_SYMBOL(nfc_allocate_device);
7699 +
7700 +diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
7701 +index 992594b7cc6b..af7893501e40 100644
7702 +--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
7703 ++++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
7704 +@@ -133,6 +133,10 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
7705 + if (ret)
7706 + goto out_err;
7707 +
7708 ++ /* Bump page refcnt so Send completion doesn't release
7709 ++ * the rq_buffer before all retransmits are complete.
7710 ++ */
7711 ++ get_page(virt_to_page(rqst->rq_buffer));
7712 + ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0);
7713 + if (ret)
7714 + goto out_unmap;
7715 +@@ -165,7 +169,6 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
7716 + return -EINVAL;
7717 + }
7718 +
7719 +- /* svc_rdma_sendto releases this page */
7720 + page = alloc_page(RPCRDMA_DEF_GFP);
7721 + if (!page)
7722 + return -ENOMEM;
7723 +@@ -184,6 +187,7 @@ xprt_rdma_bc_free(struct rpc_task *task)
7724 + {
7725 + struct rpc_rqst *rqst = task->tk_rqstp;
7726 +
7727 ++ put_page(virt_to_page(rqst->rq_buffer));
7728 + kfree(rqst->rq_rbuffer);
7729 + }
7730 +
7731 +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
7732 +index a93a4235a332..10e7ef7a8804 100644
7733 +--- a/sound/core/pcm_lib.c
7734 ++++ b/sound/core/pcm_lib.c
7735 +@@ -248,8 +248,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream,
7736 + runtime->rate);
7737 + *audio_tstamp = ns_to_timespec(audio_nsecs);
7738 + }
7739 +- runtime->status->audio_tstamp = *audio_tstamp;
7740 +- runtime->status->tstamp = *curr_tstamp;
7741 ++ if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
7742 ++ runtime->status->audio_tstamp = *audio_tstamp;
7743 ++ runtime->status->tstamp = *curr_tstamp;
7744 ++ }
7745 +
7746 + /*
7747 + * re-take a driver timestamp to let apps detect if the reference tstamp
7748 +diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
7749 +index 59127b6ef39e..e00f7e399e46 100644
7750 +--- a/sound/core/timer_compat.c
7751 ++++ b/sound/core/timer_compat.c
7752 +@@ -66,11 +66,11 @@ static int snd_timer_user_info_compat(struct file *file,
7753 + struct snd_timer *t;
7754 +
7755 + tu = file->private_data;
7756 +- if (snd_BUG_ON(!tu->timeri))
7757 +- return -ENXIO;
7758 ++ if (!tu->timeri)
7759 ++ return -EBADFD;
7760 + t = tu->timeri->timer;
7761 +- if (snd_BUG_ON(!t))
7762 +- return -ENXIO;
7763 ++ if (!t)
7764 ++ return -EBADFD;
7765 + memset(&info, 0, sizeof(info));
7766 + info.card = t->card ? t->card->number : -1;
7767 + if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
7768 +@@ -99,8 +99,8 @@ static int snd_timer_user_status_compat(struct file *file,
7769 + struct snd_timer_status32 status;
7770 +
7771 + tu = file->private_data;
7772 +- if (snd_BUG_ON(!tu->timeri))
7773 +- return -ENXIO;
7774 ++ if (!tu->timeri)
7775 ++ return -EBADFD;
7776 + memset(&status, 0, sizeof(status));
7777 + status.tstamp.tv_sec = tu->tstamp.tv_sec;
7778 + status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
7779 +diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
7780 +index e43af18d4383..8632301489fa 100644
7781 +--- a/sound/core/vmaster.c
7782 ++++ b/sound/core/vmaster.c
7783 +@@ -495,7 +495,9 @@ EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster);
7784 + * Returns 0 if successful, or a negative error code.
7785 + */
7786 + int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
7787 +- int (*func)(struct snd_kcontrol *, void *),
7788 ++ int (*func)(struct snd_kcontrol *vslave,
7789 ++ struct snd_kcontrol *slave,
7790 ++ void *arg),
7791 + void *arg)
7792 + {
7793 + struct link_master *master;
7794 +@@ -507,7 +509,7 @@ int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
7795 + if (err < 0)
7796 + return err;
7797 + list_for_each_entry(slave, &master->slaves, list) {
7798 +- err = func(&slave->slave, arg);
7799 ++ err = func(slave->kctl, &slave->slave, arg);
7800 + if (err < 0)
7801 + return err;
7802 + }
7803 +diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
7804 +index 81acc20c2535..f21633cd9b38 100644
7805 +--- a/sound/hda/hdmi_chmap.c
7806 ++++ b/sound/hda/hdmi_chmap.c
7807 +@@ -746,7 +746,7 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
7808 + memset(pcm_chmap, 0, sizeof(pcm_chmap));
7809 + chmap->ops.get_chmap(chmap->hdac, pcm_idx, pcm_chmap);
7810 +
7811 +- for (i = 0; i < sizeof(chmap); i++)
7812 ++ for (i = 0; i < ARRAY_SIZE(pcm_chmap); i++)
7813 + ucontrol->value.integer.value[i] = pcm_chmap[i];
7814 +
7815 + return 0;
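The hdmi_chmap fix above swaps sizeof(chmap), which measures the local chmap variable rather than the channel-map array being copied, for ARRAY_SIZE(pcm_chmap). A small standalone reminder of the pitfall; the 16-element array and the helper are invented:

/* sizeof-a-pointer pitfall, illustrated outside the kernel. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void show(const unsigned char *map)   /* array decays to a pointer */
{
	/* sizeof(map) is sizeof(unsigned char *), e.g. 8 on 64-bit; it says
	 * nothing about how many elements the caller's array holds. */
	printf("sizeof(pointer param) = %zu\n", sizeof(map));
}

int main(void)
{
	unsigned char pcm_chmap[16] = { 0 };

	show(pcm_chmap);
	/* ARRAY_SIZE() only works where the real array is in scope: */
	printf("ARRAY_SIZE(pcm_chmap) = %zu\n", ARRAY_SIZE(pcm_chmap));
	return 0;
}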
7816 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
7817 +index a0989d231fd0..417abbb1f72c 100644
7818 +--- a/sound/pci/hda/hda_codec.c
7819 ++++ b/sound/pci/hda/hda_codec.c
7820 +@@ -1823,7 +1823,9 @@ struct slave_init_arg {
7821 + };
7822 +
7823 + /* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */
7824 +-static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
7825 ++static int init_slave_0dB(struct snd_kcontrol *slave,
7826 ++ struct snd_kcontrol *kctl,
7827 ++ void *_arg)
7828 + {
7829 + struct slave_init_arg *arg = _arg;
7830 + int _tlv[4];
7831 +@@ -1860,7 +1862,7 @@ static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
7832 + arg->step = step;
7833 + val = -tlv[2] / step;
7834 + if (val > 0) {
7835 +- put_kctl_with_value(kctl, val);
7836 ++ put_kctl_with_value(slave, val);
7837 + return val;
7838 + }
7839 +
7840 +@@ -1868,7 +1870,9 @@ static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
7841 + }
7842 +
7843 + /* unmute the slave via snd_ctl_apply_vmaster_slaves() */
7844 +-static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg)
7845 ++static int init_slave_unmute(struct snd_kcontrol *slave,
7846 ++ struct snd_kcontrol *kctl,
7847 ++ void *_arg)
7848 + {
7849 + return put_kctl_with_value(slave, 1);
7850 + }
7851 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
7852 +index f958d8d54d15..c71dcacea807 100644
7853 +--- a/sound/pci/hda/hda_intel.c
7854 ++++ b/sound/pci/hda/hda_intel.c
7855 +@@ -2463,6 +2463,9 @@ static const struct pci_device_id azx_ids[] = {
7856 + /* AMD Hudson */
7857 + { PCI_DEVICE(0x1022, 0x780d),
7858 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
7859 ++ /* AMD Raven */
7860 ++ { PCI_DEVICE(0x1022, 0x15e3),
7861 ++ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
7862 + /* ATI HDMI */
7863 + { PCI_DEVICE(0x1002, 0x0002),
7864 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
7865 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7866 +index dce0682c5001..7c39114d124f 100644
7867 +--- a/sound/pci/hda/patch_realtek.c
7868 ++++ b/sound/pci/hda/patch_realtek.c
7869 +@@ -341,6 +341,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
7870 + case 0x10ec0299:
7871 + alc_update_coef_idx(codec, 0x10, 1<<9, 0);
7872 + break;
7873 ++ case 0x10ec0275:
7874 ++ alc_update_coef_idx(codec, 0xe, 0, 1<<0);
7875 ++ break;
7876 + case 0x10ec0293:
7877 + alc_update_coef_idx(codec, 0xa, 1<<13, 0);
7878 + break;
7879 +@@ -6863,7 +6866,7 @@ static int patch_alc269(struct hda_codec *codec)
7880 + case 0x10ec0703:
7881 + spec->codec_variant = ALC269_TYPE_ALC700;
7882 + spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
7883 +- alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
7884 ++ alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
7885 + break;
7886 +
7887 + }
7888 +diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
7889 +index abfb710df7cb..7a312168f864 100644
7890 +--- a/sound/soc/sunxi/sun8i-codec.c
7891 ++++ b/sound/soc/sunxi/sun8i-codec.c
7892 +@@ -73,6 +73,7 @@
7893 + #define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK GENMASK(11, 8)
7894 + #define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK GENMASK(5, 4)
7895 + #define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK GENMASK(8, 6)
7896 ++#define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK GENMASK(12, 9)
7897 +
7898 + struct sun8i_codec {
7899 + struct device *dev;
7900 +@@ -170,11 +171,11 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7901 +
7902 + /* clock masters */
7903 + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
7904 +- case SND_SOC_DAIFMT_CBS_CFS: /* DAI Slave */
7905 +- value = 0x0; /* Codec Master */
7906 ++ case SND_SOC_DAIFMT_CBS_CFS: /* Codec slave, DAI master */
7907 ++ value = 0x1;
7908 + break;
7909 +- case SND_SOC_DAIFMT_CBM_CFM: /* DAI Master */
7910 +- value = 0x1; /* Codec Slave */
7911 ++ case SND_SOC_DAIFMT_CBM_CFM: /* Codec Master, DAI slave */
7912 ++ value = 0x0;
7913 + break;
7914 + default:
7915 + return -EINVAL;
7916 +@@ -199,7 +200,7 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7917 + value << SUN8I_AIF1CLK_CTRL_AIF1_BCLK_INV);
7918 + regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
7919 + BIT(SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV),
7920 +- value << SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV);
7921 ++ !value << SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV);
7922 +
7923 + /* DAI format */
7924 + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
7925 +@@ -226,12 +227,57 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7926 + return 0;
7927 + }
7928 +
7929 ++struct sun8i_codec_clk_div {
7930 ++ u8 div;
7931 ++ u8 val;
7932 ++};
7933 ++
7934 ++static const struct sun8i_codec_clk_div sun8i_codec_bclk_div[] = {
7935 ++ { .div = 1, .val = 0 },
7936 ++ { .div = 2, .val = 1 },
7937 ++ { .div = 4, .val = 2 },
7938 ++ { .div = 6, .val = 3 },
7939 ++ { .div = 8, .val = 4 },
7940 ++ { .div = 12, .val = 5 },
7941 ++ { .div = 16, .val = 6 },
7942 ++ { .div = 24, .val = 7 },
7943 ++ { .div = 32, .val = 8 },
7944 ++ { .div = 48, .val = 9 },
7945 ++ { .div = 64, .val = 10 },
7946 ++ { .div = 96, .val = 11 },
7947 ++ { .div = 128, .val = 12 },
7948 ++ { .div = 192, .val = 13 },
7949 ++};
7950 ++
7951 ++static u8 sun8i_codec_get_bclk_div(struct sun8i_codec *scodec,
7952 ++ unsigned int rate,
7953 ++ unsigned int word_size)
7954 ++{
7955 ++ unsigned long clk_rate = clk_get_rate(scodec->clk_module);
7956 ++ unsigned int div = clk_rate / rate / word_size / 2;
7957 ++ unsigned int best_val = 0, best_diff = ~0;
7958 ++ int i;
7959 ++
7960 ++ for (i = 0; i < ARRAY_SIZE(sun8i_codec_bclk_div); i++) {
7961 ++ const struct sun8i_codec_clk_div *bdiv = &sun8i_codec_bclk_div[i];
7962 ++ unsigned int diff = abs(bdiv->div - div);
7963 ++
7964 ++ if (diff < best_diff) {
7965 ++ best_diff = diff;
7966 ++ best_val = bdiv->val;
7967 ++ }
7968 ++ }
7969 ++
7970 ++ return best_val;
7971 ++}
7972 ++
7973 + static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
7974 + struct snd_pcm_hw_params *params,
7975 + struct snd_soc_dai *dai)
7976 + {
7977 + struct sun8i_codec *scodec = snd_soc_codec_get_drvdata(dai->codec);
7978 + int sample_rate;
7979 ++ u8 bclk_div;
7980 +
7981 + /*
7982 + * The CPU DAI handles only a sample of 16 bits. Configure the
7983 +@@ -241,6 +287,11 @@ static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
7984 + SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK,
7985 + SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_16);
7986 +
7987 ++ bclk_div = sun8i_codec_get_bclk_div(scodec, params_rate(params), 16);
7988 ++ regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
7989 ++ SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK,
7990 ++ bclk_div << SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV);
7991 ++
7992 + regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
7993 + SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK,
7994 + SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_16);
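The new sun8i_codec_get_bclk_div() above computes the wanted divider as module-clock / rate / word-size / 2 and then picks the table entry whose divider is numerically closest. The same lookup as a standalone program; the 24.576 MHz module clock and 48 kHz rate are example numbers only, not taken from the patch:

/* Standalone version of the closest-divider lookup; example numbers only. */
#include <stdio.h>
#include <stdlib.h>

struct clk_div { unsigned char div; unsigned char val; };

static const struct clk_div bclk_div[] = {
	{   1, 0 }, {   2, 1 }, {  4, 2 }, {  6, 3 }, {  8,  4 }, { 12,  5 },
	{  16, 6 }, {  24, 7 }, { 32, 8 }, { 48, 9 }, { 64, 10 }, { 96, 11 },
	{ 128, 12 }, { 192, 13 },
};

static unsigned char get_bclk_div(unsigned long clk_rate,
				  unsigned int rate, unsigned int word_size)
{
	unsigned int div = clk_rate / rate / word_size / 2;
	unsigned int best_val = 0, best_diff = ~0u;
	size_t i;

	for (i = 0; i < sizeof(bclk_div) / sizeof(bclk_div[0]); i++) {
		unsigned int diff = abs((int)bclk_div[i].div - (int)div);

		if (diff < best_diff) {
			best_diff = diff;
			best_val = bclk_div[i].val;
		}
	}
	return best_val;
}

int main(void)
{
	/* 24576000 / 48000 / 16 / 2 = 16 -> table value 6 */
	printf("register value: %u\n", get_bclk_div(24576000, 48000, 16));
	return 0;
}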
7995 +diff --git a/sound/usb/clock.c b/sound/usb/clock.c
7996 +index 26dd5f20f149..eb3396ffba4c 100644
7997 +--- a/sound/usb/clock.c
7998 ++++ b/sound/usb/clock.c
7999 +@@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor *
8000 + while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
8001 + ctrl_iface->extralen,
8002 + cs, UAC2_CLOCK_SOURCE))) {
8003 +- if (cs->bClockID == clock_id)
8004 ++ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
8005 + return cs;
8006 + }
8007 +
8008 +@@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor *
8009 + while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
8010 + ctrl_iface->extralen,
8011 + cs, UAC2_CLOCK_SELECTOR))) {
8012 +- if (cs->bClockID == clock_id)
8013 ++ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) {
8014 ++ if (cs->bLength < 5 + cs->bNrInPins)
8015 ++ return NULL;
8016 + return cs;
8017 ++ }
8018 + }
8019 +
8020 + return NULL;
8021 +@@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor *
8022 + while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
8023 + ctrl_iface->extralen,
8024 + cs, UAC2_CLOCK_MULTIPLIER))) {
8025 +- if (cs->bClockID == clock_id)
8026 ++ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
8027 + return cs;
8028 + }
8029 +
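The clock.c checks above (and the feature/selector-unit checks in mixer.c that follow) refuse class-specific descriptors whose bLength cannot cover the fields about to be read, including the 5 + bNrInPins minimum for variable-length selectors. A userspace sketch of that style of bounds check over a raw byte buffer; the simplified struct and the sample bytes are invented:

/* Never trust a descriptor length shorter than the fields you will read. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct clock_selector_desc {
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint8_t bDescriptorSubtype;
	uint8_t bClockID;
	uint8_t bNrInPins;
	uint8_t baCSourceID[];       /* bNrInPins entries follow */
};

static const struct clock_selector_desc *
validate_selector(const uint8_t *buf, size_t len)
{
	const struct clock_selector_desc *cs = (const void *)buf;

	if (len < 5 || cs->bLength < 5)          /* fixed part present?      */
		return NULL;
	if (cs->bLength > len)                   /* claims more than we have */
		return NULL;
	if (cs->bLength < 5 + cs->bNrInPins)     /* variable part truncated  */
		return NULL;
	return cs;
}

int main(void)
{
	/* bLength=5 but bNrInPins=2: the pin array is missing -> rejected */
	const uint8_t bogus[] = { 0x05, 0x24, 0x0b, 0x01, 0x02 };
	const uint8_t good[]  = { 0x07, 0x24, 0x0b, 0x01, 0x02, 0x03, 0x04 };

	printf("bogus: %s\n", validate_selector(bogus, sizeof(bogus)) ? "ok" : "rejected");
	printf("good:  %s\n", validate_selector(good, sizeof(good)) ? "ok" : "rejected");
	return 0;
}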
8030 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
8031 +index 91bc8f18791e..2b835cca41b1 100644
8032 +--- a/sound/usb/mixer.c
8033 ++++ b/sound/usb/mixer.c
8034 +@@ -1469,6 +1469,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
8035 + __u8 *bmaControls;
8036 +
8037 + if (state->mixer->protocol == UAC_VERSION_1) {
8038 ++ if (hdr->bLength < 7) {
8039 ++ usb_audio_err(state->chip,
8040 ++ "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
8041 ++ unitid);
8042 ++ return -EINVAL;
8043 ++ }
8044 + csize = hdr->bControlSize;
8045 + if (!csize) {
8046 + usb_audio_dbg(state->chip,
8047 +@@ -1486,6 +1492,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
8048 + }
8049 + } else {
8050 + struct uac2_feature_unit_descriptor *ftr = _ftr;
8051 ++ if (hdr->bLength < 6) {
8052 ++ usb_audio_err(state->chip,
8053 ++ "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
8054 ++ unitid);
8055 ++ return -EINVAL;
8056 ++ }
8057 + csize = 4;
8058 + channels = (hdr->bLength - 6) / 4 - 1;
8059 + bmaControls = ftr->bmaControls;
8060 +@@ -2086,7 +2098,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
8061 + const struct usbmix_name_map *map;
8062 + char **namelist;
8063 +
8064 +- if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
8065 ++ if (desc->bLength < 5 || !desc->bNrInPins ||
8066 ++ desc->bLength < 5 + desc->bNrInPins) {
8067 + usb_audio_err(state->chip,
8068 + "invalid SELECTOR UNIT descriptor %d\n", unitid);
8069 + return -EINVAL;