From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.2 commit in: /
Date: Thu, 29 Aug 2019 14:27:54
Message-Id: 1567088832.26fab6b6424908ae47ad7e488177505589c3bc52.mpagano@gentoo
1 commit: 26fab6b6424908ae47ad7e488177505589c3bc52
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Aug 29 14:27:12 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Aug 29 14:27:12 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=26fab6b6
7
8 Linux patch 5.2.11
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1010_linux-5.2.11.patch | 6480 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 6484 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 2056b84..374124c 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -83,6 +83,10 @@ Patch: 1009_linux-5.2.10.patch
21 From: https://www.kernel.org
22 Desc: Linux 5.2.10
23
24 +Patch: 1010_linux-5.2.11.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 5.2.11
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1010_linux-5.2.11.patch b/1010_linux-5.2.11.patch
33 new file mode 100644
34 index 0000000..4df5b0e
35 --- /dev/null
36 +++ b/1010_linux-5.2.11.patch
37 @@ -0,0 +1,6480 @@
38 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
39 +index 0d40729d080f..cb17fde4164f 100644
40 +--- a/Documentation/admin-guide/kernel-parameters.txt
41 ++++ b/Documentation/admin-guide/kernel-parameters.txt
42 +@@ -4055,6 +4055,13 @@
43 + Run specified binary instead of /init from the ramdisk,
44 + used for early userspace startup. See initrd.
45 +
46 ++ rdrand= [X86]
47 ++ force - Override the decision by the kernel to hide the
48 ++ advertisement of RDRAND support (this affects
49 ++ certain AMD processors because of buggy BIOS
50 ++ support, specifically around the suspend/resume
51 ++ path).
52 ++
53 + rdt= [HW,X86,RDT]
54 + Turn on/off individual RDT features. List is:
55 + cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
56 +diff --git a/Documentation/devicetree/bindings/riscv/sifive.yaml b/Documentation/devicetree/bindings/riscv/sifive.yaml
57 +index 9d17dc2f3f84..3ab532713dc1 100644
58 +--- a/Documentation/devicetree/bindings/riscv/sifive.yaml
59 ++++ b/Documentation/devicetree/bindings/riscv/sifive.yaml
60 +@@ -19,7 +19,7 @@ properties:
61 + compatible:
62 + items:
63 + - enum:
64 +- - sifive,freedom-unleashed-a00
65 ++ - sifive,hifive-unleashed-a00
66 + - const: sifive,fu540-c000
67 + - const: sifive,fu540
68 + ...
69 +diff --git a/Makefile b/Makefile
70 +index 35fee16d5006..a3b26dcfc5c8 100644
71 +--- a/Makefile
72 ++++ b/Makefile
73 +@@ -1,7 +1,7 @@
74 + # SPDX-License-Identifier: GPL-2.0
75 + VERSION = 5
76 + PATCHLEVEL = 2
77 +-SUBLEVEL = 10
78 ++SUBLEVEL = 11
79 + EXTRAVERSION =
80 + NAME = Bobtail Squid
81 +
82 +diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
83 +index d2806bcff8bb..07745ee022a1 100644
84 +--- a/arch/arm/kvm/coproc.c
85 ++++ b/arch/arm/kvm/coproc.c
86 +@@ -651,13 +651,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
87 + }
88 +
89 + static void reset_coproc_regs(struct kvm_vcpu *vcpu,
90 +- const struct coproc_reg *table, size_t num)
91 ++ const struct coproc_reg *table, size_t num,
92 ++ unsigned long *bmap)
93 + {
94 + unsigned long i;
95 +
96 + for (i = 0; i < num; i++)
97 +- if (table[i].reset)
98 ++ if (table[i].reset) {
99 ++ int reg = table[i].reg;
100 ++
101 + table[i].reset(vcpu, &table[i]);
102 ++ if (reg > 0 && reg < NR_CP15_REGS) {
103 ++ set_bit(reg, bmap);
104 ++ if (table[i].is_64bit)
105 ++ set_bit(reg + 1, bmap);
106 ++ }
107 ++ }
108 + }
109 +
110 + static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
111 +@@ -1432,17 +1441,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
112 + {
113 + size_t num;
114 + const struct coproc_reg *table;
115 +-
116 +- /* Catch someone adding a register without putting in reset entry. */
117 +- memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
118 ++ DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };
119 +
120 + /* Generic chip reset first (so target could override). */
121 +- reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
122 ++ reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
123 +
124 + table = get_target_table(vcpu->arch.target, &num);
125 +- reset_coproc_regs(vcpu, table, num);
126 ++ reset_coproc_regs(vcpu, table, num, bmap);
127 +
128 + for (num = 1; num < NR_CP15_REGS; num++)
129 +- WARN(vcpu_cp15(vcpu, num) == 0x42424242,
130 ++ WARN(!test_bit(num, bmap),
131 + "Didn't reset vcpu_cp15(vcpu, %zi)", num);
132 + }
133 +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
134 +index ce933f296049..5b7085ca213d 100644
135 +--- a/arch/arm64/kvm/sys_regs.c
136 ++++ b/arch/arm64/kvm/sys_regs.c
137 +@@ -632,7 +632,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
138 + */
139 + val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
140 + | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
141 +- __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
142 ++ __vcpu_sys_reg(vcpu, r->reg) = val;
143 + }
144 +
145 + static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
146 +@@ -981,13 +981,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
147 + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
148 + #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
149 + { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
150 +- trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
151 ++ trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
152 + { SYS_DESC(SYS_DBGBCRn_EL1(n)), \
153 +- trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
154 ++ trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
155 + { SYS_DESC(SYS_DBGWVRn_EL1(n)), \
156 +- trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
157 ++ trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
158 + { SYS_DESC(SYS_DBGWCRn_EL1(n)), \
159 +- trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
160 ++ trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
161 +
162 + /* Macro to expand the PMEVCNTRn_EL0 register */
163 + #define PMU_PMEVCNTR_EL0(n) \
164 +@@ -1540,7 +1540,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
165 + { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
166 + { SYS_DESC(SYS_CTR_EL0), access_ctr },
167 +
168 +- { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
169 ++ { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
170 + { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
171 + { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
172 + { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
173 +@@ -2254,13 +2254,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
174 + }
175 +
176 + static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
177 +- const struct sys_reg_desc *table, size_t num)
178 ++ const struct sys_reg_desc *table, size_t num,
179 ++ unsigned long *bmap)
180 + {
181 + unsigned long i;
182 +
183 + for (i = 0; i < num; i++)
184 +- if (table[i].reset)
185 ++ if (table[i].reset) {
186 ++ int reg = table[i].reg;
187 ++
188 + table[i].reset(vcpu, &table[i]);
189 ++ if (reg > 0 && reg < NR_SYS_REGS)
190 ++ set_bit(reg, bmap);
191 ++ }
192 + }
193 +
194 + /**
195 +@@ -2774,18 +2780,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
196 + {
197 + size_t num;
198 + const struct sys_reg_desc *table;
199 +-
200 +- /* Catch someone adding a register without putting in reset entry. */
201 +- memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
202 ++ DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
203 +
204 + /* Generic chip reset first (so target could override). */
205 +- reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
206 ++ reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
207 +
208 + table = get_target_table(vcpu->arch.target, true, &num);
209 +- reset_sys_reg_descs(vcpu, table, num);
210 ++ reset_sys_reg_descs(vcpu, table, num, bmap);
211 +
212 + for (num = 1; num < NR_SYS_REGS; num++) {
213 +- if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
214 ++ if (WARN(!test_bit(num, bmap),
215 + "Didn't reset __vcpu_sys_reg(%zi)\n", num))
216 + break;
217 + }
218 +diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
219 +index e0dd66881da6..f777e44653d5 100644
220 +--- a/arch/mips/kernel/cacheinfo.c
221 ++++ b/arch/mips/kernel/cacheinfo.c
222 +@@ -69,6 +69,8 @@ static int __populate_cache_leaves(unsigned int cpu)
223 + if (c->tcache.waysize)
224 + populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
225 +
226 ++ this_cpu_ci->cpu_map_populated = true;
227 ++
228 + return 0;
229 + }
230 +
231 +diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
232 +index 5f209f111e59..df7ddd246eaa 100644
233 +--- a/arch/mips/kernel/i8253.c
234 ++++ b/arch/mips/kernel/i8253.c
235 +@@ -32,7 +32,8 @@ void __init setup_pit_timer(void)
236 +
237 + static int __init init_pit_clocksource(void)
238 + {
239 +- if (num_possible_cpus() > 1) /* PIT does not scale! */
240 ++ if (num_possible_cpus() > 1 || /* PIT does not scale! */
241 ++ !clockevent_state_periodic(&i8253_clockevent))
242 + return 0;
243 +
244 + return clocksource_i8253_init();
245 +diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
246 +index 1ad4089dd110..d4d096f80f4b 100644
247 +--- a/arch/powerpc/kernel/misc_64.S
248 ++++ b/arch/powerpc/kernel/misc_64.S
249 +@@ -130,7 +130,7 @@ _GLOBAL_TOC(flush_dcache_range)
250 + subf r8,r6,r4 /* compute length */
251 + add r8,r8,r5 /* ensure we get enough */
252 + lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of dcache block size */
253 +- srw. r8,r8,r9 /* compute line count */
254 ++ srd. r8,r8,r9 /* compute line count */
255 + beqlr /* nothing to do? */
256 + mtctr r8
257 + 0: dcbst 0,r6
258 +@@ -148,7 +148,7 @@ _GLOBAL(flush_inval_dcache_range)
259 + subf r8,r6,r4 /* compute length */
260 + add r8,r8,r5 /* ensure we get enough */
261 + lwz r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */
262 +- srw. r8,r8,r9 /* compute line count */
263 ++ srd. r8,r8,r9 /* compute line count */
264 + beqlr /* nothing to do? */
265 + sync
266 + isync
267 +diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
268 +index 3c49bde8aa5e..b8aa6a9f937b 100644
269 +--- a/arch/s390/boot/ipl_parm.c
270 ++++ b/arch/s390/boot/ipl_parm.c
271 +@@ -48,9 +48,7 @@ void store_ipl_parmblock(void)
272 + {
273 + int rc;
274 +
275 +- uv_set_shared(__pa(&ipl_block));
276 + rc = __diag308(DIAG308_STORE, &ipl_block);
277 +- uv_remove_shared(__pa(&ipl_block));
278 + if (rc == DIAG308_RC_OK &&
279 + ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
280 + ipl_block_valid = 1;
281 +diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
282 +index 2c0a515428d6..6837affc19e8 100644
283 +--- a/arch/s390/kernel/ipl.c
284 ++++ b/arch/s390/kernel/ipl.c
285 +@@ -31,7 +31,6 @@
286 + #include <asm/os_info.h>
287 + #include <asm/sections.h>
288 + #include <asm/boot_data.h>
289 +-#include <asm/uv.h>
290 + #include "entry.h"
291 +
292 + #define IPL_PARM_BLOCK_VERSION 0
293 +@@ -892,21 +891,15 @@ static void __reipl_run(void *unused)
294 + {
295 + switch (reipl_type) {
296 + case IPL_TYPE_CCW:
297 +- uv_set_shared(__pa(reipl_block_ccw));
298 + diag308(DIAG308_SET, reipl_block_ccw);
299 +- uv_remove_shared(__pa(reipl_block_ccw));
300 + diag308(DIAG308_LOAD_CLEAR, NULL);
301 + break;
302 + case IPL_TYPE_FCP:
303 +- uv_set_shared(__pa(reipl_block_fcp));
304 + diag308(DIAG308_SET, reipl_block_fcp);
305 +- uv_remove_shared(__pa(reipl_block_fcp));
306 + diag308(DIAG308_LOAD_CLEAR, NULL);
307 + break;
308 + case IPL_TYPE_NSS:
309 +- uv_set_shared(__pa(reipl_block_nss));
310 + diag308(DIAG308_SET, reipl_block_nss);
311 +- uv_remove_shared(__pa(reipl_block_nss));
312 + diag308(DIAG308_LOAD_CLEAR, NULL);
313 + break;
314 + case IPL_TYPE_UNKNOWN:
315 +@@ -1176,9 +1169,7 @@ static struct kset *dump_kset;
316 +
317 + static void diag308_dump(void *dump_block)
318 + {
319 +- uv_set_shared(__pa(dump_block));
320 + diag308(DIAG308_SET, dump_block);
321 +- uv_remove_shared(__pa(dump_block));
322 + while (1) {
323 + if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302)
324 + break;
325 +diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
326 +index 49d55327de0b..7e0eb4020917 100644
327 +--- a/arch/s390/kernel/vmlinux.lds.S
328 ++++ b/arch/s390/kernel/vmlinux.lds.S
329 +@@ -32,10 +32,9 @@ PHDRS {
330 + SECTIONS
331 + {
332 + . = 0x100000;
333 +- _stext = .; /* Start of text section */
334 + .text : {
335 +- /* Text and read-only data */
336 +- _text = .;
337 ++ _stext = .; /* Start of text section */
338 ++ _text = .; /* Text and read-only data */
339 + HEAD_TEXT
340 + TEXT_TEXT
341 + SCHED_TEXT
342 +@@ -47,11 +46,10 @@ SECTIONS
343 + *(.text.*_indirect_*)
344 + *(.fixup)
345 + *(.gnu.warning)
346 ++ . = ALIGN(PAGE_SIZE);
347 ++ _etext = .; /* End of text section */
348 + } :text = 0x0700
349 +
350 +- . = ALIGN(PAGE_SIZE);
351 +- _etext = .; /* End of text section */
352 +-
353 + NOTES :text :note
354 +
355 + .dummy : { *(.dummy) } :data
356 +diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
357 +index 3b93ba0b5d8d..5d67b81c704a 100644
358 +--- a/arch/s390/mm/dump_pagetables.c
359 ++++ b/arch/s390/mm/dump_pagetables.c
360 +@@ -161,9 +161,9 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
361 + }
362 + #endif
363 +
364 +- for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
365 ++ pmd = pmd_offset(pud, addr);
366 ++ for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++, pmd++) {
367 + st->current_address = addr;
368 +- pmd = pmd_offset(pud, addr);
369 + if (!pmd_none(*pmd)) {
370 + if (pmd_large(*pmd)) {
371 + prot = pmd_val(*pmd) &
372 +@@ -192,9 +192,9 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
373 + }
374 + #endif
375 +
376 +- for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
377 ++ pud = pud_offset(p4d, addr);
378 ++ for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++, pud++) {
379 + st->current_address = addr;
380 +- pud = pud_offset(p4d, addr);
381 + if (!pud_none(*pud))
382 + if (pud_large(*pud)) {
383 + prot = pud_val(*pud) &
384 +@@ -222,9 +222,9 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
385 + }
386 + #endif
387 +
388 +- for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
389 ++ p4d = p4d_offset(pgd, addr);
390 ++ for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++, p4d++) {
391 + st->current_address = addr;
392 +- p4d = p4d_offset(pgd, addr);
393 + if (!p4d_none(*p4d))
394 + walk_pud_level(m, st, p4d, addr);
395 + else
396 +diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
397 +index f6f6ef436599..b16a6c7da6eb 100644
398 +--- a/arch/x86/include/asm/bootparam_utils.h
399 ++++ b/arch/x86/include/asm/bootparam_utils.h
400 +@@ -18,6 +18,20 @@
401 + * Note: efi_info is commonly left uninitialized, but that field has a
402 + * private magic, so it is better to leave it unchanged.
403 + */
404 ++
405 ++#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
406 ++
407 ++#define BOOT_PARAM_PRESERVE(struct_member) \
408 ++ { \
409 ++ .start = offsetof(struct boot_params, struct_member), \
410 ++ .len = sizeof_mbr(struct boot_params, struct_member), \
411 ++ }
412 ++
413 ++struct boot_params_to_save {
414 ++ unsigned int start;
415 ++ unsigned int len;
416 ++};
417 ++
418 + static void sanitize_boot_params(struct boot_params *boot_params)
419 + {
420 + /*
421 +@@ -35,21 +49,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
422 + * problems again.
423 + */
424 + if (boot_params->sentinel) {
425 +- /* fields in boot_params are left uninitialized, clear them */
426 +- boot_params->acpi_rsdp_addr = 0;
427 +- memset(&boot_params->ext_ramdisk_image, 0,
428 +- (char *)&boot_params->efi_info -
429 +- (char *)&boot_params->ext_ramdisk_image);
430 +- memset(&boot_params->kbd_status, 0,
431 +- (char *)&boot_params->hdr -
432 +- (char *)&boot_params->kbd_status);
433 +- memset(&boot_params->_pad7[0], 0,
434 +- (char *)&boot_params->edd_mbr_sig_buffer[0] -
435 +- (char *)&boot_params->_pad7[0]);
436 +- memset(&boot_params->_pad8[0], 0,
437 +- (char *)&boot_params->eddbuf[0] -
438 +- (char *)&boot_params->_pad8[0]);
439 +- memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
440 ++ static struct boot_params scratch;
441 ++ char *bp_base = (char *)boot_params;
442 ++ char *save_base = (char *)&scratch;
443 ++ int i;
444 ++
445 ++ const struct boot_params_to_save to_save[] = {
446 ++ BOOT_PARAM_PRESERVE(screen_info),
447 ++ BOOT_PARAM_PRESERVE(apm_bios_info),
448 ++ BOOT_PARAM_PRESERVE(tboot_addr),
449 ++ BOOT_PARAM_PRESERVE(ist_info),
450 ++ BOOT_PARAM_PRESERVE(hd0_info),
451 ++ BOOT_PARAM_PRESERVE(hd1_info),
452 ++ BOOT_PARAM_PRESERVE(sys_desc_table),
453 ++ BOOT_PARAM_PRESERVE(olpc_ofw_header),
454 ++ BOOT_PARAM_PRESERVE(efi_info),
455 ++ BOOT_PARAM_PRESERVE(alt_mem_k),
456 ++ BOOT_PARAM_PRESERVE(scratch),
457 ++ BOOT_PARAM_PRESERVE(e820_entries),
458 ++ BOOT_PARAM_PRESERVE(eddbuf_entries),
459 ++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
460 ++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
461 ++ BOOT_PARAM_PRESERVE(hdr),
462 ++ BOOT_PARAM_PRESERVE(e820_table),
463 ++ BOOT_PARAM_PRESERVE(eddbuf),
464 ++ };
465 ++
466 ++ memset(&scratch, 0, sizeof(scratch));
467 ++
468 ++ for (i = 0; i < ARRAY_SIZE(to_save); i++) {
469 ++ memcpy(save_base + to_save[i].start,
470 ++ bp_base + to_save[i].start, to_save[i].len);
471 ++ }
472 ++
473 ++ memcpy(boot_params, save_base, sizeof(*boot_params));
474 + }
475 + }
476 +
477 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
478 +index 979ef971cc78..41e41ec5d646 100644
479 +--- a/arch/x86/include/asm/msr-index.h
480 ++++ b/arch/x86/include/asm/msr-index.h
481 +@@ -372,6 +372,7 @@
482 + #define MSR_AMD64_PATCH_LEVEL 0x0000008b
483 + #define MSR_AMD64_TSC_RATIO 0xc0000104
484 + #define MSR_AMD64_NB_CFG 0xc001001f
485 ++#define MSR_AMD64_CPUID_FN_1 0xc0011004
486 + #define MSR_AMD64_PATCH_LOADER 0xc0010020
487 + #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
488 + #define MSR_AMD64_OSVW_STATUS 0xc0010141
489 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
490 +index 109f974f9835..80bc209c0708 100644
491 +--- a/arch/x86/include/asm/nospec-branch.h
492 ++++ b/arch/x86/include/asm/nospec-branch.h
493 +@@ -192,7 +192,7 @@
494 + " lfence;\n" \
495 + " jmp 902b;\n" \
496 + " .align 16\n" \
497 +- "903: addl $4, %%esp;\n" \
498 ++ "903: lea 4(%%esp), %%esp;\n" \
499 + " pushl %[thunk_target];\n" \
500 + " ret;\n" \
501 + " .align 16\n" \
502 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
503 +index 530cf1fd68a2..2f067b443326 100644
504 +--- a/arch/x86/kernel/apic/apic.c
505 ++++ b/arch/x86/kernel/apic/apic.c
506 +@@ -722,7 +722,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
507 + static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
508 +
509 + /*
510 +- * Temporary interrupt handler.
511 ++ * Temporary interrupt handler and polled calibration function.
512 + */
513 + static void __init lapic_cal_handler(struct clock_event_device *dev)
514 + {
515 +@@ -824,7 +824,8 @@ static int __init lapic_init_clockevent(void)
516 + static int __init calibrate_APIC_clock(void)
517 + {
518 + struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
519 +- void (*real_handler)(struct clock_event_device *dev);
520 ++ u64 tsc_perj = 0, tsc_start = 0;
521 ++ unsigned long jif_start;
522 + unsigned long deltaj;
523 + long delta, deltatsc;
524 + int pm_referenced = 0;
525 +@@ -851,28 +852,64 @@ static int __init calibrate_APIC_clock(void)
526 + apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
527 + "calibrating APIC timer ...\n");
528 +
529 ++ /*
530 ++ * There are platforms w/o global clockevent devices. Instead of
531 ++ * making the calibration conditional on that, use a polling based
532 ++ * approach everywhere.
533 ++ */
534 + local_irq_disable();
535 +
536 +- /* Replace the global interrupt handler */
537 +- real_handler = global_clock_event->event_handler;
538 +- global_clock_event->event_handler = lapic_cal_handler;
539 +-
540 + /*
541 + * Setup the APIC counter to maximum. There is no way the lapic
542 + * can underflow in the 100ms detection time frame
543 + */
544 + __setup_APIC_LVTT(0xffffffff, 0, 0);
545 +
546 +- /* Let the interrupts run */
547 ++ /*
548 ++ * Methods to terminate the calibration loop:
549 ++ * 1) Global clockevent if available (jiffies)
550 ++ * 2) TSC if available and frequency is known
551 ++ */
552 ++ jif_start = READ_ONCE(jiffies);
553 ++
554 ++ if (tsc_khz) {
555 ++ tsc_start = rdtsc();
556 ++ tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
557 ++ }
558 ++
559 ++ /*
560 ++ * Enable interrupts so the tick can fire, if a global
561 ++ * clockevent device is available
562 ++ */
563 + local_irq_enable();
564 +
565 +- while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
566 +- cpu_relax();
567 ++ while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
568 ++ /* Wait for a tick to elapse */
569 ++ while (1) {
570 ++ if (tsc_khz) {
571 ++ u64 tsc_now = rdtsc();
572 ++ if ((tsc_now - tsc_start) >= tsc_perj) {
573 ++ tsc_start += tsc_perj;
574 ++ break;
575 ++ }
576 ++ } else {
577 ++ unsigned long jif_now = READ_ONCE(jiffies);
578 +
579 +- local_irq_disable();
580 ++ if (time_after(jif_now, jif_start)) {
581 ++ jif_start = jif_now;
582 ++ break;
583 ++ }
584 ++ }
585 ++ cpu_relax();
586 ++ }
587 +
588 +- /* Restore the real event handler */
589 +- global_clock_event->event_handler = real_handler;
590 ++ /* Invoke the calibration routine */
591 ++ local_irq_disable();
592 ++ lapic_cal_handler(NULL);
593 ++ local_irq_enable();
594 ++ }
595 ++
596 ++ local_irq_disable();
597 +
598 + /* Build delta t1-t2 as apic timer counts down */
599 + delta = lapic_cal_t1 - lapic_cal_t2;
600 +@@ -916,10 +953,11 @@ static int __init calibrate_APIC_clock(void)
601 + levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
602 +
603 + /*
604 +- * PM timer calibration failed or not turned on
605 +- * so lets try APIC timer based calibration
606 ++ * PM timer calibration failed or not turned on so lets try APIC
607 ++ * timer based calibration, if a global clockevent device is
608 ++ * available.
609 + */
610 +- if (!pm_referenced) {
611 ++ if (!pm_referenced && global_clock_event) {
612 + apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
613 +
614 + /*
615 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
616 +index 8d4e50428b68..68c363c341bf 100644
617 +--- a/arch/x86/kernel/cpu/amd.c
618 ++++ b/arch/x86/kernel/cpu/amd.c
619 +@@ -804,6 +804,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
620 + msr_set_bit(MSR_AMD64_DE_CFG, 31);
621 + }
622 +
623 ++static bool rdrand_force;
624 ++
625 ++static int __init rdrand_cmdline(char *str)
626 ++{
627 ++ if (!str)
628 ++ return -EINVAL;
629 ++
630 ++ if (!strcmp(str, "force"))
631 ++ rdrand_force = true;
632 ++ else
633 ++ return -EINVAL;
634 ++
635 ++ return 0;
636 ++}
637 ++early_param("rdrand", rdrand_cmdline);
638 ++
639 ++static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
640 ++{
641 ++ /*
642 ++ * Saving of the MSR used to hide the RDRAND support during
643 ++ * suspend/resume is done by arch/x86/power/cpu.c, which is
644 ++ * dependent on CONFIG_PM_SLEEP.
645 ++ */
646 ++ if (!IS_ENABLED(CONFIG_PM_SLEEP))
647 ++ return;
648 ++
649 ++ /*
650 ++ * The nordrand option can clear X86_FEATURE_RDRAND, so check for
651 ++ * RDRAND support using the CPUID function directly.
652 ++ */
653 ++ if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
654 ++ return;
655 ++
656 ++ msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
657 ++
658 ++ /*
659 ++ * Verify that the CPUID change has occurred in case the kernel is
660 ++ * running virtualized and the hypervisor doesn't support the MSR.
661 ++ */
662 ++ if (cpuid_ecx(1) & BIT(30)) {
663 ++ pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
664 ++ return;
665 ++ }
666 ++
667 ++ clear_cpu_cap(c, X86_FEATURE_RDRAND);
668 ++ pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
669 ++}
670 ++
671 ++static void init_amd_jg(struct cpuinfo_x86 *c)
672 ++{
673 ++ /*
674 ++ * Some BIOS implementations do not restore proper RDRAND support
675 ++ * across suspend and resume. Check on whether to hide the RDRAND
676 ++ * instruction support via CPUID.
677 ++ */
678 ++ clear_rdrand_cpuid_bit(c);
679 ++}
680 ++
681 + static void init_amd_bd(struct cpuinfo_x86 *c)
682 + {
683 + u64 value;
684 +@@ -818,6 +876,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
685 + wrmsrl_safe(MSR_F15H_IC_CFG, value);
686 + }
687 + }
688 ++
689 ++ /*
690 ++ * Some BIOS implementations do not restore proper RDRAND support
691 ++ * across suspend and resume. Check on whether to hide the RDRAND
692 ++ * instruction support via CPUID.
693 ++ */
694 ++ clear_rdrand_cpuid_bit(c);
695 + }
696 +
697 + static void init_amd_zn(struct cpuinfo_x86 *c)
698 +@@ -860,6 +925,7 @@ static void init_amd(struct cpuinfo_x86 *c)
699 + case 0x10: init_amd_gh(c); break;
700 + case 0x12: init_amd_ln(c); break;
701 + case 0x15: init_amd_bd(c); break;
702 ++ case 0x16: init_amd_jg(c); break;
703 + case 0x17: init_amd_zn(c); break;
704 + }
705 +
706 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
707 +index 8d95c81b2c82..01f04db1fa61 100644
708 +--- a/arch/x86/kvm/mmu.c
709 ++++ b/arch/x86/kvm/mmu.c
710 +@@ -5649,38 +5649,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
711 + struct kvm_memory_slot *slot,
712 + struct kvm_page_track_notifier_node *node)
713 + {
714 +- struct kvm_mmu_page *sp;
715 +- LIST_HEAD(invalid_list);
716 +- unsigned long i;
717 +- bool flush;
718 +- gfn_t gfn;
719 +-
720 +- spin_lock(&kvm->mmu_lock);
721 +-
722 +- if (list_empty(&kvm->arch.active_mmu_pages))
723 +- goto out_unlock;
724 +-
725 +- flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
726 +-
727 +- for (i = 0; i < slot->npages; i++) {
728 +- gfn = slot->base_gfn + i;
729 +-
730 +- for_each_valid_sp(kvm, sp, gfn) {
731 +- if (sp->gfn != gfn)
732 +- continue;
733 +-
734 +- kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
735 +- }
736 +- if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
737 +- kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
738 +- flush = false;
739 +- cond_resched_lock(&kvm->mmu_lock);
740 +- }
741 +- }
742 +- kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
743 +-
744 +-out_unlock:
745 +- spin_unlock(&kvm->mmu_lock);
746 ++ kvm_mmu_zap_all(kvm);
747 + }
748 +
749 + void kvm_mmu_init_vm(struct kvm *kvm)
750 +diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
751 +index 04967cdce5d1..7ad68917a51e 100644
752 +--- a/arch/x86/lib/cpu.c
753 ++++ b/arch/x86/lib/cpu.c
754 +@@ -1,6 +1,7 @@
755 + // SPDX-License-Identifier: GPL-2.0-only
756 + #include <linux/types.h>
757 + #include <linux/export.h>
758 ++#include <asm/cpu.h>
759 +
760 + unsigned int x86_family(unsigned int sig)
761 + {
762 +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
763 +index 24b079e94bc2..c9ef6a7a4a1a 100644
764 +--- a/arch/x86/power/cpu.c
765 ++++ b/arch/x86/power/cpu.c
766 +@@ -12,6 +12,7 @@
767 + #include <linux/smp.h>
768 + #include <linux/perf_event.h>
769 + #include <linux/tboot.h>
770 ++#include <linux/dmi.h>
771 +
772 + #include <asm/pgtable.h>
773 + #include <asm/proto.h>
774 +@@ -23,7 +24,7 @@
775 + #include <asm/debugreg.h>
776 + #include <asm/cpu.h>
777 + #include <asm/mmu_context.h>
778 +-#include <linux/dmi.h>
779 ++#include <asm/cpu_device_id.h>
780 +
781 + #ifdef CONFIG_X86_32
782 + __visible unsigned long saved_context_ebx;
783 +@@ -397,15 +398,14 @@ static int __init bsp_pm_check_init(void)
784 +
785 + core_initcall(bsp_pm_check_init);
786 +
787 +-static int msr_init_context(const u32 *msr_id, const int total_num)
788 ++static int msr_build_context(const u32 *msr_id, const int num)
789 + {
790 +- int i = 0;
791 ++ struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
792 + struct saved_msr *msr_array;
793 ++ int total_num;
794 ++ int i, j;
795 +
796 +- if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
797 +- pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
798 +- return -EINVAL;
799 +- }
800 ++ total_num = saved_msrs->num + num;
801 +
802 + msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
803 + if (!msr_array) {
804 +@@ -413,19 +413,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
805 + return -ENOMEM;
806 + }
807 +
808 +- for (i = 0; i < total_num; i++) {
809 +- msr_array[i].info.msr_no = msr_id[i];
810 ++ if (saved_msrs->array) {
811 ++ /*
812 ++ * Multiple callbacks can invoke this function, so copy any
813 ++ * MSR save requests from previous invocations.
814 ++ */
815 ++ memcpy(msr_array, saved_msrs->array,
816 ++ sizeof(struct saved_msr) * saved_msrs->num);
817 ++
818 ++ kfree(saved_msrs->array);
819 ++ }
820 ++
821 ++ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
822 ++ msr_array[i].info.msr_no = msr_id[j];
823 + msr_array[i].valid = false;
824 + msr_array[i].info.reg.q = 0;
825 + }
826 +- saved_context.saved_msrs.num = total_num;
827 +- saved_context.saved_msrs.array = msr_array;
828 ++ saved_msrs->num = total_num;
829 ++ saved_msrs->array = msr_array;
830 +
831 + return 0;
832 + }
833 +
834 + /*
835 +- * The following section is a quirk framework for problematic BIOSen:
836 ++ * The following sections are a quirk framework for problematic BIOSen:
837 + * Sometimes MSRs are modified by the BIOSen after suspended to
838 + * RAM, this might cause unexpected behavior after wakeup.
839 + * Thus we save/restore these specified MSRs across suspend/resume
840 +@@ -440,7 +451,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
841 + u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
842 +
843 + pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
844 +- return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
845 ++ return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
846 + }
847 +
848 + static const struct dmi_system_id msr_save_dmi_table[] = {
849 +@@ -455,9 +466,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
850 + {}
851 + };
852 +
853 ++static int msr_save_cpuid_features(const struct x86_cpu_id *c)
854 ++{
855 ++ u32 cpuid_msr_id[] = {
856 ++ MSR_AMD64_CPUID_FN_1,
857 ++ };
858 ++
859 ++ pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
860 ++ c->family);
861 ++
862 ++ return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
863 ++}
864 ++
865 ++static const struct x86_cpu_id msr_save_cpu_table[] = {
866 ++ {
867 ++ .vendor = X86_VENDOR_AMD,
868 ++ .family = 0x15,
869 ++ .model = X86_MODEL_ANY,
870 ++ .feature = X86_FEATURE_ANY,
871 ++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
872 ++ },
873 ++ {
874 ++ .vendor = X86_VENDOR_AMD,
875 ++ .family = 0x16,
876 ++ .model = X86_MODEL_ANY,
877 ++ .feature = X86_FEATURE_ANY,
878 ++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
879 ++ },
880 ++ {}
881 ++};
882 ++
883 ++typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
884 ++static int pm_cpu_check(const struct x86_cpu_id *c)
885 ++{
886 ++ const struct x86_cpu_id *m;
887 ++ int ret = 0;
888 ++
889 ++ m = x86_match_cpu(msr_save_cpu_table);
890 ++ if (m) {
891 ++ pm_cpu_match_t fn;
892 ++
893 ++ fn = (pm_cpu_match_t)m->driver_data;
894 ++ ret = fn(m);
895 ++ }
896 ++
897 ++ return ret;
898 ++}
899 ++
900 + static int pm_check_save_msr(void)
901 + {
902 + dmi_check_system(msr_save_dmi_table);
903 ++ pm_cpu_check(msr_save_cpu_table);
904 ++
905 + return 0;
906 + }
907 +
908 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
909 +index 404e776aa36d..b528710364e9 100644
910 +--- a/block/bfq-iosched.c
911 ++++ b/block/bfq-iosched.c
912 +@@ -2085,9 +2085,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
913 + blk_rq_pos(container_of(rb_prev(&req->rb_node),
914 + struct request, rb_node))) {
915 + struct bfq_queue *bfqq = bfq_init_rq(req);
916 +- struct bfq_data *bfqd = bfqq->bfqd;
917 ++ struct bfq_data *bfqd;
918 + struct request *prev, *next_rq;
919 +
920 ++ if (!bfqq)
921 ++ return;
922 ++
923 ++ bfqd = bfqq->bfqd;
924 ++
925 + /* Reposition request in its sort_list */
926 + elv_rb_del(&bfqq->sort_list, req);
927 + elv_rb_add(&bfqq->sort_list, req);
928 +@@ -2134,6 +2139,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
929 + struct bfq_queue *bfqq = bfq_init_rq(rq),
930 + *next_bfqq = bfq_init_rq(next);
931 +
932 ++ if (!bfqq)
933 ++ return;
934 ++
935 + /*
936 + * If next and rq belong to the same bfq_queue and next is older
937 + * than rq, then reposition rq in the fifo (by substituting next
938 +@@ -5061,12 +5069,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
939 +
940 + spin_lock_irq(&bfqd->lock);
941 + bfqq = bfq_init_rq(rq);
942 +- if (at_head || blk_rq_is_passthrough(rq)) {
943 ++ if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
944 + if (at_head)
945 + list_add(&rq->queuelist, &bfqd->dispatch);
946 + else
947 + list_add_tail(&rq->queuelist, &bfqd->dispatch);
948 +- } else { /* bfqq is assumed to be non null here */
949 ++ } else {
950 + idle_timer_disabled = __bfq_insert_request(bfqd, rq);
951 + /*
952 + * Update bfqq, because, if a queue merge has occurred
953 +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
954 +index 391ac0503dc0..76d0f9de767b 100644
955 +--- a/drivers/ata/libata-scsi.c
956 ++++ b/drivers/ata/libata-scsi.c
957 +@@ -1786,6 +1786,21 @@ nothing_to_do:
958 + return 1;
959 + }
960 +
961 ++static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
962 ++{
963 ++ struct request *rq = scmd->request;
964 ++ u32 req_blocks;
965 ++
966 ++ if (!blk_rq_is_passthrough(rq))
967 ++ return true;
968 ++
969 ++ req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
970 ++ if (n_blocks > req_blocks)
971 ++ return false;
972 ++
973 ++ return true;
974 ++}
975 ++
976 + /**
977 + * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
978 + * @qc: Storage for translated ATA taskfile
979 +@@ -1830,6 +1845,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
980 + scsi_10_lba_len(cdb, &block, &n_block);
981 + if (cdb[1] & (1 << 3))
982 + tf_flags |= ATA_TFLAG_FUA;
983 ++ if (!ata_check_nblocks(scmd, n_block))
984 ++ goto invalid_fld;
985 + break;
986 + case READ_6:
987 + case WRITE_6:
988 +@@ -1844,6 +1861,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
989 + */
990 + if (!n_block)
991 + n_block = 256;
992 ++ if (!ata_check_nblocks(scmd, n_block))
993 ++ goto invalid_fld;
994 + break;
995 + case READ_16:
996 + case WRITE_16:
997 +@@ -1854,6 +1873,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
998 + scsi_16_lba_len(cdb, &block, &n_block);
999 + if (cdb[1] & (1 << 3))
1000 + tf_flags |= ATA_TFLAG_FUA;
1001 ++ if (!ata_check_nblocks(scmd, n_block))
1002 ++ goto invalid_fld;
1003 + break;
1004 + default:
1005 + DPRINTK("no-byte command\n");
1006 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
1007 +index 10aa27882142..4f115adb4ee8 100644
1008 +--- a/drivers/ata/libata-sff.c
1009 ++++ b/drivers/ata/libata-sff.c
1010 +@@ -658,6 +658,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
1011 + unsigned int offset;
1012 + unsigned char *buf;
1013 +
1014 ++ if (!qc->cursg) {
1015 ++ qc->curbytes = qc->nbytes;
1016 ++ return;
1017 ++ }
1018 + if (qc->curbytes == qc->nbytes - qc->sect_size)
1019 + ap->hsm_task_state = HSM_ST_LAST;
1020 +
1021 +@@ -683,6 +687,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
1022 +
1023 + if (qc->cursg_ofs == qc->cursg->length) {
1024 + qc->cursg = sg_next(qc->cursg);
1025 ++ if (!qc->cursg)
1026 ++ ap->hsm_task_state = HSM_ST_LAST;
1027 + qc->cursg_ofs = 0;
1028 + }
1029 + }
1030 +diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
1031 +index 7c37f2ff09e4..deae466395de 100644
1032 +--- a/drivers/ata/pata_rb532_cf.c
1033 ++++ b/drivers/ata/pata_rb532_cf.c
1034 +@@ -158,7 +158,6 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
1035 + static int rb532_pata_driver_remove(struct platform_device *pdev)
1036 + {
1037 + struct ata_host *ah = platform_get_drvdata(pdev);
1038 +- struct rb532_cf_info *info = ah->private_data;
1039 +
1040 + ata_host_detach(ah);
1041 +
1042 +diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
1043 +index 5b49f1b33ebe..e2ea2356da06 100644
1044 +--- a/drivers/block/aoe/aoedev.c
1045 ++++ b/drivers/block/aoe/aoedev.c
1046 +@@ -323,10 +323,14 @@ flush(const char __user *str, size_t cnt, int exiting)
1047 + }
1048 +
1049 + flush_scheduled_work();
1050 +- /* pass one: without sleeping, do aoedev_downdev */
1051 ++ /* pass one: do aoedev_downdev, which might sleep */
1052 ++restart1:
1053 + spin_lock_irqsave(&devlist_lock, flags);
1054 + for (d = devlist; d; d = d->next) {
1055 + spin_lock(&d->lock);
1056 ++ if (d->flags & DEVFL_TKILL)
1057 ++ goto cont;
1058 ++
1059 + if (exiting) {
1060 + /* unconditionally take each device down */
1061 + } else if (specified) {
1062 +@@ -338,8 +342,11 @@ flush(const char __user *str, size_t cnt, int exiting)
1063 + || d->ref)
1064 + goto cont;
1065 +
1066 ++ spin_unlock(&d->lock);
1067 ++ spin_unlock_irqrestore(&devlist_lock, flags);
1068 + aoedev_downdev(d);
1069 + d->flags |= DEVFL_TKILL;
1070 ++ goto restart1;
1071 + cont:
1072 + spin_unlock(&d->lock);
1073 + }
1074 +@@ -348,7 +355,7 @@ cont:
1075 + /* pass two: call freedev, which might sleep,
1076 + * for aoedevs marked with DEVFL_TKILL
1077 + */
1078 +-restart:
1079 ++restart2:
1080 + spin_lock_irqsave(&devlist_lock, flags);
1081 + for (d = devlist; d; d = d->next) {
1082 + spin_lock(&d->lock);
1083 +@@ -357,7 +364,7 @@ restart:
1084 + spin_unlock(&d->lock);
1085 + spin_unlock_irqrestore(&devlist_lock, flags);
1086 + freedev(d);
1087 +- goto restart;
1088 ++ goto restart2;
1089 + }
1090 + spin_unlock(&d->lock);
1091 + }
1092 +diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
1093 +index 5c50e723ecae..1a191eeeebba 100644
1094 +--- a/drivers/clk/socfpga/clk-periph-s10.c
1095 ++++ b/drivers/clk/socfpga/clk-periph-s10.c
1096 +@@ -38,7 +38,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
1097 + if (socfpgaclk->fixed_div) {
1098 + div = socfpgaclk->fixed_div;
1099 + } else {
1100 +- if (!socfpgaclk->bypass_reg)
1101 ++ if (socfpgaclk->hw.reg)
1102 + div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
1103 + }
1104 +
1105 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1106 +index 4f333d6f2e23..7f9f75201138 100644
1107 +--- a/drivers/gpio/gpiolib.c
1108 ++++ b/drivers/gpio/gpiolib.c
1109 +@@ -1091,9 +1091,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1110 + if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
1111 + lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
1112 + if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
1113 +- lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
1114 ++ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
1115 ++ GPIOLINE_FLAG_IS_OUT);
1116 + if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
1117 +- lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
1118 ++ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
1119 ++ GPIOLINE_FLAG_IS_OUT);
1120 +
1121 + if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
1122 + return -EFAULT;
1123 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1124 +index 02955e6e9dd9..c21ef99cc590 100644
1125 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1126 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1127 +@@ -1317,6 +1317,39 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
1128 + return 0;
1129 + }
1130 +
1131 ++static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev)
1132 ++{
1133 ++ int r;
1134 ++
1135 ++ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
1136 ++ if (unlikely(r != 0))
1137 ++ return r;
1138 ++
1139 ++ r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
1140 ++ AMDGPU_GEM_DOMAIN_VRAM);
1141 ++ if (!r)
1142 ++ adev->gfx.rlc.clear_state_gpu_addr =
1143 ++ amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
1144 ++
1145 ++ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1146 ++
1147 ++ return r;
1148 ++}
1149 ++
1150 ++static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev)
1151 ++{
1152 ++ int r;
1153 ++
1154 ++ if (!adev->gfx.rlc.clear_state_obj)
1155 ++ return;
1156 ++
1157 ++ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
1158 ++ if (likely(r == 0)) {
1159 ++ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
1160 ++ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1161 ++ }
1162 ++}
1163 ++
1164 + static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
1165 + {
1166 + amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1167 +@@ -4777,6 +4810,10 @@ static int gfx_v8_0_hw_init(void *handle)
1168 + gfx_v8_0_init_golden_registers(adev);
1169 + gfx_v8_0_constants_init(adev);
1170 +
1171 ++ r = gfx_v8_0_csb_vram_pin(adev);
1172 ++ if (r)
1173 ++ return r;
1174 ++
1175 + r = adev->gfx.rlc.funcs->resume(adev);
1176 + if (r)
1177 + return r;
1178 +@@ -4893,6 +4930,9 @@ static int gfx_v8_0_hw_fini(void *handle)
1179 + else
1180 + pr_err("rlc is busy, skip halt rlc\n");
1181 + amdgpu_gfx_rlc_exit_safe_mode(adev);
1182 ++
1183 ++ gfx_v8_0_csb_vram_unpin(adev);
1184 ++
1185 + return 0;
1186 + }
1187 +
1188 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1189 +index 2f7f0a2e4a6c..0332177c0302 100644
1190 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1191 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1192 +@@ -596,6 +596,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1193 + (adev->gfx.rlc_feature_version < 1) ||
1194 + !adev->gfx.rlc.is_rlc_v2_1)
1195 + adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1196 ++ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1197 ++ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1198 ++ AMD_PG_SUPPORT_CP |
1199 ++ AMD_PG_SUPPORT_RLC_SMU_HS;
1200 + break;
1201 + default:
1202 + break;
1203 +diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
1204 +index b7e594c2bfb4..84c34712e39e 100644
1205 +--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
1206 ++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
1207 +@@ -949,11 +949,6 @@ static int soc15_common_early_init(void *handle)
1208 +
1209 + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1210 + }
1211 +-
1212 +- if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1213 +- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1214 +- AMD_PG_SUPPORT_CP |
1215 +- AMD_PG_SUPPORT_RLC_SMU_HS;
1216 + break;
1217 + default:
1218 + /* FIXME: not supported yet */
1219 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
1220 +index b4e7404fe660..a11637b0f6cc 100644
1221 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
1222 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
1223 +@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1224 + u8 *ptr = msg->buf;
1225 +
1226 + while (remaining) {
1227 +- u8 cnt = (remaining > 16) ? 16 : remaining;
1228 +- u8 cmd;
1229 ++ u8 cnt, retries, cmd;
1230 +
1231 + if (msg->flags & I2C_M_RD)
1232 + cmd = 1;
1233 +@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1234 + if (mcnt || remaining > 16)
1235 + cmd |= 4; /* MOT */
1236 +
1237 +- ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
1238 +- if (ret < 0) {
1239 +- nvkm_i2c_aux_release(aux);
1240 +- return ret;
1241 ++ for (retries = 0, cnt = 0;
1242 ++ retries < 32 && !cnt;
1243 ++ retries++) {
1244 ++ cnt = min_t(u8, remaining, 16);
1245 ++ ret = aux->func->xfer(aux, true, cmd,
1246 ++ msg->addr, ptr, &cnt);
1247 ++ if (ret < 0)
1248 ++ goto out;
1249 ++ }
1250 ++ if (!cnt) {
1251 ++ AUX_TRACE(aux, "no data after 32 retries");
1252 ++ ret = -EIO;
1253 ++ goto out;
1254 + }
1255 +
1256 + ptr += cnt;
1257 +@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1258 + msg++;
1259 + }
1260 +
1261 ++ ret = num;
1262 ++out:
1263 + nvkm_i2c_aux_release(aux);
1264 +- return num;
1265 ++ return ret;
1266 + }
1267 +
1268 + static u32
1269 +diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
1270 +index 95e5c517a15f..9aae3d8e99ef 100644
1271 +--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
1272 ++++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
1273 +@@ -432,7 +432,7 @@ static int rockchip_dp_resume(struct device *dev)
1274 +
1275 + static const struct dev_pm_ops rockchip_dp_pm_ops = {
1276 + #ifdef CONFIG_PM_SLEEP
1277 +- .suspend = rockchip_dp_suspend,
1278 ++ .suspend_late = rockchip_dp_suspend,
1279 + .resume_early = rockchip_dp_resume,
1280 + #endif
1281 + };
1282 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
1283 +index e4e09d47c5c0..59e9d05ab928 100644
1284 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
1285 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
1286 +@@ -389,8 +389,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
1287 + break;
1288 + }
1289 +
1290 +- if (retries == RETRIES)
1291 ++ if (retries == RETRIES) {
1292 ++ kfree(reply);
1293 + return -EINVAL;
1294 ++ }
1295 +
1296 + *msg_len = reply_len;
1297 + *msg = reply;
1298 +diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
1299 +index 98bf694626f7..3a8c4a5971f7 100644
1300 +--- a/drivers/hid/hid-a4tech.c
1301 ++++ b/drivers/hid/hid-a4tech.c
1302 +@@ -23,12 +23,36 @@
1303 + #define A4_2WHEEL_MOUSE_HACK_7 0x01
1304 + #define A4_2WHEEL_MOUSE_HACK_B8 0x02
1305 +
1306 ++#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
1307 ++
1308 + struct a4tech_sc {
1309 + unsigned long quirks;
1310 + unsigned int hw_wheel;
1311 + __s32 delayed_value;
1312 + };
1313 +
1314 ++static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
1315 ++ struct hid_field *field, struct hid_usage *usage,
1316 ++ unsigned long **bit, int *max)
1317 ++{
1318 ++ struct a4tech_sc *a4 = hid_get_drvdata(hdev);
1319 ++
1320 ++ if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
1321 ++ usage->hid == A4_WHEEL_ORIENTATION) {
1322 ++ /*
1323 ++ * We do not want to have this usage mapped to anything as it's
1324 ++ * nonstandard and doesn't really behave like an HID report.
1325 ++ * It's only selecting the orientation (vertical/horizontal) of
1326 ++ * the previous mouse wheel report. The input_events will be
1327 ++ * generated once both reports are recorded in a4_event().
1328 ++ */
1329 ++ return -1;
1330 ++ }
1331 ++
1332 ++ return 0;
1333 ++
1334 ++}
1335 ++
1336 + static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
1337 + struct hid_field *field, struct hid_usage *usage,
1338 + unsigned long **bit, int *max)
1339 +@@ -52,8 +76,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
1340 + struct a4tech_sc *a4 = hid_get_drvdata(hdev);
1341 + struct input_dev *input;
1342 +
1343 +- if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
1344 +- !usage->type)
1345 ++ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
1346 + return 0;
1347 +
1348 + input = field->hidinput->input;
1349 +@@ -64,7 +87,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
1350 + return 1;
1351 + }
1352 +
1353 +- if (usage->hid == 0x000100b8) {
1354 ++ if (usage->hid == A4_WHEEL_ORIENTATION) {
1355 + input_event(input, EV_REL, value ? REL_HWHEEL :
1356 + REL_WHEEL, a4->delayed_value);
1357 + input_event(input, EV_REL, value ? REL_HWHEEL_HI_RES :
1358 +@@ -131,6 +154,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
1359 + static struct hid_driver a4_driver = {
1360 + .name = "a4tech",
1361 + .id_table = a4_devices,
1362 ++ .input_mapping = a4_input_mapping,
1363 + .input_mapped = a4_input_mapped,
1364 + .event = a4_event,
1365 + .probe = a4_probe,
1366 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1367 +index 34a812025b94..76aa474e92c1 100644
1368 +--- a/drivers/hid/hid-ids.h
1369 ++++ b/drivers/hid/hid-ids.h
1370 +@@ -990,6 +990,7 @@
1371 + #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
1372 + #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
1373 + #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
1374 ++#define USB_DEVICE_ID_SAITEK_X52 0x075c
1375 +
1376 + #define USB_VENDOR_ID_SAMSUNG 0x0419
1377 + #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
1378 +diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
1379 +index cf05816a601f..34e2b3f9d540 100644
1380 +--- a/drivers/hid/hid-logitech-hidpp.c
1381 ++++ b/drivers/hid/hid-logitech-hidpp.c
1382 +@@ -3749,15 +3749,45 @@ static const struct hid_device_id hidpp_devices[] = {
1383 +
1384 + { L27MHZ_DEVICE(HID_ANY_ID) },
1385 +
1386 +- { /* Logitech G403 Gaming Mouse over USB */
1387 ++ { /* Logitech G203/Prodigy Gaming Mouse */
1388 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) },
1389 ++ { /* Logitech G302 Gaming Mouse */
1390 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) },
1391 ++ { /* Logitech G303 Gaming Mouse */
1392 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) },
1393 ++ { /* Logitech G400 Gaming Mouse */
1394 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) },
1395 ++ { /* Logitech G403 Wireless Gaming Mouse over USB */
1396 + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
1397 ++ { /* Logitech G403 Gaming Mouse */
1398 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) },
1399 ++ { /* Logitech G403 Hero Gaming Mouse over USB */
1400 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) },
1401 ++ { /* Logitech G502 Proteus Core Gaming Mouse */
1402 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) },
1403 ++ { /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */
1404 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) },
1405 ++ { /* Logitech G502 Hero Gaming Mouse over USB */
1406 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) },
1407 + { /* Logitech G700 Gaming Mouse over USB */
1408 + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
1409 ++ { /* Logitech G700s Gaming Mouse over USB */
1410 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) },
1411 ++ { /* Logitech G703 Gaming Mouse over USB */
1412 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
1413 ++ { /* Logitech G703 Hero Gaming Mouse over USB */
1414 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC090) },
1415 + { /* Logitech G900 Gaming Mouse over USB */
1416 + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) },
1417 ++ { /* Logitech G903 Gaming Mouse over USB */
1418 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) },
1419 ++ { /* Logitech G903 Hero Gaming Mouse over USB */
1420 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) },
1421 + { /* Logitech G920 Wheel over USB */
1422 + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
1423 + .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
1424 ++ { /* Logitech G Pro Gaming Mouse over USB */
1425 ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
1426 +
1427 + { /* MX5000 keyboard over Bluetooth */
1428 + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
1429 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
1430 +index 5b669f7d653f..4fe2c3ab76f9 100644
1431 +--- a/drivers/hid/hid-quirks.c
1432 ++++ b/drivers/hid/hid-quirks.c
1433 +@@ -141,6 +141,7 @@ static const struct hid_device_id hid_quirks[] = {
1434 + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
1435 + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
1436 + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
1437 ++ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
1438 + { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
1439 + { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
1440 + { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
1441 +diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
1442 +index e12f2588ddeb..bdfc5ff3b2c5 100644
1443 +--- a/drivers/hid/hid-tmff.c
1444 ++++ b/drivers/hid/hid-tmff.c
1445 +@@ -22,6 +22,8 @@
1446 +
1447 + #include "hid-ids.h"
1448 +
1449 ++#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
1450 ++
1451 + static const signed short ff_rumble[] = {
1452 + FF_RUMBLE,
1453 + -1
1454 +@@ -76,6 +78,7 @@ static int tmff_play(struct input_dev *dev, void *data,
1455 + struct hid_field *ff_field = tmff->ff_field;
1456 + int x, y;
1457 + int left, right; /* Rumbling */
1458 ++ int motor_swap;
1459 +
1460 + switch (effect->type) {
1461 + case FF_CONSTANT:
1462 +@@ -100,6 +103,13 @@ static int tmff_play(struct input_dev *dev, void *data,
1463 + ff_field->logical_minimum,
1464 + ff_field->logical_maximum);
1465 +
1466 ++ /* 2-in-1 strong motor is left */
1467 ++ if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
1468 ++ motor_swap = left;
1469 ++ left = right;
1470 ++ right = motor_swap;
1471 ++ }
1472 ++
1473 + dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
1474 + ff_field->value[0] = left;
1475 + ff_field->value[1] = right;
1476 +@@ -226,6 +236,8 @@ static const struct hid_device_id tm_devices[] = {
1477 + .driver_data = (unsigned long)ff_rumble },
1478 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
1479 + .driver_data = (unsigned long)ff_rumble },
1480 ++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
1481 ++ .driver_data = (unsigned long)ff_rumble },
1482 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
1483 + .driver_data = (unsigned long)ff_rumble },
1484 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
1485 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1486 +index 926c597f5f46..53ed51adb8ac 100644
1487 +--- a/drivers/hid/wacom_wac.c
1488 ++++ b/drivers/hid/wacom_wac.c
1489 +@@ -846,6 +846,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
1490 + y >>= 1;
1491 + distance >>= 1;
1492 + }
1493 ++ if (features->type == INTUOSHT2)
1494 ++ distance = features->distance_max - distance;
1495 + input_report_abs(input, ABS_X, x);
1496 + input_report_abs(input, ABS_Y, y);
1497 + input_report_abs(input, ABS_DISTANCE, distance);
1498 +@@ -1059,7 +1061,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
1499 + input_report_key(input, BTN_BASE2, (data[11] & 0x02));
1500 +
1501 + if (data[12] & 0x80)
1502 +- input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
1503 ++ input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
1504 + else
1505 + input_report_abs(input, ABS_WHEEL, 0);
1506 +
1507 +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
1508 +index 5f9505a087f6..23f358cb7f49 100644
1509 +--- a/drivers/hv/channel.c
1510 ++++ b/drivers/hv/channel.c
1511 +@@ -26,7 +26,7 @@
1512 +
1513 + static unsigned long virt_to_hvpfn(void *addr)
1514 + {
1515 +- unsigned long paddr;
1516 ++ phys_addr_t paddr;
1517 +
1518 + if (is_vmalloc_addr(addr))
1519 + paddr = page_to_phys(vmalloc_to_page(addr)) +
1520 +diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
1521 +index fe7e7097e00a..7e9527ab6d64 100644
1522 +--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
1523 ++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
1524 +@@ -2576,18 +2576,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
1525 + hfi1_kern_clear_hw_flow(priv->rcd, qp);
1526 + }
1527 +
1528 +-static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
1529 +- struct hfi1_packet *packet, u8 rcv_type,
1530 +- u8 opcode)
1531 ++static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
1532 + {
1533 + struct rvt_qp *qp = packet->qp;
1534 +- struct hfi1_qp_priv *qpriv = qp->priv;
1535 +- u32 ipsn;
1536 +- struct ib_other_headers *ohdr = packet->ohdr;
1537 +- struct rvt_ack_entry *e;
1538 +- struct tid_rdma_request *req;
1539 +- struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1540 +- u32 i;
1541 +
1542 + if (rcv_type >= RHF_RCV_TYPE_IB)
1543 + goto done;
1544 +@@ -2604,41 +2595,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
1545 + if (rcv_type == RHF_RCV_TYPE_EAGER) {
1546 + hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
1547 + hfi1_schedule_send(qp);
1548 +- goto done_unlock;
1549 +- }
1550 +-
1551 +- /*
1552 +- * For TID READ response, error out QP after freeing the tid
1553 +- * resources.
1554 +- */
1555 +- if (opcode == TID_OP(READ_RESP)) {
1556 +- ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
1557 +- if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
1558 +- cmp_psn(ipsn, qp->s_psn) < 0) {
1559 +- hfi1_kern_read_tid_flow_free(qp);
1560 +- spin_unlock(&qp->s_lock);
1561 +- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
1562 +- goto done;
1563 +- }
1564 +- goto done_unlock;
1565 +- }
1566 +-
1567 +- /*
1568 +- * Error out the qp for TID RDMA WRITE
1569 +- */
1570 +- hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
1571 +- for (i = 0; i < rvt_max_atomic(rdi); i++) {
1572 +- e = &qp->s_ack_queue[i];
1573 +- if (e->opcode == TID_OP(WRITE_REQ)) {
1574 +- req = ack_to_tid_req(e);
1575 +- hfi1_kern_exp_rcv_clear_all(req);
1576 +- }
1577 + }
1578 +- spin_unlock(&qp->s_lock);
1579 +- rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
1580 +- goto done;
1581 +
1582 +-done_unlock:
1583 ++ /* Since no payload is delivered, just drop the packet */
1584 + spin_unlock(&qp->s_lock);
1585 + done:
1586 + return true;
1587 +@@ -2689,12 +2648,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
1588 + u32 fpsn;
1589 +
1590 + lockdep_assert_held(&qp->r_lock);
1591 ++ spin_lock(&qp->s_lock);
1592 + /* If the psn is out of valid range, drop the packet */
1593 + if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
1594 + cmp_psn(ibpsn, qp->s_psn) > 0)
1595 +- return ret;
1596 ++ goto s_unlock;
1597 +
1598 +- spin_lock(&qp->s_lock);
1599 + /*
1600 + * Note that NAKs implicitly ACK outstanding SEND and RDMA write
1601 + * requests and implicitly NAK RDMA read and atomic requests issued
1602 +@@ -2742,9 +2701,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
1603 +
1604 + wqe = do_rc_completion(qp, wqe, ibp);
1605 + if (qp->s_acked == qp->s_tail)
1606 +- break;
1607 ++ goto s_unlock;
1608 + }
1609 +
1610 ++ if (qp->s_acked == qp->s_tail)
1611 ++ goto s_unlock;
1612 ++
1613 + /* Handle the eflags for the request */
1614 + if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
1615 + goto s_unlock;
1616 +@@ -2924,7 +2886,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
1617 + if (lnh == HFI1_LRH_GRH)
1618 + goto r_unlock;
1619 +
1620 +- if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
1621 ++ if (tid_rdma_tid_err(packet, rcv_type))
1622 + goto r_unlock;
1623 + }
1624 +
1625 +@@ -2944,8 +2906,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
1626 + */
1627 + spin_lock(&qp->s_lock);
1628 + qpriv = qp->priv;
1629 ++ if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
1630 ++ qpriv->r_tid_tail == qpriv->r_tid_head)
1631 ++ goto unlock;
1632 + e = &qp->s_ack_queue[qpriv->r_tid_tail];
1633 ++ if (e->opcode != TID_OP(WRITE_REQ))
1634 ++ goto unlock;
1635 + req = ack_to_tid_req(e);
1636 ++ if (req->comp_seg == req->cur_seg)
1637 ++ goto unlock;
1638 + flow = &req->flows[req->clear_tail];
1639 + trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
1640 + trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
1641 +@@ -4511,7 +4480,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
1642 + struct rvt_swqe *wqe;
1643 + struct tid_rdma_request *req;
1644 + struct tid_rdma_flow *flow;
1645 +- u32 aeth, psn, req_psn, ack_psn, fspsn, resync_psn, ack_kpsn;
1646 ++ u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
1647 + unsigned long flags;
1648 + u16 fidx;
1649 +
1650 +@@ -4540,6 +4509,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
1651 + ack_kpsn--;
1652 + }
1653 +
1654 ++ if (unlikely(qp->s_acked == qp->s_tail))
1655 ++ goto ack_op_err;
1656 ++
1657 + wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1658 +
1659 + if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
1660 +@@ -4552,7 +4524,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
1661 + trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
1662 +
1663 + /* Drop stale ACK/NAK */
1664 +- if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0)
1665 ++ if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
1666 ++ cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
1667 + goto ack_op_err;
1668 +
1669 + while (cmp_psn(ack_kpsn,
1670 +@@ -4714,8 +4687,12 @@ done:
1671 + switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
1672 + IB_AETH_CREDIT_MASK) {
1673 + case 0: /* PSN sequence error */
1674 ++ if (!req->flows)
1675 ++ break;
1676 + flow = &req->flows[req->acked_tail];
1677 +- fspsn = full_flow_psn(flow, flow->flow_state.spsn);
1678 ++ flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
1679 ++ if (cmp_psn(psn, flpsn) > 0)
1680 ++ break;
1681 + trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
1682 + flow);
1683 + req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
1684 +diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
1685 +index 0e224232f746..008a74a1ed44 100644
1686 +--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
1687 ++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
1688 +@@ -1394,6 +1394,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
1689 + printk(KERN_DEBUG
1690 + "%s: %s: alloc urb for fifo %i failed",
1691 + hw->name, __func__, fifo->fifonum);
1692 ++ continue;
1693 + }
1694 + fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
1695 + fifo->iso[i].indx = i;
1696 +@@ -1692,13 +1693,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
1697 + static int
1698 + setup_hfcsusb(struct hfcsusb *hw)
1699 + {
1700 ++ void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
1701 + u_char b;
1702 ++ int ret;
1703 +
1704 + if (debug & DBG_HFC_CALL_TRACE)
1705 + printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
1706 +
1707 ++ if (!dmabuf)
1708 ++ return -ENOMEM;
1709 ++
1710 ++ ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
1711 ++
1712 ++ memcpy(&b, dmabuf, sizeof(u_char));
1713 ++ kfree(dmabuf);
1714 ++
1715 + /* check the chip id */
1716 +- if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
1717 ++ if (ret != 1) {
1718 + printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
1719 + hw->name, __func__);
1720 + return 1;
1721 +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
1722 +index b6b5acc92ca2..2a48ea3f1b30 100644
1723 +--- a/drivers/md/dm-bufio.c
1724 ++++ b/drivers/md/dm-bufio.c
1725 +@@ -1599,7 +1599,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1726 + unsigned long freed;
1727 +
1728 + c = container_of(shrink, struct dm_bufio_client, shrinker);
1729 +- if (!dm_bufio_trylock(c))
1730 ++ if (sc->gfp_mask & __GFP_FS)
1731 ++ dm_bufio_lock(c);
1732 ++ else if (!dm_bufio_trylock(c))
1733 + return SHRINK_STOP;
1734 +
1735 + freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1736 +diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
1737 +index 845f376a72d9..8288887b7f94 100644
1738 +--- a/drivers/md/dm-dust.c
1739 ++++ b/drivers/md/dm-dust.c
1740 +@@ -25,6 +25,7 @@ struct dust_device {
1741 + unsigned long long badblock_count;
1742 + spinlock_t dust_lock;
1743 + unsigned int blksz;
1744 ++ int sect_per_block_shift;
1745 + unsigned int sect_per_block;
1746 + sector_t start;
1747 + bool fail_read_on_bb:1;
1748 +@@ -79,7 +80,7 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block)
1749 + unsigned long flags;
1750 +
1751 + spin_lock_irqsave(&dd->dust_lock, flags);
1752 +- bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
1753 ++ bblock = dust_rb_search(&dd->badblocklist, block);
1754 +
1755 + if (bblock == NULL) {
1756 + if (!dd->quiet_mode) {
1757 +@@ -113,7 +114,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
1758 + }
1759 +
1760 + spin_lock_irqsave(&dd->dust_lock, flags);
1761 +- bblock->bb = block * dd->sect_per_block;
1762 ++ bblock->bb = block;
1763 + if (!dust_rb_insert(&dd->badblocklist, bblock)) {
1764 + if (!dd->quiet_mode) {
1765 + DMERR("%s: block %llu already in badblocklist",
1766 +@@ -138,7 +139,7 @@ static int dust_query_block(struct dust_device *dd, unsigned long long block)
1767 + unsigned long flags;
1768 +
1769 + spin_lock_irqsave(&dd->dust_lock, flags);
1770 +- bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
1771 ++ bblock = dust_rb_search(&dd->badblocklist, block);
1772 + if (bblock != NULL)
1773 + DMINFO("%s: block %llu found in badblocklist", __func__, block);
1774 + else
1775 +@@ -165,6 +166,7 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock,
1776 + int ret = DM_MAPIO_REMAPPED;
1777 +
1778 + if (fail_read_on_bb) {
1779 ++ thisblock >>= dd->sect_per_block_shift;
1780 + spin_lock_irqsave(&dd->dust_lock, flags);
1781 + ret = __dust_map_read(dd, thisblock);
1782 + spin_unlock_irqrestore(&dd->dust_lock, flags);
1783 +@@ -195,6 +197,7 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock,
1784 + unsigned long flags;
1785 +
1786 + if (fail_read_on_bb) {
1787 ++ thisblock >>= dd->sect_per_block_shift;
1788 + spin_lock_irqsave(&dd->dust_lock, flags);
1789 + __dust_map_write(dd, thisblock);
1790 + spin_unlock_irqrestore(&dd->dust_lock, flags);
1791 +@@ -331,6 +334,8 @@ static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1792 + dd->blksz = blksz;
1793 + dd->start = tmp;
1794 +
1795 ++ dd->sect_per_block_shift = __ffs(sect_per_block);
1796 ++
1797 + /*
1798 + * Whether to fail a read on a "bad" block.
1799 + * Defaults to false; enabled later by message.
1800 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
1801 +index 44e76cda087a..29a5e5b4c63c 100644
1802 +--- a/drivers/md/dm-integrity.c
1803 ++++ b/drivers/md/dm-integrity.c
1804 +@@ -1940,7 +1940,22 @@ offload_to_thread:
1805 + queue_work(ic->wait_wq, &dio->work);
1806 + return;
1807 + }
1808 ++ if (journal_read_pos != NOT_FOUND)
1809 ++ dio->range.n_sectors = ic->sectors_per_block;
1810 + wait_and_add_new_range(ic, &dio->range);
1811 ++ /*
1812 ++ * wait_and_add_new_range drops the spinlock, so the journal
1813 ++ * may have been changed arbitrarily. We need to recheck.
1814 ++ * To simplify the code, we restrict I/O size to just one block.
1815 ++ */
1816 ++ if (journal_read_pos != NOT_FOUND) {
1817 ++ sector_t next_sector;
1818 ++ unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1819 ++ if (unlikely(new_pos != journal_read_pos)) {
1820 ++ remove_range_unlocked(ic, &dio->range);
1821 ++ goto retry;
1822 ++ }
1823 ++ }
1824 + }
1825 + spin_unlock_irq(&ic->endio_wait.lock);
1826 +
1827 +diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
1828 +index 671c24332802..3f694d9061ec 100644
1829 +--- a/drivers/md/dm-kcopyd.c
1830 ++++ b/drivers/md/dm-kcopyd.c
1831 +@@ -548,8 +548,10 @@ static int run_io_job(struct kcopyd_job *job)
1832 + * no point in continuing.
1833 + */
1834 + if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
1835 +- job->master_job->write_err)
1836 ++ job->master_job->write_err) {
1837 ++ job->write_err = job->master_job->write_err;
1838 + return -EIO;
1839 ++ }
1840 +
1841 + io_job_start(job->kc->throttle);
1842 +
1843 +@@ -601,6 +603,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
1844 + else
1845 + job->read_err = 1;
1846 + push(&kc->complete_jobs, job);
1847 ++ wake(kc);
1848 + break;
1849 + }
1850 +
1851 +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
1852 +index 9fdef6897316..01aaac2c15be 100644
1853 +--- a/drivers/md/dm-raid.c
1854 ++++ b/drivers/md/dm-raid.c
1855 +@@ -3194,7 +3194,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1856 + */
1857 + r = rs_prepare_reshape(rs);
1858 + if (r)
1859 +- return r;
1860 ++ goto bad;
1861 +
1862 + /* Reshaping ain't recovery, so disable recovery */
1863 + rs_setup_recovery(rs, MaxSector);
1864 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1865 +index ec8b27e20de3..c840c587083b 100644
1866 +--- a/drivers/md/dm-table.c
1867 ++++ b/drivers/md/dm-table.c
1868 +@@ -1334,7 +1334,7 @@ void dm_table_event(struct dm_table *t)
1869 + }
1870 + EXPORT_SYMBOL(dm_table_event);
1871 +
1872 +-sector_t dm_table_get_size(struct dm_table *t)
1873 ++inline sector_t dm_table_get_size(struct dm_table *t)
1874 + {
1875 + return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1876 + }
1877 +@@ -1359,6 +1359,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1878 + unsigned int l, n = 0, k = 0;
1879 + sector_t *node;
1880 +
1881 ++ if (unlikely(sector >= dm_table_get_size(t)))
1882 ++ return &t->targets[t->num_targets];
1883 ++
1884 + for (l = 0; l < t->depth; l++) {
1885 + n = get_child(n, k);
1886 + node = get_node(t, l, n);
1887 +diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
1888 +index 4cdde7a02e94..7e8d7fc99410 100644
1889 +--- a/drivers/md/dm-zoned-metadata.c
1890 ++++ b/drivers/md/dm-zoned-metadata.c
1891 +@@ -401,15 +401,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
1892 + sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
1893 + struct bio *bio;
1894 +
1895 ++ if (dmz_bdev_is_dying(zmd->dev))
1896 ++ return ERR_PTR(-EIO);
1897 ++
1898 + /* Get a new block and a BIO to read it */
1899 + mblk = dmz_alloc_mblock(zmd, mblk_no);
1900 + if (!mblk)
1901 +- return NULL;
1902 ++ return ERR_PTR(-ENOMEM);
1903 +
1904 + bio = bio_alloc(GFP_NOIO, 1);
1905 + if (!bio) {
1906 + dmz_free_mblock(zmd, mblk);
1907 +- return NULL;
1908 ++ return ERR_PTR(-ENOMEM);
1909 + }
1910 +
1911 + spin_lock(&zmd->mblk_lock);
1912 +@@ -540,8 +543,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
1913 + if (!mblk) {
1914 + /* Cache miss: read the block from disk */
1915 + mblk = dmz_get_mblock_slow(zmd, mblk_no);
1916 +- if (!mblk)
1917 +- return ERR_PTR(-ENOMEM);
1918 ++ if (IS_ERR(mblk))
1919 ++ return mblk;
1920 + }
1921 +
1922 + /* Wait for on-going read I/O and check for error */
1923 +@@ -569,16 +572,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
1924 + /*
1925 + * Issue a metadata block write BIO.
1926 + */
1927 +-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
1928 +- unsigned int set)
1929 ++static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
1930 ++ unsigned int set)
1931 + {
1932 + sector_t block = zmd->sb[set].block + mblk->no;
1933 + struct bio *bio;
1934 +
1935 ++ if (dmz_bdev_is_dying(zmd->dev))
1936 ++ return -EIO;
1937 ++
1938 + bio = bio_alloc(GFP_NOIO, 1);
1939 + if (!bio) {
1940 + set_bit(DMZ_META_ERROR, &mblk->state);
1941 +- return;
1942 ++ return -ENOMEM;
1943 + }
1944 +
1945 + set_bit(DMZ_META_WRITING, &mblk->state);
1946 +@@ -590,6 +596,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
1947 + bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
1948 + bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
1949 + submit_bio(bio);
1950 ++
1951 ++ return 0;
1952 + }
1953 +
1954 + /*
1955 +@@ -601,6 +609,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
1956 + struct bio *bio;
1957 + int ret;
1958 +
1959 ++ if (dmz_bdev_is_dying(zmd->dev))
1960 ++ return -EIO;
1961 ++
1962 + bio = bio_alloc(GFP_NOIO, 1);
1963 + if (!bio)
1964 + return -ENOMEM;
1965 +@@ -658,22 +669,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
1966 + {
1967 + struct dmz_mblock *mblk;
1968 + struct blk_plug plug;
1969 +- int ret = 0;
1970 ++ int ret = 0, nr_mblks_submitted = 0;
1971 +
1972 + /* Issue writes */
1973 + blk_start_plug(&plug);
1974 +- list_for_each_entry(mblk, write_list, link)
1975 +- dmz_write_mblock(zmd, mblk, set);
1976 ++ list_for_each_entry(mblk, write_list, link) {
1977 ++ ret = dmz_write_mblock(zmd, mblk, set);
1978 ++ if (ret)
1979 ++ break;
1980 ++ nr_mblks_submitted++;
1981 ++ }
1982 + blk_finish_plug(&plug);
1983 +
1984 + /* Wait for completion */
1985 + list_for_each_entry(mblk, write_list, link) {
1986 ++ if (!nr_mblks_submitted)
1987 ++ break;
1988 + wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
1989 + TASK_UNINTERRUPTIBLE);
1990 + if (test_bit(DMZ_META_ERROR, &mblk->state)) {
1991 + clear_bit(DMZ_META_ERROR, &mblk->state);
1992 + ret = -EIO;
1993 + }
1994 ++ nr_mblks_submitted--;
1995 + }
1996 +
1997 + /* Flush drive cache (this will also sync data) */
1998 +@@ -735,6 +753,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
1999 + */
2000 + dmz_lock_flush(zmd);
2001 +
2002 ++ if (dmz_bdev_is_dying(zmd->dev)) {
2003 ++ ret = -EIO;
2004 ++ goto out;
2005 ++ }
2006 ++
2007 + /* Get dirty blocks */
2008 + spin_lock(&zmd->mblk_lock);
2009 + list_splice_init(&zmd->mblk_dirty_list, &write_list);
2010 +@@ -1534,7 +1557,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
2011 + struct dm_zone *zone;
2012 +
2013 + if (list_empty(&zmd->map_rnd_list))
2014 +- return NULL;
2015 ++ return ERR_PTR(-EBUSY);
2016 +
2017 + list_for_each_entry(zone, &zmd->map_rnd_list, link) {
2018 + if (dmz_is_buf(zone))
2019 +@@ -1545,7 +1568,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
2020 + return dzone;
2021 + }
2022 +
2023 +- return NULL;
2024 ++ return ERR_PTR(-EBUSY);
2025 + }
2026 +
2027 + /*
2028 +@@ -1556,7 +1579,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
2029 + struct dm_zone *zone;
2030 +
2031 + if (list_empty(&zmd->map_seq_list))
2032 +- return NULL;
2033 ++ return ERR_PTR(-EBUSY);
2034 +
2035 + list_for_each_entry(zone, &zmd->map_seq_list, link) {
2036 + if (!zone->bzone)
2037 +@@ -1565,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
2038 + return zone;
2039 + }
2040 +
2041 +- return NULL;
2042 ++ return ERR_PTR(-EBUSY);
2043 + }
2044 +
2045 + /*
2046 +@@ -1623,6 +1646,10 @@ again:
2047 + /* Alloate a random zone */
2048 + dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
2049 + if (!dzone) {
2050 ++ if (dmz_bdev_is_dying(zmd->dev)) {
2051 ++ dzone = ERR_PTR(-EIO);
2052 ++ goto out;
2053 ++ }
2054 + dmz_wait_for_free_zones(zmd);
2055 + goto again;
2056 + }
2057 +@@ -1720,6 +1747,10 @@ again:
2058 + /* Alloate a random zone */
2059 + bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
2060 + if (!bzone) {
2061 ++ if (dmz_bdev_is_dying(zmd->dev)) {
2062 ++ bzone = ERR_PTR(-EIO);
2063 ++ goto out;
2064 ++ }
2065 + dmz_wait_for_free_zones(zmd);
2066 + goto again;
2067 + }
2068 +diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
2069 +index edf4b95eb075..9470b8f77a33 100644
2070 +--- a/drivers/md/dm-zoned-reclaim.c
2071 ++++ b/drivers/md/dm-zoned-reclaim.c
2072 +@@ -37,7 +37,7 @@ enum {
2073 + /*
2074 + * Number of seconds of target BIO inactivity to consider the target idle.
2075 + */
2076 +-#define DMZ_IDLE_PERIOD (10UL * HZ)
2077 ++#define DMZ_IDLE_PERIOD (10UL * HZ)
2078 +
2079 + /*
2080 + * Percentage of unmapped (free) random zones below which reclaim starts
2081 +@@ -134,6 +134,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
2082 + set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
2083 +
2084 + while (block < end_block) {
2085 ++ if (dev->flags & DMZ_BDEV_DYING)
2086 ++ return -EIO;
2087 ++
2088 + /* Get a valid region from the source zone */
2089 + ret = dmz_first_valid_block(zmd, src_zone, &block);
2090 + if (ret <= 0)
2091 +@@ -215,7 +218,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
2092 +
2093 + dmz_unlock_flush(zmd);
2094 +
2095 +- return 0;
2096 ++ return ret;
2097 + }
2098 +
2099 + /*
2100 +@@ -259,7 +262,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
2101 +
2102 + dmz_unlock_flush(zmd);
2103 +
2104 +- return 0;
2105 ++ return ret;
2106 + }
2107 +
2108 + /*
2109 +@@ -312,7 +315,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
2110 +
2111 + dmz_unlock_flush(zmd);
2112 +
2113 +- return 0;
2114 ++ return ret;
2115 + }
2116 +
2117 + /*
2118 +@@ -334,7 +337,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
2119 + /*
2120 + * Find a candidate zone for reclaim and process it.
2121 + */
2122 +-static void dmz_reclaim(struct dmz_reclaim *zrc)
2123 ++static int dmz_do_reclaim(struct dmz_reclaim *zrc)
2124 + {
2125 + struct dmz_metadata *zmd = zrc->metadata;
2126 + struct dm_zone *dzone;
2127 +@@ -344,8 +347,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
2128 +
2129 + /* Get a data zone */
2130 + dzone = dmz_get_zone_for_reclaim(zmd);
2131 +- if (!dzone)
2132 +- return;
2133 ++ if (IS_ERR(dzone))
2134 ++ return PTR_ERR(dzone);
2135 +
2136 + start = jiffies;
2137 +
2138 +@@ -391,13 +394,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
2139 + out:
2140 + if (ret) {
2141 + dmz_unlock_zone_reclaim(dzone);
2142 +- return;
2143 ++ return ret;
2144 + }
2145 +
2146 +- (void) dmz_flush_metadata(zrc->metadata);
2147 ++ ret = dmz_flush_metadata(zrc->metadata);
2148 ++ if (ret) {
2149 ++ dmz_dev_debug(zrc->dev,
2150 ++ "Metadata flush for zone %u failed, err %d\n",
2151 ++ dmz_id(zmd, rzone), ret);
2152 ++ return ret;
2153 ++ }
2154 +
2155 + dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
2156 + dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
2157 ++ return 0;
2158 + }
2159 +
2160 + /*
2161 +@@ -442,6 +452,10 @@ static void dmz_reclaim_work(struct work_struct *work)
2162 + struct dmz_metadata *zmd = zrc->metadata;
2163 + unsigned int nr_rnd, nr_unmap_rnd;
2164 + unsigned int p_unmap_rnd;
2165 ++ int ret;
2166 ++
2167 ++ if (dmz_bdev_is_dying(zrc->dev))
2168 ++ return;
2169 +
2170 + if (!dmz_should_reclaim(zrc)) {
2171 + mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
2172 +@@ -471,7 +485,17 @@ static void dmz_reclaim_work(struct work_struct *work)
2173 + (dmz_target_idle(zrc) ? "Idle" : "Busy"),
2174 + p_unmap_rnd, nr_unmap_rnd, nr_rnd);
2175 +
2176 +- dmz_reclaim(zrc);
2177 ++ ret = dmz_do_reclaim(zrc);
2178 ++ if (ret) {
2179 ++ dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
2180 ++ if (ret == -EIO)
2181 ++ /*
2182 ++ * LLD might be performing some error handling sequence
2183 ++ * at the underlying device. To not interfere, do not
2184 ++ * attempt to schedule the next reclaim run immediately.
2185 ++ */
2186 ++ return;
2187 ++ }
2188 +
2189 + dmz_schedule_reclaim(zrc);
2190 + }
2191 +diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
2192 +index 51d029bbb740..ff3fd011796e 100644
2193 +--- a/drivers/md/dm-zoned-target.c
2194 ++++ b/drivers/md/dm-zoned-target.c
2195 +@@ -133,6 +133,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
2196 +
2197 + refcount_inc(&bioctx->ref);
2198 + generic_make_request(clone);
2199 ++ if (clone->bi_status == BLK_STS_IOERR)
2200 ++ return -EIO;
2201 +
2202 + if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
2203 + zone->wp_block += nr_blocks;
2204 +@@ -277,8 +279,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
2205 +
2206 + /* Get the buffer zone. One will be allocated if needed */
2207 + bzone = dmz_get_chunk_buffer(zmd, zone);
2208 +- if (!bzone)
2209 +- return -ENOSPC;
2210 ++ if (IS_ERR(bzone))
2211 ++ return PTR_ERR(bzone);
2212 +
2213 + if (dmz_is_readonly(bzone))
2214 + return -EROFS;
2215 +@@ -389,6 +391,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
2216 +
2217 + dmz_lock_metadata(zmd);
2218 +
2219 ++ if (dmz->dev->flags & DMZ_BDEV_DYING) {
2220 ++ ret = -EIO;
2221 ++ goto out;
2222 ++ }
2223 ++
2224 + /*
2225 + * Get the data zone mapping the chunk. There may be no
2226 + * mapping for read and discard. If a mapping is obtained,
2227 +@@ -493,6 +500,8 @@ static void dmz_flush_work(struct work_struct *work)
2228 +
2229 + /* Flush dirty metadata blocks */
2230 + ret = dmz_flush_metadata(dmz->metadata);
2231 ++ if (ret)
2232 ++ dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
2233 +
2234 + /* Process queued flush requests */
2235 + while (1) {
2236 +@@ -513,22 +522,24 @@ static void dmz_flush_work(struct work_struct *work)
2237 + * Get a chunk work and start it to process a new BIO.
2238 + * If the BIO chunk has no work yet, create one.
2239 + */
2240 +-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
2241 ++static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
2242 + {
2243 + unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
2244 + struct dm_chunk_work *cw;
2245 ++ int ret = 0;
2246 +
2247 + mutex_lock(&dmz->chunk_lock);
2248 +
2249 + /* Get the BIO chunk work. If one is not active yet, create one */
2250 + cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
2251 + if (!cw) {
2252 +- int ret;
2253 +
2254 + /* Create a new chunk work */
2255 + cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
2256 +- if (!cw)
2257 ++ if (unlikely(!cw)) {
2258 ++ ret = -ENOMEM;
2259 + goto out;
2260 ++ }
2261 +
2262 + INIT_WORK(&cw->work, dmz_chunk_work);
2263 + refcount_set(&cw->refcount, 0);
2264 +@@ -539,7 +550,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
2265 + ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
2266 + if (unlikely(ret)) {
2267 + kfree(cw);
2268 +- cw = NULL;
2269 + goto out;
2270 + }
2271 + }
2272 +@@ -547,10 +557,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
2273 + bio_list_add(&cw->bio_list, bio);
2274 + dmz_get_chunk_work(cw);
2275 +
2276 ++ dmz_reclaim_bio_acc(dmz->reclaim);
2277 + if (queue_work(dmz->chunk_wq, &cw->work))
2278 + dmz_get_chunk_work(cw);
2279 + out:
2280 + mutex_unlock(&dmz->chunk_lock);
2281 ++ return ret;
2282 ++}
2283 ++
2284 ++/*
2285 ++ * Check the backing device availability. If it's on the way out,
2286 ++ * start failing I/O. Reclaim and metadata components also call this
2287 ++ * function to cleanly abort operation in the event of such failure.
2288 ++ */
2289 ++bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
2290 ++{
2291 ++ struct gendisk *disk;
2292 ++
2293 ++ if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
2294 ++ disk = dmz_dev->bdev->bd_disk;
2295 ++ if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
2296 ++ dmz_dev_warn(dmz_dev, "Backing device queue dying");
2297 ++ dmz_dev->flags |= DMZ_BDEV_DYING;
2298 ++ } else if (disk->fops->check_events) {
2299 ++ if (disk->fops->check_events(disk, 0) &
2300 ++ DISK_EVENT_MEDIA_CHANGE) {
2301 ++ dmz_dev_warn(dmz_dev, "Backing device offline");
2302 ++ dmz_dev->flags |= DMZ_BDEV_DYING;
2303 ++ }
2304 ++ }
2305 ++ }
2306 ++
2307 ++ return dmz_dev->flags & DMZ_BDEV_DYING;
2308 + }
2309 +
2310 + /*
2311 +@@ -564,6 +602,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
2312 + sector_t sector = bio->bi_iter.bi_sector;
2313 + unsigned int nr_sectors = bio_sectors(bio);
2314 + sector_t chunk_sector;
2315 ++ int ret;
2316 ++
2317 ++ if (dmz_bdev_is_dying(dmz->dev))
2318 ++ return DM_MAPIO_KILL;
2319 +
2320 + dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
2321 + bio_op(bio), (unsigned long long)sector, nr_sectors,
2322 +@@ -601,8 +643,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
2323 + dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
2324 +
2325 + /* Now ready to handle this BIO */
2326 +- dmz_reclaim_bio_acc(dmz->reclaim);
2327 +- dmz_queue_chunk_work(dmz, bio);
2328 ++ ret = dmz_queue_chunk_work(dmz, bio);
2329 ++ if (ret) {
2330 ++ dmz_dev_debug(dmz->dev,
2331 ++ "BIO op %d, can't process chunk %llu, err %i\n",
2332 ++ bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
2333 ++ ret);
2334 ++ return DM_MAPIO_REQUEUE;
2335 ++ }
2336 +
2337 + return DM_MAPIO_SUBMITTED;
2338 + }
2339 +@@ -855,6 +903,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
2340 + {
2341 + struct dmz_target *dmz = ti->private;
2342 +
2343 ++ if (dmz_bdev_is_dying(dmz->dev))
2344 ++ return -ENODEV;
2345 ++
2346 + *bdev = dmz->dev->bdev;
2347 +
2348 + return 0;
2349 +diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
2350 +index ed8de49c9a08..93a64529f219 100644
2351 +--- a/drivers/md/dm-zoned.h
2352 ++++ b/drivers/md/dm-zoned.h
2353 +@@ -56,6 +56,8 @@ struct dmz_dev {
2354 +
2355 + unsigned int nr_zones;
2356 +
2357 ++ unsigned int flags;
2358 ++
2359 + sector_t zone_nr_sectors;
2360 + unsigned int zone_nr_sectors_shift;
2361 +
2362 +@@ -67,6 +69,9 @@ struct dmz_dev {
2363 + (dev)->zone_nr_sectors_shift)
2364 + #define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
2365 +
2366 ++/* Device flags. */
2367 ++#define DMZ_BDEV_DYING (1 << 0)
2368 ++
2369 + /*
2370 + * Zone descriptor.
2371 + */
2372 +@@ -245,4 +250,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
2373 + void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
2374 + void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
2375 +
2376 ++/*
2377 ++ * Functions defined in dm-zoned-target.c
2378 ++ */
2379 ++bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
2380 ++
2381 + #endif /* DM_ZONED_H */
2382 +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
2383 +index 58b319757b1e..8aae0624a297 100644
2384 +--- a/drivers/md/persistent-data/dm-btree.c
2385 ++++ b/drivers/md/persistent-data/dm-btree.c
2386 +@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
2387 +
2388 + new_parent = shadow_current(s);
2389 +
2390 ++ pn = dm_block_data(new_parent);
2391 ++ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
2392 ++ sizeof(__le64) : s->info->value_type.size;
2393 ++
2394 ++ /* create & init the left block */
2395 + r = new_block(s->info, &left);
2396 + if (r < 0)
2397 + return r;
2398 +
2399 ++ ln = dm_block_data(left);
2400 ++ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
2401 ++
2402 ++ ln->header.flags = pn->header.flags;
2403 ++ ln->header.nr_entries = cpu_to_le32(nr_left);
2404 ++ ln->header.max_entries = pn->header.max_entries;
2405 ++ ln->header.value_size = pn->header.value_size;
2406 ++ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
2407 ++ memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
2408 ++
2409 ++ /* create & init the right block */
2410 + r = new_block(s->info, &right);
2411 + if (r < 0) {
2412 + unlock_block(s->info, left);
2413 + return r;
2414 + }
2415 +
2416 +- pn = dm_block_data(new_parent);
2417 +- ln = dm_block_data(left);
2418 + rn = dm_block_data(right);
2419 +-
2420 +- nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
2421 + nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
2422 +
2423 +- ln->header.flags = pn->header.flags;
2424 +- ln->header.nr_entries = cpu_to_le32(nr_left);
2425 +- ln->header.max_entries = pn->header.max_entries;
2426 +- ln->header.value_size = pn->header.value_size;
2427 +-
2428 + rn->header.flags = pn->header.flags;
2429 + rn->header.nr_entries = cpu_to_le32(nr_right);
2430 + rn->header.max_entries = pn->header.max_entries;
2431 + rn->header.value_size = pn->header.value_size;
2432 +-
2433 +- memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
2434 + memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
2435 +-
2436 +- size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
2437 +- sizeof(__le64) : s->info->value_type.size;
2438 +- memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
2439 + memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
2440 + nr_right * size);
2441 +
2442 +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
2443 +index aec449243966..25328582cc48 100644
2444 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c
2445 ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
2446 +@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
2447 + }
2448 +
2449 + if (smm->recursion_count == 1)
2450 +- apply_bops(smm);
2451 ++ r = apply_bops(smm);
2452 +
2453 + smm->recursion_count--;
2454 +
2455 +diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
2456 +index eda5d7fcb79f..fe9e57a81b6f 100644
2457 +--- a/drivers/misc/habanalabs/firmware_if.c
2458 ++++ b/drivers/misc/habanalabs/firmware_if.c
2459 +@@ -24,7 +24,7 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
2460 + {
2461 + const struct firmware *fw;
2462 + const u64 *fw_data;
2463 +- size_t fw_size, i;
2464 ++ size_t fw_size;
2465 + int rc;
2466 +
2467 + rc = request_firmware(&fw, fw_name, hdev->dev);
2468 +@@ -45,22 +45,7 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
2469 +
2470 + fw_data = (const u64 *) fw->data;
2471 +
2472 +- if ((fw->size % 8) != 0)
2473 +- fw_size -= 8;
2474 +-
2475 +- for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
2476 +- if (!(i & (0x80000 - 1))) {
2477 +- dev_dbg(hdev->dev,
2478 +- "copied so far %zu out of %zu for %s firmware",
2479 +- i, fw_size, fw_name);
2480 +- usleep_range(20, 100);
2481 +- }
2482 +-
2483 +- writeq(*fw_data, dst);
2484 +- }
2485 +-
2486 +- if ((fw->size % 8) != 0)
2487 +- writel(*(const u32 *) fw_data, dst);
2488 ++ memcpy_toio(dst, fw_data, fw_size);
2489 +
2490 + out:
2491 + release_firmware(fw);
2492 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2493 +index f183cadd14e3..e8f48f3cdf94 100644
2494 +--- a/drivers/net/bonding/bond_main.c
2495 ++++ b/drivers/net/bonding/bond_main.c
2496 +@@ -2205,6 +2205,15 @@ static void bond_miimon_commit(struct bonding *bond)
2497 + bond_for_each_slave(bond, slave, iter) {
2498 + switch (slave->new_link) {
2499 + case BOND_LINK_NOCHANGE:
2500 ++ /* For 802.3ad mode, check current slave speed and
2501 ++ * duplex again in case its port was disabled after
2502 ++ * invalid speed/duplex reporting but recovered before
2503 ++ * link monitoring could make a decision on the actual
2504 ++ * link status
2505 ++ */
2506 ++ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
2507 ++ slave->link == BOND_LINK_UP)
2508 ++ bond_3ad_adapter_speed_duplex_changed(slave);
2509 + continue;
2510 +
2511 + case BOND_LINK_UP:
2512 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
2513 +index b6b93a2d93a5..483d270664cc 100644
2514 +--- a/drivers/net/can/dev.c
2515 ++++ b/drivers/net/can/dev.c
2516 +@@ -1249,6 +1249,8 @@ int register_candev(struct net_device *dev)
2517 + return -EINVAL;
2518 +
2519 + dev->rtnl_link_ops = &can_link_ops;
2520 ++ netif_carrier_off(dev);
2521 ++
2522 + return register_netdev(dev);
2523 + }
2524 + EXPORT_SYMBOL_GPL(register_candev);
2525 +diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
2526 +index 185c7f7d38a4..5e0d5e8101c8 100644
2527 +--- a/drivers/net/can/sja1000/peak_pcmcia.c
2528 ++++ b/drivers/net/can/sja1000/peak_pcmcia.c
2529 +@@ -479,7 +479,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
2530 + if (!netdev)
2531 + continue;
2532 +
2533 +- strncpy(name, netdev->name, IFNAMSIZ);
2534 ++ strlcpy(name, netdev->name, IFNAMSIZ);
2535 +
2536 + unregister_sja1000dev(netdev);
2537 +
2538 +diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
2539 +index 44e99e3d7134..2aec934fab0c 100644
2540 +--- a/drivers/net/can/spi/mcp251x.c
2541 ++++ b/drivers/net/can/spi/mcp251x.c
2542 +@@ -664,17 +664,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable)
2543 + return regulator_disable(reg);
2544 + }
2545 +
2546 +-static void mcp251x_open_clean(struct net_device *net)
2547 +-{
2548 +- struct mcp251x_priv *priv = netdev_priv(net);
2549 +- struct spi_device *spi = priv->spi;
2550 +-
2551 +- free_irq(spi->irq, priv);
2552 +- mcp251x_hw_sleep(spi);
2553 +- mcp251x_power_enable(priv->transceiver, 0);
2554 +- close_candev(net);
2555 +-}
2556 +-
2557 + static int mcp251x_stop(struct net_device *net)
2558 + {
2559 + struct mcp251x_priv *priv = netdev_priv(net);
2560 +@@ -940,37 +929,43 @@ static int mcp251x_open(struct net_device *net)
2561 + flags | IRQF_ONESHOT, DEVICE_NAME, priv);
2562 + if (ret) {
2563 + dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
2564 +- mcp251x_power_enable(priv->transceiver, 0);
2565 +- close_candev(net);
2566 +- goto open_unlock;
2567 ++ goto out_close;
2568 + }
2569 +
2570 + priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
2571 + 0);
2572 ++ if (!priv->wq) {
2573 ++ ret = -ENOMEM;
2574 ++ goto out_clean;
2575 ++ }
2576 + INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
2577 + INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
2578 +
2579 + ret = mcp251x_hw_reset(spi);
2580 +- if (ret) {
2581 +- mcp251x_open_clean(net);
2582 +- goto open_unlock;
2583 +- }
2584 ++ if (ret)
2585 ++ goto out_free_wq;
2586 + ret = mcp251x_setup(net, spi);
2587 +- if (ret) {
2588 +- mcp251x_open_clean(net);
2589 +- goto open_unlock;
2590 +- }
2591 ++ if (ret)
2592 ++ goto out_free_wq;
2593 + ret = mcp251x_set_normal_mode(spi);
2594 +- if (ret) {
2595 +- mcp251x_open_clean(net);
2596 +- goto open_unlock;
2597 +- }
2598 ++ if (ret)
2599 ++ goto out_free_wq;
2600 +
2601 + can_led_event(net, CAN_LED_EVENT_OPEN);
2602 +
2603 + netif_wake_queue(net);
2604 ++ mutex_unlock(&priv->mcp_lock);
2605 +
2606 +-open_unlock:
2607 ++ return 0;
2608 ++
2609 ++out_free_wq:
2610 ++ destroy_workqueue(priv->wq);
2611 ++out_clean:
2612 ++ free_irq(spi->irq, priv);
2613 ++ mcp251x_hw_sleep(spi);
2614 ++out_close:
2615 ++ mcp251x_power_enable(priv->transceiver, 0);
2616 ++ close_candev(net);
2617 + mutex_unlock(&priv->mcp_lock);
2618 + return ret;
2619 + }
2620 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
2621 +index 22b9c8e6d040..65dce642b86b 100644
2622 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
2623 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
2624 +@@ -855,7 +855,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
2625 +
2626 + dev_prev_siblings = dev->prev_siblings;
2627 + dev->state &= ~PCAN_USB_STATE_CONNECTED;
2628 +- strncpy(name, netdev->name, IFNAMSIZ);
2629 ++ strlcpy(name, netdev->name, IFNAMSIZ);
2630 +
2631 + unregister_netdev(netdev);
2632 +
2633 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2634 +index 1e82b9efe447..58f89f6a040f 100644
2635 +--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2636 ++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2637 +@@ -3269,7 +3269,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2638 + if (!adapter->regs) {
2639 + dev_err(&pdev->dev, "cannot map device registers\n");
2640 + err = -ENOMEM;
2641 +- goto out_free_adapter;
2642 ++ goto out_free_adapter_nofail;
2643 + }
2644 +
2645 + adapter->pdev = pdev;
2646 +@@ -3397,6 +3397,9 @@ out_free_dev:
2647 + if (adapter->port[i])
2648 + free_netdev(adapter->port[i]);
2649 +
2650 ++out_free_adapter_nofail:
2651 ++ kfree_skb(adapter->nofail_skb);
2652 ++
2653 + out_free_adapter:
2654 + kfree(adapter);
2655 +
2656 +diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
2657 +index 8429f5c1d810..a268e74b1834 100644
2658 +--- a/drivers/net/ethernet/freescale/enetc/Kconfig
2659 ++++ b/drivers/net/ethernet/freescale/enetc/Kconfig
2660 +@@ -2,6 +2,7 @@
2661 + config FSL_ENETC
2662 + tristate "ENETC PF driver"
2663 + depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
2664 ++ select PHYLIB
2665 + help
2666 + This driver supports NXP ENETC gigabit ethernet controller PCIe
2667 + physical function (PF) devices, managing ENETC Ports at a privileged
2668 +@@ -12,6 +13,7 @@ config FSL_ENETC
2669 + config FSL_ENETC_VF
2670 + tristate "ENETC VF driver"
2671 + depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
2672 ++ select PHYLIB
2673 + help
2674 + This driver supports NXP ENETC gigabit ethernet controller PCIe
2675 + virtual function (VF) devices enabled by the ENETC PF driver.
2676 +diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
2677 +index e1f2978506fd..51cf6b0db904 100644
2678 +--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
2679 ++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
2680 +@@ -153,6 +153,7 @@ struct hip04_priv {
2681 + unsigned int reg_inten;
2682 +
2683 + struct napi_struct napi;
2684 ++ struct device *dev;
2685 + struct net_device *ndev;
2686 +
2687 + struct tx_desc *tx_desc;
2688 +@@ -181,7 +182,7 @@ struct hip04_priv {
2689 +
2690 + static inline unsigned int tx_count(unsigned int head, unsigned int tail)
2691 + {
2692 +- return (head - tail) % (TX_DESC_NUM - 1);
2693 ++ return (head - tail) % TX_DESC_NUM;
2694 + }
2695 +
2696 + static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
2697 +@@ -383,7 +384,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
2698 + }
2699 +
2700 + if (priv->tx_phys[tx_tail]) {
2701 +- dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
2702 ++ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
2703 + priv->tx_skb[tx_tail]->len,
2704 + DMA_TO_DEVICE);
2705 + priv->tx_phys[tx_tail] = 0;
2706 +@@ -434,8 +435,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2707 + return NETDEV_TX_BUSY;
2708 + }
2709 +
2710 +- phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
2711 +- if (dma_mapping_error(&ndev->dev, phys)) {
2712 ++ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
2713 ++ if (dma_mapping_error(priv->dev, phys)) {
2714 + dev_kfree_skb(skb);
2715 + return NETDEV_TX_OK;
2716 + }
2717 +@@ -494,6 +495,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
2718 + u16 len;
2719 + u32 err;
2720 +
2721 ++ /* clean up tx descriptors */
2722 ++ tx_remaining = hip04_tx_reclaim(ndev, false);
2723 ++
2724 + while (cnt && !last) {
2725 + buf = priv->rx_buf[priv->rx_head];
2726 + skb = build_skb(buf, priv->rx_buf_size);
2727 +@@ -502,7 +506,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
2728 + goto refill;
2729 + }
2730 +
2731 +- dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
2732 ++ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
2733 + RX_BUF_SIZE, DMA_FROM_DEVICE);
2734 + priv->rx_phys[priv->rx_head] = 0;
2735 +
2736 +@@ -531,9 +535,9 @@ refill:
2737 + buf = netdev_alloc_frag(priv->rx_buf_size);
2738 + if (!buf)
2739 + goto done;
2740 +- phys = dma_map_single(&ndev->dev, buf,
2741 ++ phys = dma_map_single(priv->dev, buf,
2742 + RX_BUF_SIZE, DMA_FROM_DEVICE);
2743 +- if (dma_mapping_error(&ndev->dev, phys))
2744 ++ if (dma_mapping_error(priv->dev, phys))
2745 + goto done;
2746 + priv->rx_buf[priv->rx_head] = buf;
2747 + priv->rx_phys[priv->rx_head] = phys;
2748 +@@ -554,8 +558,7 @@ refill:
2749 + }
2750 + napi_complete_done(napi, rx);
2751 + done:
2752 +- /* clean up tx descriptors and start a new timer if necessary */
2753 +- tx_remaining = hip04_tx_reclaim(ndev, false);
2754 ++ /* start a new timer if necessary */
2755 + if (rx < budget && tx_remaining)
2756 + hip04_start_tx_timer(priv);
2757 +
2758 +@@ -637,9 +640,9 @@ static int hip04_mac_open(struct net_device *ndev)
2759 + for (i = 0; i < RX_DESC_NUM; i++) {
2760 + dma_addr_t phys;
2761 +
2762 +- phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
2763 ++ phys = dma_map_single(priv->dev, priv->rx_buf[i],
2764 + RX_BUF_SIZE, DMA_FROM_DEVICE);
2765 +- if (dma_mapping_error(&ndev->dev, phys))
2766 ++ if (dma_mapping_error(priv->dev, phys))
2767 + return -EIO;
2768 +
2769 + priv->rx_phys[i] = phys;
2770 +@@ -673,7 +676,7 @@ static int hip04_mac_stop(struct net_device *ndev)
2771 +
2772 + for (i = 0; i < RX_DESC_NUM; i++) {
2773 + if (priv->rx_phys[i]) {
2774 +- dma_unmap_single(&ndev->dev, priv->rx_phys[i],
2775 ++ dma_unmap_single(priv->dev, priv->rx_phys[i],
2776 + RX_BUF_SIZE, DMA_FROM_DEVICE);
2777 + priv->rx_phys[i] = 0;
2778 + }
2779 +@@ -817,6 +820,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
2780 + return -ENOMEM;
2781 +
2782 + priv = netdev_priv(ndev);
2783 ++ priv->dev = d;
2784 + priv->ndev = ndev;
2785 + platform_set_drvdata(pdev, ndev);
2786 + SET_NETDEV_DEV(ndev, &pdev->dev);
2787 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2788 +index 50ed1bdb632d..885529701de9 100644
2789 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2790 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2791 +@@ -4580,9 +4580,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
2792 + else
2793 + ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
2794 +
2795 +- ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
2796 +- ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
2797 +- MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
2798 ++ ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
2799 ++ MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
2800 ++ ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
2801 +
2802 + if (old_ctrl0 != ctrl0)
2803 + writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
2804 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
2805 +index fdfedbc8e431..70a771cd8788 100644
2806 +--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
2807 ++++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
2808 +@@ -1093,7 +1093,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
2809 + snprintf(bit_name, 30,
2810 + p_aeu->bit_name, num);
2811 + else
2812 +- strncpy(bit_name,
2813 ++ strlcpy(bit_name,
2814 + p_aeu->bit_name, 30);
2815 +
2816 + /* We now need to pass bitmask in its
2817 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2818 +index 13802b825d65..909422d93903 100644
2819 +--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2820 ++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2821 +@@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
2822 + /* Vendor specific information */
2823 + dev->vendor_id = cdev->vendor_id;
2824 + dev->vendor_part_id = cdev->device_id;
2825 +- dev->hw_ver = 0;
2826 ++ dev->hw_ver = cdev->chip_rev;
2827 + dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
2828 + (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
2829 +
2830 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
2831 +index e3850938cf2f..d7bf0ad954b8 100644
2832 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
2833 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
2834 +@@ -85,6 +85,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
2835 + u32 value;
2836 +
2837 + base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
2838 ++ if (queue >= 4)
2839 ++ queue -= 4;
2840 +
2841 + value = readl(ioaddr + base_register);
2842 +
2843 +@@ -102,6 +104,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
2844 + u32 value;
2845 +
2846 + base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
2847 ++ if (queue >= 4)
2848 ++ queue -= 4;
2849 +
2850 + value = readl(ioaddr + base_register);
2851 +
2852 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
2853 +index 64b8cb88ea45..d4bd99770f5d 100644
2854 +--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
2855 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
2856 +@@ -106,6 +106,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
2857 + u32 value, reg;
2858 +
2859 + reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
2860 ++ if (queue >= 4)
2861 ++ queue -= 4;
2862 +
2863 + value = readl(ioaddr + reg);
2864 + value &= ~XGMAC_PSRQ(queue);
2865 +@@ -169,6 +171,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
2866 + u32 value, reg;
2867 +
2868 + reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
2869 ++ if (queue >= 4)
2870 ++ queue -= 4;
2871 +
2872 + value = readl(ioaddr + reg);
2873 + value &= ~XGMAC_QxMDMACH(queue);
2874 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2875 +index 0f0f4b31eb7e..9b5218a8c15b 100644
2876 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2877 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2878 +@@ -385,6 +385,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
2879 + return ERR_PTR(-ENOMEM);
2880 +
2881 + *mac = of_get_mac_address(np);
2882 ++ if (IS_ERR(*mac)) {
2883 ++ if (PTR_ERR(*mac) == -EPROBE_DEFER)
2884 ++ return ERR_CAST(*mac);
2885 ++
2886 ++ *mac = NULL;
2887 ++ }
2888 ++
2889 + plat->interface = of_get_phy_mode(np);
2890 +
2891 + /* Get max speed of operation from device tree */
2892 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2893 +index 58ea18af9813..37c0bc699cd9 100644
2894 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2895 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2896 +@@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
2897 + entry = &priv->tc_entries[i];
2898 + if (!entry->in_use && !first && free)
2899 + first = entry;
2900 +- if (entry->handle == loc && !free)
2901 ++ if ((entry->handle == loc) && !free && !entry->is_frag)
2902 + dup = entry;
2903 + }
2904 +
2905 +diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
2906 +index b86a4b2116f8..59a94e07e7c5 100644
2907 +--- a/drivers/net/phy/phy_led_triggers.c
2908 ++++ b/drivers/net/phy/phy_led_triggers.c
2909 +@@ -48,8 +48,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
2910 + if (!phy->last_triggered)
2911 + led_trigger_event(&phy->led_link_trigger->trigger,
2912 + LED_FULL);
2913 ++ else
2914 ++ led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
2915 +
2916 +- led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
2917 + led_trigger_event(&plt->trigger, LED_FULL);
2918 + phy->last_triggered = plt;
2919 + }
2920 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2921 +index 8b4ad10cf940..26c5207466af 100644
2922 +--- a/drivers/net/usb/qmi_wwan.c
2923 ++++ b/drivers/net/usb/qmi_wwan.c
2924 +@@ -1294,6 +1294,7 @@ static const struct usb_device_id products[] = {
2925 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
2926 + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
2927 + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
2928 ++ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
2929 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
2930 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
2931 + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
2932 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
2933 +index fba242284507..fa81ad67539f 100644
2934 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
2935 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
2936 +@@ -1627,6 +1627,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
2937 + init_completion(&drv->request_firmware_complete);
2938 + INIT_LIST_HEAD(&drv->list);
2939 +
2940 ++ iwl_load_fw_dbg_tlv(drv->trans->dev, drv->trans);
2941 ++
2942 + #ifdef CONFIG_IWLWIFI_DEBUGFS
2943 + /* Create the device debugfs entries. */
2944 + drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
2945 +@@ -1647,8 +1649,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
2946 + err_fw:
2947 + #ifdef CONFIG_IWLWIFI_DEBUGFS
2948 + debugfs_remove_recursive(drv->dbgfs_drv);
2949 +- iwl_fw_dbg_free(drv->trans);
2950 + #endif
2951 ++ iwl_fw_dbg_free(drv->trans);
2952 + kfree(drv);
2953 + err:
2954 + return ERR_PTR(ret);
2955 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
2956 +index 964c7baabede..a6183281ee1e 100644
2957 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
2958 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
2959 +@@ -207,11 +207,11 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
2960 + },
2961 + };
2962 +
2963 +-static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2964 +- enum set_key_cmd cmd,
2965 +- struct ieee80211_vif *vif,
2966 +- struct ieee80211_sta *sta,
2967 +- struct ieee80211_key_conf *key);
2968 ++static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2969 ++ enum set_key_cmd cmd,
2970 ++ struct ieee80211_vif *vif,
2971 ++ struct ieee80211_sta *sta,
2972 ++ struct ieee80211_key_conf *key);
2973 +
2974 + void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
2975 + {
2976 +@@ -474,7 +474,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
2977 + ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
2978 + ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
2979 + ieee80211_hw_set(hw, STA_MMPDU_TXQ);
2980 +- ieee80211_hw_set(hw, TX_AMSDU);
2981 ++ /*
2982 ++ * On older devices, enabling TX A-MSDU occasionally leads to
2983 ++ * something getting messed up, the command read from the FIFO
2984 ++ * gets out of sync and isn't a TX command, so that we have an
2985 ++ * assert EDC.
2986 ++ *
2987 ++ * It's not clear where the bug is, but since we didn't used to
2988 ++ * support A-MSDU until moving the mac80211 iTXQs, just leave it
2989 ++ * for older devices. We also don't see this issue on any newer
2990 ++ * devices.
2991 ++ */
2992 ++ if (mvm->cfg->device_family >= IWL_DEVICE_FAMILY_9000)
2993 ++ ieee80211_hw_set(hw, TX_AMSDU);
2994 + ieee80211_hw_set(hw, TX_FRAG_LIST);
2995 +
2996 + if (iwl_mvm_has_tlc_offload(mvm)) {
2997 +@@ -2725,7 +2737,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2998 +
2999 + mvmvif->ap_early_keys[i] = NULL;
3000 +
3001 +- ret = iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
3002 ++ ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
3003 + if (ret)
3004 + goto out_quota_failed;
3005 + }
3006 +@@ -3493,11 +3505,11 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
3007 + return ret;
3008 + }
3009 +
3010 +-static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3011 +- enum set_key_cmd cmd,
3012 +- struct ieee80211_vif *vif,
3013 +- struct ieee80211_sta *sta,
3014 +- struct ieee80211_key_conf *key)
3015 ++static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3016 ++ enum set_key_cmd cmd,
3017 ++ struct ieee80211_vif *vif,
3018 ++ struct ieee80211_sta *sta,
3019 ++ struct ieee80211_key_conf *key)
3020 + {
3021 + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3022 + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3023 +@@ -3552,8 +3564,6 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3024 + return -EOPNOTSUPP;
3025 + }
3026 +
3027 +- mutex_lock(&mvm->mutex);
3028 +-
3029 + switch (cmd) {
3030 + case SET_KEY:
3031 + if ((vif->type == NL80211_IFTYPE_ADHOC ||
3032 +@@ -3699,7 +3709,22 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3033 + ret = -EINVAL;
3034 + }
3035 +
3036 ++ return ret;
3037 ++}
3038 ++
3039 ++static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3040 ++ enum set_key_cmd cmd,
3041 ++ struct ieee80211_vif *vif,
3042 ++ struct ieee80211_sta *sta,
3043 ++ struct ieee80211_key_conf *key)
3044 ++{
3045 ++ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3046 ++ int ret;
3047 ++
3048 ++ mutex_lock(&mvm->mutex);
3049 ++ ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
3050 + mutex_unlock(&mvm->mutex);
3051 ++
3052 + return ret;
3053 + }
3054 +
3055 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
3056 +index ed8fc9a9204c..0c11a219e347 100644
3057 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
3058 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
3059 +@@ -1807,7 +1807,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3060 + #endif /* CONFIG_IWLWIFI_DEBUGFS */
3061 +
3062 + /* rate scaling */
3063 +-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync);
3064 ++int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq);
3065 + void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
3066 + int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
3067 + void rs_update_last_rssi(struct iwl_mvm *mvm,
3068 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
3069 +index 63fdb4e68e9d..01b032f18bc8 100644
3070 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
3071 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
3072 +@@ -1197,6 +1197,27 @@ static u8 rs_get_tid(struct ieee80211_hdr *hdr)
3073 + return tid;
3074 + }
3075 +
3076 ++void iwl_mvm_rs_init_wk(struct work_struct *wk)
3077 ++{
3078 ++ struct iwl_mvm_sta *mvmsta = container_of(wk, struct iwl_mvm_sta,
3079 ++ rs_init_wk);
3080 ++ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
3081 ++ struct ieee80211_sta *sta;
3082 ++
3083 ++ rcu_read_lock();
3084 ++
3085 ++ sta = rcu_dereference(mvmvif->mvm->fw_id_to_mac_id[mvmsta->sta_id]);
3086 ++ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
3087 ++ rcu_read_unlock();
3088 ++ return;
3089 ++ }
3090 ++
3091 ++ iwl_mvm_rs_rate_init(mvmvif->mvm, sta, mvmvif->phy_ctxt->channel->band,
3092 ++ true);
3093 ++
3094 ++ rcu_read_unlock();
3095 ++}
3096 ++
3097 + void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3098 + int tid, struct ieee80211_tx_info *info, bool ndp)
3099 + {
3100 +@@ -1269,7 +1290,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3101 + (unsigned long)(lq_sta->last_tx +
3102 + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
3103 + IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
3104 +- iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
3105 ++ schedule_work(&mvmsta->rs_init_wk);
3106 + return;
3107 + }
3108 + lq_sta->last_tx = jiffies;
3109 +@@ -1305,7 +1326,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3110 + IWL_DEBUG_RATE(mvm,
3111 + "Too many rates mismatch. Send sync LQ. rs_state %d\n",
3112 + lq_sta->rs_state);
3113 +- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
3114 ++ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
3115 + }
3116 + /* Regardless, ignore this status info for outdated rate */
3117 + return;
3118 +@@ -1367,7 +1388,8 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3119 + if (info->status.ampdu_ack_len == 0)
3120 + info->status.ampdu_len = 1;
3121 +
3122 +- rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, tx_resp_rate.index,
3123 ++ rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl,
3124 ++ tx_resp_rate.index,
3125 + info->status.ampdu_len,
3126 + info->status.ampdu_ack_len);
3127 +
3128 +@@ -1442,16 +1464,24 @@ static void rs_drv_mac80211_tx_status(void *mvm_r,
3129 + struct iwl_op_mode *op_mode = mvm_r;
3130 + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
3131 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3132 ++ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3133 +
3134 +- if (!iwl_mvm_sta_from_mac80211(sta)->vif)
3135 ++ if (!mvmsta->vif)
3136 + return;
3137 +
3138 + if (!ieee80211_is_data(hdr->frame_control) ||
3139 + info->flags & IEEE80211_TX_CTL_NO_ACK)
3140 + return;
3141 +
3142 ++ /* If it's locked we are in middle of init flow
3143 ++ * just wait for next tx status to update the lq_sta data
3144 ++ */
3145 ++ if (!mutex_trylock(&mvmsta->lq_sta.rs_drv.mutex))
3146 ++ return;
3147 ++
3148 + iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info,
3149 + ieee80211_is_qos_nullfunc(hdr->frame_control));
3150 ++ mutex_unlock(&mvmsta->lq_sta.rs_drv.mutex);
3151 + }
3152 +
3153 + /*
3154 +@@ -1794,7 +1824,7 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
3155 + struct iwl_scale_tbl_info *tbl)
3156 + {
3157 + rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
3158 +- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
3159 ++ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
3160 + }
3161 +
3162 + static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
3163 +@@ -2896,7 +2926,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
3164 + static void rs_initialize_lq(struct iwl_mvm *mvm,
3165 + struct ieee80211_sta *sta,
3166 + struct iwl_lq_sta *lq_sta,
3167 +- enum nl80211_band band, bool update)
3168 ++ enum nl80211_band band)
3169 + {
3170 + struct iwl_scale_tbl_info *tbl;
3171 + struct rs_rate *rate;
3172 +@@ -2926,7 +2956,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
3173 + rs_set_expected_tpt_table(lq_sta, tbl);
3174 + rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
3175 + /* TODO restore station should remember the lq cmd */
3176 +- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update);
3177 ++ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
3178 + }
3179 +
3180 + static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
3181 +@@ -3179,7 +3209,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
3182 + * Called after adding a new station to initialize rate scaling
3183 + */
3184 + static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3185 +- enum nl80211_band band, bool update)
3186 ++ enum nl80211_band band)
3187 + {
3188 + int i, j;
3189 + struct ieee80211_hw *hw = mvm->hw;
3190 +@@ -3259,7 +3289,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3191 + #ifdef CONFIG_IWLWIFI_DEBUGFS
3192 + iwl_mvm_reset_frame_stats(mvm);
3193 + #endif
3194 +- rs_initialize_lq(mvm, sta, lq_sta, band, update);
3195 ++ rs_initialize_lq(mvm, sta, lq_sta, band);
3196 + }
3197 +
3198 + static void rs_drv_rate_update(void *mvm_r,
3199 +@@ -3573,7 +3603,7 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
3200 +
3201 + bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
3202 + bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
3203 +- iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false);
3204 ++ iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd);
3205 +
3206 + ss_params |= LQ_SS_BFER_ALLOWED;
3207 + IWL_DEBUG_RATE(mvm,
3208 +@@ -3739,7 +3769,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
3209 +
3210 + if (lq_sta->pers.dbg_fixed_rate) {
3211 + rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL);
3212 +- iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false);
3213 ++ iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq);
3214 + }
3215 + }
3216 +
3217 +@@ -4136,10 +4166,15 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
3218 + void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3219 + enum nl80211_band band, bool update)
3220 + {
3221 +- if (iwl_mvm_has_tlc_offload(mvm))
3222 ++ if (iwl_mvm_has_tlc_offload(mvm)) {
3223 + rs_fw_rate_init(mvm, sta, band, update);
3224 +- else
3225 +- rs_drv_rate_init(mvm, sta, band, update);
3226 ++ } else {
3227 ++ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3228 ++
3229 ++ mutex_lock(&mvmsta->lq_sta.rs_drv.mutex);
3230 ++ rs_drv_rate_init(mvm, sta, band);
3231 ++ mutex_unlock(&mvmsta->lq_sta.rs_drv.mutex);
3232 ++ }
3233 + }
3234 +
3235 + int iwl_mvm_rate_control_register(void)
3236 +@@ -4169,7 +4204,7 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
3237 + lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
3238 + }
3239 +
3240 +- return iwl_mvm_send_lq_cmd(mvm, lq, false);
3241 ++ return iwl_mvm_send_lq_cmd(mvm, lq);
3242 + }
3243 +
3244 + /**
3245 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
3246 +index f7eb60dbaf20..086f47e2a4f0 100644
3247 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
3248 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
3249 +@@ -4,7 +4,7 @@
3250 + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
3251 + * Copyright(c) 2015 Intel Mobile Communications GmbH
3252 + * Copyright(c) 2017 Intel Deutschland GmbH
3253 +- * Copyright(c) 2018 Intel Corporation
3254 ++ * Copyright(c) 2018 - 2019 Intel Corporation
3255 + *
3256 + * Contact Information:
3257 + * Intel Linux Wireless <linuxwifi@×××××.com>
3258 +@@ -376,6 +376,9 @@ struct iwl_lq_sta {
3259 + /* tx power reduce for this sta */
3260 + int tpc_reduce;
3261 +
3262 ++ /* avoid races of reinit and update table from rx_tx */
3263 ++ struct mutex mutex;
3264 ++
3265 + /* persistent fields - initialized only once - keep last! */
3266 + struct lq_sta_pers {
3267 + #ifdef CONFIG_MAC80211_DEBUGFS
3268 +@@ -440,6 +443,8 @@ struct iwl_mvm_sta;
3269 + int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
3270 + bool enable);
3271 +
3272 ++void iwl_mvm_rs_init_wk(struct work_struct *wk);
3273 ++
3274 + #ifdef CONFIG_IWLWIFI_DEBUGFS
3275 + void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
3276 + #endif
3277 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
3278 +index f545a737a92d..22715cdb8317 100644
3279 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
3280 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
3281 +@@ -1684,6 +1684,10 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
3282 + */
3283 + if (iwl_mvm_has_tlc_offload(mvm))
3284 + iwl_mvm_rs_add_sta(mvm, mvm_sta);
3285 ++ else
3286 ++ mutex_init(&mvm_sta->lq_sta.rs_drv.mutex);
3287 ++
3288 ++ INIT_WORK(&mvm_sta->rs_init_wk, iwl_mvm_rs_init_wk);
3289 +
3290 + iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
3291 +
3292 +@@ -1846,6 +1850,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
3293 + if (ret)
3294 + return ret;
3295 +
3296 ++ cancel_work_sync(&mvm_sta->rs_init_wk);
3297 ++
3298 + /* flush its queues here since we are freeing mvm_sta */
3299 + ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
3300 + if (ret)
3301 +@@ -2972,7 +2978,7 @@ out:
3302 + IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3303 + sta->addr, tid);
3304 +
3305 +- return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
3306 ++ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
3307 + }
3308 +
3309 + static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3310 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
3311 +index b4d4071b865d..6e93c30492b7 100644
3312 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
3313 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
3314 +@@ -421,6 +421,7 @@ struct iwl_mvm_sta {
3315 + struct iwl_lq_sta_rs_fw rs_fw;
3316 + struct iwl_lq_sta rs_drv;
3317 + } lq_sta;
3318 ++ struct work_struct rs_init_wk;
3319 + struct ieee80211_vif *vif;
3320 + struct iwl_mvm_key_pn __rcu *ptk_pn[4];
3321 + struct iwl_mvm_rxq_dup_data *dup_data;
3322 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
3323 +index cc56ab88fb43..a71277de2e0e 100644
3324 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
3325 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
3326 +@@ -641,12 +641,12 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
3327 + * this case to clear the state indicating that station creation is in
3328 + * progress.
3329 + */
3330 +-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
3331 ++int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
3332 + {
3333 + struct iwl_host_cmd cmd = {
3334 + .id = LQ_CMD,
3335 + .len = { sizeof(struct iwl_lq_cmd), },
3336 +- .flags = sync ? 0 : CMD_ASYNC,
3337 ++ .flags = CMD_ASYNC,
3338 + .data = { lq, },
3339 + };
3340 +
3341 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
3342 +index 1c699a9fa866..faec05ab4275 100644
3343 +--- a/drivers/net/wireless/mac80211_hwsim.c
3344 ++++ b/drivers/net/wireless/mac80211_hwsim.c
3345 +@@ -3615,10 +3615,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
3346 + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3347 + cb->nlh->nlmsg_seq, &hwsim_genl_family,
3348 + NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
3349 +- if (!hdr)
3350 ++ if (hdr) {
3351 ++ genl_dump_check_consistent(cb, hdr);
3352 ++ genlmsg_end(skb, hdr);
3353 ++ } else {
3354 + res = -EMSGSIZE;
3355 +- genl_dump_check_consistent(cb, hdr);
3356 +- genlmsg_end(skb, hdr);
3357 ++ }
3358 + }
3359 +
3360 + done:
3361 +diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
3362 +index c3e10b6ab3a4..f25f1ec5f9e9 100644
3363 +--- a/drivers/nfc/st-nci/se.c
3364 ++++ b/drivers/nfc/st-nci/se.c
3365 +@@ -333,6 +333,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
3366 +
3367 + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
3368 + skb->len - 2, GFP_KERNEL);
3369 ++ if (!transaction)
3370 ++ return -ENOMEM;
3371 +
3372 + transaction->aid_len = skb->data[1];
3373 + memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
3374 +diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
3375 +index 06fc542fd198..6586378cacb0 100644
3376 +--- a/drivers/nfc/st21nfca/se.c
3377 ++++ b/drivers/nfc/st21nfca/se.c
3378 +@@ -317,6 +317,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
3379 +
3380 + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
3381 + skb->len - 2, GFP_KERNEL);
3382 ++ if (!transaction)
3383 ++ return -ENOMEM;
3384 +
3385 + transaction->aid_len = skb->data[1];
3386 + memcpy(transaction->aid, &skb->data[2],
3387 +diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c
3388 +index 6f303b91f6e7..9e0c429cd08a 100644
3389 +--- a/drivers/nvmem/nvmem-sysfs.c
3390 ++++ b/drivers/nvmem/nvmem-sysfs.c
3391 +@@ -224,10 +224,17 @@ int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
3392 + if (!config->base_dev)
3393 + return -EINVAL;
3394 +
3395 +- if (nvmem->read_only)
3396 +- nvmem->eeprom = bin_attr_ro_root_nvmem;
3397 +- else
3398 +- nvmem->eeprom = bin_attr_rw_root_nvmem;
3399 ++ if (nvmem->read_only) {
3400 ++ if (config->root_only)
3401 ++ nvmem->eeprom = bin_attr_ro_root_nvmem;
3402 ++ else
3403 ++ nvmem->eeprom = bin_attr_ro_nvmem;
3404 ++ } else {
3405 ++ if (config->root_only)
3406 ++ nvmem->eeprom = bin_attr_rw_root_nvmem;
3407 ++ else
3408 ++ nvmem->eeprom = bin_attr_rw_nvmem;
3409 ++ }
3410 + nvmem->eeprom.attr.name = "eeprom";
3411 + nvmem->eeprom.size = nvmem->size;
3412 + #ifdef CONFIG_DEBUG_LOCK_ALLOC
3413 +diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
3414 +index 152053361862..989506bd90b1 100644
3415 +--- a/drivers/regulator/axp20x-regulator.c
3416 ++++ b/drivers/regulator/axp20x-regulator.c
3417 +@@ -174,14 +174,14 @@
3418 + #define AXP803_DCDC5_1140mV_STEPS 35
3419 + #define AXP803_DCDC5_1140mV_END \
3420 + (AXP803_DCDC5_1140mV_START + AXP803_DCDC5_1140mV_STEPS)
3421 +-#define AXP803_DCDC5_NUM_VOLTAGES 68
3422 ++#define AXP803_DCDC5_NUM_VOLTAGES 69
3423 +
3424 + #define AXP803_DCDC6_600mV_START 0x00
3425 + #define AXP803_DCDC6_600mV_STEPS 50
3426 + #define AXP803_DCDC6_600mV_END \
3427 + (AXP803_DCDC6_600mV_START + AXP803_DCDC6_600mV_STEPS)
3428 + #define AXP803_DCDC6_1120mV_START 0x33
3429 +-#define AXP803_DCDC6_1120mV_STEPS 14
3430 ++#define AXP803_DCDC6_1120mV_STEPS 20
3431 + #define AXP803_DCDC6_1120mV_END \
3432 + (AXP803_DCDC6_1120mV_START + AXP803_DCDC6_1120mV_STEPS)
3433 + #define AXP803_DCDC6_NUM_VOLTAGES 72
3434 +@@ -240,7 +240,7 @@
3435 + #define AXP806_DCDCA_600mV_END \
3436 + (AXP806_DCDCA_600mV_START + AXP806_DCDCA_600mV_STEPS)
3437 + #define AXP806_DCDCA_1120mV_START 0x33
3438 +-#define AXP806_DCDCA_1120mV_STEPS 14
3439 ++#define AXP806_DCDCA_1120mV_STEPS 20
3440 + #define AXP806_DCDCA_1120mV_END \
3441 + (AXP806_DCDCA_1120mV_START + AXP806_DCDCA_1120mV_STEPS)
3442 + #define AXP806_DCDCA_NUM_VOLTAGES 72
3443 +@@ -774,8 +774,8 @@ static const struct regulator_linear_range axp806_dcdcd_ranges[] = {
3444 + AXP806_DCDCD_600mV_END,
3445 + 20000),
3446 + REGULATOR_LINEAR_RANGE(1600000,
3447 +- AXP806_DCDCD_600mV_START,
3448 +- AXP806_DCDCD_600mV_END,
3449 ++ AXP806_DCDCD_1600mV_START,
3450 ++ AXP806_DCDCD_1600mV_END,
3451 + 100000),
3452 + };
3453 +
3454 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
3455 +index 3fe3029617a8..aa3bfc20b737 100644
3456 +--- a/drivers/scsi/ufs/ufshcd.c
3457 ++++ b/drivers/scsi/ufs/ufshcd.c
3458 +@@ -7032,6 +7032,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
3459 + static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
3460 + struct ufs_vreg *vreg)
3461 + {
3462 ++ if (!vreg)
3463 ++ return 0;
3464 ++
3465 + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
3466 + }
3467 +
3468 +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
3469 +index af3f37ba82c8..1f32c9e3ca65 100644
3470 +--- a/drivers/spi/spi-pxa2xx.c
3471 ++++ b/drivers/spi/spi-pxa2xx.c
3472 +@@ -1453,6 +1453,14 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
3473 + { PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
3474 + { PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
3475 + { PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
3476 ++ /* TGL-LP */
3477 ++ { PCI_VDEVICE(INTEL, 0xa0aa), LPSS_CNL_SSP },
3478 ++ { PCI_VDEVICE(INTEL, 0xa0ab), LPSS_CNL_SSP },
3479 ++ { PCI_VDEVICE(INTEL, 0xa0de), LPSS_CNL_SSP },
3480 ++ { PCI_VDEVICE(INTEL, 0xa0df), LPSS_CNL_SSP },
3481 ++ { PCI_VDEVICE(INTEL, 0xa0fb), LPSS_CNL_SSP },
3482 ++ { PCI_VDEVICE(INTEL, 0xa0fd), LPSS_CNL_SSP },
3483 ++ { PCI_VDEVICE(INTEL, 0xa0fe), LPSS_CNL_SSP },
3484 + { },
3485 + };
3486 +
3487 +@@ -1817,14 +1825,16 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
3488 + status = devm_spi_register_controller(&pdev->dev, controller);
3489 + if (status != 0) {
3490 + dev_err(&pdev->dev, "problem registering spi controller\n");
3491 +- goto out_error_clock_enabled;
3492 ++ goto out_error_pm_runtime_enabled;
3493 + }
3494 +
3495 + return status;
3496 +
3497 +-out_error_clock_enabled:
3498 ++out_error_pm_runtime_enabled:
3499 + pm_runtime_put_noidle(&pdev->dev);
3500 + pm_runtime_disable(&pdev->dev);
3501 ++
3502 ++out_error_clock_enabled:
3503 + clk_disable_unprepare(ssp->clk);
3504 +
3505 + out_error_dma_irq_alloc:
3506 +diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c
3507 +index b6c6d66e4eb1..e2c7646588f8 100644
3508 +--- a/drivers/staging/fbtft/fb_bd663474.c
3509 ++++ b/drivers/staging/fbtft/fb_bd663474.c
3510 +@@ -24,7 +24,7 @@
3511 +
3512 + static int init_display(struct fbtft_par *par)
3513 + {
3514 +- if (!par->gpio.cs)
3515 ++ if (par->gpio.cs)
3516 + gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
3517 +
3518 + par->fbtftops.reset(par);
3519 +diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
3520 +index d609a2b67db9..fd32376700e2 100644
3521 +--- a/drivers/staging/fbtft/fb_ili9163.c
3522 ++++ b/drivers/staging/fbtft/fb_ili9163.c
3523 +@@ -77,7 +77,7 @@ static int init_display(struct fbtft_par *par)
3524 + {
3525 + par->fbtftops.reset(par);
3526 +
3527 +- if (!par->gpio.cs)
3528 ++ if (par->gpio.cs)
3529 + gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
3530 +
3531 + write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */
3532 +diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
3533 +index b090e7ab6fdd..85e54a10ed72 100644
3534 +--- a/drivers/staging/fbtft/fb_ili9325.c
3535 ++++ b/drivers/staging/fbtft/fb_ili9325.c
3536 +@@ -85,7 +85,7 @@ static int init_display(struct fbtft_par *par)
3537 + {
3538 + par->fbtftops.reset(par);
3539 +
3540 +- if (!par->gpio.cs)
3541 ++ if (par->gpio.cs)
3542 + gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
3543 +
3544 + bt &= 0x07;
3545 +diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
3546 +index b3d0701880fe..5a129b1352cc 100644
3547 +--- a/drivers/staging/fbtft/fb_s6d1121.c
3548 ++++ b/drivers/staging/fbtft/fb_s6d1121.c
3549 +@@ -29,7 +29,7 @@ static int init_display(struct fbtft_par *par)
3550 + {
3551 + par->fbtftops.reset(par);
3552 +
3553 +- if (!par->gpio.cs)
3554 ++ if (par->gpio.cs)
3555 + gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
3556 +
3557 + /* Initialization sequence from Lib_UTFT */
3558 +diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
3559 +index bbf75f795234..88a5b6925901 100644
3560 +--- a/drivers/staging/fbtft/fb_ssd1289.c
3561 ++++ b/drivers/staging/fbtft/fb_ssd1289.c
3562 +@@ -28,7 +28,7 @@ static int init_display(struct fbtft_par *par)
3563 + {
3564 + par->fbtftops.reset(par);
3565 +
3566 +- if (!par->gpio.cs)
3567 ++ if (par->gpio.cs)
3568 + gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
3569 +
3570 + write_reg(par, 0x00, 0x0001);
3571 +diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
3572 +index 4cfe9f8535d0..37622c9462aa 100644
3573 +--- a/drivers/staging/fbtft/fb_ssd1331.c
3574 ++++ b/drivers/staging/fbtft/fb_ssd1331.c
3575 +@@ -81,7 +81,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
3576 + va_start(args, len);
3577 +
3578 + *buf = (u8)va_arg(args, unsigned int);
3579 +- if (!par->gpio.dc)
3580 ++ if (par->gpio.dc)
3581 + gpiod_set_value(par->gpio.dc, 0);
3582 + ret = par->fbtftops.write(par, par->buf, sizeof(u8));
3583 + if (ret < 0) {
3584 +@@ -104,7 +104,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
3585 + return;
3586 + }
3587 + }
3588 +- if (!par->gpio.dc)
3589 ++ if (par->gpio.dc)
3590 + gpiod_set_value(par->gpio.dc, 1);
3591 + va_end(args);
3592 + }
3593 +diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c
3594 +index 564a38e34440..c77832ae5e5b 100644
3595 +--- a/drivers/staging/fbtft/fb_upd161704.c
3596 ++++ b/drivers/staging/fbtft/fb_upd161704.c
3597 +@@ -26,7 +26,7 @@ static int init_display(struct fbtft_par *par)
3598 + {
3599 + par->fbtftops.reset(par);
3600 +
3601 +- if (!par->gpio.cs)
3602 ++ if (par->gpio.cs)
3603 + gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
3604 +
3605 + /* Initialization sequence from Lib_UTFT */
3606 +diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
3607 +index 2ea814d0dca5..63c65dd67b17 100644
3608 +--- a/drivers/staging/fbtft/fbtft-bus.c
3609 ++++ b/drivers/staging/fbtft/fbtft-bus.c
3610 +@@ -135,7 +135,7 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
3611 + remain = len / 2;
3612 + vmem16 = (u16 *)(par->info->screen_buffer + offset);
3613 +
3614 +- if (!par->gpio.dc)
3615 ++ if (par->gpio.dc)
3616 + gpiod_set_value(par->gpio.dc, 1);
3617 +
3618 + /* non buffered write */
3619 +diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
3620 +index bc750250ccd6..5127de922f6a 100644
3621 +--- a/drivers/staging/fbtft/fbtft-core.c
3622 ++++ b/drivers/staging/fbtft/fbtft-core.c
3623 +@@ -916,7 +916,7 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
3624 + return -EINVAL;
3625 +
3626 + par->fbtftops.reset(par);
3627 +- if (!par->gpio.cs)
3628 ++ if (par->gpio.cs)
3629 + gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
3630 +
3631 + while (p) {
3632 +@@ -1007,7 +1007,7 @@ int fbtft_init_display(struct fbtft_par *par)
3633 + }
3634 +
3635 + par->fbtftops.reset(par);
3636 +- if (!par->gpio.cs)
3637 ++ if (par->gpio.cs)
3638 + gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
3639 +
3640 + i = 0;
3641 +diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
3642 +index a47c541f8006..ba30f4f33e7c 100644
3643 +--- a/fs/ceph/addr.c
3644 ++++ b/fs/ceph/addr.c
3645 +@@ -912,8 +912,9 @@ get_more_pages:
3646 + if (page_offset(page) >= ceph_wbc.i_size) {
3647 + dout("%p page eof %llu\n",
3648 + page, ceph_wbc.i_size);
3649 +- if (ceph_wbc.size_stable ||
3650 +- page_offset(page) >= i_size_read(inode))
3651 ++ if ((ceph_wbc.size_stable ||
3652 ++ page_offset(page) >= i_size_read(inode)) &&
3653 ++ clear_page_dirty_for_io(page))
3654 + mapping->a_ops->invalidatepage(page,
3655 + 0, PAGE_SIZE);
3656 + unlock_page(page);
3657 +diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
3658 +index ac9b53b89365..5083e238ad15 100644
3659 +--- a/fs/ceph/locks.c
3660 ++++ b/fs/ceph/locks.c
3661 +@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
3662 + req->r_wait_for_completion = ceph_lock_wait_for_completion;
3663 +
3664 + err = ceph_mdsc_do_request(mdsc, inode, req);
3665 +-
3666 +- if (operation == CEPH_MDS_OP_GETFILELOCK) {
3667 ++ if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
3668 + fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
3669 + if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
3670 + fl->fl_type = F_RDLCK;
3671 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3672 +index 2ec37dc589a7..42de31d20616 100644
3673 +--- a/fs/cifs/smb2ops.c
3674 ++++ b/fs/cifs/smb2ops.c
3675 +@@ -3439,7 +3439,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
3676 + static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
3677 + unsigned int buflen)
3678 + {
3679 +- sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
3680 ++ void *addr;
3681 ++ /*
3682 ++ * VMAP_STACK (at least) puts stack into the vmalloc address space
3683 ++ */
3684 ++ if (is_vmalloc_addr(buf))
3685 ++ addr = vmalloc_to_page(buf);
3686 ++ else
3687 ++ addr = virt_to_page(buf);
3688 ++ sg_set_page(sg, addr, buflen, offset_in_page(buf));
3689 + }
3690 +
3691 + /* Assumes the first rqst has a transform header as the first iov.
3692 +@@ -4015,7 +4023,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
3693 + {
3694 + int ret, length;
3695 + char *buf = server->smallbuf;
3696 +- char *tmpbuf;
3697 + struct smb2_sync_hdr *shdr;
3698 + unsigned int pdu_length = server->pdu_size;
3699 + unsigned int buf_size;
3700 +@@ -4045,18 +4052,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
3701 + return length;
3702 +
3703 + next_is_large = server->large_buf;
3704 +- one_more:
3705 ++one_more:
3706 + shdr = (struct smb2_sync_hdr *)buf;
3707 + if (shdr->NextCommand) {
3708 +- if (next_is_large) {
3709 +- tmpbuf = server->bigbuf;
3710 ++ if (next_is_large)
3711 + next_buffer = (char *)cifs_buf_get();
3712 +- } else {
3713 +- tmpbuf = server->smallbuf;
3714 ++ else
3715 + next_buffer = (char *)cifs_small_buf_get();
3716 +- }
3717 + memcpy(next_buffer,
3718 +- tmpbuf + le32_to_cpu(shdr->NextCommand),
3719 ++ buf + le32_to_cpu(shdr->NextCommand),
3720 + pdu_length - le32_to_cpu(shdr->NextCommand));
3721 + }
3722 +
3723 +@@ -4085,12 +4089,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
3724 + pdu_length -= le32_to_cpu(shdr->NextCommand);
3725 + server->large_buf = next_is_large;
3726 + if (next_is_large)
3727 +- server->bigbuf = next_buffer;
3728 ++ server->bigbuf = buf = next_buffer;
3729 + else
3730 +- server->smallbuf = next_buffer;
3731 +-
3732 +- buf += le32_to_cpu(shdr->NextCommand);
3733 ++ server->smallbuf = buf = next_buffer;
3734 + goto one_more;
3735 ++ } else if (ret != 0) {
3736 ++ /*
3737 ++ * ret != 0 here means that we didn't get to handle_mid() thus
3738 ++ * server->smallbuf and server->bigbuf are still valid. We need
3739 ++ * to free next_buffer because it is not going to be used
3740 ++ * anywhere.
3741 ++ */
3742 ++ if (next_is_large)
3743 ++ free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
3744 ++ else
3745 ++ free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
3746 + }
3747 +
3748 + return ret;
3749 +diff --git a/fs/io_uring.c b/fs/io_uring.c
3750 +index 61018559e8fe..03cd8f5bba85 100644
3751 +--- a/fs/io_uring.c
3752 ++++ b/fs/io_uring.c
3753 +@@ -618,6 +618,13 @@ static void io_put_req(struct io_kiocb *req)
3754 + io_free_req(req);
3755 + }
3756 +
3757 ++static unsigned io_cqring_events(struct io_cq_ring *ring)
3758 ++{
3759 ++ /* See comment at the top of this file */
3760 ++ smp_rmb();
3761 ++ return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
3762 ++}
3763 ++
3764 + /*
3765 + * Find and free completed poll iocbs
3766 + */
3767 +@@ -709,7 +716,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
3768 + static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
3769 + long min)
3770 + {
3771 +- while (!list_empty(&ctx->poll_list)) {
3772 ++ while (!list_empty(&ctx->poll_list) && !need_resched()) {
3773 + int ret;
3774 +
3775 + ret = io_do_iopoll(ctx, nr_events, min);
3776 +@@ -736,6 +743,12 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
3777 + unsigned int nr_events = 0;
3778 +
3779 + io_iopoll_getevents(ctx, &nr_events, 1);
3780 ++
3781 ++ /*
3782 ++ * Ensure we allow local-to-the-cpu processing to take place,
3783 ++ * in this case we need to ensure that we reap all events.
3784 ++ */
3785 ++ cond_resched();
3786 + }
3787 + mutex_unlock(&ctx->uring_lock);
3788 + }
3789 +@@ -743,11 +756,42 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
3790 + static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
3791 + long min)
3792 + {
3793 +- int ret = 0;
3794 ++ int iters, ret = 0;
3795 +
3796 ++ /*
3797 ++ * We disallow the app entering submit/complete with polling, but we
3798 ++ * still need to lock the ring to prevent racing with polled issue
3799 ++ * that got punted to a workqueue.
3800 ++ */
3801 ++ mutex_lock(&ctx->uring_lock);
3802 ++
3803 ++ iters = 0;
3804 + do {
3805 + int tmin = 0;
3806 +
3807 ++ /*
3808 ++ * Don't enter poll loop if we already have events pending.
3809 ++ * If we do, we can potentially be spinning for commands that
3810 ++ * already triggered a CQE (eg in error).
3811 ++ */
3812 ++ if (io_cqring_events(ctx->cq_ring))
3813 ++ break;
3814 ++
3815 ++ /*
3816 ++ * If a submit got punted to a workqueue, we can have the
3817 ++ * application entering polling for a command before it gets
3818 ++ * issued. That app will hold the uring_lock for the duration
3819 ++ * of the poll right here, so we need to take a breather every
3820 ++ * now and then to ensure that the issue has a chance to add
3821 ++ * the poll to the issued list. Otherwise we can spin here
3822 ++ * forever, while the workqueue is stuck trying to acquire the
3823 ++ * very same mutex.
3824 ++ */
3825 ++ if (!(++iters & 7)) {
3826 ++ mutex_unlock(&ctx->uring_lock);
3827 ++ mutex_lock(&ctx->uring_lock);
3828 ++ }
3829 ++
3830 + if (*nr_events < min)
3831 + tmin = min - *nr_events;
3832 +
3833 +@@ -757,6 +801,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
3834 + ret = 0;
3835 + } while (min && !*nr_events && !need_resched());
3836 +
3837 ++ mutex_unlock(&ctx->uring_lock);
3838 + return ret;
3839 + }
3840 +
3841 +@@ -2073,15 +2118,7 @@ static int io_sq_thread(void *data)
3842 + unsigned nr_events = 0;
3843 +
3844 + if (ctx->flags & IORING_SETUP_IOPOLL) {
3845 +- /*
3846 +- * We disallow the app entering submit/complete
3847 +- * with polling, but we still need to lock the
3848 +- * ring to prevent racing with polled issue
3849 +- * that got punted to a workqueue.
3850 +- */
3851 +- mutex_lock(&ctx->uring_lock);
3852 + io_iopoll_check(ctx, &nr_events, 0);
3853 +- mutex_unlock(&ctx->uring_lock);
3854 + } else {
3855 + /*
3856 + * Normal IO, just pretend everything completed.
3857 +@@ -2216,13 +2253,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
3858 + return submit;
3859 + }
3860 +
3861 +-static unsigned io_cqring_events(struct io_cq_ring *ring)
3862 +-{
3863 +- /* See comment at the top of this file */
3864 +- smp_rmb();
3865 +- return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
3866 +-}
3867 +-
3868 + /*
3869 + * Wait until events become available, if we don't already have some. The
3870 + * application must reap them itself, as they reside on the shared cq ring.
3871 +@@ -2978,9 +3008,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3872 + min_complete = min(min_complete, ctx->cq_entries);
3873 +
3874 + if (ctx->flags & IORING_SETUP_IOPOLL) {
3875 +- mutex_lock(&ctx->uring_lock);
3876 + ret = io_iopoll_check(ctx, &nr_events, min_complete);
3877 +- mutex_unlock(&ctx->uring_lock);
3878 + } else {
3879 + ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
3880 + }
3881 +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
3882 +index 0af854cce8ff..071b90a45933 100644
3883 +--- a/fs/nfs/delegation.c
3884 ++++ b/fs/nfs/delegation.c
3885 +@@ -1046,6 +1046,22 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp)
3886 + nfs4_schedule_state_manager(clp);
3887 + }
3888 +
3889 ++static void
3890 ++nfs_delegation_test_free_expired(struct inode *inode,
3891 ++ nfs4_stateid *stateid,
3892 ++ const struct cred *cred)
3893 ++{
3894 ++ struct nfs_server *server = NFS_SERVER(inode);
3895 ++ const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
3896 ++ int status;
3897 ++
3898 ++ if (!cred)
3899 ++ return;
3900 ++ status = ops->test_and_free_expired(server, stateid, cred);
3901 ++ if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
3902 ++ nfs_remove_bad_delegation(inode, stateid);
3903 ++}
3904 ++
3905 + /**
3906 + * nfs_reap_expired_delegations - reap expired delegations
3907 + * @clp: nfs_client to process
3908 +@@ -1057,7 +1073,6 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp)
3909 + */
3910 + void nfs_reap_expired_delegations(struct nfs_client *clp)
3911 + {
3912 +- const struct nfs4_minor_version_ops *ops = clp->cl_mvops;
3913 + struct nfs_delegation *delegation;
3914 + struct nfs_server *server;
3915 + struct inode *inode;
3916 +@@ -1088,11 +1103,7 @@ restart:
3917 + nfs4_stateid_copy(&stateid, &delegation->stateid);
3918 + clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
3919 + rcu_read_unlock();
3920 +- if (cred != NULL &&
3921 +- ops->test_and_free_expired(server, &stateid, cred) < 0) {
3922 +- nfs_revoke_delegation(inode, &stateid);
3923 +- nfs_inode_find_state_and_recover(inode, &stateid);
3924 +- }
3925 ++ nfs_delegation_test_free_expired(inode, &stateid, cred);
3926 + put_cred(cred);
3927 + if (nfs4_server_rebooted(clp)) {
3928 + nfs_inode_mark_test_expired_delegation(server,inode);
3929 +diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
3930 +index 53507aa96b0b..3800ab6f08fa 100644
3931 +--- a/fs/nfs/fscache.c
3932 ++++ b/fs/nfs/fscache.c
3933 +@@ -114,6 +114,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
3934 + struct rb_node **p, *parent;
3935 + int diff;
3936 +
3937 ++ nfss->fscache_key = NULL;
3938 ++ nfss->fscache = NULL;
3939 ++ if (!(nfss->options & NFS_OPTION_FSCACHE))
3940 ++ return;
3941 + if (!uniq) {
3942 + uniq = "";
3943 + ulen = 1;
3944 +@@ -226,10 +230,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
3945 + void nfs_fscache_init_inode(struct inode *inode)
3946 + {
3947 + struct nfs_fscache_inode_auxdata auxdata;
3948 ++ struct nfs_server *nfss = NFS_SERVER(inode);
3949 + struct nfs_inode *nfsi = NFS_I(inode);
3950 +
3951 + nfsi->fscache = NULL;
3952 +- if (!S_ISREG(inode->i_mode))
3953 ++ if (!(nfss->fscache && S_ISREG(inode->i_mode)))
3954 + return;
3955 +
3956 + memset(&auxdata, 0, sizeof(auxdata));
3957 +diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
3958 +index 25a75e40d91d..ad041cfbf9ec 100644
3959 +--- a/fs/nfs/fscache.h
3960 ++++ b/fs/nfs/fscache.h
3961 +@@ -182,7 +182,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
3962 + */
3963 + static inline const char *nfs_server_fscache_state(struct nfs_server *server)
3964 + {
3965 +- if (server->fscache && (server->options & NFS_OPTION_FSCACHE))
3966 ++ if (server->fscache)
3967 + return "yes";
3968 + return "no ";
3969 + }
3970 +diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
3971 +index 8a38a254f516..235919156edd 100644
3972 +--- a/fs/nfs/nfs4_fs.h
3973 ++++ b/fs/nfs/nfs4_fs.h
3974 +@@ -465,7 +465,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
3975 +
3976 + extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, const struct cred *, gfp_t);
3977 + extern void nfs4_put_state_owner(struct nfs4_state_owner *);
3978 +-extern void nfs4_purge_state_owners(struct nfs_server *);
3979 ++extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
3980 ++extern void nfs4_free_state_owners(struct list_head *head);
3981 + extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
3982 + extern void nfs4_put_open_state(struct nfs4_state *);
3983 + extern void nfs4_close_state(struct nfs4_state *, fmode_t);
3984 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
3985 +index 81b9b6d7927a..208a236dc235 100644
3986 +--- a/fs/nfs/nfs4client.c
3987 ++++ b/fs/nfs/nfs4client.c
3988 +@@ -758,9 +758,12 @@ out:
3989 +
3990 + static void nfs4_destroy_server(struct nfs_server *server)
3991 + {
3992 ++ LIST_HEAD(freeme);
3993 ++
3994 + nfs_server_return_all_delegations(server);
3995 + unset_pnfs_layoutdriver(server);
3996 +- nfs4_purge_state_owners(server);
3997 ++ nfs4_purge_state_owners(server, &freeme);
3998 ++ nfs4_free_state_owners(&freeme);
3999 + }
4000 +
4001 + /*
4002 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4003 +index 63edda145d1b..2023011c7a8f 100644
4004 +--- a/fs/nfs/nfs4proc.c
4005 ++++ b/fs/nfs/nfs4proc.c
4006 +@@ -1654,6 +1654,14 @@ static void nfs_state_set_open_stateid(struct nfs4_state *state,
4007 + write_sequnlock(&state->seqlock);
4008 + }
4009 +
4010 ++static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
4011 ++{
4012 ++ clear_bit(NFS_O_RDWR_STATE, &state->flags);
4013 ++ clear_bit(NFS_O_WRONLY_STATE, &state->flags);
4014 ++ clear_bit(NFS_O_RDONLY_STATE, &state->flags);
4015 ++ clear_bit(NFS_OPEN_STATE, &state->flags);
4016 ++}
4017 ++
4018 + static void nfs_state_set_delegation(struct nfs4_state *state,
4019 + const nfs4_stateid *deleg_stateid,
4020 + fmode_t fmode)
4021 +@@ -2049,13 +2057,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
4022 + {
4023 + int ret;
4024 +
4025 +- /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
4026 +- clear_bit(NFS_O_RDWR_STATE, &state->flags);
4027 +- clear_bit(NFS_O_WRONLY_STATE, &state->flags);
4028 +- clear_bit(NFS_O_RDONLY_STATE, &state->flags);
4029 + /* memory barrier prior to reading state->n_* */
4030 +- clear_bit(NFS_DELEGATED_STATE, &state->flags);
4031 +- clear_bit(NFS_OPEN_STATE, &state->flags);
4032 + smp_rmb();
4033 + ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
4034 + if (ret != 0)
4035 +@@ -2131,6 +2133,8 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
4036 + ctx = nfs4_state_find_open_context(state);
4037 + if (IS_ERR(ctx))
4038 + return -EAGAIN;
4039 ++ clear_bit(NFS_DELEGATED_STATE, &state->flags);
4040 ++ nfs_state_clear_open_state_flags(state);
4041 + ret = nfs4_do_open_reclaim(ctx, state);
4042 + put_nfs_open_context(ctx);
4043 + return ret;
4044 +@@ -2146,6 +2150,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
4045 + case -ENOENT:
4046 + case -EAGAIN:
4047 + case -ESTALE:
4048 ++ case -ETIMEDOUT:
4049 + break;
4050 + case -NFS4ERR_BADSESSION:
4051 + case -NFS4ERR_BADSLOT:
4052 +@@ -2466,6 +2471,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data,
4053 + if (!ctx) {
4054 + nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
4055 + data->is_recover = true;
4056 ++ task_setup_data.flags |= RPC_TASK_TIMEOUT;
4057 + } else {
4058 + nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
4059 + pnfs_lgopen_prepare(data, ctx);
4060 +@@ -2672,6 +2678,7 @@ static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
4061 + {
4062 + /* NFSv4.0 doesn't allow for delegation recovery on open expire */
4063 + nfs40_clear_delegation_stateid(state);
4064 ++ nfs_state_clear_open_state_flags(state);
4065 + return nfs4_open_expired(sp, state);
4066 + }
4067 +
4068 +@@ -2714,13 +2721,13 @@ out_free:
4069 + return -NFS4ERR_EXPIRED;
4070 + }
4071 +
4072 +-static void nfs41_check_delegation_stateid(struct nfs4_state *state)
4073 ++static int nfs41_check_delegation_stateid(struct nfs4_state *state)
4074 + {
4075 + struct nfs_server *server = NFS_SERVER(state->inode);
4076 + nfs4_stateid stateid;
4077 + struct nfs_delegation *delegation;
4078 + const struct cred *cred = NULL;
4079 +- int status;
4080 ++ int status, ret = NFS_OK;
4081 +
4082 + /* Get the delegation credential for use by test/free_stateid */
4083 + rcu_read_lock();
4084 +@@ -2728,20 +2735,15 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
4085 + if (delegation == NULL) {
4086 + rcu_read_unlock();
4087 + nfs_state_clear_delegation(state);
4088 +- return;
4089 ++ return NFS_OK;
4090 + }
4091 +
4092 + nfs4_stateid_copy(&stateid, &delegation->stateid);
4093 +- if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
4094 +- rcu_read_unlock();
4095 +- nfs_state_clear_delegation(state);
4096 +- return;
4097 +- }
4098 +
4099 + if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
4100 + &delegation->flags)) {
4101 + rcu_read_unlock();
4102 +- return;
4103 ++ return NFS_OK;
4104 + }
4105 +
4106 + if (delegation->cred)
4107 +@@ -2751,9 +2753,24 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
4108 + trace_nfs4_test_delegation_stateid(state, NULL, status);
4109 + if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
4110 + nfs_finish_clear_delegation_stateid(state, &stateid);
4111 ++ else
4112 ++ ret = status;
4113 +
4114 +- if (delegation->cred)
4115 +- put_cred(cred);
4116 ++ put_cred(cred);
4117 ++ return ret;
4118 ++}
4119 ++
4120 ++static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
4121 ++{
4122 ++ nfs4_stateid tmp;
4123 ++
4124 ++ if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
4125 ++ nfs4_copy_delegation_stateid(state->inode, state->state,
4126 ++ &tmp, NULL) &&
4127 ++ nfs4_stateid_match_other(&state->stateid, &tmp))
4128 ++ nfs_state_set_delegation(state, &tmp, state->state);
4129 ++ else
4130 ++ nfs_state_clear_delegation(state);
4131 + }
4132 +
4133 + /**
4134 +@@ -2823,21 +2840,12 @@ static int nfs41_check_open_stateid(struct nfs4_state *state)
4135 + const struct cred *cred = state->owner->so_cred;
4136 + int status;
4137 +
4138 +- if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) {
4139 +- if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) {
4140 +- if (nfs4_have_delegation(state->inode, state->state))
4141 +- return NFS_OK;
4142 +- return -NFS4ERR_OPENMODE;
4143 +- }
4144 ++ if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
4145 + return -NFS4ERR_BAD_STATEID;
4146 +- }
4147 + status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
4148 + trace_nfs4_test_open_stateid(state, NULL, status);
4149 + if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
4150 +- clear_bit(NFS_O_RDONLY_STATE, &state->flags);
4151 +- clear_bit(NFS_O_WRONLY_STATE, &state->flags);
4152 +- clear_bit(NFS_O_RDWR_STATE, &state->flags);
4153 +- clear_bit(NFS_OPEN_STATE, &state->flags);
4154 ++ nfs_state_clear_open_state_flags(state);
4155 + stateid->type = NFS4_INVALID_STATEID_TYPE;
4156 + return status;
4157 + }
4158 +@@ -2850,7 +2858,11 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
4159 + {
4160 + int status;
4161 +
4162 +- nfs41_check_delegation_stateid(state);
4163 ++ status = nfs41_check_delegation_stateid(state);
4164 ++ if (status != NFS_OK)
4165 ++ return status;
4166 ++ nfs41_delegation_recover_stateid(state);
4167 ++
4168 + status = nfs41_check_expired_locks(state);
4169 + if (status != NFS_OK)
4170 + return status;
4171 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
4172 +index e2e3c4f04d3e..0e69cd846afb 100644
4173 +--- a/fs/nfs/nfs4state.c
4174 ++++ b/fs/nfs/nfs4state.c
4175 +@@ -624,24 +624,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
4176 + /**
4177 + * nfs4_purge_state_owners - Release all cached state owners
4178 + * @server: nfs_server with cached state owners to release
4179 ++ * @head: resulting list of state owners
4180 + *
4181 + * Called at umount time. Remaining state owners will be on
4182 + * the LRU with ref count of zero.
4183 ++ * Note that the state owners are not freed, but are added
4184 ++ * to the list @head, which can later be used as an argument
4185 ++ * to nfs4_free_state_owners.
4186 + */
4187 +-void nfs4_purge_state_owners(struct nfs_server *server)
4188 ++void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
4189 + {
4190 + struct nfs_client *clp = server->nfs_client;
4191 + struct nfs4_state_owner *sp, *tmp;
4192 +- LIST_HEAD(doomed);
4193 +
4194 + spin_lock(&clp->cl_lock);
4195 + list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
4196 +- list_move(&sp->so_lru, &doomed);
4197 ++ list_move(&sp->so_lru, head);
4198 + nfs4_remove_state_owner_locked(sp);
4199 + }
4200 + spin_unlock(&clp->cl_lock);
4201 ++}
4202 +
4203 +- list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
4204 ++/**
4205 ++ * nfs4_purge_state_owners - Release all cached state owners
4206 ++ * @head: resulting list of state owners
4207 ++ *
4208 ++ * Frees a list of state owners that was generated by
4209 ++ * nfs4_purge_state_owners
4210 ++ */
4211 ++void nfs4_free_state_owners(struct list_head *head)
4212 ++{
4213 ++ struct nfs4_state_owner *sp, *tmp;
4214 ++
4215 ++ list_for_each_entry_safe(sp, tmp, head, so_lru) {
4216 + list_del(&sp->so_lru);
4217 + nfs4_free_state_owner(sp);
4218 + }
4219 +@@ -1513,6 +1528,7 @@ restart:
4220 + switch (status) {
4221 + case 0:
4222 + break;
4223 ++ case -ETIMEDOUT:
4224 + case -ESTALE:
4225 + case -NFS4ERR_ADMIN_REVOKED:
4226 + case -NFS4ERR_STALE_STATEID:
4227 +@@ -1606,6 +1622,7 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st
4228 + static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
4229 + {
4230 + struct nfs4_state *state;
4231 ++ unsigned int loop = 0;
4232 + int status = 0;
4233 +
4234 + /* Note: we rely on the sp->so_states list being ordered
4235 +@@ -1632,8 +1649,10 @@ restart:
4236 +
4237 + switch (status) {
4238 + default:
4239 +- if (status >= 0)
4240 ++ if (status >= 0) {
4241 ++ loop = 0;
4242 + break;
4243 ++ }
4244 + printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status);
4245 + /* Fall through */
4246 + case -ENOENT:
4247 +@@ -1647,6 +1666,10 @@ restart:
4248 + break;
4249 + case -EAGAIN:
4250 + ssleep(1);
4251 ++ if (loop++ < 10) {
4252 ++ set_bit(ops->state_flag_bit, &state->flags);
4253 ++ break;
4254 ++ }
4255 + /* Fall through */
4256 + case -NFS4ERR_ADMIN_REVOKED:
4257 + case -NFS4ERR_STALE_STATEID:
4258 +@@ -1659,11 +1682,13 @@ restart:
4259 + case -NFS4ERR_EXPIRED:
4260 + case -NFS4ERR_NO_GRACE:
4261 + nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
4262 ++ /* Fall through */
4263 + case -NFS4ERR_STALE_CLIENTID:
4264 + case -NFS4ERR_BADSESSION:
4265 + case -NFS4ERR_BADSLOT:
4266 + case -NFS4ERR_BAD_HIGH_SLOT:
4267 + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4268 ++ case -ETIMEDOUT:
4269 + goto out_err;
4270 + }
4271 + nfs4_put_open_state(state);
4272 +@@ -1857,12 +1882,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
4273 + struct nfs4_state_owner *sp;
4274 + struct nfs_server *server;
4275 + struct rb_node *pos;
4276 ++ LIST_HEAD(freeme);
4277 + int status = 0;
4278 +
4279 + restart:
4280 + rcu_read_lock();
4281 + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
4282 +- nfs4_purge_state_owners(server);
4283 ++ nfs4_purge_state_owners(server, &freeme);
4284 + spin_lock(&clp->cl_lock);
4285 + for (pos = rb_first(&server->state_owners);
4286 + pos != NULL;
4287 +@@ -1891,6 +1917,7 @@ restart:
4288 + spin_unlock(&clp->cl_lock);
4289 + }
4290 + rcu_read_unlock();
4291 ++ nfs4_free_state_owners(&freeme);
4292 + return 0;
4293 + }
4294 +
4295 +@@ -1946,7 +1973,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
4296 + return -EPERM;
4297 + case -EACCES:
4298 + case -NFS4ERR_DELAY:
4299 +- case -ETIMEDOUT:
4300 + case -EAGAIN:
4301 + ssleep(1);
4302 + break;
4303 +@@ -2575,7 +2601,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
4304 + }
4305 +
4306 + /* Now recover expired state... */
4307 +- if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
4308 ++ if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
4309 + section = "reclaim nograce";
4310 + status = nfs4_do_reclaim(clp,
4311 + clp->cl_mvops->nograce_recovery_ops);
4312 +@@ -2583,6 +2609,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
4313 + continue;
4314 + if (status < 0)
4315 + goto out_error;
4316 ++ clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
4317 + }
4318 +
4319 + nfs4_end_drain_session(clp);
4320 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
4321 +index f88ddac2dcdf..4d375b517eda 100644
4322 +--- a/fs/nfs/super.c
4323 ++++ b/fs/nfs/super.c
4324 +@@ -2239,6 +2239,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
4325 + data->acdirmin != nfss->acdirmin / HZ ||
4326 + data->acdirmax != nfss->acdirmax / HZ ||
4327 + data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
4328 ++ (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
4329 + data->nfs_server.port != nfss->port ||
4330 + data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
4331 + !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
4332 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
4333 +index ccbdbd62f0d8..fe6d804a38dc 100644
4334 +--- a/fs/userfaultfd.c
4335 ++++ b/fs/userfaultfd.c
4336 +@@ -880,6 +880,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
4337 + /* len == 0 means wake all */
4338 + struct userfaultfd_wake_range range = { .len = 0, };
4339 + unsigned long new_flags;
4340 ++ bool still_valid;
4341 +
4342 + WRITE_ONCE(ctx->released, true);
4343 +
4344 +@@ -895,8 +896,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
4345 + * taking the mmap_sem for writing.
4346 + */
4347 + down_write(&mm->mmap_sem);
4348 +- if (!mmget_still_valid(mm))
4349 +- goto skip_mm;
4350 ++ still_valid = mmget_still_valid(mm);
4351 + prev = NULL;
4352 + for (vma = mm->mmap; vma; vma = vma->vm_next) {
4353 + cond_resched();
4354 +@@ -907,19 +907,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
4355 + continue;
4356 + }
4357 + new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
4358 +- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
4359 +- new_flags, vma->anon_vma,
4360 +- vma->vm_file, vma->vm_pgoff,
4361 +- vma_policy(vma),
4362 +- NULL_VM_UFFD_CTX);
4363 +- if (prev)
4364 +- vma = prev;
4365 +- else
4366 +- prev = vma;
4367 ++ if (still_valid) {
4368 ++ prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
4369 ++ new_flags, vma->anon_vma,
4370 ++ vma->vm_file, vma->vm_pgoff,
4371 ++ vma_policy(vma),
4372 ++ NULL_VM_UFFD_CTX);
4373 ++ if (prev)
4374 ++ vma = prev;
4375 ++ else
4376 ++ prev = vma;
4377 ++ }
4378 + vma->vm_flags = new_flags;
4379 + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
4380 + }
4381 +-skip_mm:
4382 + up_write(&mm->mmap_sem);
4383 + mmput(mm);
4384 + wakeup:
4385 +diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
4386 +index 74047bd0c1ae..e427ad097e2e 100644
4387 +--- a/fs/xfs/xfs_iops.c
4388 ++++ b/fs/xfs/xfs_iops.c
4389 +@@ -803,6 +803,7 @@ xfs_setattr_nonsize(
4390 +
4391 + out_cancel:
4392 + xfs_trans_cancel(tp);
4393 ++ xfs_iunlock(ip, XFS_ILOCK_EXCL);
4394 + out_dqrele:
4395 + xfs_qm_dqrele(udqp);
4396 + xfs_qm_dqrele(gdqp);
4397 +diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
4398 +index 8fb5be3ca0ca..8b13bd05befa 100644
4399 +--- a/include/net/cfg80211.h
4400 ++++ b/include/net/cfg80211.h
4401 +@@ -7254,6 +7254,21 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
4402 + struct cfg80211_pmsr_request *req,
4403 + gfp_t gfp);
4404 +
4405 ++/**
4406 ++ * cfg80211_iftype_allowed - check whether the interface can be allowed
4407 ++ * @wiphy: the wiphy
4408 ++ * @iftype: interface type
4409 ++ * @is_4addr: use_4addr flag, must be '0' when check_swif is '1'
4410 ++ * @check_swif: check iftype against software interfaces
4411 ++ *
4412 ++ * Check whether the interface is allowed to operate; additionally, this API
4413 ++ * can be used to check iftype against the software interfaces when
4414 ++ * check_swif is '1'.
4415 ++ */
4416 ++bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
4417 ++ bool is_4addr, u8 check_swif);
4418 ++
4419 ++
4420 + /* Logging, debugging and troubleshooting/diagnostic helpers. */
4421 +
4422 + /* wiphy_printk helpers, similar to dev_printk */
4423 +diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
4424 +index 3429888347e7..b3609e4c46e0 100644
4425 +--- a/include/sound/simple_card_utils.h
4426 ++++ b/include/sound/simple_card_utils.h
4427 +@@ -149,6 +149,10 @@ inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
4428 + {
4429 + struct device *dev = simple_priv_to_dev(priv);
4430 +
4431 ++ /* dai might be NULL */
4432 ++ if (!dai)
4433 ++ return;
4434 ++
4435 + if (dai->name)
4436 + dev_dbg(dev, "%s dai name = %s\n",
4437 + name, dai->name);
4438 +diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
4439 +index cc1d060cbf13..fa06b528c73c 100644
4440 +--- a/include/trace/events/rxrpc.h
4441 ++++ b/include/trace/events/rxrpc.h
4442 +@@ -498,10 +498,10 @@ rxrpc_tx_points;
4443 + #define E_(a, b) { a, b }
4444 +
4445 + TRACE_EVENT(rxrpc_local,
4446 +- TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
4447 ++ TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
4448 + int usage, const void *where),
4449 +
4450 +- TP_ARGS(local, op, usage, where),
4451 ++ TP_ARGS(local_debug_id, op, usage, where),
4452 +
4453 + TP_STRUCT__entry(
4454 + __field(unsigned int, local )
4455 +@@ -511,7 +511,7 @@ TRACE_EVENT(rxrpc_local,
4456 + ),
4457 +
4458 + TP_fast_assign(
4459 +- __entry->local = local->debug_id;
4460 ++ __entry->local = local_debug_id;
4461 + __entry->op = op;
4462 + __entry->usage = usage;
4463 + __entry->where = where;
4464 +diff --git a/include/uapi/sound/sof/fw.h b/include/uapi/sound/sof/fw.h
4465 +index 1afca973eb09..e9f697467a86 100644
4466 +--- a/include/uapi/sound/sof/fw.h
4467 ++++ b/include/uapi/sound/sof/fw.h
4468 +@@ -13,6 +13,8 @@
4469 + #ifndef __INCLUDE_UAPI_SOF_FW_H__
4470 + #define __INCLUDE_UAPI_SOF_FW_H__
4471 +
4472 ++#include <linux/types.h>
4473 ++
4474 + #define SND_SOF_FW_SIG_SIZE 4
4475 + #define SND_SOF_FW_ABI 1
4476 + #define SND_SOF_FW_SIG "Reef"
4477 +@@ -46,8 +48,8 @@ enum snd_sof_fw_blk_type {
4478 +
4479 + struct snd_sof_blk_hdr {
4480 + enum snd_sof_fw_blk_type type;
4481 +- uint32_t size; /* bytes minus this header */
4482 +- uint32_t offset; /* offset from base */
4483 ++ __u32 size; /* bytes minus this header */
4484 ++ __u32 offset; /* offset from base */
4485 + } __packed;
4486 +
4487 + /*
4488 +@@ -61,8 +63,8 @@ enum snd_sof_fw_mod_type {
4489 +
4490 + struct snd_sof_mod_hdr {
4491 + enum snd_sof_fw_mod_type type;
4492 +- uint32_t size; /* bytes minus this header */
4493 +- uint32_t num_blocks; /* number of blocks */
4494 ++ __u32 size; /* bytes minus this header */
4495 ++ __u32 num_blocks; /* number of blocks */
4496 + } __packed;
4497 +
4498 + /*
4499 +@@ -70,9 +72,9 @@ struct snd_sof_mod_hdr {
4500 + */
4501 + struct snd_sof_fw_header {
4502 + unsigned char sig[SND_SOF_FW_SIG_SIZE]; /* "Reef" */
4503 +- uint32_t file_size; /* size of file minus this header */
4504 +- uint32_t num_modules; /* number of modules */
4505 +- uint32_t abi; /* version of header format */
4506 ++ __u32 file_size; /* size of file minus this header */
4507 ++ __u32 num_modules; /* number of modules */
4508 ++ __u32 abi; /* version of header format */
4509 + } __packed;
4510 +
4511 + #endif
4512 +diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h
4513 +index 7868990b0d6f..5f4518e7a972 100644
4514 +--- a/include/uapi/sound/sof/header.h
4515 ++++ b/include/uapi/sound/sof/header.h
4516 +@@ -9,6 +9,8 @@
4517 + #ifndef __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__
4518 + #define __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__
4519 +
4520 ++#include <linux/types.h>
4521 ++
4522 + /*
4523 + * Header for all non IPC ABI data.
4524 + *
4525 +@@ -16,12 +18,12 @@
4526 + * Used by any bespoke component data structures or binary blobs.
4527 + */
4528 + struct sof_abi_hdr {
4529 +- uint32_t magic; /**< 'S', 'O', 'F', '\0' */
4530 +- uint32_t type; /**< component specific type */
4531 +- uint32_t size; /**< size in bytes of data excl. this struct */
4532 +- uint32_t abi; /**< SOF ABI version */
4533 +- uint32_t reserved[4]; /**< reserved for future use */
4534 +- uint32_t data[0]; /**< Component data - opaque to core */
4535 ++ __u32 magic; /**< 'S', 'O', 'F', '\0' */
4536 ++ __u32 type; /**< component specific type */
4537 ++ __u32 size; /**< size in bytes of data excl. this struct */
4538 ++ __u32 abi; /**< SOF ABI version */
4539 ++ __u32 reserved[4]; /**< reserved for future use */
4540 ++ __u32 data[0]; /**< Component data - opaque to core */
4541 + } __packed;
4542 +
4543 + #endif
4544 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
4545 +index 9484e88dabc2..9be995fc3c5a 100644
4546 +--- a/kernel/irq/irqdesc.c
4547 ++++ b/kernel/irq/irqdesc.c
4548 +@@ -295,6 +295,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
4549 + }
4550 + }
4551 +
4552 ++static void irq_sysfs_del(struct irq_desc *desc)
4553 ++{
4554 ++ /*
4555 ++ * If irq_sysfs_init() has not yet been invoked (early boot), then
4556 ++ * irq_kobj_base is NULL and the descriptor was never added.
4557 ++ * kobject_del() complains about an object with no parent, so make
4558 ++ * it conditional.
4559 ++ */
4560 ++ if (irq_kobj_base)
4561 ++ kobject_del(&desc->kobj);
4562 ++}
4563 ++
4564 + static int __init irq_sysfs_init(void)
4565 + {
4566 + struct irq_desc *desc;
4567 +@@ -325,6 +337,7 @@ static struct kobj_type irq_kobj_type = {
4568 + };
4569 +
4570 + static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
4571 ++static void irq_sysfs_del(struct irq_desc *desc) {}
4572 +
4573 + #endif /* CONFIG_SYSFS */
4574 +
4575 +@@ -438,7 +451,7 @@ static void free_desc(unsigned int irq)
4576 + * The sysfs entry must be serialized against a concurrent
4577 + * irq_sysfs_init() as well.
4578 + */
4579 +- kobject_del(&desc->kobj);
4580 ++ irq_sysfs_del(desc);
4581 + delete_irq_desc(irq);
4582 +
4583 + /*
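
The irqdesc.c hunk introduces irq_sysfs_del() so free_desc() can call one helper unconditionally: the real version skips kobject_del() when sysfs was never initialized, and the CONFIG_SYSFS=n build gets an empty stub. A small userspace sketch of that guard-plus-stub pattern (hypothetical names, not kernel APIs):

#include <stdio.h>

struct item { const char *name; int registered; };

#ifdef HAVE_REGISTRY
/* Stand-in for irq_kobj_base: stays NULL until the registry is set up. */
static void *registry_base;

static void item_unregister(struct item *it)
{
        /* Only undo the registration if the registry was ever initialized. */
        if (registry_base && it->registered) {
                printf("unregistering %s\n", it->name);
                it->registered = 0;
        }
}
#else
/* Empty stub keeps the caller free of #ifdefs, as in the CONFIG_SYSFS=n case. */
static void item_unregister(struct item *it) { (void)it; }
#endif

int main(void)
{
        struct item it = { "demo", 1 };

        item_unregister(&it);   /* safe even before the registry exists */
        return 0;
}
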
4584 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
4585 +index 43901fa3f269..1c66480afda8 100644
4586 +--- a/kernel/sched/deadline.c
4587 ++++ b/kernel/sched/deadline.c
4588 +@@ -2088,17 +2088,13 @@ retry:
4589 + }
4590 +
4591 + deactivate_task(rq, next_task, 0);
4592 +- sub_running_bw(&next_task->dl, &rq->dl);
4593 +- sub_rq_bw(&next_task->dl, &rq->dl);
4594 + set_task_cpu(next_task, later_rq->cpu);
4595 +- add_rq_bw(&next_task->dl, &later_rq->dl);
4596 +
4597 + /*
4598 + * Update the later_rq clock here, because the clock is used
4599 + * by the cpufreq_update_util() inside __add_running_bw().
4600 + */
4601 + update_rq_clock(later_rq);
4602 +- add_running_bw(&next_task->dl, &later_rq->dl);
4603 + activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
4604 + ret = 1;
4605 +
4606 +@@ -2186,11 +2182,7 @@ static void pull_dl_task(struct rq *this_rq)
4607 + resched = true;
4608 +
4609 + deactivate_task(src_rq, p, 0);
4610 +- sub_running_bw(&p->dl, &src_rq->dl);
4611 +- sub_rq_bw(&p->dl, &src_rq->dl);
4612 + set_task_cpu(p, this_cpu);
4613 +- add_rq_bw(&p->dl, &this_rq->dl);
4614 +- add_running_bw(&p->dl, &this_rq->dl);
4615 + activate_task(this_rq, p, 0);
4616 + dmin = p->dl.deadline;
4617 +
4618 +diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
4619 +index 7acc632c3b82..6e52b67b420e 100644
4620 +--- a/kernel/sched/psi.c
4621 ++++ b/kernel/sched/psi.c
4622 +@@ -1051,7 +1051,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
4623 +
4624 + if (!rcu_access_pointer(group->poll_kworker)) {
4625 + struct sched_param param = {
4626 +- .sched_priority = MAX_RT_PRIO - 1,
4627 ++ .sched_priority = 1,
4628 + };
4629 + struct kthread_worker *kworker;
4630 +
4631 +@@ -1061,7 +1061,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
4632 + mutex_unlock(&group->trigger_lock);
4633 + return ERR_CAST(kworker);
4634 + }
4635 +- sched_setscheduler(kworker->task, SCHED_FIFO, &param);
4636 ++ sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
4637 + kthread_init_delayed_work(&group->poll_work,
4638 + psi_poll_work);
4639 + rcu_assign_pointer(group->poll_kworker, kworker);
4640 +@@ -1131,7 +1131,15 @@ static void psi_trigger_destroy(struct kref *ref)
4641 + * deadlock while waiting for psi_poll_work to acquire trigger_lock
4642 + */
4643 + if (kworker_to_destroy) {
4644 ++ /*
4645 ++ * After the RCU grace period has expired, the worker
4646 ++ * can no longer be found through group->poll_kworker.
4647 ++ * But it might have been already scheduled before
4648 ++ * that - deschedule it cleanly before destroying it.
4649 ++ */
4650 + kthread_cancel_delayed_work_sync(&group->poll_work);
4651 ++ atomic_set(&group->poll_scheduled, 0);
4652 ++
4653 + kthread_destroy_worker(kworker_to_destroy);
4654 + }
4655 + kfree(t);
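
The psi.c change drops the poll kworker from MAX_RT_PRIO - 1 to FIFO priority 1 and switches to sched_setscheduler_nocheck(), the variant that skips the permission check for kernel-internal threads. For comparison, the userspace-visible POSIX call takes the same policy and priority; a short sketch (needs root or CAP_SYS_NICE to succeed):

#include <stdio.h>
#include <sched.h>
#include <string.h>
#include <errno.h>

int main(void)
{
        /* Modest real-time priority, mirroring the value chosen in the patch. */
        struct sched_param param = { .sched_priority = 1 };

        /* pid 0 means the calling process. */
        if (sched_setscheduler(0, SCHED_FIFO, &param) == -1) {
                fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
                return 1;
        }
        printf("now running SCHED_FIFO at priority %d\n", param.sched_priority);
        return 0;
}
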
4656 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
4657 +index 885642c82aaa..83236e22a8a8 100644
4658 +--- a/mm/huge_memory.c
4659 ++++ b/mm/huge_memory.c
4660 +@@ -32,6 +32,7 @@
4661 + #include <linux/shmem_fs.h>
4662 + #include <linux/oom.h>
4663 + #include <linux/numa.h>
4664 ++#include <linux/page_owner.h>
4665 +
4666 + #include <asm/tlb.h>
4667 + #include <asm/pgalloc.h>
4668 +@@ -2500,6 +2501,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
4669 + }
4670 +
4671 + ClearPageCompound(head);
4672 ++
4673 ++ split_page_owner(head, HPAGE_PMD_ORDER);
4674 ++
4675 + /* See comment in __split_huge_page_tail() */
4676 + if (PageAnon(head)) {
4677 + /* Additional pin to swap cache */
4678 +diff --git a/mm/kasan/common.c b/mm/kasan/common.c
4679 +index 242fdc01aaa9..874d75bb65eb 100644
4680 +--- a/mm/kasan/common.c
4681 ++++ b/mm/kasan/common.c
4682 +@@ -409,8 +409,14 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
4683 + if (IS_ENABLED(CONFIG_KASAN_GENERIC))
4684 + return shadow_byte < 0 ||
4685 + shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
4686 +- else
4687 +- return tag != (u8)shadow_byte;
4688 ++
4689 ++ /* else CONFIG_KASAN_SW_TAGS: */
4690 ++ if ((u8)shadow_byte == KASAN_TAG_INVALID)
4691 ++ return true;
4692 ++ if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
4693 ++ return true;
4694 ++
4695 ++ return false;
4696 + }
4697 +
4698 + static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
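
The KASAN hunk rewrites the software-tags branch of shadow_invalid(): an invalid shadow byte always fails, and the match-all kernel tag is allowed to pass. A standalone sketch of that decision logic (the two constants are illustrative placeholders, not taken from the kernel headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_INVALID 0xFEu   /* placeholder: shadow value for freed/invalid memory */
#define TAG_KERNEL  0xFFu   /* placeholder: "match-all" pointer tag */

static bool shadow_invalid(uint8_t ptr_tag, uint8_t shadow_byte)
{
        if (shadow_byte == TAG_INVALID)
                return true;                      /* already freed or never valid */
        if (ptr_tag != TAG_KERNEL && ptr_tag != shadow_byte)
                return true;                      /* tag mismatch: likely a bad free */
        return false;
}

int main(void)
{
        printf("%d %d %d\n",
               shadow_invalid(0x12, TAG_INVALID),  /* 1: invalid shadow */
               shadow_invalid(TAG_KERNEL, 0x34),   /* 0: match-all tag accepted */
               shadow_invalid(0x12, 0x34));        /* 1: mismatching tags */
        return 0;
}
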
4699 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4700 +index 8f5dabfaf94d..bb783c27ba21 100644
4701 +--- a/mm/memcontrol.c
4702 ++++ b/mm/memcontrol.c
4703 +@@ -3150,6 +3150,60 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
4704 + }
4705 + }
4706 +
4707 ++static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
4708 ++{
4709 ++ unsigned long stat[MEMCG_NR_STAT];
4710 ++ struct mem_cgroup *mi;
4711 ++ int node, cpu, i;
4712 ++
4713 ++ for (i = 0; i < MEMCG_NR_STAT; i++)
4714 ++ stat[i] = 0;
4715 ++
4716 ++ for_each_online_cpu(cpu)
4717 ++ for (i = 0; i < MEMCG_NR_STAT; i++)
4718 ++ stat[i] += raw_cpu_read(memcg->vmstats_percpu->stat[i]);
4719 ++
4720 ++ for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
4721 ++ for (i = 0; i < MEMCG_NR_STAT; i++)
4722 ++ atomic_long_add(stat[i], &mi->vmstats[i]);
4723 ++
4724 ++ for_each_node(node) {
4725 ++ struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
4726 ++ struct mem_cgroup_per_node *pi;
4727 ++
4728 ++ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
4729 ++ stat[i] = 0;
4730 ++
4731 ++ for_each_online_cpu(cpu)
4732 ++ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
4733 ++ stat[i] += raw_cpu_read(
4734 ++ pn->lruvec_stat_cpu->count[i]);
4735 ++
4736 ++ for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
4737 ++ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
4738 ++ atomic_long_add(stat[i], &pi->lruvec_stat[i]);
4739 ++ }
4740 ++}
4741 ++
4742 ++static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
4743 ++{
4744 ++ unsigned long events[NR_VM_EVENT_ITEMS];
4745 ++ struct mem_cgroup *mi;
4746 ++ int cpu, i;
4747 ++
4748 ++ for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
4749 ++ events[i] = 0;
4750 ++
4751 ++ for_each_online_cpu(cpu)
4752 ++ for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
4753 ++ events[i] += raw_cpu_read(
4754 ++ memcg->vmstats_percpu->events[i]);
4755 ++
4756 ++ for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
4757 ++ for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
4758 ++ atomic_long_add(events[i], &mi->vmevents[i]);
4759 ++}
4760 ++
4761 + #ifdef CONFIG_MEMCG_KMEM
4762 + static int memcg_online_kmem(struct mem_cgroup *memcg)
4763 + {
4764 +@@ -4551,6 +4605,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
4765 + {
4766 + int node;
4767 +
4768 ++ /*
4769 ++ * Flush percpu vmstats and vmevents to guarantee the value correctness
4770 ++ * on parent's and all ancestor levels.
4771 ++ */
4772 ++ memcg_flush_percpu_vmstats(memcg);
4773 ++ memcg_flush_percpu_vmevents(memcg);
4774 + for_each_node(node)
4775 + free_mem_cgroup_per_node_info(memcg, node);
4776 + free_percpu(memcg->vmstats_percpu);
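
memcg_flush_percpu_vmstats() and memcg_flush_percpu_vmevents() sum every per-CPU counter once and then propagate the totals to the memcg and each ancestor before the per-CPU storage is freed. A simplified userspace model of that aggregation (hypothetical struct, fixed CPU and stat counts):

#include <stdio.h>

#define NCPUS  4
#define NSTATS 2

struct group {
        struct group *parent;
        long percpu[NCPUS][NSTATS];  /* per-CPU deltas not yet folded in */
        long vmstats[NSTATS];        /* aggregated totals */
};

/* Fold all per-CPU deltas of g into g and every ancestor. */
static void flush_percpu(struct group *g)
{
        long sum[NSTATS] = { 0 };

        for (int cpu = 0; cpu < NCPUS; cpu++)
                for (int i = 0; i < NSTATS; i++)
                        sum[i] += g->percpu[cpu][i];

        for (struct group *mi = g; mi; mi = mi->parent)
                for (int i = 0; i < NSTATS; i++)
                        mi->vmstats[i] += sum[i];
}

int main(void)
{
        struct group root = { 0 }, child = { .parent = &root };

        child.percpu[0][0] = 5;
        child.percpu[3][1] = 7;
        flush_percpu(&child);
        printf("child=%ld/%ld root=%ld/%ld\n",
               child.vmstats[0], child.vmstats[1],
               root.vmstats[0], root.vmstats[1]);
        return 0;
}
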
4777 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4778 +index 8e3bc949ebcc..81177ed87a38 100644
4779 +--- a/mm/page_alloc.c
4780 ++++ b/mm/page_alloc.c
4781 +@@ -2167,27 +2167,12 @@ static int move_freepages(struct zone *zone,
4782 + unsigned int order;
4783 + int pages_moved = 0;
4784 +
4785 +-#ifndef CONFIG_HOLES_IN_ZONE
4786 +- /*
4787 +- * page_zone is not safe to call in this context when
4788 +- * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
4789 +- * anyway as we check zone boundaries in move_freepages_block().
4790 +- * Remove at a later date when no bug reports exist related to
4791 +- * grouping pages by mobility
4792 +- */
4793 +- VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
4794 +- pfn_valid(page_to_pfn(end_page)) &&
4795 +- page_zone(start_page) != page_zone(end_page));
4796 +-#endif
4797 + for (page = start_page; page <= end_page;) {
4798 + if (!pfn_valid_within(page_to_pfn(page))) {
4799 + page++;
4800 + continue;
4801 + }
4802 +
4803 +- /* Make sure we are not inadvertently changing nodes */
4804 +- VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
4805 +-
4806 + if (!PageBuddy(page)) {
4807 + /*
4808 + * We assume that pages that could be isolated for
4809 +@@ -2202,6 +2187,10 @@ static int move_freepages(struct zone *zone,
4810 + continue;
4811 + }
4812 +
4813 ++ /* Make sure we are not inadvertently changing nodes */
4814 ++ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
4815 ++ VM_BUG_ON_PAGE(page_zone(page) != zone, page);
4816 ++
4817 + order = page_order(page);
4818 + move_to_free_area(page, &zone->free_area[order], migratetype);
4819 + page += 1 << order;
4820 +diff --git a/mm/z3fold.c b/mm/z3fold.c
4821 +index c4debbe683eb..46686d0e3df8 100644
4822 +--- a/mm/z3fold.c
4823 ++++ b/mm/z3fold.c
4824 +@@ -41,6 +41,7 @@
4825 + #include <linux/workqueue.h>
4826 + #include <linux/slab.h>
4827 + #include <linux/spinlock.h>
4828 ++#include <linux/wait.h>
4829 + #include <linux/zpool.h>
4830 +
4831 + /*
4832 +@@ -144,6 +145,8 @@ struct z3fold_header {
4833 + * @release_wq: workqueue for safe page release
4834 + * @work: work_struct for safe page release
4835 + * @inode: inode for z3fold pseudo filesystem
4836 ++ * @destroying: bool to stop migration once we start destruction
4837 ++ * @isolated: int to count the number of pages currently in isolation
4838 + *
4839 + * This structure is allocated at pool creation time and maintains metadata
4840 + * pertaining to a particular z3fold pool.
4841 +@@ -162,8 +165,11 @@ struct z3fold_pool {
4842 + const struct zpool_ops *zpool_ops;
4843 + struct workqueue_struct *compact_wq;
4844 + struct workqueue_struct *release_wq;
4845 ++ struct wait_queue_head isolate_wait;
4846 + struct work_struct work;
4847 + struct inode *inode;
4848 ++ bool destroying;
4849 ++ int isolated;
4850 + };
4851 +
4852 + /*
4853 +@@ -771,6 +777,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
4854 + goto out_c;
4855 + spin_lock_init(&pool->lock);
4856 + spin_lock_init(&pool->stale_lock);
4857 ++ init_waitqueue_head(&pool->isolate_wait);
4858 + pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
4859 + if (!pool->unbuddied)
4860 + goto out_pool;
4861 +@@ -810,6 +817,15 @@ out:
4862 + return NULL;
4863 + }
4864 +
4865 ++static bool pool_isolated_are_drained(struct z3fold_pool *pool)
4866 ++{
4867 ++ bool ret;
4868 ++
4869 ++ spin_lock(&pool->lock);
4870 ++ ret = pool->isolated == 0;
4871 ++ spin_unlock(&pool->lock);
4872 ++ return ret;
4873 ++}
4874 + /**
4875 + * z3fold_destroy_pool() - destroys an existing z3fold pool
4876 + * @pool: the z3fold pool to be destroyed
4877 +@@ -819,6 +835,22 @@ out:
4878 + static void z3fold_destroy_pool(struct z3fold_pool *pool)
4879 + {
4880 + kmem_cache_destroy(pool->c_handle);
4881 ++ /*
4882 ++ * We set pool->destroying under lock to ensure that
4883 ++ * z3fold_page_isolate() sees any changes to destroying. This way we
4884 ++ * avoid the need for any memory barriers.
4885 ++ */
4886 ++
4887 ++ spin_lock(&pool->lock);
4888 ++ pool->destroying = true;
4889 ++ spin_unlock(&pool->lock);
4890 ++
4891 ++ /*
4892 ++ * We need to ensure that no pages are being migrated while we destroy
4893 ++ * these workqueues, as migration can queue work on either of the
4894 ++ * workqueues.
4895 ++ */
4896 ++ wait_event(pool->isolate_wait, !pool_isolated_are_drained(pool));
4897 +
4898 + /*
4899 + * We need to destroy pool->compact_wq before pool->release_wq,
4900 +@@ -1309,6 +1341,28 @@ static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
4901 + return atomic64_read(&pool->pages_nr);
4902 + }
4903 +
4904 ++/*
4905 ++ * z3fold_dec_isolated() expects to be called while pool->lock is held.
4906 ++ */
4907 ++static void z3fold_dec_isolated(struct z3fold_pool *pool)
4908 ++{
4909 ++ assert_spin_locked(&pool->lock);
4910 ++ VM_BUG_ON(pool->isolated <= 0);
4911 ++ pool->isolated--;
4912 ++
4913 ++ /*
4914 ++ * If we have no more isolated pages, we have to see if
4915 ++ * z3fold_destroy_pool() is waiting for a signal.
4916 ++ */
4917 ++ if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
4918 ++ wake_up_all(&pool->isolate_wait);
4919 ++}
4920 ++
4921 ++static void z3fold_inc_isolated(struct z3fold_pool *pool)
4922 ++{
4923 ++ pool->isolated++;
4924 ++}
4925 ++
4926 + static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
4927 + {
4928 + struct z3fold_header *zhdr;
4929 +@@ -1335,6 +1389,33 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
4930 + spin_lock(&pool->lock);
4931 + if (!list_empty(&page->lru))
4932 + list_del(&page->lru);
4933 ++ /*
4934 ++ * We need to check for destruction while holding pool->lock, as
4935 ++ * otherwise destruction could see 0 isolated pages, and
4936 ++ * proceed.
4937 ++ */
4938 ++ if (unlikely(pool->destroying)) {
4939 ++ spin_unlock(&pool->lock);
4940 ++ /*
4941 ++ * If this page isn't stale, somebody else holds a
4942 ++ * reference to it. Let's drop our refcount so that they
4943 ++ * can call the release logic.
4944 ++ */
4945 ++ if (unlikely(kref_put(&zhdr->refcount,
4946 ++ release_z3fold_page_locked))) {
4947 ++ /*
4948 ++ * If we get here we have kref problems, so we
4949 ++ * should freak out.
4950 ++ */
4951 ++ WARN(1, "Z3fold is experiencing kref problems\n");
4952 ++ return false;
4953 ++ }
4954 ++ z3fold_page_unlock(zhdr);
4955 ++ return false;
4956 ++ }
4957 ++
4958 ++
4959 ++ z3fold_inc_isolated(pool);
4960 + spin_unlock(&pool->lock);
4961 + z3fold_page_unlock(zhdr);
4962 + return true;
4963 +@@ -1408,6 +1489,10 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
4964 +
4965 + queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
4966 +
4967 ++ spin_lock(&pool->lock);
4968 ++ z3fold_dec_isolated(pool);
4969 ++ spin_unlock(&pool->lock);
4970 ++
4971 + page_mapcount_reset(page);
4972 + unlock_page(page);
4973 + put_page(page);
4974 +@@ -1428,10 +1513,14 @@ static void z3fold_page_putback(struct page *page)
4975 + INIT_LIST_HEAD(&page->lru);
4976 + if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
4977 + atomic64_dec(&pool->pages_nr);
4978 ++ spin_lock(&pool->lock);
4979 ++ z3fold_dec_isolated(pool);
4980 ++ spin_unlock(&pool->lock);
4981 + return;
4982 + }
4983 + spin_lock(&pool->lock);
4984 + list_add(&page->lru, &pool->lru);
4985 ++ z3fold_dec_isolated(pool);
4986 + spin_unlock(&pool->lock);
4987 + z3fold_page_unlock(zhdr);
4988 + }
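
The z3fold changes track how many pages are currently isolated for migration and make pool destruction wait until that count drains, while isolation attempts are refused once teardown has begun. A userspace sketch of the same handshake built on a pthread mutex and condition variable (loose analogue, not the kernel waitqueue API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pool {
        pthread_mutex_t lock;
        pthread_cond_t  drained;
        int  isolated;     /* pages currently pulled out for migration */
        bool destroying;
};

/* Returns false once teardown has started, mirroring z3fold_page_isolate(). */
static bool pool_isolate(struct pool *p)
{
        bool ok;

        pthread_mutex_lock(&p->lock);
        ok = !p->destroying;
        if (ok)
                p->isolated++;
        pthread_mutex_unlock(&p->lock);
        return ok;
}

static void pool_putback(struct pool *p)
{
        pthread_mutex_lock(&p->lock);
        if (--p->isolated == 0)
                pthread_cond_broadcast(&p->drained);
        pthread_mutex_unlock(&p->lock);
}

static void pool_destroy(struct pool *p)
{
        pthread_mutex_lock(&p->lock);
        p->destroying = true;             /* refuse new isolations */
        while (p->isolated > 0)           /* wait for in-flight ones to drain */
                pthread_cond_wait(&p->drained, &p->lock);
        pthread_mutex_unlock(&p->lock);
        printf("pool drained, safe to free\n");
}

int main(void)
{
        struct pool p = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, false };

        if (pool_isolate(&p))
                pool_putback(&p);
        pool_destroy(&p);
        return 0;
}
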
4989 +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
4990 +index 0787d33b80d8..515b00801af2 100644
4991 +--- a/mm/zsmalloc.c
4992 ++++ b/mm/zsmalloc.c
4993 +@@ -53,6 +53,7 @@
4994 + #include <linux/zpool.h>
4995 + #include <linux/mount.h>
4996 + #include <linux/migrate.h>
4997 ++#include <linux/wait.h>
4998 + #include <linux/pagemap.h>
4999 + #include <linux/fs.h>
5000 +
5001 +@@ -267,6 +268,10 @@ struct zs_pool {
5002 + #ifdef CONFIG_COMPACTION
5003 + struct inode *inode;
5004 + struct work_struct free_work;
5005 ++ /* A wait queue for when migration races with async_free_zspage() */
5006 ++ struct wait_queue_head migration_wait;
5007 ++ atomic_long_t isolated_pages;
5008 ++ bool destroying;
5009 + #endif
5010 + };
5011 +
5012 +@@ -1882,6 +1887,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
5013 + zspage->isolated--;
5014 + }
5015 +
5016 ++static void putback_zspage_deferred(struct zs_pool *pool,
5017 ++ struct size_class *class,
5018 ++ struct zspage *zspage)
5019 ++{
5020 ++ enum fullness_group fg;
5021 ++
5022 ++ fg = putback_zspage(class, zspage);
5023 ++ if (fg == ZS_EMPTY)
5024 ++ schedule_work(&pool->free_work);
5025 ++
5026 ++}
5027 ++
5028 ++static inline void zs_pool_dec_isolated(struct zs_pool *pool)
5029 ++{
5030 ++ VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
5031 ++ atomic_long_dec(&pool->isolated_pages);
5032 ++ /*
5033 ++ * There's no possibility of racing, since wait_for_isolated_drain()
5034 ++ * checks the isolated count under &class->lock after enqueuing
5035 ++ * on migration_wait.
5036 ++ */
5037 ++ if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
5038 ++ wake_up_all(&pool->migration_wait);
5039 ++}
5040 ++
5041 + static void replace_sub_page(struct size_class *class, struct zspage *zspage,
5042 + struct page *newpage, struct page *oldpage)
5043 + {
5044 +@@ -1951,6 +1981,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
5045 + */
5046 + if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
5047 + get_zspage_mapping(zspage, &class_idx, &fullness);
5048 ++ atomic_long_inc(&pool->isolated_pages);
5049 + remove_zspage(class, zspage, fullness);
5050 + }
5051 +
5052 +@@ -2050,8 +2081,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
5053 + * Page migration is done so let's putback isolated zspage to
5054 + * the list if @page is final isolated subpage in the zspage.
5055 + */
5056 +- if (!is_zspage_isolated(zspage))
5057 +- putback_zspage(class, zspage);
5058 ++ if (!is_zspage_isolated(zspage)) {
5059 ++ /*
5060 ++ * We cannot race with zs_destroy_pool() here because we wait
5061 ++ * for isolation to hit zero before we start destroying.
5062 ++ * Also, we ensure that everyone can see pool->destroying before
5063 ++ * we start waiting.
5064 ++ */
5065 ++ putback_zspage_deferred(pool, class, zspage);
5066 ++ zs_pool_dec_isolated(pool);
5067 ++ }
5068 +
5069 + reset_page(page);
5070 + put_page(page);
5071 +@@ -2097,13 +2136,12 @@ static void zs_page_putback(struct page *page)
5072 + spin_lock(&class->lock);
5073 + dec_zspage_isolation(zspage);
5074 + if (!is_zspage_isolated(zspage)) {
5075 +- fg = putback_zspage(class, zspage);
5076 + /*
5077 + * Due to page_lock, we cannot free zspage immediately
5078 + * so let's defer.
5079 + */
5080 +- if (fg == ZS_EMPTY)
5081 +- schedule_work(&pool->free_work);
5082 ++ putback_zspage_deferred(pool, class, zspage);
5083 ++ zs_pool_dec_isolated(pool);
5084 + }
5085 + spin_unlock(&class->lock);
5086 + }
5087 +@@ -2127,8 +2165,36 @@ static int zs_register_migration(struct zs_pool *pool)
5088 + return 0;
5089 + }
5090 +
5091 ++static bool pool_isolated_are_drained(struct zs_pool *pool)
5092 ++{
5093 ++ return atomic_long_read(&pool->isolated_pages) == 0;
5094 ++}
5095 ++
5096 ++/* Function for resolving migration */
5097 ++static void wait_for_isolated_drain(struct zs_pool *pool)
5098 ++{
5099 ++
5100 ++ /*
5101 ++ * We're in the process of destroying the pool, so there are no
5102 ++ * active allocations. zs_page_isolate() fails for completely free
5103 ++ * zspages, so we need only wait for the zs_pool's isolated
5104 ++ * count to hit zero.
5105 ++ */
5106 ++ wait_event(pool->migration_wait,
5107 ++ pool_isolated_are_drained(pool));
5108 ++}
5109 ++
5110 + static void zs_unregister_migration(struct zs_pool *pool)
5111 + {
5112 ++ pool->destroying = true;
5113 ++ /*
5114 ++ * We need a memory barrier here to ensure global visibility of
5115 ++ * pool->destroying. Thus pool->isolated pages will either be 0 in which
5116 ++ * case we don't care, or it will be > 0 and pool->destroying will
5117 ++ * ensure that we wake up once isolation hits 0.
5118 ++ */
5119 ++ smp_mb();
5120 ++ wait_for_isolated_drain(pool); /* This can block */
5121 + flush_work(&pool->free_work);
5122 + iput(pool->inode);
5123 + }
5124 +@@ -2366,6 +2432,8 @@ struct zs_pool *zs_create_pool(const char *name)
5125 + if (!pool->name)
5126 + goto err;
5127 +
5128 ++ init_waitqueue_head(&pool->migration_wait);
5129 ++
5130 + if (create_cache(pool))
5131 + goto err;
5132 +
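
zsmalloc solves the same teardown race with an atomic isolated_pages counter plus a destroying flag, relying on smp_mb() so the flag is visible before the destroy path starts checking the counter. A hedged C11 sketch of that publish-then-check ordering, using sequentially consistent atomics in place of the explicit kernel barrier (single-threaded demo; real code sleeps on a waitqueue instead of spinning):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Shared state, mirroring pool->destroying and pool->isolated_pages. */
static atomic_bool destroying;
static atomic_long isolated_pages;

/* Migration side: drop one isolated page and wake the destroyer if needed. */
static void dec_isolated(void)
{
        atomic_fetch_sub(&isolated_pages, 1);
        /*
         * With seq_cst operations the read of `destroying` cannot be
         * observed before the decrement - the role smp_mb() plays in
         * zs_unregister_migration().
         */
        if (atomic_load(&isolated_pages) == 0 && atomic_load(&destroying))
                printf("wake up the destroyer\n");
}

/* Teardown side: publish `destroying`, then wait for the count to drain. */
static void unregister_migration(void)
{
        atomic_store(&destroying, true);
        while (atomic_load(&isolated_pages) != 0)
                ;  /* simplification: the kernel waits on migration_wait */
        printf("no isolated pages left\n");
}

int main(void)
{
        atomic_fetch_add(&isolated_pages, 1);   /* one page is being migrated */
        atomic_store(&destroying, true);        /* teardown announced */
        dec_isolated();                         /* migration finishes, wakes destroyer */
        unregister_migration();                 /* sees the count already at zero */
        return 0;
}
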
5133 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
5134 +index 1fa9ac483173..c8177a89f52c 100644
5135 +--- a/net/bridge/netfilter/ebtables.c
5136 ++++ b/net/bridge/netfilter/ebtables.c
5137 +@@ -2267,8 +2267,10 @@ static int compat_do_replace(struct net *net, void __user *user,
5138 + state.buf_kern_len = size64;
5139 +
5140 + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
5141 +- if (WARN_ON(ret < 0))
5142 ++ if (WARN_ON(ret < 0)) {
5143 ++ vfree(entries_tmp);
5144 + goto out_unlock;
5145 ++ }
5146 +
5147 + vfree(entries_tmp);
5148 + tmp.entries_size = size64;
5149 +diff --git a/net/can/gw.c b/net/can/gw.c
5150 +index 5275ddf580bc..72711053ebe6 100644
5151 +--- a/net/can/gw.c
5152 ++++ b/net/can/gw.c
5153 +@@ -1046,32 +1046,50 @@ static __init int cgw_module_init(void)
5154 + pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
5155 + max_hops);
5156 +
5157 +- register_pernet_subsys(&cangw_pernet_ops);
5158 ++ ret = register_pernet_subsys(&cangw_pernet_ops);
5159 ++ if (ret)
5160 ++ return ret;
5161 ++
5162 ++ ret = -ENOMEM;
5163 + cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
5164 + 0, 0, NULL);
5165 +-
5166 + if (!cgw_cache)
5167 +- return -ENOMEM;
5168 ++ goto out_cache_create;
5169 +
5170 + /* set notifier */
5171 + notifier.notifier_call = cgw_notifier;
5172 +- register_netdevice_notifier(&notifier);
5173 ++ ret = register_netdevice_notifier(&notifier);
5174 ++ if (ret)
5175 ++ goto out_register_notifier;
5176 +
5177 + ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
5178 + NULL, cgw_dump_jobs, 0);
5179 +- if (ret) {
5180 +- unregister_netdevice_notifier(&notifier);
5181 +- kmem_cache_destroy(cgw_cache);
5182 +- return -ENOBUFS;
5183 +- }
5184 +-
5185 +- /* Only the first call to rtnl_register_module can fail */
5186 +- rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
5187 +- cgw_create_job, NULL, 0);
5188 +- rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
5189 +- cgw_remove_job, NULL, 0);
5190 ++ if (ret)
5191 ++ goto out_rtnl_register1;
5192 ++
5193 ++ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
5194 ++ cgw_create_job, NULL, 0);
5195 ++ if (ret)
5196 ++ goto out_rtnl_register2;
5197 ++ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
5198 ++ cgw_remove_job, NULL, 0);
5199 ++ if (ret)
5200 ++ goto out_rtnl_register3;
5201 +
5202 + return 0;
5203 ++
5204 ++out_rtnl_register3:
5205 ++ rtnl_unregister(PF_CAN, RTM_NEWROUTE);
5206 ++out_rtnl_register2:
5207 ++ rtnl_unregister(PF_CAN, RTM_GETROUTE);
5208 ++out_rtnl_register1:
5209 ++ unregister_netdevice_notifier(&notifier);
5210 ++out_register_notifier:
5211 ++ kmem_cache_destroy(cgw_cache);
5212 ++out_cache_create:
5213 ++ unregister_pernet_subsys(&cangw_pernet_ops);
5214 ++
5215 ++ return ret;
5216 + }
5217 +
5218 + static __exit void cgw_module_exit(void)
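
The can/gw.c fix converts cgw_module_init() to the usual error-unwind ladder: every registration is checked and a failure falls through labels that undo the earlier steps in reverse order. A generic sketch of that idiom with hypothetical setup/teardown pairs:

#include <stdio.h>

/* Hypothetical setup/teardown pairs standing in for the registration calls. */
static int  setup_a(void) { puts("setup a"); return 0; }
static void undo_a(void)  { puts("undo a"); }
static int  setup_b(void) { puts("setup b"); return 0; }
static void undo_b(void)  { puts("undo b"); }
static int  setup_c(void) { puts("setup c"); return -1; }  /* simulate failure */

static int module_init_example(void)
{
        int ret;

        ret = setup_a();
        if (ret)
                return ret;

        ret = setup_b();
        if (ret)
                goto out_a;

        ret = setup_c();
        if (ret)
                goto out_b;

        return 0;

out_b:
        undo_b();
out_a:
        undo_a();
        return ret;
}

int main(void)
{
        printf("init returned %d\n", module_init_example());
        return 0;
}
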
5219 +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
5220 +index 9a8eca5eda65..565ea889673c 100644
5221 +--- a/net/ceph/osd_client.c
5222 ++++ b/net/ceph/osd_client.c
5223 +@@ -1504,7 +1504,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
5224 + struct ceph_osds up, acting;
5225 + bool force_resend = false;
5226 + bool unpaused = false;
5227 +- bool legacy_change;
5228 ++ bool legacy_change = false;
5229 + bool split = false;
5230 + bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
5231 + bool recovery_deletes = ceph_osdmap_flag(osdc,
5232 +@@ -1592,15 +1592,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
5233 + t->osd = acting.primary;
5234 + }
5235 +
5236 +- if (unpaused || legacy_change || force_resend ||
5237 +- (split && con && CEPH_HAVE_FEATURE(con->peer_features,
5238 +- RESEND_ON_SPLIT)))
5239 ++ if (unpaused || legacy_change || force_resend || split)
5240 + ct_res = CALC_TARGET_NEED_RESEND;
5241 + else
5242 + ct_res = CALC_TARGET_NO_ACTION;
5243 +
5244 + out:
5245 +- dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
5246 ++ dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
5247 ++ legacy_change, force_resend, split, ct_res, t->osd);
5248 + return ct_res;
5249 + }
5250 +
5251 +diff --git a/net/core/sock_map.c b/net/core/sock_map.c
5252 +index be6092ac69f8..8a4a45e7c29d 100644
5253 +--- a/net/core/sock_map.c
5254 ++++ b/net/core/sock_map.c
5255 +@@ -252,6 +252,8 @@ static void sock_map_free(struct bpf_map *map)
5256 + raw_spin_unlock_bh(&stab->lock);
5257 + rcu_read_unlock();
5258 +
5259 ++ synchronize_rcu();
5260 ++
5261 + bpf_map_area_free(stab->sks);
5262 + kfree(stab);
5263 + }
5264 +@@ -281,16 +283,20 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
5265 + struct sock **psk)
5266 + {
5267 + struct sock *sk;
5268 ++ int err = 0;
5269 +
5270 + raw_spin_lock_bh(&stab->lock);
5271 + sk = *psk;
5272 + if (!sk_test || sk_test == sk)
5273 +- *psk = NULL;
5274 ++ sk = xchg(psk, NULL);
5275 ++
5276 ++ if (likely(sk))
5277 ++ sock_map_unref(sk, psk);
5278 ++ else
5279 ++ err = -EINVAL;
5280 ++
5281 + raw_spin_unlock_bh(&stab->lock);
5282 +- if (unlikely(!sk))
5283 +- return -EINVAL;
5284 +- sock_map_unref(sk, psk);
5285 +- return 0;
5286 ++ return err;
5287 + }
5288 +
5289 + static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
5290 +@@ -333,6 +339,7 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
5291 + struct sock *sk, u64 flags)
5292 + {
5293 + struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
5294 ++ struct inet_connection_sock *icsk = inet_csk(sk);
5295 + struct sk_psock_link *link;
5296 + struct sk_psock *psock;
5297 + struct sock *osk;
5298 +@@ -343,6 +350,8 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
5299 + return -EINVAL;
5300 + if (unlikely(idx >= map->max_entries))
5301 + return -E2BIG;
5302 ++ if (unlikely(icsk->icsk_ulp_data))
5303 ++ return -EINVAL;
5304 +
5305 + link = sk_psock_init_link();
5306 + if (!link)
5307 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
5308 +index 1b224fa27367..ad1e58184c4e 100644
5309 +--- a/net/mac80211/util.c
5310 ++++ b/net/mac80211/util.c
5311 +@@ -3796,9 +3796,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
5312 + }
5313 +
5314 + /* Always allow software iftypes */
5315 +- if (local->hw.wiphy->software_iftypes & BIT(iftype) ||
5316 +- (iftype == NL80211_IFTYPE_AP_VLAN &&
5317 +- local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
5318 ++ if (cfg80211_iftype_allowed(local->hw.wiphy, iftype, 0, 1)) {
5319 + if (radar_detect)
5320 + return -EINVAL;
5321 + return 0;
5322 +@@ -3833,7 +3831,8 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
5323 +
5324 + if (sdata_iter == sdata ||
5325 + !ieee80211_sdata_running(sdata_iter) ||
5326 +- local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
5327 ++ cfg80211_iftype_allowed(local->hw.wiphy,
5328 ++ wdev_iter->iftype, 0, 1))
5329 + continue;
5330 +
5331 + params.iftype_num[wdev_iter->iftype]++;
5332 +diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
5333 +index b73c37b3a791..cfe7b556775f 100644
5334 +--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
5335 ++++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
5336 +@@ -227,7 +227,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
5337 +
5338 + e.id = ip_to_id(map, ip);
5339 +
5340 +- if (opt->flags & IPSET_DIM_ONE_SRC)
5341 ++ if (opt->flags & IPSET_DIM_TWO_SRC)
5342 + ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
5343 + else
5344 + ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
5345 +diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
5346 +index 16afa0df4004..e103c875383a 100644
5347 +--- a/net/netfilter/ipset/ip_set_core.c
5348 ++++ b/net/netfilter/ipset/ip_set_core.c
5349 +@@ -1161,7 +1161,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
5350 + return -ENOENT;
5351 +
5352 + write_lock_bh(&ip_set_ref_lock);
5353 +- if (set->ref != 0) {
5354 ++ if (set->ref != 0 || set->ref_netlink != 0) {
5355 + ret = -IPSET_ERR_REFERENCED;
5356 + goto out;
5357 + }
5358 +diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
5359 +index faf59b6a998f..24d8f4df4230 100644
5360 +--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
5361 ++++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
5362 +@@ -89,15 +89,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
5363 + struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
5364 + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
5365 +
5366 +- /* MAC can be src only */
5367 +- if (!(opt->flags & IPSET_DIM_TWO_SRC))
5368 +- return 0;
5369 +-
5370 + if (skb_mac_header(skb) < skb->head ||
5371 + (skb_mac_header(skb) + ETH_HLEN) > skb->data)
5372 + return -EINVAL;
5373 +
5374 +- if (opt->flags & IPSET_DIM_ONE_SRC)
5375 ++ if (opt->flags & IPSET_DIM_TWO_SRC)
5376 + ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
5377 + else
5378 + ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
5379 +diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
5380 +index d09eaf153544..8c9bd3ae9edf 100644
5381 +--- a/net/rxrpc/af_rxrpc.c
5382 ++++ b/net/rxrpc/af_rxrpc.c
5383 +@@ -193,7 +193,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
5384 +
5385 + service_in_use:
5386 + write_unlock(&local->services_lock);
5387 +- rxrpc_put_local(local);
5388 ++ rxrpc_unuse_local(local);
5389 + ret = -EADDRINUSE;
5390 + error_unlock:
5391 + release_sock(&rx->sk);
5392 +@@ -901,7 +901,7 @@ static int rxrpc_release_sock(struct sock *sk)
5393 + rxrpc_queue_work(&rxnet->service_conn_reaper);
5394 + rxrpc_queue_work(&rxnet->client_conn_reaper);
5395 +
5396 +- rxrpc_put_local(rx->local);
5397 ++ rxrpc_unuse_local(rx->local);
5398 + rx->local = NULL;
5399 + key_put(rx->key);
5400 + rx->key = NULL;
5401 +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
5402 +index 80335b4ee4fd..9796c45d2f6a 100644
5403 +--- a/net/rxrpc/ar-internal.h
5404 ++++ b/net/rxrpc/ar-internal.h
5405 +@@ -254,7 +254,8 @@ struct rxrpc_security {
5406 + */
5407 + struct rxrpc_local {
5408 + struct rcu_head rcu;
5409 +- atomic_t usage;
5410 ++ atomic_t active_users; /* Number of users of the local endpoint */
5411 ++ atomic_t usage; /* Number of references to the structure */
5412 + struct rxrpc_net *rxnet; /* The network ns in which this resides */
5413 + struct list_head link;
5414 + struct socket *socket; /* my UDP socket */
5415 +@@ -1002,6 +1003,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
5416 + struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
5417 + struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
5418 + void rxrpc_put_local(struct rxrpc_local *);
5419 ++struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
5420 ++void rxrpc_unuse_local(struct rxrpc_local *);
5421 + void rxrpc_queue_local(struct rxrpc_local *);
5422 + void rxrpc_destroy_all_locals(struct rxrpc_net *);
5423 +
5424 +@@ -1061,6 +1064,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
5425 + struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
5426 + struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
5427 + void rxrpc_put_peer(struct rxrpc_peer *);
5428 ++void rxrpc_put_peer_locked(struct rxrpc_peer *);
5429 +
5430 + /*
5431 + * proc.c
5432 +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
5433 +index 5bd6f1546e5c..ee95d1cd1cdf 100644
5434 +--- a/net/rxrpc/input.c
5435 ++++ b/net/rxrpc/input.c
5436 +@@ -1108,8 +1108,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
5437 + {
5438 + _enter("%p,%p", local, skb);
5439 +
5440 +- skb_queue_tail(&local->event_queue, skb);
5441 +- rxrpc_queue_local(local);
5442 ++ if (rxrpc_get_local_maybe(local)) {
5443 ++ skb_queue_tail(&local->event_queue, skb);
5444 ++ rxrpc_queue_local(local);
5445 ++ } else {
5446 ++ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
5447 ++ }
5448 + }
5449 +
5450 + /*
5451 +@@ -1119,8 +1123,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
5452 + {
5453 + CHECK_SLAB_OKAY(&local->usage);
5454 +
5455 +- skb_queue_tail(&local->reject_queue, skb);
5456 +- rxrpc_queue_local(local);
5457 ++ if (rxrpc_get_local_maybe(local)) {
5458 ++ skb_queue_tail(&local->reject_queue, skb);
5459 ++ rxrpc_queue_local(local);
5460 ++ } else {
5461 ++ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
5462 ++ }
5463 + }
5464 +
5465 + /*
5466 +diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
5467 +index b1c71bad510b..72a6e12a9304 100644
5468 +--- a/net/rxrpc/local_object.c
5469 ++++ b/net/rxrpc/local_object.c
5470 +@@ -79,6 +79,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
5471 + local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
5472 + if (local) {
5473 + atomic_set(&local->usage, 1);
5474 ++ atomic_set(&local->active_users, 1);
5475 + local->rxnet = rxnet;
5476 + INIT_LIST_HEAD(&local->link);
5477 + INIT_WORK(&local->processor, rxrpc_local_processor);
5478 +@@ -92,7 +93,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
5479 + local->debug_id = atomic_inc_return(&rxrpc_debug_id);
5480 + memcpy(&local->srx, srx, sizeof(*srx));
5481 + local->srx.srx_service = 0;
5482 +- trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
5483 ++ trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
5484 + }
5485 +
5486 + _leave(" = %p", local);
5487 +@@ -266,11 +267,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
5488 + * bind the transport socket may still fail if we're attempting
5489 + * to use a local address that the dying object is still using.
5490 + */
5491 +- if (!rxrpc_get_local_maybe(local)) {
5492 +- cursor = cursor->next;
5493 +- list_del_init(&local->link);
5494 ++ if (!rxrpc_use_local(local))
5495 + break;
5496 +- }
5497 +
5498 + age = "old";
5499 + goto found;
5500 +@@ -284,7 +282,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
5501 + if (ret < 0)
5502 + goto sock_error;
5503 +
5504 +- list_add_tail(&local->link, cursor);
5505 ++ if (cursor != &rxnet->local_endpoints)
5506 ++ list_replace_init(cursor, &local->link);
5507 ++ else
5508 ++ list_add_tail(&local->link, cursor);
5509 + age = "new";
5510 +
5511 + found:
5512 +@@ -320,7 +321,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
5513 + int n;
5514 +
5515 + n = atomic_inc_return(&local->usage);
5516 +- trace_rxrpc_local(local, rxrpc_local_got, n, here);
5517 ++ trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
5518 + return local;
5519 + }
5520 +
5521 +@@ -334,7 +335,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
5522 + if (local) {
5523 + int n = atomic_fetch_add_unless(&local->usage, 1, 0);
5524 + if (n > 0)
5525 +- trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
5526 ++ trace_rxrpc_local(local->debug_id, rxrpc_local_got,
5527 ++ n + 1, here);
5528 + else
5529 + local = NULL;
5530 + }
5531 +@@ -342,24 +344,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
5532 + }
5533 +
5534 + /*
5535 +- * Queue a local endpoint.
5536 ++ * Queue a local endpoint and pass the caller's reference to the work item.
5537 + */
5538 + void rxrpc_queue_local(struct rxrpc_local *local)
5539 + {
5540 + const void *here = __builtin_return_address(0);
5541 ++ unsigned int debug_id = local->debug_id;
5542 ++ int n = atomic_read(&local->usage);
5543 +
5544 + if (rxrpc_queue_work(&local->processor))
5545 +- trace_rxrpc_local(local, rxrpc_local_queued,
5546 +- atomic_read(&local->usage), here);
5547 +-}
5548 +-
5549 +-/*
5550 +- * A local endpoint reached its end of life.
5551 +- */
5552 +-static void __rxrpc_put_local(struct rxrpc_local *local)
5553 +-{
5554 +- _enter("%d", local->debug_id);
5555 +- rxrpc_queue_work(&local->processor);
5556 ++ trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
5557 ++ else
5558 ++ rxrpc_put_local(local);
5559 + }
5560 +
5561 + /*
5562 +@@ -372,10 +368,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
5563 +
5564 + if (local) {
5565 + n = atomic_dec_return(&local->usage);
5566 +- trace_rxrpc_local(local, rxrpc_local_put, n, here);
5567 ++ trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
5568 +
5569 + if (n == 0)
5570 +- __rxrpc_put_local(local);
5571 ++ call_rcu(&local->rcu, rxrpc_local_rcu);
5572 ++ }
5573 ++}
5574 ++
5575 ++/*
5576 ++ * Start using a local endpoint.
5577 ++ */
5578 ++struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
5579 ++{
5580 ++ unsigned int au;
5581 ++
5582 ++ local = rxrpc_get_local_maybe(local);
5583 ++ if (!local)
5584 ++ return NULL;
5585 ++
5586 ++ au = atomic_fetch_add_unless(&local->active_users, 1, 0);
5587 ++ if (au == 0) {
5588 ++ rxrpc_put_local(local);
5589 ++ return NULL;
5590 ++ }
5591 ++
5592 ++ return local;
5593 ++}
5594 ++
5595 ++/*
5596 ++ * Cease using a local endpoint. Once the number of active users reaches 0, we
5597 ++ * start the closure of the transport in the work processor.
5598 ++ */
5599 ++void rxrpc_unuse_local(struct rxrpc_local *local)
5600 ++{
5601 ++ unsigned int au;
5602 ++
5603 ++ if (local) {
5604 ++ au = atomic_dec_return(&local->active_users);
5605 ++ if (au == 0)
5606 ++ rxrpc_queue_local(local);
5607 ++ else
5608 ++ rxrpc_put_local(local);
5609 + }
5610 + }
5611 +
5612 +@@ -393,16 +426,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
5613 +
5614 + _enter("%d", local->debug_id);
5615 +
5616 +- /* We can get a race between an incoming call packet queueing the
5617 +- * processor again and the work processor starting the destruction
5618 +- * process which will shut down the UDP socket.
5619 +- */
5620 +- if (local->dead) {
5621 +- _leave(" [already dead]");
5622 +- return;
5623 +- }
5624 +- local->dead = true;
5625 +-
5626 + mutex_lock(&rxnet->local_mutex);
5627 + list_del_init(&local->link);
5628 + mutex_unlock(&rxnet->local_mutex);
5629 +@@ -422,13 +445,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
5630 + */
5631 + rxrpc_purge_queue(&local->reject_queue);
5632 + rxrpc_purge_queue(&local->event_queue);
5633 +-
5634 +- _debug("rcu local %d", local->debug_id);
5635 +- call_rcu(&local->rcu, rxrpc_local_rcu);
5636 + }
5637 +
5638 + /*
5639 +- * Process events on an endpoint
5640 ++ * Process events on an endpoint. The work item carries a ref which
5641 ++ * we must release.
5642 + */
5643 + static void rxrpc_local_processor(struct work_struct *work)
5644 + {
5645 +@@ -436,13 +457,15 @@ static void rxrpc_local_processor(struct work_struct *work)
5646 + container_of(work, struct rxrpc_local, processor);
5647 + bool again;
5648 +
5649 +- trace_rxrpc_local(local, rxrpc_local_processing,
5650 ++ trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
5651 + atomic_read(&local->usage), NULL);
5652 +
5653 + do {
5654 + again = false;
5655 +- if (atomic_read(&local->usage) == 0)
5656 +- return rxrpc_local_destroyer(local);
5657 ++ if (atomic_read(&local->active_users) == 0) {
5658 ++ rxrpc_local_destroyer(local);
5659 ++ break;
5660 ++ }
5661 +
5662 + if (!skb_queue_empty(&local->reject_queue)) {
5663 + rxrpc_reject_packets(local);
5664 +@@ -454,6 +477,8 @@ static void rxrpc_local_processor(struct work_struct *work)
5665 + again = true;
5666 + }
5667 + } while (again);
5668 ++
5669 ++ rxrpc_put_local(local);
5670 + }
5671 +
5672 + /*
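
The rxrpc rework splits the endpoint's lifetime in two: usage keeps the structure's memory alive, while the new active_users counter keeps the transport open, and the last rxrpc_unuse_local() queues the destroyer. A userspace sketch of the two-counter idea (illustrative names, single-threaded, plain ints instead of atomics):

#include <stdio.h>
#include <stdlib.h>

struct endpoint {
        int usage;         /* references to the structure itself */
        int active_users;  /* users that need the transport open */
};

static struct endpoint *ep_get(struct endpoint *ep)
{
        ep->usage++;
        return ep;
}

static void ep_put(struct endpoint *ep)
{
        if (--ep->usage == 0) {
                puts("last reference gone: free the memory");
                free(ep);
        }
}

static void ep_unuse(struct endpoint *ep)
{
        if (--ep->active_users == 0)
                puts("last active user gone: shut the transport down");
        ep_put(ep);   /* each active user also held a plain reference */
}

int main(void)
{
        struct endpoint *ep = calloc(1, sizeof(*ep));

        if (!ep)
                return 1;
        ep->usage = 1;          /* creator's reference */
        ep->active_users = 1;   /* creator is also an active user */

        ep_get(ep);             /* e.g. a queued work item takes a reference */
        ep_put(ep);             /* ...and drops it when done */

        ep_unuse(ep);           /* socket released: close transport, then free */
        return 0;
}
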
5673 +diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
5674 +index 9f2f45c09e58..7666ec72d37e 100644
5675 +--- a/net/rxrpc/peer_event.c
5676 ++++ b/net/rxrpc/peer_event.c
5677 +@@ -378,7 +378,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
5678 + spin_lock_bh(&rxnet->peer_hash_lock);
5679 + list_add_tail(&peer->keepalive_link,
5680 + &rxnet->peer_keepalive[slot & mask]);
5681 +- rxrpc_put_peer(peer);
5682 ++ rxrpc_put_peer_locked(peer);
5683 + }
5684 +
5685 + spin_unlock_bh(&rxnet->peer_hash_lock);
5686 +diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
5687 +index 9d3ce81cf8ae..9c3ac96f71cb 100644
5688 +--- a/net/rxrpc/peer_object.c
5689 ++++ b/net/rxrpc/peer_object.c
5690 +@@ -436,6 +436,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
5691 + }
5692 + }
5693 +
5694 ++/*
5695 ++ * Drop a ref on a peer record where the caller already holds the
5696 ++ * peer_hash_lock.
5697 ++ */
5698 ++void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
5699 ++{
5700 ++ const void *here = __builtin_return_address(0);
5701 ++ int n;
5702 ++
5703 ++ n = atomic_dec_return(&peer->usage);
5704 ++ trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
5705 ++ if (n == 0) {
5706 ++ hash_del_rcu(&peer->hash_link);
5707 ++ list_del_init(&peer->keepalive_link);
5708 ++ kfree_rcu(peer, rcu);
5709 ++ }
5710 ++}
5711 ++
5712 + /*
5713 + * Make sure all peer records have been discarded.
5714 + */
5715 +diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
5716 +index 5d3f33ce6d41..bae14438f869 100644
5717 +--- a/net/rxrpc/sendmsg.c
5718 ++++ b/net/rxrpc/sendmsg.c
5719 +@@ -226,6 +226,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
5720 + rxrpc_set_call_completion(call,
5721 + RXRPC_CALL_LOCAL_ERROR,
5722 + 0, ret);
5723 ++ rxrpc_notify_socket(call);
5724 + goto out;
5725 + }
5726 + _debug("need instant resend %d", ret);
5727 +diff --git a/net/wireless/core.c b/net/wireless/core.c
5728 +index 53ad3dbb76fe..ed24a0b071c3 100644
5729 +--- a/net/wireless/core.c
5730 ++++ b/net/wireless/core.c
5731 +@@ -1397,10 +1397,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
5732 + }
5733 + break;
5734 + case NETDEV_PRE_UP:
5735 +- if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) &&
5736 +- !(wdev->iftype == NL80211_IFTYPE_AP_VLAN &&
5737 +- rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
5738 +- wdev->use_4addr))
5739 ++ if (!cfg80211_iftype_allowed(wdev->wiphy, wdev->iftype,
5740 ++ wdev->use_4addr, 0))
5741 + return notifier_from_errno(-EOPNOTSUPP);
5742 +
5743 + if (rfkill_blocked(rdev->rfkill))
5744 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5745 +index 520d437aa8d1..88a1de9def11 100644
5746 +--- a/net/wireless/nl80211.c
5747 ++++ b/net/wireless/nl80211.c
5748 +@@ -3481,9 +3481,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
5749 + return err;
5750 + }
5751 +
5752 +- if (!(rdev->wiphy.interface_modes & (1 << type)) &&
5753 +- !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
5754 +- rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
5755 ++ if (!cfg80211_iftype_allowed(&rdev->wiphy, type, params.use_4addr, 0))
5756 + return -EOPNOTSUPP;
5757 +
5758 + err = nl80211_parse_mon_options(rdev, type, info, &params);
5759 +diff --git a/net/wireless/util.c b/net/wireless/util.c
5760 +index 1c39d6a2e850..d0e35b7b9e35 100644
5761 +--- a/net/wireless/util.c
5762 ++++ b/net/wireless/util.c
5763 +@@ -1697,7 +1697,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
5764 + for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
5765 + num_interfaces += params->iftype_num[iftype];
5766 + if (params->iftype_num[iftype] > 0 &&
5767 +- !(wiphy->software_iftypes & BIT(iftype)))
5768 ++ !cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
5769 + used_iftypes |= BIT(iftype);
5770 + }
5771 +
5772 +@@ -1719,7 +1719,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
5773 + return -ENOMEM;
5774 +
5775 + for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
5776 +- if (wiphy->software_iftypes & BIT(iftype))
5777 ++ if (cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
5778 + continue;
5779 + for (j = 0; j < c->n_limits; j++) {
5780 + all_iftypes |= limits[j].types;
5781 +@@ -2072,3 +2072,26 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
5782 + return max_vht_nss;
5783 + }
5784 + EXPORT_SYMBOL(ieee80211_get_vht_max_nss);
5785 ++
5786 ++bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
5787 ++ bool is_4addr, u8 check_swif)
5788 ++
5789 ++{
5790 ++ bool is_vlan = iftype == NL80211_IFTYPE_AP_VLAN;
5791 ++
5792 ++ switch (check_swif) {
5793 ++ case 0:
5794 ++ if (is_vlan && is_4addr)
5795 ++ return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
5796 ++ return wiphy->interface_modes & BIT(iftype);
5797 ++ case 1:
5798 ++ if (!(wiphy->software_iftypes & BIT(iftype)) && is_vlan)
5799 ++ return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
5800 ++ return wiphy->software_iftypes & BIT(iftype);
5801 ++ default:
5802 ++ break;
5803 ++ }
5804 ++
5805 ++ return false;
5806 ++}
5807 ++EXPORT_SYMBOL(cfg80211_iftype_allowed);
5808 +diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
5809 +index 9775bda2a4ca..d8aa6ab3f68b 100644
5810 +--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
5811 ++++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
5812 +@@ -367,9 +367,11 @@ static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_pcm_substream *substream)
5813 +
5814 + static int acp3x_dma_new(struct snd_soc_pcm_runtime *rtd)
5815 + {
5816 ++ struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
5817 ++ DRV_NAME);
5818 ++ struct device *parent = component->dev->parent;
5819 + snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
5820 +- rtd->pcm->card->dev,
5821 +- MIN_BUFFER, MAX_BUFFER);
5822 ++ parent, MIN_BUFFER, MAX_BUFFER);
5823 + return 0;
5824 + }
5825 +
5826 +diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
5827 +index 70ed28d97d49..6398741ebd0e 100644
5828 +--- a/sound/soc/generic/audio-graph-card.c
5829 ++++ b/sound/soc/generic/audio-graph-card.c
5830 +@@ -63,6 +63,7 @@ static int graph_get_dai_id(struct device_node *ep)
5831 + struct device_node *endpoint;
5832 + struct of_endpoint info;
5833 + int i, id;
5834 ++ const u32 *reg;
5835 + int ret;
5836 +
5837 + /* use driver specified DAI ID if exist */
5838 +@@ -83,8 +84,9 @@ static int graph_get_dai_id(struct device_node *ep)
5839 + return info.id;
5840 +
5841 + node = of_get_parent(ep);
5842 ++ reg = of_get_property(node, "reg", NULL);
5843 + of_node_put(node);
5844 +- if (of_get_property(node, "reg", NULL))
5845 ++ if (reg)
5846 + return info.port;
5847 + }
5848 + node = of_graph_get_port_parent(ep);
5849 +@@ -222,10 +224,6 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
5850 +
5851 + dev_dbg(dev, "link_of DPCM (%pOF)\n", ep);
5852 +
5853 +- of_node_put(ports);
5854 +- of_node_put(port);
5855 +- of_node_put(node);
5856 +-
5857 + if (li->cpu) {
5858 + int is_single_links = 0;
5859 +
5860 +@@ -243,17 +241,17 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
5861 +
5862 + ret = asoc_simple_parse_cpu(ep, dai_link, &is_single_links);
5863 + if (ret)
5864 +- return ret;
5865 ++ goto out_put_node;
5866 +
5867 + ret = asoc_simple_parse_clk_cpu(dev, ep, dai_link, dai);
5868 + if (ret < 0)
5869 +- return ret;
5870 ++ goto out_put_node;
5871 +
5872 + ret = asoc_simple_set_dailink_name(dev, dai_link,
5873 + "fe.%s",
5874 + dai_link->cpu_dai_name);
5875 + if (ret < 0)
5876 +- return ret;
5877 ++ goto out_put_node;
5878 +
5879 + /* card->num_links includes Codec */
5880 + asoc_simple_canonicalize_cpu(dai_link, is_single_links);
5881 +@@ -277,17 +275,17 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
5882 +
5883 + ret = asoc_simple_parse_codec(ep, dai_link);
5884 + if (ret < 0)
5885 +- return ret;
5886 ++ goto out_put_node;
5887 +
5888 + ret = asoc_simple_parse_clk_codec(dev, ep, dai_link, dai);
5889 + if (ret < 0)
5890 +- return ret;
5891 ++ goto out_put_node;
5892 +
5893 + ret = asoc_simple_set_dailink_name(dev, dai_link,
5894 + "be.%s",
5895 + codecs->dai_name);
5896 + if (ret < 0)
5897 +- return ret;
5898 ++ goto out_put_node;
5899 +
5900 + /* check "prefix" from top node */
5901 + snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
5902 +@@ -307,19 +305,23 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
5903 +
5904 + ret = asoc_simple_parse_tdm(ep, dai);
5905 + if (ret)
5906 +- return ret;
5907 ++ goto out_put_node;
5908 +
5909 + ret = asoc_simple_parse_daifmt(dev, cpu_ep, codec_ep,
5910 + NULL, &dai_link->dai_fmt);
5911 + if (ret < 0)
5912 +- return ret;
5913 ++ goto out_put_node;
5914 +
5915 + dai_link->dpcm_playback = 1;
5916 + dai_link->dpcm_capture = 1;
5917 + dai_link->ops = &graph_ops;
5918 + dai_link->init = asoc_simple_dai_init;
5919 +
5920 +- return 0;
5921 ++out_put_node:
5922 ++ of_node_put(ports);
5923 ++ of_node_put(port);
5924 ++ of_node_put(node);
5925 ++ return ret;
5926 + }
5927 +
5928 + static int graph_dai_link_of(struct asoc_simple_priv *priv,
5929 +diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
5930 +index 9b568f578bcd..2712a2b20102 100644
5931 +--- a/sound/soc/generic/simple-card.c
5932 ++++ b/sound/soc/generic/simple-card.c
5933 +@@ -138,8 +138,6 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
5934 +
5935 + li->link++;
5936 +
5937 +- of_node_put(node);
5938 +-
5939 + /* For single DAI link & old style of DT node */
5940 + if (is_top)
5941 + prefix = PREFIX;
5942 +@@ -161,17 +159,17 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
5943 +
5944 + ret = asoc_simple_parse_cpu(np, dai_link, &is_single_links);
5945 + if (ret)
5946 +- return ret;
5947 ++ goto out_put_node;
5948 +
5949 + ret = asoc_simple_parse_clk_cpu(dev, np, dai_link, dai);
5950 + if (ret < 0)
5951 +- return ret;
5952 ++ goto out_put_node;
5953 +
5954 + ret = asoc_simple_set_dailink_name(dev, dai_link,
5955 + "fe.%s",
5956 + dai_link->cpu_dai_name);
5957 + if (ret < 0)
5958 +- return ret;
5959 ++ goto out_put_node;
5960 +
5961 + asoc_simple_canonicalize_cpu(dai_link, is_single_links);
5962 + } else {
5963 +@@ -194,17 +192,17 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
5964 +
5965 + ret = asoc_simple_parse_codec(np, dai_link);
5966 + if (ret < 0)
5967 +- return ret;
5968 ++ goto out_put_node;
5969 +
5970 + ret = asoc_simple_parse_clk_codec(dev, np, dai_link, dai);
5971 + if (ret < 0)
5972 +- return ret;
5973 ++ goto out_put_node;
5974 +
5975 + ret = asoc_simple_set_dailink_name(dev, dai_link,
5976 + "be.%s",
5977 + codecs->dai_name);
5978 + if (ret < 0)
5979 +- return ret;
5980 ++ goto out_put_node;
5981 +
5982 + /* check "prefix" from top node */
5983 + snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
5984 +@@ -222,19 +220,21 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
5985 +
5986 + ret = asoc_simple_parse_tdm(np, dai);
5987 + if (ret)
5988 +- return ret;
5989 ++ goto out_put_node;
5990 +
5991 + ret = asoc_simple_parse_daifmt(dev, node, codec,
5992 + prefix, &dai_link->dai_fmt);
5993 + if (ret < 0)
5994 +- return ret;
5995 ++ goto out_put_node;
5996 +
5997 + dai_link->dpcm_playback = 1;
5998 + dai_link->dpcm_capture = 1;
5999 + dai_link->ops = &simple_ops;
6000 + dai_link->init = asoc_simple_dai_init;
6001 +
6002 +- return 0;
6003 ++out_put_node:
6004 ++ of_node_put(node);
6005 ++ return ret;
6006 + }
6007 +
6008 + static int simple_dai_link_of(struct asoc_simple_priv *priv,
6009 +@@ -378,8 +378,6 @@ static int simple_for_each_link(struct asoc_simple_priv *priv,
6010 + goto error;
6011 + }
6012 +
6013 +- of_node_put(codec);
6014 +-
6015 + /* get convert-xxx property */
6016 + memset(&adata, 0, sizeof(adata));
6017 + for_each_child_of_node(node, np)
6018 +@@ -401,11 +399,13 @@ static int simple_for_each_link(struct asoc_simple_priv *priv,
6019 + ret = func_noml(priv, np, codec, li, is_top);
6020 +
6021 + if (ret < 0) {
6022 ++ of_node_put(codec);
6023 + of_node_put(np);
6024 + goto error;
6025 + }
6026 + }
6027 +
6028 ++ of_node_put(codec);
6029 + node = of_get_next_child(top, node);
6030 + } while (!is_top && node);
6031 +
6032 +diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
6033 +index 2fe1ce879123..c360ebc3ccc7 100644
6034 +--- a/sound/soc/intel/boards/bytcht_es8316.c
6035 ++++ b/sound/soc/intel/boards/bytcht_es8316.c
6036 +@@ -436,6 +436,14 @@ static const struct acpi_gpio_mapping byt_cht_es8316_gpios[] = {
6037 +
6038 + /* Please keep this list alphabetically sorted */
6039 + static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
6040 ++ { /* Irbis NB41 */
6041 ++ .matches = {
6042 ++ DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
6043 ++ DMI_MATCH(DMI_PRODUCT_NAME, "NB41"),
6044 ++ },
6045 ++ .driver_data = (void *)(BYT_CHT_ES8316_INTMIC_IN2_MAP
6046 ++ | BYT_CHT_ES8316_JD_INVERTED),
6047 ++ },
6048 + { /* Teclast X98 Plus II */
6049 + .matches = {
6050 + DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
6051 +diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
6052 +index 0a34d0eb8dba..88ebaf6e1880 100644
6053 +--- a/sound/soc/rockchip/rockchip_i2s.c
6054 ++++ b/sound/soc/rockchip/rockchip_i2s.c
6055 +@@ -326,7 +326,6 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
6056 + val |= I2S_CHN_4;
6057 + break;
6058 + case 2:
6059 +- case 1:
6060 + val |= I2S_CHN_2;
6061 + break;
6062 + default:
6063 +@@ -459,7 +458,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
6064 + },
6065 + .capture = {
6066 + .stream_name = "Capture",
6067 +- .channels_min = 1,
6068 ++ .channels_min = 2,
6069 + .channels_max = 2,
6070 + .rates = SNDRV_PCM_RATE_8000_192000,
6071 + .formats = (SNDRV_PCM_FMTBIT_S8 |
6072 +@@ -659,7 +658,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
6073 + }
6074 +
6075 + if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
6076 +- if (val >= 1 && val <= 8)
6077 ++ if (val >= 2 && val <= 8)
6078 + soc_dai->capture.channels_max = val;
6079 + }
6080 +
6081 +diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c
6082 +index e688169ff12a..d606e48fe551 100644
6083 +--- a/sound/soc/samsung/odroid.c
6084 ++++ b/sound/soc/samsung/odroid.c
6085 +@@ -275,9 +275,8 @@ static int odroid_audio_probe(struct platform_device *pdev)
6086 + }
6087 +
6088 + of_node_put(cpu);
6089 +- of_node_put(codec);
6090 + if (ret < 0)
6091 +- return ret;
6092 ++ goto err_put_node;
6093 +
6094 + ret = snd_soc_of_get_dai_link_codecs(dev, codec, codec_link);
6095 + if (ret < 0)
6096 +@@ -300,7 +299,6 @@ static int odroid_audio_probe(struct platform_device *pdev)
6097 + ret = PTR_ERR(priv->clk_i2s_bus);
6098 + goto err_put_sclk;
6099 + }
6100 +- of_node_put(cpu_dai);
6101 +
6102 + ret = devm_snd_soc_register_card(dev, card);
6103 + if (ret < 0) {
6104 +@@ -308,6 +306,8 @@ static int odroid_audio_probe(struct platform_device *pdev)
6105 + goto err_put_clk_i2s;
6106 + }
6107 +
6108 ++ of_node_put(cpu_dai);
6109 ++ of_node_put(codec);
6110 + return 0;
6111 +
6112 + err_put_clk_i2s:
6113 +@@ -317,6 +317,8 @@ err_put_sclk:
6114 + err_put_cpu_dai:
6115 + of_node_put(cpu_dai);
6116 + snd_soc_of_put_dai_link_codecs(codec_link);
6117 ++err_put_node:
6118 ++ of_node_put(codec);
6119 + return ret;
6120 + }
6121 +
6122 +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
6123 +index 6aeba0d66ec5..dd0f43a1c5e1 100644
6124 +--- a/sound/soc/soc-core.c
6125 ++++ b/sound/soc/soc-core.c
6126 +@@ -1605,8 +1605,11 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
6127 + }
6128 + }
6129 +
6130 +- if (dai_link->dai_fmt)
6131 +- snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
6132 ++ if (dai_link->dai_fmt) {
6133 ++ ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
6134 ++ if (ret)
6135 ++ return ret;
6136 ++ }
6137 +
6138 + ret = soc_post_component_init(rtd, dai_link->name);
6139 + if (ret)
6140 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
6141 +index c91df5a9c840..f40adb604c25 100644
6142 +--- a/sound/soc/soc-dapm.c
6143 ++++ b/sound/soc/soc-dapm.c
6144 +@@ -1156,8 +1156,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
6145 + list_add_tail(&widget->work_list, list);
6146 +
6147 + if (custom_stop_condition && custom_stop_condition(widget, dir)) {
6148 +- widget->endpoints[dir] = 1;
6149 +- return widget->endpoints[dir];
6150 ++ list = NULL;
6151 ++ custom_stop_condition = NULL;
6152 + }
6153 +
6154 + if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
6155 +@@ -1194,8 +1194,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
6156 + *
6157 + * Optionally, can be supplied with a function acting as a stopping condition.
6158 + * This function takes the dapm widget currently being examined and the walk
6159 +- * direction as an arguments, it should return true if the walk should be
6160 +- * stopped and false otherwise.
6161 ++ * direction as an arguments, it should return true if widgets from that point
6162 ++ * in the graph onwards should not be added to the widget list.
6163 + */
6164 + static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
6165 + struct list_head *list,
6166 +@@ -3705,6 +3705,8 @@ request_failed:
6167 + dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
6168 + w->name, ret);
6169 +
6170 ++ kfree_const(w->sname);
6171 ++ kfree(w);
6172 + return ERR_PTR(ret);
6173 + }
6174 +
6175 +diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
6176 +index 5e8e31743a28..56009d147208 100644
6177 +--- a/sound/soc/ti/davinci-mcasp.c
6178 ++++ b/sound/soc/ti/davinci-mcasp.c
6179 +@@ -194,7 +194,7 @@ static inline void mcasp_set_axr_pdir(struct davinci_mcasp *mcasp, bool enable)
6180 + {
6181 + u32 bit;
6182 +
6183 +- for_each_set_bit(bit, &mcasp->pdir, PIN_BIT_AFSR) {
6184 ++ for_each_set_bit(bit, &mcasp->pdir, PIN_BIT_AMUTE) {
6185 + if (enable)
6186 + mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit));
6187 + else
6188 +@@ -222,6 +222,7 @@ static void mcasp_start_rx(struct davinci_mcasp *mcasp)
6189 + if (mcasp_is_synchronous(mcasp)) {
6190 + mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
6191 + mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
6192 ++ mcasp_set_clk_pdir(mcasp, true);
6193 + }
6194 +
6195 + /* Activate serializer(s) */
6196 +@@ -1253,6 +1254,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
6197 + return ret;
6198 + }
6199 +
6200 ++static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
6201 ++ struct snd_pcm_hw_rule *rule)
6202 ++{
6203 ++ struct davinci_mcasp_ruledata *rd = rule->private;
6204 ++ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
6205 ++ struct snd_mask nfmt;
6206 ++ int i, slot_width;
6207 ++
6208 ++ snd_mask_none(&nfmt);
6209 ++ slot_width = rd->mcasp->slot_width;
6210 ++
6211 ++ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
6212 ++ if (snd_mask_test(fmt, i)) {
6213 ++ if (snd_pcm_format_width(i) <= slot_width) {
6214 ++ snd_mask_set(&nfmt, i);
6215 ++ }
6216 ++ }
6217 ++ }
6218 ++
6219 ++ return snd_mask_refine(fmt, &nfmt);
6220 ++}
6221 ++
6222 + static const unsigned int davinci_mcasp_dai_rates[] = {
6223 + 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
6224 + 88200, 96000, 176400, 192000,
6225 +@@ -1360,7 +1383,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
6226 + struct davinci_mcasp_ruledata *ruledata =
6227 + &mcasp->ruledata[substream->stream];
6228 + u32 max_channels = 0;
6229 +- int i, dir;
6230 ++ int i, dir, ret;
6231 + int tdm_slots = mcasp->tdm_slots;
6232 +
6233 + /* Do not allow more then one stream per direction */
6234 +@@ -1389,6 +1412,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
6235 + max_channels++;
6236 + }
6237 + ruledata->serializers = max_channels;
6238 ++ ruledata->mcasp = mcasp;
6239 + max_channels *= tdm_slots;
6240 + /*
6241 + * If the already active stream has less channels than the calculated
6242 +@@ -1414,20 +1438,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
6243 + 0, SNDRV_PCM_HW_PARAM_CHANNELS,
6244 + &mcasp->chconstr[substream->stream]);
6245 +
6246 +- if (mcasp->slot_width)
6247 +- snd_pcm_hw_constraint_minmax(substream->runtime,
6248 +- SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
6249 +- 8, mcasp->slot_width);
6250 ++ if (mcasp->slot_width) {
6251 ++ /* Only allow formats require <= slot_width bits on the bus */
6252 ++ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
6253 ++ SNDRV_PCM_HW_PARAM_FORMAT,
6254 ++ davinci_mcasp_hw_rule_slot_width,
6255 ++ ruledata,
6256 ++ SNDRV_PCM_HW_PARAM_FORMAT, -1);
6257 ++ if (ret)
6258 ++ return ret;
6259 ++ }
6260 +
6261 + /*
6262 + * If we rely on implicit BCLK divider setting we should
6263 + * set constraints based on what we can provide.
6264 + */
6265 + if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
6266 +- int ret;
6267 +-
6268 +- ruledata->mcasp = mcasp;
6269 +-
6270 + ret = snd_pcm_hw_rule_add(substream->runtime, 0,
6271 + SNDRV_PCM_HW_PARAM_RATE,
6272 + davinci_mcasp_hw_rule_rate,
6273 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
6274 +index 3865a5d27251..77e14d995479 100644
6275 +--- a/tools/lib/bpf/libbpf.c
6276 ++++ b/tools/lib/bpf/libbpf.c
6277 +@@ -1044,8 +1044,13 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
6278 + if (!has_datasec && kind == BTF_KIND_VAR) {
6279 + /* replace VAR with INT */
6280 + t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
6281 +- t->size = sizeof(int);
6282 +- *(int *)(t+1) = BTF_INT_ENC(0, 0, 32);
6283 ++ /*
6284 ++ * using size = 1 is the safest choice, 4 will be too
6285 ++ * big and cause kernel BTF validation failure if
6286 ++ * original variable took less than 4 bytes
6287 ++ */
6288 ++ t->size = 1;
6289 ++ *(int *)(t+1) = BTF_INT_ENC(0, 0, 8);
6290 + } else if (!has_datasec && kind == BTF_KIND_DATASEC) {
6291 + /* replace DATASEC with STRUCT */
6292 + struct btf_var_secinfo *v = (void *)(t + 1);
6293 +diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
6294 +index ca272c5b67f4..fa948c5445ec 100644
6295 +--- a/tools/lib/bpf/xsk.c
6296 ++++ b/tools/lib/bpf/xsk.c
6297 +@@ -327,17 +327,16 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
6298 +
6299 + static int xsk_get_max_queues(struct xsk_socket *xsk)
6300 + {
6301 +- struct ethtool_channels channels;
6302 +- struct ifreq ifr;
6303 ++ struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
6304 ++ struct ifreq ifr = {};
6305 + int fd, err, ret;
6306 +
6307 + fd = socket(AF_INET, SOCK_DGRAM, 0);
6308 + if (fd < 0)
6309 + return -errno;
6310 +
6311 +- channels.cmd = ETHTOOL_GCHANNELS;
6312 + ifr.ifr_data = (void *)&channels;
6313 +- strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
6314 ++ memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
6315 + ifr.ifr_name[IFNAMSIZ - 1] = '\0';
6316 + err = ioctl(fd, SIOCETHTOOL, &ifr);
6317 + if (err && errno != EOPNOTSUPP) {
6318 +@@ -345,7 +344,7 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
6319 + goto out;
6320 + }
6321 +
6322 +- if (channels.max_combined == 0 || errno == EOPNOTSUPP)
6323 ++ if (err || channels.max_combined == 0)
6324 + /* If the device says it has no channels, then all traffic
6325 + * is sent to a single stream, so max queues = 1.
6326 + */
6327 +@@ -562,7 +561,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
6328 + err = -errno;
6329 + goto out_socket;
6330 + }
6331 +- strncpy(xsk->ifname, ifname, IFNAMSIZ - 1);
6332 ++ memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
6333 + xsk->ifname[IFNAMSIZ - 1] = '\0';
6334 +
6335 + err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
6336 +diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
6337 +index a7784554a80d..23c27ca48abf 100644
6338 +--- a/tools/perf/bench/numa.c
6339 ++++ b/tools/perf/bench/numa.c
6340 +@@ -379,8 +379,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
6341 +
6342 + /* Allocate and initialize all memory on CPU#0: */
6343 + if (init_cpu0) {
6344 +- orig_mask = bind_to_node(0);
6345 +- bind_to_memnode(0);
6346 ++ int node = numa_node_of_cpu(0);
6347 ++
6348 ++ orig_mask = bind_to_node(node);
6349 ++ bind_to_memnode(node);
6350 + }
6351 +
6352 + bytes = bytes0 + HPSIZE;
6353 +diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
6354 +index 9c228c55e1fb..22386ab35050 100644
6355 +--- a/tools/perf/builtin-ftrace.c
6356 ++++ b/tools/perf/builtin-ftrace.c
6357 +@@ -173,7 +173,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
6358 + int last_cpu;
6359 +
6360 + last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
6361 +- mask_size = (last_cpu + 3) / 4 + 1;
6362 ++ mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
6363 + mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
6364 +
6365 + cpumask = malloc(mask_size);
6366 +diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
6367 +index 58f77fd0f59f..ed5423d8a95f 100644
6368 +--- a/tools/perf/pmu-events/jevents.c
6369 ++++ b/tools/perf/pmu-events/jevents.c
6370 +@@ -450,6 +450,7 @@ static struct fixed {
6371 + { "inst_retired.any_p", "event=0xc0" },
6372 + { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
6373 + { "cpu_clk_unhalted.thread", "event=0x3c" },
6374 ++ { "cpu_clk_unhalted.core", "event=0x3c" },
6375 + { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
6376 + { NULL, NULL},
6377 + };
6378 +diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
6379 +index 0b599229bc7e..0aba5b39c21e 100644
6380 +--- a/tools/perf/util/cpumap.c
6381 ++++ b/tools/perf/util/cpumap.c
6382 +@@ -701,7 +701,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
6383 + unsigned char *bitmap;
6384 + int last_cpu = cpu_map__cpu(map, map->nr - 1);
6385 +
6386 +- bitmap = zalloc((last_cpu + 7) / 8);
6387 ++ if (buf == NULL)
6388 ++ return 0;
6389 ++
6390 ++ bitmap = zalloc(last_cpu / 8 + 1);
6391 + if (bitmap == NULL) {
6392 + buf[0] = '\0';
6393 + return 0;
6394 +diff --git a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
6395 +index 5aeaa284fc47..a68062820410 100644
6396 +--- a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
6397 ++++ b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
6398 +@@ -41,8 +41,7 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
6399 + }
6400 +
6401 + /* Rewrite destination. */
6402 +- if ((ctx->user_ip6[0] & 0xFFFF) == bpf_htons(0xFACE) &&
6403 +- ctx->user_ip6[0] >> 16 == bpf_htons(0xB00C)) {
6404 ++ if (ctx->user_ip6[0] == bpf_htonl(0xFACEB00C)) {
6405 + ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
6406 + ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1);
6407 + ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
6408 +diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
6409 +index b0fda2877119..d438193804b2 100644
6410 +--- a/tools/testing/selftests/bpf/verifier/ctx_skb.c
6411 ++++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
6412 +@@ -974,6 +974,17 @@
6413 + .result = ACCEPT,
6414 + .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
6415 + },
6416 ++{
6417 ++ "read gso_segs from CGROUP_SKB",
6418 ++ .insns = {
6419 ++ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
6420 ++ offsetof(struct __sk_buff, gso_segs)),
6421 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
6422 ++ BPF_EXIT_INSN(),
6423 ++ },
6424 ++ .result = ACCEPT,
6425 ++ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
6426 ++},
6427 + {
6428 + "write gso_segs from CGROUP_SKB",
6429 + .insns = {
6430 +diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
6431 +new file mode 100644
6432 +index 000000000000..63ed533f73d6
6433 +--- /dev/null
6434 ++++ b/tools/testing/selftests/kvm/config
6435 +@@ -0,0 +1,3 @@
6436 ++CONFIG_KVM=y
6437 ++CONFIG_KVM_INTEL=y
6438 ++CONFIG_KVM_AMD=y
6439 +diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
6440 +index cca2baa03fb8..a8d8e8b3dc81 100755
6441 +--- a/tools/testing/selftests/net/forwarding/gre_multipath.sh
6442 ++++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
6443 +@@ -93,18 +93,10 @@ sw1_create()
6444 + ip route add vrf v$ol1 192.0.2.16/28 \
6445 + nexthop dev g1a \
6446 + nexthop dev g1b
6447 +-
6448 +- tc qdisc add dev $ul1 clsact
6449 +- tc filter add dev $ul1 egress pref 111 prot ipv4 \
6450 +- flower dst_ip 192.0.2.66 action pass
6451 +- tc filter add dev $ul1 egress pref 222 prot ipv4 \
6452 +- flower dst_ip 192.0.2.82 action pass
6453 + }
6454 +
6455 + sw1_destroy()
6456 + {
6457 +- tc qdisc del dev $ul1 clsact
6458 +-
6459 + ip route del vrf v$ol1 192.0.2.16/28
6460 +
6461 + ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
6462 +@@ -139,10 +131,18 @@ sw2_create()
6463 + ip route add vrf v$ol2 192.0.2.0/28 \
6464 + nexthop dev g2a \
6465 + nexthop dev g2b
6466 ++
6467 ++ tc qdisc add dev $ul2 clsact
6468 ++ tc filter add dev $ul2 ingress pref 111 prot 802.1Q \
6469 ++ flower vlan_id 111 action pass
6470 ++ tc filter add dev $ul2 ingress pref 222 prot 802.1Q \
6471 ++ flower vlan_id 222 action pass
6472 + }
6473 +
6474 + sw2_destroy()
6475 + {
6476 ++ tc qdisc del dev $ul2 clsact
6477 ++
6478 + ip route del vrf v$ol2 192.0.2.0/28
6479 +
6480 + ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
6481 +@@ -187,12 +187,16 @@ setup_prepare()
6482 + sw1_create
6483 + sw2_create
6484 + h2_create
6485 ++
6486 ++ forwarding_enable
6487 + }
6488 +
6489 + cleanup()
6490 + {
6491 + pre_cleanup
6492 +
6493 ++ forwarding_restore
6494 ++
6495 + h2_destroy
6496 + sw2_destroy
6497 + sw1_destroy
6498 +@@ -211,15 +215,15 @@ multipath4_test()
6499 + nexthop dev g1a weight $weight1 \
6500 + nexthop dev g1b weight $weight2
6501 +
6502 +- local t0_111=$(tc_rule_stats_get $ul1 111 egress)
6503 +- local t0_222=$(tc_rule_stats_get $ul1 222 egress)
6504 ++ local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
6505 ++ local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
6506 +
6507 + ip vrf exec v$h1 \
6508 + $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
6509 + -d 1msec -t udp "sp=1024,dp=0-32768"
6510 +
6511 +- local t1_111=$(tc_rule_stats_get $ul1 111 egress)
6512 +- local t1_222=$(tc_rule_stats_get $ul1 222 egress)
6513 ++ local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
6514 ++ local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
6515 +
6516 + local d111=$((t1_111 - t0_111))
6517 + local d222=$((t1_222 - t0_222))