Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Wed, 23 Feb 2022 12:37:01
Message-Id: 1645619782.67aac33971201b1c044d266ec2d6bd40fed3d53f.mpagano@gentoo
commit: 67aac33971201b1c044d266ec2d6bd40fed3d53f
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 23 12:36:22 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Feb 23 12:36:22 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=67aac339

Linux patch 5.15.25

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1024_linux-5.15.25.patch | 7640 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 7644 insertions(+)

diff --git a/0000_README b/0000_README
index fa9c4813..802c4d2b 100644
--- a/0000_README
+++ b/0000_README
@@ -139,6 +139,10 @@ Patch: 1023_linux-5.15.24.patch
From: http://www.kernel.org
Desc: Linux 5.15.24

+Patch: 1024_linux-5.15.25.patch
+From: http://www.kernel.org
+Desc: Linux 5.15.25
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1024_linux-5.15.25.patch b/1024_linux-5.15.25.patch
new file mode 100644
index 00000000..1c6b7878
--- /dev/null
+++ b/1024_linux-5.15.25.patch
@@ -0,0 +1,7640 @@
+diff --git a/Makefile b/Makefile
+index c726a33e922f4..c50d4ec83be8d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 24
++SUBLEVEL = 25
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
+index 6daaa645ae5d9..21413a9b7b6c6 100644
+--- a/arch/arm/mach-omap2/display.c
++++ b/arch/arm/mach-omap2/display.c
+@@ -263,9 +263,9 @@ static int __init omapdss_init_of(void)
+ }
+
+ r = of_platform_populate(node, NULL, NULL, &pdev->dev);
++ put_device(&pdev->dev);
+ if (r) {
+ pr_err("Unable to populate DSS submodule devices\n");
+- put_device(&pdev->dev);
+ return r;
+ }
+
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 0c2936c7a3799..a5e9cffcac10c 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -752,8 +752,10 @@ static int __init _init_clkctrl_providers(void)
+
+ for_each_matching_node(np, ti_clkctrl_match_table) {
+ ret = _setup_clkctrl_provider(np);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ break;
++ }
+ }
+
+ return ret;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+index 428449d98c0ae..a3a1ea0f21340 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -107,6 +107,12 @@
+ no-map;
+ };
+
++ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
++ secmon_reserved_bl32: secmon@5300000 {
++ reg = <0x0 0x05300000 0x0 0x2000000>;
++ no-map;
++ };
++
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+index d8838dde0f0f4..4fb31c2ba31c4 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+@@ -157,14 +157,6 @@
+ regulator-always-on;
+ };
+
+- reserved-memory {
+- /* TEE Reserved Memory */
+- bl32_reserved: bl32@5000000 {
+- reg = <0x0 0x05300000 0x0 0x2000000>;
+- no-map;
+- };
+- };
+-
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+index 6b457b2c30a4b..aa14ea017a613 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+@@ -49,6 +49,12 @@
+ no-map;
+ };
+
++ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
++ secmon_reserved_bl32: secmon@5300000 {
++ reg = <0x0 0x05300000 0x0 0x2000000>;
++ no-map;
++ };
++
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+index 427475846fc70..a5d79f2f7c196 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+@@ -203,14 +203,6 @@
+ regulator-always-on;
+ };
+
+- reserved-memory {
+- /* TEE Reserved Memory */
+- bl32_reserved: bl32@5000000 {
+- reg = <0x0 0x05300000 0x0 0x2000000>;
+- no-map;
+- };
+- };
+-
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
+index 3198acb2aad8c..7f3c87f7a0cec 100644
+--- a/arch/arm64/include/asm/el2_setup.h
++++ b/arch/arm64/include/asm/el2_setup.h
+@@ -106,7 +106,7 @@
+ msr_s SYS_ICC_SRE_EL2, x0
+ isb // Make sure SRE is now set
+ mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
+- tbz x0, #0, 1f // and check that it sticks
++ tbz x0, #0, .Lskip_gicv3_\@ // and check that it sticks
+ msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
+ .Lskip_gicv3_\@:
+ .endm
+diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
+index aa4e883431c1a..5779d463b341f 100644
+--- a/arch/parisc/include/asm/bitops.h
++++ b/arch/parisc/include/asm/bitops.h
+@@ -12,6 +12,14 @@
+ #include <asm/barrier.h>
+ #include <linux/atomic.h>
+
++/* compiler build environment sanity checks: */
++#if !defined(CONFIG_64BIT) && defined(__LP64__)
++#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
++#endif
++#if defined(CONFIG_64BIT) && !defined(__LP64__)
++#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
++#endif
++
+ /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
+ * on use of volatile and __*_bit() (set/clear/change):
+ * *_bit() want use of volatile.
+diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
+index 367f6397bda7a..8603850580857 100644
+--- a/arch/parisc/lib/iomap.c
++++ b/arch/parisc/lib/iomap.c
+@@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr)
+ return *((u64 *)addr);
+ }
+
++u64 ioread64_lo_hi(const void __iomem *addr)
++{
++ u32 low, high;
++
++ low = ioread32(addr);
++ high = ioread32(addr + sizeof(u32));
++
++ return low + ((u64)high << 32);
++}
++
+ u64 ioread64_hi_lo(const void __iomem *addr)
+ {
+ u32 low, high;
+@@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem *addr)
+ }
+ }
+
++void iowrite64_lo_hi(u64 val, void __iomem *addr)
++{
++ iowrite32(val, addr);
++ iowrite32(val >> 32, addr + sizeof(u32));
++}
++
+ void iowrite64_hi_lo(u64 val, void __iomem *addr)
+ {
+ iowrite32(val >> 32, addr + sizeof(u32));
+@@ -530,6 +546,7 @@ EXPORT_SYMBOL(ioread32);
+ EXPORT_SYMBOL(ioread32be);
+ EXPORT_SYMBOL(ioread64);
+ EXPORT_SYMBOL(ioread64be);
++EXPORT_SYMBOL(ioread64_lo_hi);
+ EXPORT_SYMBOL(ioread64_hi_lo);
+ EXPORT_SYMBOL(iowrite8);
+ EXPORT_SYMBOL(iowrite16);
+@@ -538,6 +555,7 @@ EXPORT_SYMBOL(iowrite32);
+ EXPORT_SYMBOL(iowrite32be);
+ EXPORT_SYMBOL(iowrite64);
+ EXPORT_SYMBOL(iowrite64be);
++EXPORT_SYMBOL(iowrite64_lo_hi);
+ EXPORT_SYMBOL(iowrite64_hi_lo);
+ EXPORT_SYMBOL(ioread8_rep);
+ EXPORT_SYMBOL(ioread16_rep);
+diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
+index 65f50f072a87b..e5c18313b5d4f 100644
+--- a/arch/parisc/mm/init.c
++++ b/arch/parisc/mm/init.c
+@@ -341,9 +341,9 @@ static void __init setup_bootmem(void)
+
+ static bool kernel_set_to_readonly;
+
+-static void __init map_pages(unsigned long start_vaddr,
+- unsigned long start_paddr, unsigned long size,
+- pgprot_t pgprot, int force)
++static void __ref map_pages(unsigned long start_vaddr,
++ unsigned long start_paddr, unsigned long size,
++ pgprot_t pgprot, int force)
+ {
+ pmd_t *pmd;
+ pte_t *pg_table;
+@@ -453,7 +453,7 @@ void __init set_kernel_text_rw(int enable_read_write)
+ flush_tlb_all();
+ }
+
+-void __ref free_initmem(void)
++void free_initmem(void)
+ {
+ unsigned long init_begin = (unsigned long)__init_begin;
+ unsigned long init_end = (unsigned long)__init_end;
+@@ -467,7 +467,6 @@ void __ref free_initmem(void)
+ /* The init text pages are marked R-X. We have to
+ * flush the icache and mark them RW-
+ *
+- * This is tricky, because map_pages is in the init section.
+ * Do a dummy remap of the data section first (the data
+ * section is already PAGE_KERNEL) to pull in the TLB entries
+ * for map_kernel */
+diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
+index 68e5c0a7e99d1..2e2a8211b17be 100644
+--- a/arch/powerpc/kernel/head_book3s_32.S
++++ b/arch/powerpc/kernel/head_book3s_32.S
+@@ -421,14 +421,14 @@ InstructionTLBMiss:
+ */
+ /* Get PTE (linux-style) and check access */
+ mfspr r3,SPRN_IMISS
+-#ifdef CONFIG_MODULES
++#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+ lis r1, TASK_SIZE@h /* check if kernel address */
+ cmplw 0,r1,r3
+ #endif
+ mfspr r2, SPRN_SDR1
+ li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
+ rlwinm r2, r2, 28, 0xfffff000
+-#ifdef CONFIG_MODULES
++#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+ bgt- 112f
+ lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
+ li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index d8d5f901cee1c..d8cc49f39fe47 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -3181,12 +3181,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
+ case BARRIER_EIEIO:
+ eieio();
+ break;
++#ifdef CONFIG_PPC64
+ case BARRIER_LWSYNC:
+ asm volatile("lwsync" : : : "memory");
+ break;
+ case BARRIER_PTESYNC:
+ asm volatile("ptesync" : : : "memory");
+ break;
++#endif
+ }
+ break;
+
+diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
+index 84b87538a15de..bab883c0b6fee 100644
+--- a/arch/x86/include/asm/bug.h
++++ b/arch/x86/include/asm/bug.h
+@@ -22,7 +22,7 @@
+
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+
+-#define _BUG_FLAGS(ins, flags) \
++#define _BUG_FLAGS(ins, flags, extra) \
+ do { \
+ asm_inline volatile("1:\t" ins "\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
+@@ -31,7 +31,8 @@ do { \
+ "\t.word %c1" "\t# bug_entry::line\n" \
+ "\t.word %c2" "\t# bug_entry::flags\n" \
+ "\t.org 2b+%c3\n" \
+- ".popsection" \
++ ".popsection\n" \
++ extra \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (flags), \
+ "i" (sizeof(struct bug_entry))); \
+@@ -39,14 +40,15 @@ do { \
+
+ #else /* !CONFIG_DEBUG_BUGVERBOSE */
+
+-#define _BUG_FLAGS(ins, flags) \
++#define _BUG_FLAGS(ins, flags, extra) \
+ do { \
+ asm_inline volatile("1:\t" ins "\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
+ "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
+ "\t.word %c0" "\t# bug_entry::flags\n" \
+ "\t.org 2b+%c1\n" \
+- ".popsection" \
++ ".popsection\n" \
++ extra \
+ : : "i" (flags), \
+ "i" (sizeof(struct bug_entry))); \
+ } while (0)
+@@ -55,7 +57,7 @@ do { \
+
+ #else
+
+-#define _BUG_FLAGS(ins, flags) asm volatile(ins)
++#define _BUG_FLAGS(ins, flags, extra) asm volatile(ins)
+
+ #endif /* CONFIG_GENERIC_BUG */
+
+@@ -63,8 +65,8 @@ do { \
+ #define BUG() \
+ do { \
+ instrumentation_begin(); \
+- _BUG_FLAGS(ASM_UD2, 0); \
+- unreachable(); \
++ _BUG_FLAGS(ASM_UD2, 0, ""); \
++ __builtin_unreachable(); \
+ } while (0)
+
+ /*
+@@ -75,9 +77,9 @@ do { \
+ */
+ #define __WARN_FLAGS(flags) \
+ do { \
++ __auto_type f = BUGFLAG_WARNING|(flags); \
+ instrumentation_begin(); \
+- _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
+- annotate_reachable(); \
++ _BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE); \
+ instrumentation_end(); \
+ } while (0)
+
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index 0772bad9165c5..f256f01056bdb 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -95,7 +95,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
+ }
+
+ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
+- unsigned config, bool exclude_user,
++ u64 config, bool exclude_user,
+ bool exclude_kernel, bool intr,
+ bool in_tx, bool in_tx_cp)
+ {
+@@ -173,8 +173,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
+
+ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+ {
+- unsigned config, type = PERF_TYPE_RAW;
+- u8 event_select, unit_mask;
++ u64 config;
++ u32 type = PERF_TYPE_RAW;
+ struct kvm *kvm = pmc->vcpu->kvm;
+ struct kvm_pmu_event_filter *filter;
+ int i;
+@@ -206,23 +206,18 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+ if (!allow_event)
+ return;
+
+- event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+- unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+-
+ if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
+ ARCH_PERFMON_EVENTSEL_INV |
+ ARCH_PERFMON_EVENTSEL_CMASK |
+ HSW_IN_TX |
+ HSW_IN_TX_CHECKPOINTED))) {
+- config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
+- event_select,
+- unit_mask);
++ config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
+ if (config != PERF_COUNT_HW_MAX)
+ type = PERF_TYPE_HARDWARE;
+ }
+
+ if (type == PERF_TYPE_RAW)
+- config = eventsel & X86_RAW_EVENT_MASK;
++ config = eventsel & AMD64_RAW_EVENT_MASK;
+
+ if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
+ return;
+diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
+index 0e4f2b1fa9fbd..a06d95165ac7c 100644
+--- a/arch/x86/kvm/pmu.h
++++ b/arch/x86/kvm/pmu.h
+@@ -24,8 +24,7 @@ struct kvm_event_hw_type_mapping {
+ };
+
+ struct kvm_pmu_ops {
+- unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
+- u8 unit_mask);
++ unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
+ unsigned (*find_fixed_event)(int idx);
+ bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
+ struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
+diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
+index 2fb6a6f00290d..cdbb48e12745c 100644
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -342,8 +342,6 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
+ avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
+ break;
+ case AVIC_IPI_FAILURE_INVALID_TARGET:
+- WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
+- index, vcpu->vcpu_id, icrh, icrl);
+ break;
+ case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
+ WARN_ONCE(1, "Invalid backing page\n");
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index de80ae42d044c..556e7a3f35627 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -1357,18 +1357,6 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
+ !nested_vmcb_valid_sregs(vcpu, save))
+ goto out_free;
+
+- /*
+- * While the nested guest CR3 is already checked and set by
+- * KVM_SET_SREGS, it was set when nested state was yet loaded,
+- * thus MMU might not be initialized correctly.
+- * Set it again to fix this.
+- */
+-
+- ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
+- nested_npt_enabled(svm), false);
+- if (WARN_ON_ONCE(ret))
+- goto out_free;
+-
+
+ /*
+ * All checks done, we can enter guest mode. Userspace provides
+@@ -1394,6 +1382,20 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
+
+ svm_switch_vmcb(svm, &svm->nested.vmcb02);
+ nested_vmcb02_prepare_control(svm);
++
++ /*
++ * While the nested guest CR3 is already checked and set by
++ * KVM_SET_SREGS, it was set when nested state was yet loaded,
++ * thus MMU might not be initialized correctly.
++ * Set it again to fix this.
++ */
++
++ ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
++ nested_npt_enabled(svm), false);
++ if (WARN_ON_ONCE(ret))
++ goto out_free;
++
++
+ kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+ ret = 0;
+ out_free:
+diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
+index e152241d1d709..06f8034f62e4f 100644
+--- a/arch/x86/kvm/svm/pmu.c
++++ b/arch/x86/kvm/svm/pmu.c
+@@ -134,10 +134,10 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
+ return &pmu->gp_counters[msr_to_index(msr)];
+ }
+
+-static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
+- u8 event_select,
+- u8 unit_mask)
++static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
+ {
++ u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
++ u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
+@@ -320,7 +320,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
+ }
+
+ struct kvm_pmu_ops amd_pmu_ops = {
+- .find_arch_event = amd_find_arch_event,
++ .pmc_perf_hw_id = amd_pmc_perf_hw_id,
+ .find_fixed_event = amd_find_fixed_event,
+ .pmc_is_enabled = amd_pmc_is_enabled,
+ .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index f05aa7290267d..26f2da1590eda 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1727,6 +1727,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+ u64 hcr0 = cr0;
++ bool old_paging = is_paging(vcpu);
+
+ #ifdef CONFIG_X86_64
+ if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
+@@ -1743,8 +1744,11 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ #endif
+ vcpu->arch.cr0 = cr0;
+
+- if (!npt_enabled)
++ if (!npt_enabled) {
+ hcr0 |= X86_CR0_PG | X86_CR0_WP;
++ if (old_paging != is_paging(vcpu))
++ svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
++ }
+
+ /*
+ * re-enable caching here because the QEMU bios
+@@ -1788,8 +1792,12 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ svm_flush_tlb(vcpu);
+
+ vcpu->arch.cr4 = cr4;
+- if (!npt_enabled)
++ if (!npt_enabled) {
+ cr4 |= X86_CR4_PAE;
++
++ if (!is_paging(vcpu))
++ cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
++ }
+ cr4 |= host_cr4_mce;
+ to_svm(vcpu)->vmcb->save.cr4 = cr4;
+ vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
+@@ -4384,10 +4392,17 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+ * Enter the nested guest now
+ */
+
++ vmcb_mark_all_dirty(svm->vmcb01.ptr);
++
+ vmcb12 = map.hva;
+ nested_load_control_from_vmcb12(svm, &vmcb12->control);
+ ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+
++ if (ret)
++ goto unmap_save;
++
++ svm->nested.nested_run_pending = 1;
++
+ unmap_save:
+ kvm_vcpu_unmap(vcpu, &map_save, true);
+ unmap_map:
+diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
+index 10cc4f65c4efd..6427d95de01cf 100644
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -68,10 +68,11 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
+ reprogram_counter(pmu, bit);
+ }
+
+-static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
+- u8 event_select,
+- u8 unit_mask)
++static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
+ {
++ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
++ u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
++ u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
+@@ -706,7 +707,7 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
+ }
+
+ struct kvm_pmu_ops intel_pmu_ops = {
+- .find_arch_event = intel_find_arch_event,
++ .pmc_perf_hw_id = intel_pmc_perf_hw_id,
+ .find_fixed_event = intel_find_fixed_event,
+ .pmc_is_enabled = intel_pmc_is_enabled,
+ .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 44da933a756b3..322485ab9271c 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7532,6 +7532,7 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+ if (ret)
+ return ret;
+
++ vmx->nested.nested_run_pending = 1;
+ vmx->nested.smm.guest_mode = false;
+ }
+ return 0;
+diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
+index 0787d6645573e..ab9f88de6deb9 100644
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -93,32 +93,57 @@ static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
+ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
+ {
+ struct kvm_vcpu_xen *vx = &v->arch.xen;
++ struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
++ struct kvm_memslots *slots = kvm_memslots(v->kvm);
++ bool atomic = (state == RUNSTATE_runnable);
+ uint64_t state_entry_time;
+- unsigned int offset;
++ int __user *user_state;
++ uint64_t __user *user_times;
+
+ kvm_xen_update_runstate(v, state);
+
+ if (!vx->runstate_set)
+ return;
+
+- BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
++ if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
++ kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
++ return;
++
++ /* We made sure it fits in a single page */
++ BUG_ON(!ghc->memslot);
++
++ if (atomic)
++ pagefault_disable();
+
+- offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
+-#ifdef CONFIG_X86_64
+ /*
+- * The only difference is alignment of uint64_t in 32-bit.
+- * So the first field 'state' is accessed directly using
+- * offsetof() (where its offset happens to be zero), while the
+- * remaining fields which are all uint64_t, start at 'offset'
+- * which we tweak here by adding 4.
++ * The only difference between 32-bit and 64-bit versions of the
++ * runstate struct us the alignment of uint64_t in 32-bit, which
++ * means that the 64-bit version has an additional 4 bytes of
++ * padding after the first field 'state'.
++ *
++ * So we use 'int __user *user_state' to point to the state field,
++ * and 'uint64_t __user *user_times' for runstate_entry_time. So
++ * the actual array of time[] in each state starts at user_times[1].
+ */
++ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
++ BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
++ user_state = (int __user *)ghc->hva;
++
++ BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
++
++ user_times = (uint64_t __user *)(ghc->hva +
++ offsetof(struct compat_vcpu_runstate_info,
++ state_entry_time));
++#ifdef CONFIG_X86_64
+ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
+ offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
+ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
+ offsetof(struct compat_vcpu_runstate_info, time) + 4);
+
+ if (v->kvm->arch.xen.long_mode)
+- offset = offsetof(struct vcpu_runstate_info, state_entry_time);
++ user_times = (uint64_t __user *)(ghc->hva +
++ offsetof(struct vcpu_runstate_info,
++ state_entry_time));
+ #endif
+ /*
+ * First write the updated state_entry_time at the appropriate
+@@ -132,10 +157,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
+ BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state_entry_time) !=
+ sizeof(state_entry_time));
+
+- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
+- &state_entry_time, offset,
+- sizeof(state_entry_time)))
+- return;
++ if (__put_user(state_entry_time, user_times))
++ goto out;
+ smp_wmb();
+
+ /*
+@@ -149,11 +172,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
+ BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state) !=
+ sizeof(vx->current_runstate));
+
+- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
+- &vx->current_runstate,
+- offsetof(struct vcpu_runstate_info, state),
+- sizeof(vx->current_runstate)))
+- return;
++ if (__put_user(vx->current_runstate, user_state))
++ goto out;
+
+ /*
+ * Write the actual runstate times immediately after the
+@@ -168,24 +188,23 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
+ BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
+ sizeof(vx->runstate_times));
+
+- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
+- &vx->runstate_times[0],
+- offset + sizeof(u64),
+- sizeof(vx->runstate_times)))
+- return;
+-
++ if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
++ goto out;
+ smp_wmb();
+
+ /*
+ * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
+ * runstate_entry_time field.
+ */
+-
+ state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
+- &state_entry_time, offset,
+- sizeof(state_entry_time)))
+- return;
++ __put_user(state_entry_time, user_times);
++ smp_wmb();
++
++ out:
++ mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
++
++ if (atomic)
++ pagefault_enable();
+ }
+
+ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
+@@ -337,6 +356,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
+ break;
+ }
+
++ /* It must fit within a single page */
++ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_info) > PAGE_SIZE) {
++ r = -EINVAL;
++ break;
++ }
++
+ r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.xen.vcpu_info_cache,
+ data->u.gpa,
+@@ -354,6 +379,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
+ break;
+ }
+
++ /* It must fit within a single page */
++ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) {
++ r = -EINVAL;
++ break;
++ }
++
+ r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.xen.vcpu_time_info_cache,
+ data->u.gpa,
+@@ -375,6 +406,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
+ break;
+ }
+
++ /* It must fit within a single page */
++ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
++ r = -EINVAL;
++ break;
++ }
++
+ r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.xen.runstate_cache,
+ data->u.gpa,
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index a7b7d674f5005..133ef31639df1 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1364,10 +1364,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
+
+ xen_acpi_sleep_register();
+
+- /* Avoid searching for BIOS MP tables */
+- x86_init.mpparse.find_smp_config = x86_init_noop;
+- x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+-
+ xen_boot_params_init_edd();
+
+ #ifdef CONFIG_ACPI
+diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
+index 7ed56c6075b0c..477c484eb202c 100644
+--- a/arch/x86/xen/smp_pv.c
++++ b/arch/x86/xen/smp_pv.c
+@@ -148,28 +148,12 @@ int xen_smp_intr_init_pv(unsigned int cpu)
+ return rc;
+ }
+
+-static void __init xen_fill_possible_map(void)
+-{
+- int i, rc;
+-
+- if (xen_initial_domain())
+- return;
+-
+- for (i = 0; i < nr_cpu_ids; i++) {
+- rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
+- if (rc >= 0) {
+- num_processors++;
+- set_cpu_possible(i, true);
+- }
+- }
+-}
+-
+-static void __init xen_filter_cpu_maps(void)
++static void __init _get_smp_config(unsigned int early)
+ {
+ int i, rc;
+ unsigned int subtract = 0;
+
+- if (!xen_initial_domain())
++ if (early)
+ return;
+
+ num_processors = 0;
+@@ -210,7 +194,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void)
+ * sure the old memory can be recycled. */
+ make_lowmem_page_readwrite(xen_initial_gdt);
+
+- xen_filter_cpu_maps();
+ xen_setup_vcpu_info_placement();
+
+ /*
+@@ -486,5 +469,8 @@ static const struct smp_ops xen_smp_ops __initconst = {
+ void __init xen_smp_init(void)
+ {
+ smp_ops = xen_smp_ops;
+- xen_fill_possible_map();
++
++ /* Avoid searching for BIOS MP tables */
++ x86_init.mpparse.find_smp_config = x86_init_noop;
++ x86_init.mpparse.get_smp_config = _get_smp_config;
+ }
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index ea9a086d0498f..e66970bf27dbe 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -6878,6 +6878,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ spin_unlock_irq(&bfqd->lock);
+ #endif
+
++ wbt_enable_default(bfqd->queue);
++
+ kfree(bfqd);
+ }
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index d42a0f3ff7361..42ac3a985c2d7 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -350,13 +350,6 @@ void blk_queue_start_drain(struct request_queue *q)
+ wake_up_all(&q->mq_freeze_wq);
+ }
+
+-void blk_set_queue_dying(struct request_queue *q)
+-{
+- blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+- blk_queue_start_drain(q);
+-}
+-EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+-
+ /**
+ * blk_cleanup_queue - shutdown a request queue
+ * @q: request queue to shutdown
+@@ -374,7 +367,8 @@ void blk_cleanup_queue(struct request_queue *q)
+ WARN_ON_ONCE(blk_queue_registered(q));
+
+ /* mark @q DYING, no new request or merges will be allowed afterwards */
+- blk_set_queue_dying(q);
++ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
++ blk_queue_start_drain(q);
+
+ blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+diff --git a/block/elevator.c b/block/elevator.c
+index cd02ae332c4eb..1b5e57f6115f3 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -523,8 +523,6 @@ void elv_unregister_queue(struct request_queue *q)
+ kobject_del(&e->kobj);
+
+ e->registered = 0;
+- /* Re-enable throttling in case elevator disabled it */
+- wbt_enable_default(q);
+ }
+ }
+
+diff --git a/block/genhd.c b/block/genhd.c
+index de789d1a1e3d2..2dcedbe4ef046 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -544,6 +544,20 @@ out_free_ext_minor:
+ }
+ EXPORT_SYMBOL(device_add_disk);
+
++/**
++ * blk_mark_disk_dead - mark a disk as dead
++ * @disk: disk to mark as dead
++ *
++ * Mark as disk as dead (e.g. surprise removed) and don't accept any new I/O
++ * to this disk.
++ */
++void blk_mark_disk_dead(struct gendisk *disk)
++{
++ set_bit(GD_DEAD, &disk->state);
++ blk_queue_start_drain(disk->queue);
++}
++EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
++
+ /**
+ * del_gendisk - remove the gendisk
+ * @disk: the struct gendisk to remove
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index 1c48358b43ba3..e0185e841b2a3 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -424,15 +424,11 @@ static int lps0_device_attach(struct acpi_device *adev,
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
+
+ /*
+- * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
+- * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
+- * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
+- *
+- * Only enable on !AMD as enabling this universally causes problems for a number
+- * of AMD based systems.
++ * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
++ * EC GPE to be enabled while suspended for certain wakeup devices to
++ * work, so mark it as wakeup-capable.
+ */
+- if (!acpi_s2idle_vendor_amd())
+- acpi_ec_mark_gpe_for_wake();
++ acpi_ec_mark_gpe_for_wake();
+
+ return 0;
+ }
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 4d848cfc406fe..24b67d78cb83d 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4014,6 +4014,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+
+ /* devices that don't properly handle TRIM commands */
+ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
++ { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, },
+
+ /*
+ * As defined, the DRAT (Deterministic Read After Trim) and RZAT
+diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
+index 901855717cb53..ba61e72741eab 100644
+--- a/drivers/block/mtip32xx/mtip32xx.c
++++ b/drivers/block/mtip32xx/mtip32xx.c
+@@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
+ "Completion workers still active!\n");
+ }
+
+- blk_set_queue_dying(dd->queue);
++ blk_mark_disk_dead(dd->disk);
+ set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+
+ /* Clean up the block layer. */
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index e65c9d706f6fb..c4a52f33604dc 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -7182,7 +7182,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
+ * IO to complete/fail.
+ */
+ blk_mq_freeze_queue(rbd_dev->disk->queue);
+- blk_set_queue_dying(rbd_dev->disk->queue);
++ blk_mark_disk_dead(rbd_dev->disk);
+ }
+
+ del_gendisk(rbd_dev->disk);
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 4dbb71230d6e7..3efd341959832 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -2128,7 +2128,7 @@ static void blkfront_closing(struct blkfront_info *info)
+
+ /* No more blkif_request(). */
+ blk_mq_stop_hw_queues(info->rq);
+- blk_set_queue_dying(info->rq);
++ blk_mark_disk_dead(info->gd);
+ set_capacity(info->gd, 0);
+
+ for_each_rinfo(info, rinfo, i) {
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index a27ae3999ff32..ebe86de9d0acc 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1963,7 +1963,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+- input_pool.entropy_count = 0;
++ if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
++ wake_up_interruptible(&random_write_wait);
++ kill_fasync(&fasync, SIGIO, POLL_OUT);
++ }
+ return 0;
+ case RNDRESEEDCRNG:
+ if (!capable(CAP_SYS_ADMIN))
+diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
+index 8a6bf291a73fe..daafea5bc35d9 100644
+--- a/drivers/dma/ptdma/ptdma-dev.c
++++ b/drivers/dma/ptdma/ptdma-dev.c
+@@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt)
+ if (!cmd_q->qbase) {
+ dev_err(dev, "unable to allocate command queue\n");
+ ret = -ENOMEM;
+- goto e_dma_alloc;
++ goto e_destroy_pool;
+ }
+
+ cmd_q->qidx = 0;
+@@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt)
+
+ /* Request an irq */
+ ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
+- if (ret)
+- goto e_pool;
++ if (ret) {
++ dev_err(dev, "unable to allocate an IRQ\n");
++ goto e_free_dma;
++ }
+
+ /* Update the device registers with queue information. */
+ cmd_q->qcontrol &= ~CMD_Q_SIZE;
+@@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt)
+ /* Register the DMA engine support */
+ ret = pt_dmaengine_register(pt);
+ if (ret)
+- goto e_dmaengine;
++ goto e_free_irq;
+
+ /* Set up debugfs entries */
+ ptdma_debugfs_setup(pt);
+
+ return 0;
+
+-e_dmaengine:
++e_free_irq:
+ free_irq(pt->pt_irq, pt);
+
+-e_dma_alloc:
++e_free_dma:
+ dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
+
+-e_pool:
+- dev_err(dev, "unable to allocate an IRQ\n");
++e_destroy_pool:
+ dma_pool_destroy(pt->cmd_q.dma_pool);
+
+ return ret;
+diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
+index 6885b3dcd7a97..f4c46b3b6d9d7 100644
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)
+
+ dmac->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dmac);
+- dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+- dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
++ ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
++ if (ret)
++ return ret;
++
++ ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
++ if (ret)
++ return ret;
+
+ ret = rcar_dmac_parse_of(&pdev->dev, dmac);
+ if (ret < 0)
+diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
+index a42164389ebc2..d5d55732adba1 100644
+--- a/drivers/dma/stm32-dmamux.c
++++ b/drivers/dma/stm32-dmamux.c
+@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
+ ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
+ &stm32_dmamux->dmarouter);
+ if (ret)
+- goto err_clk;
++ goto pm_disable;
+
+ return 0;
+
++pm_disable:
++ pm_runtime_disable(&pdev->dev);
+ err_clk:
+ clk_disable_unprepare(stm32_dmamux->clk);
+
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index 2c5975674723a..a859ddd9d4a13 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -215,7 +215,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems)
+ else
+ return (char *)ptr;
+
+- r = (unsigned long)p % align;
++ r = (unsigned long)ptr % align;
+
+ if (r == 0)
+ return (char *)ptr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index f428f94b43c0a..7e73ac6fb21db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1397,12 +1397,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
+ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+
+ void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
+-bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+ void amdgpu_acpi_detect(void);
+ #else
+ static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
+ static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
+-static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+ static inline void amdgpu_acpi_detect(void) { }
+ static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
+ static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+@@ -1411,6 +1409,14 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
+ enum amdgpu_ss ss_state) { return 0; }
+ #endif
+
++#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
++bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
++bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
++#else
++static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
++static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
++#endif
++
+ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **mapping);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 4811b0faafd9a..0e12315fa0cb8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void)
+ }
+ }
+
++#if IS_ENABLED(CONFIG_SUSPEND)
++/**
++ * amdgpu_acpi_is_s3_active
++ *
++ * @adev: amdgpu_device_pointer
++ *
++ * returns true if supported, false if not.
++ */
++bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
++{
++ return !(adev->flags & AMD_IS_APU) ||
++ (pm_suspend_target_state == PM_SUSPEND_MEM);
++}
++
+ /**
+ * amdgpu_acpi_is_s0ix_active
+ *
+@@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void)
+ */
+ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
+ {
+-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
+- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
+- if (adev->flags & AMD_IS_APU)
+- return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
++ if (!(adev->flags & AMD_IS_APU) ||
++ (pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
++ return false;
++
++ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
++ dev_warn_once(adev->dev,
++ "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
++ "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
++ return false;
+ }
+-#endif
++
++#if !IS_ENABLED(CONFIG_AMD_PMC)
++ dev_warn_once(adev->dev,
++ "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
+ return false;
++#else
++ return true;
++#endif /* CONFIG_AMD_PMC */
+ }
++
++#endif /* CONFIG_SUSPEND */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 30059b7db0b25..b7509d3f7c1c7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1499,6 +1499,7 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
+ static int amdgpu_pmops_prepare(struct device *dev)
+ {
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
++ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ /* Return a positive number here so
+ * DPM_FLAG_SMART_SUSPEND works properly
+@@ -1506,6 +1507,13 @@ static int amdgpu_pmops_prepare(struct device *dev)
+ if (amdgpu_device_supports_boco(drm_dev))
+ return pm_runtime_suspended(dev);
+
++ /* if we will not support s3 or s2i for the device
++ * then skip suspend
++ */
++ if (!amdgpu_acpi_is_s0ix_active(adev) &&
++ !amdgpu_acpi_is_s3_active(adev))
++ return 1;
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 94126dc396888..8132f66177c27 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1892,7 +1892,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ unsigned i;
+ int r;
+
+- if (direct_submit && !ring->sched.ready) {
++ if (!direct_submit && !ring->sched.ready) {
+ DRM_ERROR("Trying to move memory with ring turned off.\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 8931000dcd418..e37948c157692 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -2062,6 +2062,10 @@ static int sdma_v4_0_suspend(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ /* SMU saves SDMA state for us */
++ if (adev->in_s0ix)
++ return 0;
++
+ return sdma_v4_0_hw_fini(adev);
+ }
+
+@@ -2069,6 +2073,10 @@ static int sdma_v4_0_resume(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ /* SMU restores SDMA state for us */
++ if (adev->in_s0ix)
++ return 0;
++
+ return sdma_v4_0_hw_init(adev);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 16556ae892d4a..5ae9b8133d6da 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3230,7 +3230,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
+- i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
++ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+ if (r) {
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+index 162ae71861247..21d2cbc3cbb20 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+@@ -120,7 +120,11 @@ int dcn31_smu_send_msg_with_param(
+ result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ if (result == VBIOSSMC_Result_Failed) {
+- ASSERT(0);
++ if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
++ param == TABLE_WATERMARKS)
++ DC_LOG_WARNING("Watermarks table not configured properly by SMU");
++ else
++ ASSERT(0);
+ REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
+ return -1;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 1860ccc3f4f2c..4fae73478840c 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1118,6 +1118,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
+
+ dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
+
++ dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
++
+ if (dc->res_pool->dmcu != NULL)
+ dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 3ab52d9a82cf6..e0f58fab5e8ed 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -185,6 +185,7 @@ struct dc_caps {
+ struct dc_color_caps color;
+ bool vbios_lttpr_aware;
+ bool vbios_lttpr_enable;
++ uint32_t max_otg_num;
+ };
+
+ struct dc_bug_wa {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+index 90c73a1cb9861..5e3bcaf12cac4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+@@ -138,8 +138,11 @@ static uint32_t convert_and_clamp(
+ ret_val = wm_ns * refclk_mhz;
+ ret_val /= 1000;
+
+- if (ret_val > clamp_value)
++ if (ret_val > clamp_value) {
++ /* clamping WMs is abnormal, unexpected and may lead to underflow*/
++ ASSERT(0);
+ ret_val = clamp_value;
++ }
+
+ return ret_val;
+ }
+@@ -159,7 +162,7 @@ static bool hubbub31_program_urgent_watermarks(
+ if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
+ hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+
+@@ -193,7 +196,7 @@ static bool hubbub31_program_urgent_watermarks(
+ if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
+ hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
+ } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
+@@ -203,7 +206,7 @@ static bool hubbub31_program_urgent_watermarks(
+ if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
+ hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+
+@@ -237,7 +240,7 @@ static bool hubbub31_program_urgent_watermarks(
+ if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
+ hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
+ } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
+@@ -247,7 +250,7 @@ static bool hubbub31_program_urgent_watermarks(
+ if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
+ hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+
+@@ -281,7 +284,7 @@ static bool hubbub31_program_urgent_watermarks(
+ if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
+ hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
+ } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
+@@ -291,7 +294,7 @@ static bool hubbub31_program_urgent_watermarks(
+ if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
+ hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+
+@@ -325,7 +328,7 @@ static bool hubbub31_program_urgent_watermarks(
+ if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
+ hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
+ } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
+@@ -351,7 +354,7 @@ static bool hubbub31_program_stutter_watermarks(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+@@ -367,7 +370,7 @@ static bool hubbub31_program_stutter_watermarks(
+ watermarks->a.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
+@@ -383,7 +386,7 @@ static bool hubbub31_program_stutter_watermarks(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
+@@ -399,7 +402,7 @@ static bool hubbub31_program_stutter_watermarks(
+ watermarks->a.cstate_pstate.cstate_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_z8_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
+@@ -416,7 +419,7 @@ static bool hubbub31_program_stutter_watermarks(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
+@@ -432,7 +435,7 @@ static bool hubbub31_program_stutter_watermarks(
+ watermarks->b.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
+@@ -448,7 +451,7 @@ static bool hubbub31_program_stutter_watermarks(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
+@@ -464,7 +467,7 @@ static bool hubbub31_program_stutter_watermarks(
+ watermarks->b.cstate_pstate.cstate_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_z8_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
+@@ -481,7 +484,7 @@ static bool hubbub31_program_stutter_watermarks(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
+@@ -497,7 +500,7 @@ static bool hubbub31_program_stutter_watermarks(
+ watermarks->c.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_exit_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
+@@ -513,7 +516,7 @@ static bool hubbub31_program_stutter_watermarks(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
+- refclk_mhz, 0x1fffff);
++ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
1537 +@@ -529,7 +532,7 @@ static bool hubbub31_program_stutter_watermarks(
1538 + watermarks->c.cstate_pstate.cstate_exit_z8_ns;
1539 + prog_wm_value = convert_and_clamp(
1540 + watermarks->c.cstate_pstate.cstate_exit_z8_ns,
1541 +- refclk_mhz, 0x1fffff);
1542 ++ refclk_mhz, 0xffff);
1543 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
1544 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
1545 + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
1546 +@@ -546,7 +549,7 @@ static bool hubbub31_program_stutter_watermarks(
1547 + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
1548 + prog_wm_value = convert_and_clamp(
1549 + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
1550 +- refclk_mhz, 0x1fffff);
1551 ++ refclk_mhz, 0xffff);
1552 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
1553 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
1554 + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
1555 +@@ -562,7 +565,7 @@ static bool hubbub31_program_stutter_watermarks(
1556 + watermarks->d.cstate_pstate.cstate_exit_ns;
1557 + prog_wm_value = convert_and_clamp(
1558 + watermarks->d.cstate_pstate.cstate_exit_ns,
1559 +- refclk_mhz, 0x1fffff);
1560 ++ refclk_mhz, 0xffff);
1561 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
1562 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
1563 + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
1564 +@@ -578,7 +581,7 @@ static bool hubbub31_program_stutter_watermarks(
1565 + watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
1566 + prog_wm_value = convert_and_clamp(
1567 + watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
1568 +- refclk_mhz, 0x1fffff);
1569 ++ refclk_mhz, 0xffff);
1570 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
1571 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
1572 + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
1573 +@@ -594,7 +597,7 @@ static bool hubbub31_program_stutter_watermarks(
1574 + watermarks->d.cstate_pstate.cstate_exit_z8_ns;
1575 + prog_wm_value = convert_and_clamp(
1576 + watermarks->d.cstate_pstate.cstate_exit_z8_ns,
1577 +- refclk_mhz, 0x1fffff);
1578 ++ refclk_mhz, 0xffff);
1579 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
1580 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
1581 + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
1582 +@@ -625,7 +628,7 @@ static bool hubbub31_program_pstate_watermarks(
1583 + watermarks->a.cstate_pstate.pstate_change_ns;
1584 + prog_wm_value = convert_and_clamp(
1585 + watermarks->a.cstate_pstate.pstate_change_ns,
1586 +- refclk_mhz, 0x1fffff);
1587 ++ refclk_mhz, 0xffff);
1588 + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
1589 + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
1590 + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
1591 +@@ -642,7 +645,7 @@ static bool hubbub31_program_pstate_watermarks(
1592 + watermarks->b.cstate_pstate.pstate_change_ns;
1593 + prog_wm_value = convert_and_clamp(
1594 + watermarks->b.cstate_pstate.pstate_change_ns,
1595 +- refclk_mhz, 0x1fffff);
1596 ++ refclk_mhz, 0xffff);
1597 + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
1598 + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
1599 + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
1600 +@@ -659,7 +662,7 @@ static bool hubbub31_program_pstate_watermarks(
1601 + watermarks->c.cstate_pstate.pstate_change_ns;
1602 + prog_wm_value = convert_and_clamp(
1603 + watermarks->c.cstate_pstate.pstate_change_ns,
1604 +- refclk_mhz, 0x1fffff);
1605 ++ refclk_mhz, 0xffff);
1606 + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
1607 + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
1608 + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
1609 +@@ -676,7 +679,7 @@ static bool hubbub31_program_pstate_watermarks(
1610 + watermarks->d.cstate_pstate.pstate_change_ns;
1611 + prog_wm_value = convert_and_clamp(
1612 + watermarks->d.cstate_pstate.pstate_change_ns,
1613 +- refclk_mhz, 0x1fffff);
1614 ++ refclk_mhz, 0xffff);
1615 + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
1616 + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
1617 + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
1618 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
1619 +index a403657151ba1..0e1a843608e43 100644
1620 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
1621 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
1622 +@@ -291,14 +291,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu)
1623 +
1624 + static int yellow_carp_mode_reset(struct smu_context *smu, int type)
1625 + {
1626 +- int ret = 0, index = 0;
1627 +-
1628 +- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
1629 +- SMU_MSG_GfxDeviceDriverReset);
1630 +- if (index < 0)
1631 +- return index == -EACCES ? 0 : index;
1632 ++ int ret = 0;
1633 +
1634 +- ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
1635 ++ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
1636 + if (ret)
1637 + dev_err(smu->adev->dev, "Failed to mode reset!\n");
1638 +
1639 +diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
1640 +index 909f318331816..f195c70131373 100644
1641 +--- a/drivers/gpu/drm/drm_atomic_uapi.c
1642 ++++ b/drivers/gpu/drm/drm_atomic_uapi.c
1643 +@@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
1644 + state->mode_blob = NULL;
1645 +
1646 + if (mode) {
1647 ++ struct drm_property_blob *blob;
1648 ++
1649 + drm_mode_convert_to_umode(&umode, mode);
1650 +- state->mode_blob =
1651 +- drm_property_create_blob(state->crtc->dev,
1652 +- sizeof(umode),
1653 +- &umode);
1654 +- if (IS_ERR(state->mode_blob))
1655 +- return PTR_ERR(state->mode_blob);
1656 ++ blob = drm_property_create_blob(crtc->dev,
1657 ++ sizeof(umode), &umode);
1658 ++ if (IS_ERR(blob))
1659 ++ return PTR_ERR(blob);
1660 +
1661 + drm_mode_copy(&state->mode, mode);
1662 ++
1663 ++ state->mode_blob = blob;
1664 + state->enable = true;
1665 + drm_dbg_atomic(crtc->dev,
1666 + "Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
1667 +diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
1668 +index 9d05674550a4f..6533efa840204 100644
1669 +--- a/drivers/gpu/drm/drm_gem_cma_helper.c
1670 ++++ b/drivers/gpu/drm/drm_gem_cma_helper.c
1671 +@@ -515,6 +515,7 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1672 + */
1673 + vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
1674 + vma->vm_flags &= ~VM_PFNMAP;
1675 ++ vma->vm_flags |= VM_DONTEXPAND;
1676 +
1677 + cma_obj = to_drm_gem_cma_obj(obj);
1678 +
1679 +diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
1680 +index f960f5d7664e6..fe6b34774483f 100644
1681 +--- a/drivers/gpu/drm/i915/Kconfig
1682 ++++ b/drivers/gpu/drm/i915/Kconfig
1683 +@@ -101,6 +101,7 @@ config DRM_I915_USERPTR
1684 + config DRM_I915_GVT
1685 + bool "Enable Intel GVT-g graphics virtualization host support"
1686 + depends on DRM_I915
1687 ++ depends on X86
1688 + depends on 64BIT
1689 + default n
1690 + help
1691 +diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
1692 +index 3855fba709807..f7f49b69830fa 100644
1693 +--- a/drivers/gpu/drm/i915/display/intel_opregion.c
1694 ++++ b/drivers/gpu/drm/i915/display/intel_opregion.c
1695 +@@ -361,6 +361,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
1696 + port++;
1697 + }
1698 +
1699 ++ /*
1700 ++ * The port numbering and mapping here is bizarre. The now-obsolete
1701 ++ * swsci spec supports ports numbered [0..4]. Port E is handled as a
1702 ++ * special case, but port F and beyond are not. The functionality is
1703 ++ * supposed to be obsolete for new platforms. Just bail out if the port
1704 ++ * number is out of bounds after mapping.
1705 ++ */
1706 ++ if (port > 4) {
1707 ++ drm_dbg_kms(&dev_priv->drm,
1708 ++ "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
1709 ++ intel_encoder->base.base.id, intel_encoder->base.name,
1710 ++ port_name(intel_encoder->port), port);
1711 ++ return -EINVAL;
1712 ++ }
1713 ++
1714 + if (!enable)
1715 + parm |= 4 << 8;
1716 +
1717 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
1718 +index 6ea13159bffcc..4b823fbfe76a1 100644
1719 +--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
1720 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
1721 +@@ -759,11 +759,9 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
1722 + if (obj->mm.madv != I915_MADV_WILLNEED) {
1723 + bo->priority = I915_TTM_PRIO_PURGE;
1724 + } else if (!i915_gem_object_has_pages(obj)) {
1725 +- if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
1726 +- bo->priority = I915_TTM_PRIO_HAS_PAGES;
1727 ++ bo->priority = I915_TTM_PRIO_NO_PAGES;
1728 + } else {
1729 +- if (bo->priority > I915_TTM_PRIO_NO_PAGES)
1730 +- bo->priority = I915_TTM_PRIO_NO_PAGES;
1731 ++ bo->priority = I915_TTM_PRIO_HAS_PAGES;
1732 + }
1733 +
1734 + ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
1735 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1736 +index aea4cc2b3486e..8937bc8985d6d 100644
1737 +--- a/drivers/gpu/drm/i915/intel_pm.c
1738 ++++ b/drivers/gpu/drm/i915/intel_pm.c
1739 +@@ -4844,7 +4844,7 @@ static bool check_mbus_joined(u8 active_pipes,
1740 + {
1741 + int i;
1742 +
1743 +- for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
1744 ++ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1745 + if (dbuf_slices[i].active_pipes == active_pipes)
1746 + return dbuf_slices[i].join_mbus;
1747 + }
1748 +@@ -4861,7 +4861,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
1749 + {
1750 + int i;
1751 +
1752 +- for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
1753 ++ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1754 + if (dbuf_slices[i].active_pipes == active_pipes &&
1755 + dbuf_slices[i].join_mbus == join_mbus)
1756 + return dbuf_slices[i].dbuf_mask[pipe];
1757 +diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
1758 +index 262641a014b06..c91130a6be2a1 100644
1759 +--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
1760 ++++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
1761 +@@ -117,8 +117,12 @@ nvkm_falcon_disable(struct nvkm_falcon *falcon)
1762 + int
1763 + nvkm_falcon_reset(struct nvkm_falcon *falcon)
1764 + {
1765 +- nvkm_falcon_disable(falcon);
1766 +- return nvkm_falcon_enable(falcon);
1767 ++ if (!falcon->func->reset) {
1768 ++ nvkm_falcon_disable(falcon);
1769 ++ return nvkm_falcon_enable(falcon);
1770 ++ }
1771 ++
1772 ++ return falcon->func->reset(falcon);
1773 + }
1774 +
1775 + int
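The nvkm_falcon_reset() change above is the optional-override idiom: prefer a chip-specific hook when the function table provides one, and fall back to the generic disable/enable sequence otherwise. A compact sketch of that shape, assuming made-up names:

struct dev;
struct dev_funcs { int (*reset)(struct dev *); };  /* may be NULL */
struct dev { const struct dev_funcs *func; };

static int generic_disable(struct dev *d) { (void)d; return 0; }
static int generic_enable(struct dev *d)  { (void)d; return 0; }

static int dev_reset(struct dev *d)
{
	if (!d->func->reset) {          /* no override: generic path */
		generic_disable(d);
		return generic_enable(d);
	}
	return d->func->reset(d);       /* chip-specific reset */
}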
1776 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
1777 +index 5968c7696596c..40439e329aa9f 100644
1778 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
1779 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
1780 +@@ -23,9 +23,38 @@
1781 + */
1782 + #include "priv.h"
1783 +
1784 ++static int
1785 ++gm200_pmu_flcn_reset(struct nvkm_falcon *falcon)
1786 ++{
1787 ++ struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
1788 ++
1789 ++ nvkm_falcon_wr32(falcon, 0x014, 0x0000ffff);
1790 ++ pmu->func->reset(pmu);
1791 ++ return nvkm_falcon_enable(falcon);
1792 ++}
1793 ++
1794 ++const struct nvkm_falcon_func
1795 ++gm200_pmu_flcn = {
1796 ++ .debug = 0xc08,
1797 ++ .fbif = 0xe00,
1798 ++ .load_imem = nvkm_falcon_v1_load_imem,
1799 ++ .load_dmem = nvkm_falcon_v1_load_dmem,
1800 ++ .read_dmem = nvkm_falcon_v1_read_dmem,
1801 ++ .bind_context = nvkm_falcon_v1_bind_context,
1802 ++ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
1803 ++ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
1804 ++ .set_start_addr = nvkm_falcon_v1_set_start_addr,
1805 ++ .start = nvkm_falcon_v1_start,
1806 ++ .enable = nvkm_falcon_v1_enable,
1807 ++ .disable = nvkm_falcon_v1_disable,
1808 ++ .reset = gm200_pmu_flcn_reset,
1809 ++ .cmdq = { 0x4a0, 0x4b0, 4 },
1810 ++ .msgq = { 0x4c8, 0x4cc, 0 },
1811 ++};
1812 ++
1813 + static const struct nvkm_pmu_func
1814 + gm200_pmu = {
1815 +- .flcn = &gt215_pmu_flcn,
1816 ++ .flcn = &gm200_pmu_flcn,
1817 + .enabled = gf100_pmu_enabled,
1818 + .reset = gf100_pmu_reset,
1819 + };
1820 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
1821 +index 148706977eec7..e1772211b0a4b 100644
1822 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
1823 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
1824 +@@ -211,7 +211,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu)
1825 +
1826 + static const struct nvkm_pmu_func
1827 + gm20b_pmu = {
1828 +- .flcn = &gt215_pmu_flcn,
1829 ++ .flcn = &gm200_pmu_flcn,
1830 + .enabled = gf100_pmu_enabled,
1831 + .intr = gt215_pmu_intr,
1832 + .recv = gm20b_pmu_recv,
1833 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
1834 +index 00da1b873ce81..6bf7fc1bd1e3b 100644
1835 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
1836 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
1837 +@@ -39,7 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu)
1838 +
1839 + static const struct nvkm_pmu_func
1840 + gp102_pmu = {
1841 +- .flcn = &gt215_pmu_flcn,
1842 ++ .flcn = &gm200_pmu_flcn,
1843 + .enabled = gp102_pmu_enabled,
1844 + .reset = gp102_pmu_reset,
1845 + };
1846 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
1847 +index 461f722656e24..ba1583bb618b2 100644
1848 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
1849 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
1850 +@@ -78,7 +78,7 @@ gp10b_pmu_acr = {
1851 +
1852 + static const struct nvkm_pmu_func
1853 + gp10b_pmu = {
1854 +- .flcn = &gt215_pmu_flcn,
1855 ++ .flcn = &gm200_pmu_flcn,
1856 + .enabled = gf100_pmu_enabled,
1857 + .intr = gt215_pmu_intr,
1858 + .recv = gm20b_pmu_recv,
1859 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
1860 +index e7860d1773539..bcaade758ff72 100644
1861 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
1862 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
1863 +@@ -44,6 +44,8 @@ void gf100_pmu_reset(struct nvkm_pmu *);
1864 +
1865 + void gk110_pmu_pgob(struct nvkm_pmu *, bool);
1866 +
1867 ++extern const struct nvkm_falcon_func gm200_pmu_flcn;
1868 ++
1869 + void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64);
1870 + void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
1871 + int gm20b_pmu_acr_boot(struct nvkm_falcon *);
1872 +diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
1873 +index 0fce73b9a6469..70bd84b7ef2b0 100644
1874 +--- a/drivers/gpu/drm/radeon/atombios_encoders.c
1875 ++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
1876 +@@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
1877 + * so don't register a backlight device
1878 + */
1879 + if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
1880 +- (rdev->pdev->device == 0x6741))
1881 ++ (rdev->pdev->device == 0x6741) &&
1882 ++ !dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
1883 + return;
1884 +
1885 + if (!radeon_encoder->enc_priv)
1886 +diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
1887 +index 830bdd5e9b7ce..8677c82716784 100644
1888 +--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
1889 ++++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
1890 +@@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
1891 + return ret;
1892 + }
1893 +
1894 +- ret = clk_prepare_enable(hdmi->vpll_clk);
1895 +- if (ret) {
1896 +- DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
1897 +- ret);
1898 +- return ret;
1899 +- }
1900 +-
1901 + hdmi->phy = devm_phy_optional_get(dev, "hdmi");
1902 + if (IS_ERR(hdmi->phy)) {
1903 + ret = PTR_ERR(hdmi->phy);
1904 +@@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
1905 + return ret;
1906 + }
1907 +
1908 ++ ret = clk_prepare_enable(hdmi->vpll_clk);
1909 ++ if (ret) {
1910 ++ DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
1911 ++ ret);
1912 ++ return ret;
1913 ++ }
1914 ++
1915 + drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
1916 + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
1917 +
1918 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
1919 +index 05c007b213f24..f7a4eaf3a2e07 100644
1920 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
1921 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
1922 +@@ -36,11 +36,11 @@ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_
1923 + {
1924 + union cmd_response cmd_resp;
1925 +
1926 +- /* Get response with status within a max of 800 ms timeout */
1927 ++ /* Get response with status within a max of 1600 ms timeout */
1928 + if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
1929 + (cmd_resp.response_v2.response == sensor_sts &&
1930 + cmd_resp.response_v2.status == 0 && (sid == 0xff ||
1931 +- cmd_resp.response_v2.sensor_id == sid)), 500, 800000))
1932 ++ cmd_resp.response_v2.sensor_id == sid)), 500, 1600000))
1933 + return cmd_resp.response_v2.response;
1934 +
1935 + return SENSOR_DISABLED;
1936 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
1937 +index 1ff6f83cb6fd1..9c9119227135e 100644
1938 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
1939 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
1940 +@@ -48,7 +48,7 @@ union sfh_cmd_base {
1941 + } s;
1942 + struct {
1943 + u32 cmd_id : 4;
1944 +- u32 intr_enable : 1;
1945 ++ u32 intr_disable : 1;
1946 + u32 rsvd1 : 3;
1947 + u32 length : 7;
1948 + u32 mem_type : 1;
1949 +diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
1950 +index 0c36972193821..07eb3281b88db 100644
1951 +--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
1952 ++++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
1953 +@@ -26,6 +26,7 @@
1954 + #define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02
1955 + #define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05
1956 + #define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04
1957 ++#define ILLUMINANCE_MASK GENMASK(14, 0)
1958 +
1959 + int get_report_descriptor(int sensor_idx, u8 *rep_desc)
1960 + {
1961 +@@ -245,7 +246,8 @@ u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_
1962 + get_common_inputs(&als_input.common_property, report_id);
1963 + /* For ALS ,V2 Platforms uses C2P_MSG5 register instead of DRAM access method */
1964 + if (supported_input == V2_STATUS)
1965 +- als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5));
1966 ++ als_input.illuminance_value =
1967 ++ readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK;
1968 + else
1969 + als_input.illuminance_value =
1970 + (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
1971 +diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
1972 +index 8e960d7b233b3..9b42b0cdeef06 100644
1973 +--- a/drivers/hid/hid-elo.c
1974 ++++ b/drivers/hid/hid-elo.c
1975 +@@ -262,6 +262,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
1976 +
1977 + return 0;
1978 + err_free:
1979 ++ usb_put_dev(udev);
1980 + kfree(priv);
1981 + return ret;
1982 + }
1983 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1984 +index bdedf594e2d1e..645a5f566d233 100644
1985 +--- a/drivers/hid/hid-ids.h
1986 ++++ b/drivers/hid/hid-ids.h
1987 +@@ -1353,6 +1353,7 @@
1988 + #define USB_VENDOR_ID_UGTIZER 0x2179
1989 + #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053
1990 + #define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077
1991 ++#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004
1992 +
1993 + #define USB_VENDOR_ID_VIEWSONIC 0x0543
1994 + #define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621
1995 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
1996 +index 65b7114761749..544d1197aca48 100644
1997 +--- a/drivers/hid/hid-quirks.c
1998 ++++ b/drivers/hid/hid-quirks.c
1999 +@@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = {
2000 + { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
2001 + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
2002 + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
2003 ++ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT },
2004 + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
2005 + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
2006 + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
2007 +diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
2008 +index b4dad66fa954d..ec6c73f75ffe0 100644
2009 +--- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
2010 ++++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
2011 +@@ -27,7 +27,6 @@ struct i2c_hid_of_goodix {
2012 +
2013 + struct regulator *vdd;
2014 + struct notifier_block nb;
2015 +- struct mutex regulator_mutex;
2016 + struct gpio_desc *reset_gpio;
2017 + const struct goodix_i2c_hid_timing_data *timings;
2018 + };
2019 +@@ -67,8 +66,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
2020 + container_of(nb, struct i2c_hid_of_goodix, nb);
2021 + int ret = NOTIFY_OK;
2022 +
2023 +- mutex_lock(&ihid_goodix->regulator_mutex);
2024 +-
2025 + switch (event) {
2026 + case REGULATOR_EVENT_PRE_DISABLE:
2027 + gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
2028 +@@ -87,8 +84,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
2029 + break;
2030 + }
2031 +
2032 +- mutex_unlock(&ihid_goodix->regulator_mutex);
2033 +-
2034 + return ret;
2035 + }
2036 +
2037 +@@ -102,8 +97,6 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
2038 + if (!ihid_goodix)
2039 + return -ENOMEM;
2040 +
2041 +- mutex_init(&ihid_goodix->regulator_mutex);
2042 +-
2043 + ihid_goodix->ops.power_up = goodix_i2c_hid_power_up;
2044 + ihid_goodix->ops.power_down = goodix_i2c_hid_power_down;
2045 +
2046 +@@ -130,25 +123,28 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
2047 + * long. Holding the controller in reset apparently draws extra
2048 + * power.
2049 + */
2050 +- mutex_lock(&ihid_goodix->regulator_mutex);
2051 + ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify;
2052 + ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb);
2053 +- if (ret) {
2054 +- mutex_unlock(&ihid_goodix->regulator_mutex);
2055 ++ if (ret)
2056 + return dev_err_probe(&client->dev, ret,
2057 + "regulator notifier request failed\n");
2058 +- }
2059 +
2060 + /*
2061 + * If someone else is holding the regulator on (or the regulator is
2062 + * an always-on one) we might never be told to deassert reset. Do it
2063 +- * now. Here we'll assume that someone else might have _just
2064 +- * barely_ turned the regulator on so we'll do the full
2065 +- * "post_power_delay" just in case.
2066 ++ * now... and temporarily bump the regulator reference count just to
2067 ++ * make sure it is impossible for this to race with our own notifier!
2068 ++ * We also assume that someone else might have _just barely_ turned
2069 ++ * the regulator on so we'll do the full "post_power_delay" just in
2070 ++ * case.
2071 + */
2072 +- if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd))
2073 ++ if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) {
2074 ++ ret = regulator_enable(ihid_goodix->vdd);
2075 ++ if (ret)
2076 ++ return ret;
2077 + goodix_i2c_hid_deassert_reset(ihid_goodix, true);
2078 +- mutex_unlock(&ihid_goodix->regulator_mutex);
2079 ++ regulator_disable(ihid_goodix->vdd);
2080 ++ }
2081 +
2082 + return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0);
2083 + }
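The Goodix probe fix replaces a driver-local mutex with a reference-count bump on the shared supply itself: by taking its own enable reference for the duration of the reset deassert, the driver guarantees the other holder cannot switch the supply off underneath it. A loose userspace sketch of that pinning pattern (a toy atomic refcount, not the regulator framework):

#include <stdatomic.h>

struct res { atomic_int users; };

static void res_get(struct res *r)   { atomic_fetch_add(&r->users, 1); }
static void res_put(struct res *r)   { atomic_fetch_sub(&r->users, 1); }
static int  res_is_on(struct res *r) { return atomic_load(&r->users) > 0; }

static void deassert_reset(struct res *vdd)
{
	if (res_is_on(vdd)) {   /* someone else holds it on right now */
		res_get(vdd);   /* pin it so that cannot change mid-sequence */
		/* ... touch the hardware that needs vdd to stay up ... */
		res_put(vdd);
	}
}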
2084 +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
2085 +index 392c1ac4f8193..44bd0b6ff5059 100644
2086 +--- a/drivers/hv/vmbus_drv.c
2087 ++++ b/drivers/hv/vmbus_drv.c
2088 +@@ -2027,8 +2027,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
2089 + kobj->kset = dev->channels_kset;
2090 + ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
2091 + "%u", relid);
2092 +- if (ret)
2093 ++ if (ret) {
2094 ++ kobject_put(kobj);
2095 + return ret;
2096 ++ }
2097 +
2098 + ret = sysfs_create_group(kobj, &vmbus_chan_group);
2099 +
2100 +@@ -2037,6 +2039,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
2101 + * The calling functions' error handling paths will cleanup the
2102 + * empty channel directory.
2103 + */
2104 ++ kobject_put(kobj);
2105 + dev_err(device, "Unable to set up channel sysfs files\n");
2106 + return ret;
2107 + }
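The vmbus change encodes the kobject lifetime rule: once kobject_init_and_add() has run, the object's first reference exists even if the add step failed, and only kobject_put() may release it. A hypothetical stand-alone analogue of that ownership rule (toy single-threaded refcount, not the kobject API):

#include <stdlib.h>

struct obj { int refs; void (*release)(struct obj *); };

static void obj_release(struct obj *o) { free(o); }

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		o->release(o);
}

static int obj_init_and_add(struct obj *o, int fail)
{
	o->refs = 1;            /* init always takes the first ref */
	o->release = obj_release;
	return fail ? -1 : 0;   /* even on failure, caller owns a ref */
}

static int add_channel(int fail)
{
	struct obj *o = malloc(sizeof(*o));
	int ret = o ? obj_init_and_add(o, fail) : -1;

	if (o && ret)
		obj_put(o);     /* the fix: drop the ref on error */
	return ret;             /* on success the registered object keeps its ref */
}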
2108 +diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
2109 +index 490ee3962645d..b00f35c0b0662 100644
2110 +--- a/drivers/i2c/busses/i2c-brcmstb.c
2111 ++++ b/drivers/i2c/busses/i2c-brcmstb.c
2112 +@@ -673,7 +673,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
2113 +
2114 + /* set the data in/out register size for compatible SoCs */
2115 + if (of_device_is_compatible(dev->device->of_node,
2116 +- "brcmstb,brcmper-i2c"))
2117 ++ "brcm,brcmper-i2c"))
2118 + dev->data_regsz = sizeof(u8);
2119 + else
2120 + dev->data_regsz = sizeof(u32);
2121 +diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
2122 +index c1de8eb66169f..cf54f1cb4c57a 100644
2123 +--- a/drivers/i2c/busses/i2c-qcom-cci.c
2124 ++++ b/drivers/i2c/busses/i2c-qcom-cci.c
2125 +@@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev)
2126 + cci->master[idx].adap.quirks = &cci->data->quirks;
2127 + cci->master[idx].adap.algo = &cci_algo;
2128 + cci->master[idx].adap.dev.parent = dev;
2129 +- cci->master[idx].adap.dev.of_node = child;
2130 ++ cci->master[idx].adap.dev.of_node = of_node_get(child);
2131 + cci->master[idx].master = idx;
2132 + cci->master[idx].cci = cci;
2133 +
2134 +@@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev)
2135 + continue;
2136 +
2137 + ret = i2c_add_adapter(&cci->master[i].adap);
2138 +- if (ret < 0)
2139 ++ if (ret < 0) {
2140 ++ of_node_put(cci->master[i].adap.dev.of_node);
2141 + goto error_i2c;
2142 ++ }
2143 + }
2144 +
2145 + pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
2146 +@@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev)
2147 + return 0;
2148 +
2149 + error_i2c:
2150 +- for (; i >= 0; i--) {
2151 +- if (cci->master[i].cci)
2152 ++ for (--i ; i >= 0; i--) {
2153 ++ if (cci->master[i].cci) {
2154 + i2c_del_adapter(&cci->master[i].adap);
2155 ++ of_node_put(cci->master[i].adap.dev.of_node);
2156 ++ }
2157 + }
2158 + error:
2159 + disable_irq(cci->irq);
2160 +@@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev)
2161 + int i;
2162 +
2163 + for (i = 0; i < cci->data->num_masters; i++) {
2164 +- if (cci->master[i].cci)
2165 ++ if (cci->master[i].cci) {
2166 + i2c_del_adapter(&cci->master[i].adap);
2167 ++ of_node_put(cci->master[i].adap.dev.of_node);
2168 ++ }
2169 + cci_halt(cci, i);
2170 + }
2171 +
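The cci_probe() error path above fixes a classic off-by-one in partial unwind: when the loop fails at index i, adapter i was never added, so teardown must start at i - 1. A self-contained sketch of the corrected shape:

static int acquire(int i)  { return i == 3 ? -1 : 0; }  /* toy: fails at 3 */
static void release(int i) { (void)i; }

static int probe_all(int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = acquire(i);
		if (ret < 0)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)        /* pre-decrement skips the failed index */
		release(i);
	return ret;
}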
2172 +diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
2173 +index 259065d271ef0..09cc98266d30f 100644
2174 +--- a/drivers/irqchip/irq-sifive-plic.c
2175 ++++ b/drivers/irqchip/irq-sifive-plic.c
2176 +@@ -398,3 +398,4 @@ out_free_priv:
2177 +
2178 + IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
2179 + IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
2180 ++IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */
2181 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
2182 +index b75ff6b2b9525..5f33700d12473 100644
2183 +--- a/drivers/md/dm.c
2184 ++++ b/drivers/md/dm.c
2185 +@@ -2156,7 +2156,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2186 + set_bit(DMF_FREEING, &md->flags);
2187 + spin_unlock(&_minor_lock);
2188 +
2189 +- blk_set_queue_dying(md->queue);
2190 ++ blk_mark_disk_dead(md->disk);
2191 +
2192 + /*
2193 + * Take suspend_lock so that presuspend and postsuspend methods
2194 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
2195 +index 431af5e8be2f8..b575d0bfd0d68 100644
2196 +--- a/drivers/mmc/core/block.c
2197 ++++ b/drivers/mmc/core/block.c
2198 +@@ -1682,31 +1682,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
2199 + struct mmc_card *card = mq->card;
2200 + struct mmc_host *host = card->host;
2201 + blk_status_t error = BLK_STS_OK;
2202 +- int retries = 0;
2203 +
2204 + do {
2205 + u32 status;
2206 + int err;
2207 ++ int retries = 0;
2208 +
2209 +- mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
2210 ++ while (retries++ <= MMC_READ_SINGLE_RETRIES) {
2211 ++ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
2212 +
2213 +- mmc_wait_for_req(host, mrq);
2214 ++ mmc_wait_for_req(host, mrq);
2215 +
2216 +- err = mmc_send_status(card, &status);
2217 +- if (err)
2218 +- goto error_exit;
2219 +-
2220 +- if (!mmc_host_is_spi(host) &&
2221 +- !mmc_ready_for_data(status)) {
2222 +- err = mmc_blk_fix_state(card, req);
2223 ++ err = mmc_send_status(card, &status);
2224 + if (err)
2225 + goto error_exit;
2226 +- }
2227 +
2228 +- if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
2229 +- continue;
2230 ++ if (!mmc_host_is_spi(host) &&
2231 ++ !mmc_ready_for_data(status)) {
2232 ++ err = mmc_blk_fix_state(card, req);
2233 ++ if (err)
2234 ++ goto error_exit;
2235 ++ }
2236 +
2237 +- retries = 0;
2238 ++ if (!mrq->cmd->error)
2239 ++ break;
2240 ++ }
2241 +
2242 + if (mrq->cmd->error ||
2243 + mrq->data->error ||
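The mmc_blk_read_single() rework pulls the retry counter inside the per-request loop so each request gets a fresh budget, and retries the whole prepare/issue/status sequence until it either succeeds or the budget is spent. A stripped-down sketch of that loop structure, with invented helpers:

#define MAX_RETRIES 3

static int attempts;
static int issue_once(void)             /* toy stand-in: fails twice */
{
	return ++attempts < 3 ? -1 : 0;
}

static int issue_with_retries(void)
{
	int err = -1;
	int retries = 0;                /* fresh budget per request */

	while (retries++ <= MAX_RETRIES) {
		err = issue_once();     /* full prepare/issue/check cycle */
		if (!err)
			break;          /* success: stop retrying */
	}
	return err;
}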
2244 +diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
2245 +index 6ed6c51fac69e..d503821a3e606 100644
2246 +--- a/drivers/mtd/devices/phram.c
2247 ++++ b/drivers/mtd/devices/phram.c
2248 +@@ -264,16 +264,20 @@ static int phram_setup(const char *val)
2249 + }
2250 + }
2251 +
2252 +- if (erasesize)
2253 +- div_u64_rem(len, (uint32_t)erasesize, &rem);
2254 +-
2255 + if (len == 0 || erasesize == 0 || erasesize > len
2256 +- || erasesize > UINT_MAX || rem) {
2257 ++ || erasesize > UINT_MAX) {
2258 + parse_err("illegal erasesize or len\n");
2259 + ret = -EINVAL;
2260 + goto error;
2261 + }
2262 +
2263 ++ div_u64_rem(len, (uint32_t)erasesize, &rem);
2264 ++ if (rem) {
2265 ++ parse_err("len is not multiple of erasesize\n");
2266 ++ ret = -EINVAL;
2267 ++ goto error;
2268 ++ }
2269 ++
2270 + ret = register_device(name, start, len, (uint32_t)erasesize);
2271 + if (ret)
2272 + goto error;
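The phram_setup() change splits one overloaded check in two: range-check the divisor first, then report "not a multiple" as its own error, and only divide once erasesize is known to be nonzero. Roughly, in plain C (standing in for div_u64_rem(); names are illustrative):

#include <stdint.h>
#include <stdio.h>

static int validate(uint64_t len, uint64_t erasesize)
{
	if (len == 0 || erasesize == 0 || erasesize > len ||
	    erasesize > UINT32_MAX) {
		fprintf(stderr, "illegal erasesize or len\n");
		return -1;
	}
	if (len % erasesize) {          /* safe: erasesize != 0 here */
		fprintf(stderr, "len is not multiple of erasesize\n");
		return -1;
	}
	return 0;
}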
2273 +diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
2274 +index f75929783b941..aee78f5f4f156 100644
2275 +--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
2276 ++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
2277 +@@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
2278 + mtd->oobsize / trans,
2279 + host->hwcfg.sector_size_1k);
2280 +
2281 +- if (!ret) {
2282 ++ if (ret != -EBADMSG) {
2283 + *err_addr = brcmnand_get_uncorrecc_addr(ctrl);
2284 +
2285 + if (*err_addr)
2286 +diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
2287 +index 6e9f7d80ef8b8..668d69fe4cf27 100644
2288 +--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
2289 ++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
2290 +@@ -2293,7 +2293,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
2291 + this->hw.must_apply_timings = false;
2292 + ret = gpmi_nfc_apply_timings(this);
2293 + if (ret)
2294 +- return ret;
2295 ++ goto out_pm;
2296 + }
2297 +
2298 + dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
2299 +@@ -2422,6 +2422,7 @@ unmap:
2300 +
2301 + this->bch = false;
2302 +
2303 ++out_pm:
2304 + pm_runtime_mark_last_busy(this->dev);
2305 + pm_runtime_put_autosuspend(this->dev);
2306 +
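The gpmi_nfc_exec_op() fix is the standard goto-cleanup discipline for paired get/put calls: after the runtime-PM get succeeds, every exit, including early errors, must funnel through the matching put. A minimal sketch with a toy resource pair:

static int refs;
static void res_get(void) { refs++; }
static void res_put(void) { refs--; }
static int  do_step(int fail) { return fail ? -1 : 0; }

static int exec_op(int fail)
{
	int ret;

	res_get();
	ret = do_step(fail);
	if (ret)
		goto out_put;   /* was: return ret (leaked the get) */

	/* ... main body ... */
	ret = 0;
out_put:
	res_put();
	return ret;
}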
2307 +diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
2308 +index efe0ffe4f1abc..9054559e52dda 100644
2309 +--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
2310 ++++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
2311 +@@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
2312 + struct ingenic_ecc *ecc;
2313 +
2314 + pdev = of_find_device_by_node(np);
2315 +- if (!pdev || !platform_get_drvdata(pdev))
2316 ++ if (!pdev)
2317 + return ERR_PTR(-EPROBE_DEFER);
2318 +
2319 ++ if (!platform_get_drvdata(pdev)) {
2320 ++ put_device(&pdev->dev);
2321 ++ return ERR_PTR(-EPROBE_DEFER);
2322 ++ }
2323 ++
2324 + ecc = platform_get_drvdata(pdev);
2325 + clk_prepare_enable(ecc->clk);
2326 +
2327 +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
2328 +index 04e6f7b267064..0f41a9a421575 100644
2329 +--- a/drivers/mtd/nand/raw/qcom_nandc.c
2330 ++++ b/drivers/mtd/nand/raw/qcom_nandc.c
2331 +@@ -2,7 +2,6 @@
2332 + /*
2333 + * Copyright (c) 2016, The Linux Foundation. All rights reserved.
2334 + */
2335 +-
2336 + #include <linux/clk.h>
2337 + #include <linux/slab.h>
2338 + #include <linux/bitops.h>
2339 +@@ -3063,10 +3062,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
2340 + if (dma_mapping_error(dev, nandc->base_dma))
2341 + return -ENXIO;
2342 +
2343 +- ret = qcom_nandc_alloc(nandc);
2344 +- if (ret)
2345 +- goto err_nandc_alloc;
2346 +-
2347 + ret = clk_prepare_enable(nandc->core_clk);
2348 + if (ret)
2349 + goto err_core_clk;
2350 +@@ -3075,6 +3070,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
2351 + if (ret)
2352 + goto err_aon_clk;
2353 +
2354 ++ ret = qcom_nandc_alloc(nandc);
2355 ++ if (ret)
2356 ++ goto err_nandc_alloc;
2357 ++
2358 + ret = qcom_nandc_setup(nandc);
2359 + if (ret)
2360 + goto err_setup;
2361 +@@ -3086,15 +3085,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
2362 + return 0;
2363 +
2364 + err_setup:
2365 ++ qcom_nandc_unalloc(nandc);
2366 ++err_nandc_alloc:
2367 + clk_disable_unprepare(nandc->aon_clk);
2368 + err_aon_clk:
2369 + clk_disable_unprepare(nandc->core_clk);
2370 + err_core_clk:
2371 +- qcom_nandc_unalloc(nandc);
2372 +-err_nandc_alloc:
2373 + dma_unmap_resource(dev, res->start, resource_size(res),
2374 + DMA_BIDIRECTIONAL, 0);
2375 +-
2376 + return ret;
2377 + }
2378 +
2379 +diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
2380 +index 06a818cd2433f..32ddfea701423 100644
2381 +--- a/drivers/mtd/parsers/qcomsmempart.c
2382 ++++ b/drivers/mtd/parsers/qcomsmempart.c
2383 +@@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
2384 + const struct mtd_partition **pparts,
2385 + struct mtd_part_parser_data *data)
2386 + {
2387 ++ size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
2388 ++ int ret, i, j, tmpparts, numparts = 0;
2389 + struct smem_flash_pentry *pentry;
2390 + struct smem_flash_ptable *ptable;
2391 +- size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
2392 + struct mtd_partition *parts;
2393 +- int ret, i, numparts;
2394 + char *name, *c;
2395 +
2396 + if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
2397 +@@ -87,8 +87,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
2398 + }
2399 +
2400 + /* Ensure that # of partitions is less than the max we have allocated */
2401 +- numparts = le32_to_cpu(ptable->numparts);
2402 +- if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
2403 ++ tmpparts = le32_to_cpu(ptable->numparts);
2404 ++ if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
2405 + pr_err("Partition numbers exceed the max limit\n");
2406 + return -EINVAL;
2407 + }
2408 +@@ -116,11 +116,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
2409 + return PTR_ERR(ptable);
2410 + }
2411 +
2412 ++ for (i = 0; i < tmpparts; i++) {
2413 ++ pentry = &ptable->pentry[i];
2414 ++ if (pentry->name[0] != '\0')
2415 ++ numparts++;
2416 ++ }
2417 ++
2418 + parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
2419 + if (!parts)
2420 + return -ENOMEM;
2421 +
2422 +- for (i = 0; i < numparts; i++) {
2423 ++ for (i = 0, j = 0; i < tmpparts; i++) {
2424 + pentry = &ptable->pentry[i];
2425 + if (pentry->name[0] == '\0')
2426 + continue;
2427 +@@ -135,24 +141,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
2428 + for (c = name; *c != '\0'; c++)
2429 + *c = tolower(*c);
2430 +
2431 +- parts[i].name = name;
2432 +- parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
2433 +- parts[i].mask_flags = pentry->attr;
2434 +- parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize;
2435 ++ parts[j].name = name;
2436 ++ parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
2437 ++ parts[j].mask_flags = pentry->attr;
2438 ++ parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
2439 + pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
2440 + i, pentry->name, le32_to_cpu(pentry->offset),
2441 + le32_to_cpu(pentry->length), pentry->attr);
2442 ++ j++;
2443 + }
2444 +
2445 + pr_debug("SMEM partition table found: ver: %d len: %d\n",
2446 +- le32_to_cpu(ptable->version), numparts);
2447 ++ le32_to_cpu(ptable->version), tmpparts);
2448 + *pparts = parts;
2449 +
2450 + return numparts;
2451 +
2452 + out_free_parts:
2453 +- while (--i >= 0)
2454 +- kfree(parts[i].name);
2455 ++ while (--j >= 0)
2456 ++ kfree(parts[j].name);
2457 + kfree(parts);
2458 + *pparts = NULL;
2459 +
2460 +@@ -166,6 +173,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
2461 +
2462 + for (i = 0; i < nr_parts; i++)
2463 + kfree(pparts[i].name);
2464 ++
2465 ++ kfree(pparts);
2466 + }
2467 +
2468 + static const struct of_device_id qcomsmem_of_match_table[] = {
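The qcomsmempart parser now makes two passes: count the entries with non-empty names, allocate exactly that many partitions, then fill them through an independent write index j so skipped slots leave no zero-filled holes (and the unwind frees only what was filled). A sketch of the same two-pass shape, with illustrative types rather than the SMEM layout:

#include <stdlib.h>

struct entry { char name[16]; };
struct part  { const char *name; };

static struct part *collect(const struct entry *e, int total, int *nout)
{
	int i, j, n = 0;
	struct part *parts;

	for (i = 0; i < total; i++)             /* pass 1: count */
		if (e[i].name[0] != '\0')
			n++;

	parts = calloc(n, sizeof(*parts));
	if (!parts)
		return NULL;

	for (i = 0, j = 0; i < total; i++) {    /* pass 2: fill */
		if (e[i].name[0] == '\0')
			continue;               /* skip without leaving a hole */
		parts[j++].name = e[i].name;
	}
	*nout = n;
	return parts;
}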
2469 +diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
2470 +index 9fd1d6cba3cda..a86b1f71762ea 100644
2471 +--- a/drivers/net/bonding/bond_3ad.c
2472 ++++ b/drivers/net/bonding/bond_3ad.c
2473 +@@ -225,7 +225,7 @@ static inline int __check_agg_selection_timer(struct port *port)
2474 + if (bond == NULL)
2475 + return 0;
2476 +
2477 +- return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
2478 ++ return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
2479 + }
2480 +
2481 + /**
2482 +@@ -1995,7 +1995,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
2483 + */
2484 + void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
2485 + {
2486 +- BOND_AD_INFO(bond).agg_select_timer = timeout;
2487 ++ atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
2488 + }
2489 +
2490 + /**
2491 +@@ -2278,6 +2278,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
2492 + spin_unlock_bh(&bond->mode_lock);
2493 + }
2494 +
2495 ++/**
2496 ++ * bond_agg_timer_advance - advance agg_select_timer
2497 ++ * @bond: bonding structure
2498 ++ *
2499 ++ * Return true when agg_select_timer reaches 0.
2500 ++ */
2501 ++static bool bond_agg_timer_advance(struct bonding *bond)
2502 ++{
2503 ++ int val, nval;
2504 ++
2505 ++ while (1) {
2506 ++ val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
2507 ++ if (!val)
2508 ++ return false;
2509 ++ nval = val - 1;
2510 ++ if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
2511 ++ val, nval) == val)
2512 ++ break;
2513 ++ }
2514 ++ return nval == 0;
2515 ++}
2516 ++
2517 + /**
2518 + * bond_3ad_state_machine_handler - handle state machines timeout
2519 + * @work: work context to fetch bonding struct to work on from
2520 +@@ -2313,9 +2335,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2521 + if (!bond_has_slaves(bond))
2522 + goto re_arm;
2523 +
2524 +- /* check if agg_select_timer timer after initialize is timed out */
2525 +- if (BOND_AD_INFO(bond).agg_select_timer &&
2526 +- !(--BOND_AD_INFO(bond).agg_select_timer)) {
2527 ++ if (bond_agg_timer_advance(bond)) {
2528 + slave = bond_first_slave_rcu(bond);
2529 + port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
2530 +
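bond_agg_timer_advance() above turns a bare --counter into an atomic compare-exchange loop, so a concurrent re-arm of the timer cannot be lost and expiry is reported exactly once. The same loop in portable C11 atomics, as a sketch:

#include <stdatomic.h>
#include <stdbool.h>

static bool timer_advance(atomic_int *timer)
{
	int val, nval;

	do {
		val = atomic_load(timer);
		if (val == 0)
			return false;   /* already expired, nothing to do */
		nval = val - 1;
	} while (!atomic_compare_exchange_weak(timer, &val, nval));

	return nval == 0;               /* true only for the final decrement */
}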
2531 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2532 +index 83cdaabd7b69d..46c3301a5e07d 100644
2533 +--- a/drivers/net/bonding/bond_main.c
2534 ++++ b/drivers/net/bonding/bond_main.c
2535 +@@ -2377,10 +2377,9 @@ static int __bond_release_one(struct net_device *bond_dev,
2536 + bond_select_active_slave(bond);
2537 + }
2538 +
2539 +- if (!bond_has_slaves(bond)) {
2540 +- bond_set_carrier(bond);
2541 ++ bond_set_carrier(bond);
2542 ++ if (!bond_has_slaves(bond))
2543 + eth_hw_addr_random(bond_dev);
2544 +- }
2545 +
2546 + unblock_netpoll_tx();
2547 + synchronize_rcu();
2548 +diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
2549 +index 9891b072b4625..9428aac4a6868 100644
2550 +--- a/drivers/net/dsa/Kconfig
2551 ++++ b/drivers/net/dsa/Kconfig
2552 +@@ -81,6 +81,7 @@ config NET_DSA_REALTEK_SMI
2553 +
2554 + config NET_DSA_SMSC_LAN9303
2555 + tristate
2556 ++ depends on VLAN_8021Q || VLAN_8021Q=n
2557 + select NET_DSA_TAG_LAN9303
2558 + select REGMAP
2559 + help
2560 +diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
2561 +index 89f920289ae21..0b6f29ee87b56 100644
2562 +--- a/drivers/net/dsa/lan9303-core.c
2563 ++++ b/drivers/net/dsa/lan9303-core.c
2564 +@@ -10,6 +10,7 @@
2565 + #include <linux/mii.h>
2566 + #include <linux/phy.h>
2567 + #include <linux/if_bridge.h>
2568 ++#include <linux/if_vlan.h>
2569 + #include <linux/etherdevice.h>
2570 +
2571 + #include "lan9303.h"
2572 +@@ -1083,21 +1084,27 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
2573 + static int lan9303_port_enable(struct dsa_switch *ds, int port,
2574 + struct phy_device *phy)
2575 + {
2576 ++ struct dsa_port *dp = dsa_to_port(ds, port);
2577 + struct lan9303 *chip = ds->priv;
2578 +
2579 +- if (!dsa_is_user_port(ds, port))
2580 ++ if (!dsa_port_is_user(dp))
2581 + return 0;
2582 +
2583 ++ vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
2584 ++
2585 + return lan9303_enable_processing_port(chip, port);
2586 + }
2587 +
2588 + static void lan9303_port_disable(struct dsa_switch *ds, int port)
2589 + {
2590 ++ struct dsa_port *dp = dsa_to_port(ds, port);
2591 + struct lan9303 *chip = ds->priv;
2592 +
2593 +- if (!dsa_is_user_port(ds, port))
2594 ++ if (!dsa_port_is_user(dp))
2595 + return;
2596 +
2597 ++ vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
2598 ++
2599 + lan9303_disable_processing_port(chip, port);
2600 + lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
2601 + }
2602 +@@ -1309,7 +1316,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
2603 + struct device_node *np)
2604 + {
2605 + chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
2606 +- GPIOD_OUT_LOW);
2607 ++ GPIOD_OUT_HIGH);
2608 + if (IS_ERR(chip->reset_gpio))
2609 + return PTR_ERR(chip->reset_gpio);
2610 +
2611 +diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
2612 +index 503adf03d2fc6..9e006a25b636c 100644
2613 +--- a/drivers/net/dsa/lantiq_gswip.c
2614 ++++ b/drivers/net/dsa/lantiq_gswip.c
2615 +@@ -2201,8 +2201,8 @@ static int gswip_remove(struct platform_device *pdev)
2616 +
2617 + if (priv->ds->slave_mii_bus) {
2618 + mdiobus_unregister(priv->ds->slave_mii_bus);
2619 +- mdiobus_free(priv->ds->slave_mii_bus);
2620 + of_node_put(priv->ds->slave_mii_bus->dev.of_node);
2621 ++ mdiobus_free(priv->ds->slave_mii_bus);
2622 + }
2623 +
2624 + for (i = 0; i < priv->num_gphy_fw; i++)
2625 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
2626 +index 056e3b65cd278..263da7e2d6be7 100644
2627 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
2628 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
2629 +@@ -2291,6 +2291,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
2630 + if (!mv88e6xxx_max_vid(chip))
2631 + return -EOPNOTSUPP;
2632 +
2633 ++ /* The ATU removal procedure needs the FID to be mapped in the VTU,
2634 ++ * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
2635 ++ * switchdev workqueue to ensure that all FDB entries are deleted
2636 ++ * before we remove the VLAN.
2637 ++ */
2638 ++ dsa_flush_workqueue();
2639 ++
2640 + mv88e6xxx_reg_lock(chip);
2641 +
2642 + err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
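The comment in the mv88e6xxx hunk states the ordering rule being enforced: deferred FDB deletions still reference the FID, so the switchdev workqueue must be drained before the VLAN (and with it the FID mapping) goes away. A single-threaded toy model of "flush pending work before removing what it depends on", with invented names:

struct work { void (*fn)(void); struct work *next; };

static struct work *pending;

static void flush_workqueue(void)
{
	while (pending) {               /* run every queued FDB deletion */
		struct work *w = pending;
		pending = w->next;
		w->fn();
	}
}

static void vlan_del(void)
{
	flush_workqueue();              /* FDB entries gone before the VLAN */
	/* ... now safe to remove the VLAN/FID mapping ... */
}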
2643 +diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
2644 +index 3b51b172b3172..5cbd815c737e7 100644
2645 +--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
2646 ++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
2647 +@@ -900,7 +900,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
2648 + atl1c_clean_buffer(pdev, buffer_info);
2649 + }
2650 +
2651 +- netdev_reset_queue(adapter->netdev);
2652 ++ netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
2653 +
2654 + /* Zero out Tx-buffers */
2655 + memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
2656 +diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
2657 +index df8ff839cc621..94eb3a42158e9 100644
2658 +--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
2659 ++++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
2660 +@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
2661 + {
2662 + struct device_node *np = pdev->dev.of_node;
2663 + struct bgmac *bgmac;
2664 ++ struct resource *regs;
2665 + int ret;
2666 +
2667 + bgmac = bgmac_alloc(&pdev->dev);
2668 +@@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev)
2669 + if (IS_ERR(bgmac->plat.base))
2670 + return PTR_ERR(bgmac->plat.base);
2671 +
2672 +- bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
2673 +- if (IS_ERR(bgmac->plat.idm_base))
2674 +- return PTR_ERR(bgmac->plat.idm_base);
2675 +- else
2676 ++ /* The idm_base resource is optional for some platforms */
2677 ++ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
2678 ++ if (regs) {
2679 ++ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
2680 ++ if (IS_ERR(bgmac->plat.idm_base))
2681 ++ return PTR_ERR(bgmac->plat.idm_base);
2682 + bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
2683 ++ }
2684 +
2685 +- bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
2686 +- if (IS_ERR(bgmac->plat.nicpm_base))
2687 +- return PTR_ERR(bgmac->plat.nicpm_base);
2688 ++ /* The nicpm_base resource is optional for some platforms */
2689 ++ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
2690 ++ if (regs) {
2691 ++ bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
2692 ++ regs);
2693 ++ if (IS_ERR(bgmac->plat.nicpm_base))
2694 ++ return PTR_ERR(bgmac->plat.nicpm_base);
2695 ++ }
2696 +
2697 + bgmac->read = platform_bgmac_read;
2698 + bgmac->write = platform_bgmac_write;
2699 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
2700 +index d13fb1d318215..d71c11a6282ec 100644
2701 +--- a/drivers/net/ethernet/cadence/macb_main.c
2702 ++++ b/drivers/net/ethernet/cadence/macb_main.c
2703 +@@ -4739,7 +4739,7 @@ static int macb_probe(struct platform_device *pdev)
2704 +
2705 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2706 + if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
2707 +- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
2708 ++ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
2709 + bp->hw_dma_cap |= HW_DMA_CAP_64B;
2710 + }
2711 + #endif
2712 +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
2713 +index 110075336a757..8b7a29e1e221b 100644
2714 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
2715 ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
2716 +@@ -4329,7 +4329,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
2717 + }
2718 +
2719 + INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
2720 +-
2721 ++ mutex_init(&priv->onestep_tstamp_lock);
2722 + skb_queue_head_init(&priv->tx_skbs);
2723 +
2724 + priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
2725 +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
2726 +index d6eefbbf163fa..cacd454ac696c 100644
2727 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
2728 ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
2729 +@@ -532,6 +532,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
2730 + struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
2731 + struct flow_dissector *dissector = rule->match.dissector;
2732 + struct netlink_ext_ack *extack = cls->common.extack;
2733 ++ int ret = -EOPNOTSUPP;
2734 +
2735 + if (dissector->used_keys &
2736 + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
2737 +@@ -561,9 +562,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
2738 + }
2739 +
2740 + *vlan = (u16)match.key->vlan_id;
2741 ++ ret = 0;
2742 + }
2743 +
2744 +- return 0;
2745 ++ return ret;
2746 + }
2747 +
2748 + static int
2749 +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
2750 +index 62bf879dc6232..8c08997dcef64 100644
2751 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c
2752 ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
2753 +@@ -1521,6 +1521,12 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
2754 + if (status)
2755 + dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
2756 + vsi_num, ice_stat_str(status));
2757 ++
2758 ++ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
2759 ++ ICE_FLOW_SEG_HDR_ESP);
2760 ++ if (status)
2761 ++ dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
2762 ++ vsi_num, status);
2763 + }
2764 +
2765 + /**
2766 +diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
2767 +index dc7e5ea6ec158..148d431fcde42 100644
2768 +--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
2769 ++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
2770 +@@ -145,9 +145,9 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
2771 + skb_put(skb, byte_cnt - ETH_FCS_LEN);
2772 + eth_skb_pad(skb);
2773 + skb->protocol = eth_type_trans(skb, netdev);
2774 +- netif_rx(skb);
2775 + netdev->stats.rx_bytes += skb->len;
2776 + netdev->stats.rx_packets++;
2777 ++ netif_rx(skb);
2778 + }
2779 +
2780 + static int sparx5_inject(struct sparx5 *sparx5,
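The sparx5 reorder is an ownership-transfer fix: netif_rx() hands the skb to the network stack, which may free it immediately, so skb->len must be read for the stats before the call, not after. A tiny sketch of reading what you need before a consuming call (invented names):

#include <stdlib.h>

struct buf { unsigned int len; };
static unsigned long rx_bytes, rx_packets;

static void stack_rx(struct buf *b)     /* consumes b, like netif_rx() */
{
	free(b);
}

static void deliver(struct buf *b)
{
	rx_bytes += b->len;     /* account first, while b is still ours */
	rx_packets++;
	stack_rx(b);            /* ownership transferred; b is gone now */
}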
2781 +diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
2782 +index 7d67f41387f55..4f5ef8a9a9a87 100644
2783 +--- a/drivers/net/ieee802154/at86rf230.c
2784 ++++ b/drivers/net/ieee802154/at86rf230.c
2785 +@@ -100,6 +100,7 @@ struct at86rf230_local {
2786 + unsigned long cal_timeout;
2787 + bool is_tx;
2788 + bool is_tx_from_off;
2789 ++ bool was_tx;
2790 + u8 tx_retry;
2791 + struct sk_buff *tx_skb;
2792 + struct at86rf230_state_change tx;
2793 +@@ -343,7 +344,11 @@ at86rf230_async_error_recover_complete(void *context)
2794 + if (ctx->free)
2795 + kfree(ctx);
2796 +
2797 +- ieee802154_wake_queue(lp->hw);
2798 ++ if (lp->was_tx) {
2799 ++ lp->was_tx = 0;
2800 ++ dev_kfree_skb_any(lp->tx_skb);
2801 ++ ieee802154_wake_queue(lp->hw);
2802 ++ }
2803 + }
2804 +
2805 + static void
2806 +@@ -352,7 +357,11 @@ at86rf230_async_error_recover(void *context)
2807 + struct at86rf230_state_change *ctx = context;
2808 + struct at86rf230_local *lp = ctx->lp;
2809 +
2810 +- lp->is_tx = 0;
2811 ++ if (lp->is_tx) {
2812 ++ lp->was_tx = 1;
2813 ++ lp->is_tx = 0;
2814 ++ }
2815 ++
2816 + at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
2817 + at86rf230_async_error_recover_complete);
2818 + }
2819 +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
2820 +index 97fbe850de9ba..96592a20c61ff 100644
2821 +--- a/drivers/net/ieee802154/ca8210.c
2822 ++++ b/drivers/net/ieee802154/ca8210.c
2823 +@@ -2977,8 +2977,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
2824 + ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
2825 + ca8210_hw->phy->cca_ed_level = -9800;
2826 + ca8210_hw->phy->symbol_duration = 16;
2827 +- ca8210_hw->phy->lifs_period = 40;
2828 +- ca8210_hw->phy->sifs_period = 12;
2829 ++ ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration;
2830 ++ ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration;
2831 + ca8210_hw->flags =
2832 + IEEE802154_HW_AFILT |
2833 + IEEE802154_HW_OMIT_CKSUM |
2834 +diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
2835 +index 4300261e2f9e7..378ee779061c3 100644
2836 +--- a/drivers/net/netdevsim/fib.c
2837 ++++ b/drivers/net/netdevsim/fib.c
2838 +@@ -623,14 +623,14 @@ static int nsim_fib6_rt_append(struct nsim_fib_data *data,
2839 + if (err)
2840 + goto err_fib6_rt_nh_del;
2841 +
2842 +- fib6_event->rt_arr[i]->trap = true;
2843 ++ WRITE_ONCE(fib6_event->rt_arr[i]->trap, true);
2844 + }
2845 +
2846 + return 0;
2847 +
2848 + err_fib6_rt_nh_del:
2849 + for (i--; i >= 0; i--) {
2850 +- fib6_event->rt_arr[i]->trap = false;
2851 ++ WRITE_ONCE(fib6_event->rt_arr[i]->trap, false);
2852 + nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
2853 + }
2854 + return err;
2855 +diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c
2856 +index b7a5ae20edd53..68ee434f9dea3 100644
2857 +--- a/drivers/net/phy/mediatek-ge.c
2858 ++++ b/drivers/net/phy/mediatek-ge.c
2859 +@@ -55,9 +55,6 @@ static int mt7530_phy_config_init(struct phy_device *phydev)
2860 +
2861 + static int mt7531_phy_config_init(struct phy_device *phydev)
2862 + {
2863 +- if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
2864 +- return -EINVAL;
2865 +-
2866 + mtk_gephy_config_init(phydev);
2867 +
2868 + /* PHY link down power saving enable */
2869 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2870 +index 33ada2c59952e..0c7f02ca6822b 100644
2871 +--- a/drivers/net/usb/qmi_wwan.c
2872 ++++ b/drivers/net/usb/qmi_wwan.c
2873 +@@ -1395,6 +1395,8 @@ static const struct usb_device_id products[] = {
2874 + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
2875 + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
2876 + {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
2877 ++ {QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support*/
2878 ++ {QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */
2879 + {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
2880 + {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
2881 + {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
2882 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
2883 +index 0eb13e5df5177..d99140960a820 100644
2884 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
2885 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
2886 +@@ -693,7 +693,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
2887 + {
2888 + struct brcmf_fw_item *first = &req->items[0];
2889 + struct brcmf_fw *fwctx;
2890 +- char *alt_path;
2891 ++ char *alt_path = NULL;
2892 + int ret;
2893 +
2894 + brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
2895 +@@ -712,7 +712,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
2896 + fwctx->done = fw_cb;
2897 +
2898 + /* First try alternative board-specific path if any */
2899 +- alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type);
2900 ++ if (fwctx->req->board_type)
2901 ++ alt_path = brcm_alt_fw_path(first->path,
2902 ++ fwctx->req->board_type);
2903 + if (alt_path) {
2904 + ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
2905 + fwctx->dev, GFP_KERNEL, fwctx,
2906 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
2907 +index 1efac0b2a94d7..9e00d1d7e1468 100644
2908 +--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
2909 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
2910 +@@ -1,7 +1,7 @@
2911 + // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2912 + /*
2913 + * Copyright (C) 2017 Intel Deutschland GmbH
2914 +- * Copyright (C) 2019-2021 Intel Corporation
2915 ++ * Copyright (C) 2019-2022 Intel Corporation
2916 + */
2917 + #include <linux/uuid.h>
2918 + #include "iwl-drv.h"
2919 +@@ -814,10 +814,11 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
2920 + * only one using version 36, so skip this version entirely.
2921 + */
2922 + return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
2923 +- IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
2924 +- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
2925 +- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
2926 +- CSR_HW_REV_TYPE_7265D));
2927 ++ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
2928 ++ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
2929 ++ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
2930 ++ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
2931 ++ CSR_HW_REV_TYPE_7265D));
2932 + }
2933 + IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
2934 +
2935 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
2936 +index 845a09d0dabaf..c8dff76ac03c1 100644
2937 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
2938 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
2939 +@@ -1,6 +1,6 @@
2940 + /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2941 + /*
2942 +- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
2943 ++ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
2944 + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
2945 + * Copyright (C) 2016 Intel Deutschland GmbH
2946 + */
2947 +@@ -319,6 +319,7 @@ enum {
2948 + #define CSR_HW_REV_TYPE_2x00 (0x0000100)
2949 + #define CSR_HW_REV_TYPE_105 (0x0000110)
2950 + #define CSR_HW_REV_TYPE_135 (0x0000120)
2951 ++#define CSR_HW_REV_TYPE_3160 (0x0000164)
2952 + #define CSR_HW_REV_TYPE_7265D (0x0000210)
2953 + #define CSR_HW_REV_TYPE_NONE (0x00001F0)
2954 + #define CSR_HW_REV_TYPE_QNJ (0x0000360)
2955 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
2956 +index b7f7b9c5b670c..524b0ad873578 100644
2957 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
2958 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
2959 +@@ -1614,6 +1614,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
2960 + out_unbind:
2961 + complete(&drv->request_firmware_complete);
2962 + device_release_driver(drv->trans->dev);
2963 ++ /* drv has just been freed by the release */
2964 ++ failure = false;
2965 + free:
2966 + if (failure)
2967 + iwl_dealloc_ucode(drv);
2968 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2969 +index 74404c96063bc..bcc032c815dcb 100644
2970 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2971 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2972 +@@ -1572,7 +1572,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
2973 + ret = iwl_mvm_sar_init(mvm);
2974 + if (ret == 0)
2975 + ret = iwl_mvm_sar_geo_init(mvm);
2976 +- else if (ret < 0)
2977 ++ if (ret < 0)
2978 + goto error;
2979 +
2980 + iwl_mvm_tas_init(mvm);
2981 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
2982 +index bf0c32a74ca47..a9c19be29e92e 100644
2983 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
2984 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
2985 +@@ -408,8 +408,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
2986 + /* This may fail if AMT took ownership of the device */
2987 + if (iwl_pcie_prepare_card_hw(trans)) {
2988 + IWL_WARN(trans, "Exit HW not ready\n");
2989 +- ret = -EIO;
2990 +- goto out;
2991 ++ return -EIO;
2992 + }
2993 +
2994 + iwl_enable_rfkill_int(trans);
2995 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
2996 +index f252680f18e88..02da9cc8646cf 100644
2997 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
2998 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
2999 +@@ -1273,8 +1273,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
3000 + /* This may fail if AMT took ownership of the device */
3001 + if (iwl_pcie_prepare_card_hw(trans)) {
3002 + IWL_WARN(trans, "Exit HW not ready\n");
3003 +- ret = -EIO;
3004 +- goto out;
3005 ++ return -EIO;
3006 + }
3007 +
3008 + iwl_enable_rfkill_int(trans);
3009 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
3010 +index f8dd664b2eda5..a480e1af48e8e 100644
3011 +--- a/drivers/nvme/host/core.c
3012 ++++ b/drivers/nvme/host/core.c
3013 +@@ -131,7 +131,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
3014 + if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
3015 + return;
3016 +
3017 +- blk_set_queue_dying(ns->queue);
3018 ++ blk_mark_disk_dead(ns->disk);
3019 + blk_mq_unquiesce_queue(ns->queue);
3020 +
3021 + set_capacity_and_notify(ns->disk, 0);
3022 +@@ -4187,7 +4187,14 @@ static void nvme_async_event_work(struct work_struct *work)
3023 + container_of(work, struct nvme_ctrl, async_event_work);
3024 +
3025 + nvme_aen_uevent(ctrl);
3026 +- ctrl->ops->submit_async_event(ctrl);
3027 ++
3028 ++ /*
3029 ++ * The transport drivers must guarantee AER submission here is safe by
3030 ++ * flushing ctrl async_event_work after changing the controller state
3031 ++ * from LIVE and before freeing the admin queue.
3032 ++ */
3033 ++ if (ctrl->state == NVME_CTRL_LIVE)
3034 ++ ctrl->ops->submit_async_event(ctrl);
3035 + }
3036 +
3037 + static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
3038 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
3039 +index 2f76969408b27..727520c397109 100644
3040 +--- a/drivers/nvme/host/multipath.c
3041 ++++ b/drivers/nvme/host/multipath.c
3042 +@@ -792,7 +792,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
3043 + {
3044 + if (!head->disk)
3045 + return;
3046 +- blk_set_queue_dying(head->disk->queue);
3047 ++ blk_mark_disk_dead(head->disk);
3048 + /* make sure all pending bios are cleaned up */
3049 + kblockd_schedule_work(&head->requeue_work);
3050 + flush_work(&head->requeue_work);
3051 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
3052 +index 0498801542eb6..d51f52e296f50 100644
3053 +--- a/drivers/nvme/host/rdma.c
3054 ++++ b/drivers/nvme/host/rdma.c
3055 +@@ -1192,6 +1192,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
3056 + struct nvme_rdma_ctrl, err_work);
3057 +
3058 + nvme_stop_keep_alive(&ctrl->ctrl);
3059 ++ flush_work(&ctrl->ctrl.async_event_work);
3060 + nvme_rdma_teardown_io_queues(ctrl, false);
3061 + nvme_start_queues(&ctrl->ctrl);
3062 + nvme_rdma_teardown_admin_queue(ctrl, false);
3063 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
3064 +index efa9037da53c9..ef65d24639c44 100644
3065 +--- a/drivers/nvme/host/tcp.c
3066 ++++ b/drivers/nvme/host/tcp.c
3067 +@@ -2105,6 +2105,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
3068 + struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
3069 +
3070 + nvme_stop_keep_alive(ctrl);
3071 ++ flush_work(&ctrl->async_event_work);
3072 + nvme_tcp_teardown_io_queues(ctrl, false);
3073 + /* unquiesce to fail fast pending requests */
3074 + nvme_start_queues(ctrl);
3075 +diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
3076 +index 059566f544291..9be007c9420f9 100644
3077 +--- a/drivers/parisc/ccio-dma.c
3078 ++++ b/drivers/parisc/ccio-dma.c
3079 +@@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
3080 + ioc->usg_calls++;
3081 + #endif
3082 +
3083 +- while(sg_dma_len(sglist) && nents--) {
3084 ++ while (nents && sg_dma_len(sglist)) {
3085 +
3086 + #ifdef CCIO_COLLECT_STATS
3087 + ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
3088 +@@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
3089 + ccio_unmap_page(dev, sg_dma_address(sglist),
3090 + sg_dma_len(sglist), direction, 0);
3091 + ++sglist;
3092 ++ nents--;
3093 + }
3094 +
3095 + DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
3096 +diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
3097 +index e60690d38d677..374b9199878d4 100644
3098 +--- a/drivers/parisc/sba_iommu.c
3099 ++++ b/drivers/parisc/sba_iommu.c
3100 +@@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
3101 + spin_unlock_irqrestore(&ioc->res_lock, flags);
3102 + #endif
3103 +
3104 +- while (sg_dma_len(sglist) && nents--) {
3105 ++ while (nents && sg_dma_len(sglist)) {
3106 +
3107 + sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
3108 + direction, 0);
3109 +@@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
3110 + ioc->usingle_calls--; /* kluge since call is unmap_sg() */
3111 + #endif
3112 + ++sglist;
3113 ++ nents--;
3114 + }
3115 +
3116 + DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
3117 +diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
3118 +index 67c46e52c0dc3..9dd4502d32a41 100644
3119 +--- a/drivers/pci/controller/pci-hyperv.c
3120 ++++ b/drivers/pci/controller/pci-hyperv.c
3121 +@@ -1899,8 +1899,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
3122 + if (!hv_dev)
3123 + continue;
3124 +
3125 +- if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
3126 +- set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
3127 ++ if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
3128 ++ hv_dev->desc.virtual_numa_node < num_possible_nodes())
3129 ++ /*
3130 ++ * The kernel may boot with some NUMA nodes offline
3131 ++ * (e.g. in a KDUMP kernel) or with NUMA disabled via
3132 ++ * "numa=off". In those cases, adjust the host provided
3133 ++ * NUMA node to a valid NUMA node used by the kernel.
3134 ++ */
3135 ++ set_dev_node(&dev->dev,
3136 ++ numa_map_to_online_node(
3137 ++ hv_dev->desc.virtual_numa_node));
3138 +
3139 + put_pcichild(hv_dev);
3140 + }
3141 +diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
3142 +index 116fb23aebd99..0f1deb6e0eabf 100644
3143 +--- a/drivers/phy/broadcom/phy-brcm-usb.c
3144 ++++ b/drivers/phy/broadcom/phy-brcm-usb.c
3145 +@@ -18,6 +18,7 @@
3146 + #include <linux/soc/brcmstb/brcmstb.h>
3147 + #include <dt-bindings/phy/phy.h>
3148 + #include <linux/mfd/syscon.h>
3149 ++#include <linux/suspend.h>
3150 +
3151 + #include "phy-brcm-usb-init.h"
3152 +
3153 +@@ -70,12 +71,35 @@ struct brcm_usb_phy_data {
3154 + int init_count;
3155 + int wake_irq;
3156 + struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX];
3157 ++ struct notifier_block pm_notifier;
3158 ++ bool pm_active;
3159 + };
3160 +
3161 + static s8 *node_reg_names[BRCM_REGS_MAX] = {
3162 + "crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
3163 + };
3164 +
3165 ++static int brcm_pm_notifier(struct notifier_block *notifier,
3166 ++ unsigned long pm_event,
3167 ++ void *unused)
3168 ++{
3169 ++ struct brcm_usb_phy_data *priv =
3170 ++ container_of(notifier, struct brcm_usb_phy_data, pm_notifier);
3171 ++
3172 ++ switch (pm_event) {
3173 ++ case PM_HIBERNATION_PREPARE:
3174 ++ case PM_SUSPEND_PREPARE:
3175 ++ priv->pm_active = true;
3176 ++ break;
3177 ++ case PM_POST_RESTORE:
3178 ++ case PM_POST_HIBERNATION:
3179 ++ case PM_POST_SUSPEND:
3180 ++ priv->pm_active = false;
3181 ++ break;
3182 ++ }
3183 ++ return NOTIFY_DONE;
3184 ++}
3185 ++
3186 + static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
3187 + {
3188 + struct phy *gphy = dev_id;
3189 +@@ -91,6 +115,9 @@ static int brcm_usb_phy_init(struct phy *gphy)
3190 + struct brcm_usb_phy_data *priv =
3191 + container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
3192 +
3193 ++ if (priv->pm_active)
3194 ++ return 0;
3195 ++
3196 + /*
3197 + * Use a lock to make sure a second caller waits until
3198 + * the base phy is inited before using it.
3199 +@@ -120,6 +147,9 @@ static int brcm_usb_phy_exit(struct phy *gphy)
3200 + struct brcm_usb_phy_data *priv =
3201 + container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
3202 +
3203 ++ if (priv->pm_active)
3204 ++ return 0;
3205 ++
3206 + dev_dbg(&gphy->dev, "EXIT\n");
3207 + if (phy->id == BRCM_USB_PHY_2_0)
3208 + brcm_usb_uninit_eohci(&priv->ini);
3209 +@@ -488,6 +518,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
3210 + if (err)
3211 + return err;
3212 +
3213 ++ priv->pm_notifier.notifier_call = brcm_pm_notifier;
3214 ++ register_pm_notifier(&priv->pm_notifier);
3215 ++
3216 + mutex_init(&priv->mutex);
3217 +
3218 + /* make sure invert settings are correct */
3219 +@@ -528,7 +561,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
3220 +
3221 + static int brcm_usb_phy_remove(struct platform_device *pdev)
3222 + {
3223 ++ struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev);
3224 ++
3225 + sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group);
3226 ++ unregister_pm_notifier(&priv->pm_notifier);
3227 +
3228 + return 0;
3229 + }
3230 +@@ -539,6 +575,7 @@ static int brcm_usb_phy_suspend(struct device *dev)
3231 + struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
3232 +
3233 + if (priv->init_count) {
3234 ++ dev_dbg(dev, "SUSPEND\n");
3235 + priv->ini.wake_enabled = device_may_wakeup(dev);
3236 + if (priv->phys[BRCM_USB_PHY_3_0].inited)
3237 + brcm_usb_uninit_xhci(&priv->ini);
3238 +@@ -578,6 +615,7 @@ static int brcm_usb_phy_resume(struct device *dev)
3239 + * Uninitialize anything that wasn't previously initialized.
3240 + */
3241 + if (priv->init_count) {
3242 ++ dev_dbg(dev, "RESUME\n");
3243 + if (priv->wake_irq >= 0)
3244 + disable_irq_wake(priv->wake_irq);
3245 + brcm_usb_init_common(&priv->ini);
3246 +diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
3247 +index 98a942c607a67..db39b0c4649a2 100644
3248 +--- a/drivers/phy/mediatek/phy-mtk-tphy.c
3249 ++++ b/drivers/phy/mediatek/phy-mtk-tphy.c
3250 +@@ -1125,7 +1125,7 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc
3251 + /* no efuse, ignore it */
3252 + if (!instance->efuse_intr &&
3253 + !instance->efuse_rx_imp &&
3254 +- !instance->efuse_rx_imp) {
3255 ++ !instance->efuse_tx_imp) {
3256 + dev_warn(dev, "no u3 intr efuse, but dts enable it\n");
3257 + instance->efuse_sw_en = 0;
3258 + break;
3259 +diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
3260 +index c9c5efc927311..5973a279e6b8c 100644
3261 +--- a/drivers/pinctrl/bcm/Kconfig
3262 ++++ b/drivers/pinctrl/bcm/Kconfig
3263 +@@ -35,6 +35,7 @@ config PINCTRL_BCM63XX
3264 + select PINCONF
3265 + select GENERIC_PINCONF
3266 + select GPIOLIB
3267 ++ select REGMAP
3268 + select GPIO_REGMAP
3269 +
3270 + config PINCTRL_BCM6318
3271 +diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
3272 +index c9a85eb2e8600..e8424e70d81d2 100644
3273 +--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
3274 ++++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
3275 +@@ -596,7 +596,10 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
3276 + return ret;
3277 + }
3278 +
3279 +-static DEFINE_MUTEX(punit_misc_dev_lock);
3280 ++/* Lock to prevent module registration when already opened by user space */
3281 ++static DEFINE_MUTEX(punit_misc_dev_open_lock);
3282 ++/* Lock to allow one shared misc device for all ISST interfaces */
3283 ++static DEFINE_MUTEX(punit_misc_dev_reg_lock);
3284 + static int misc_usage_count;
3285 + static int misc_device_ret;
3286 + static int misc_device_open;
3287 +@@ -606,7 +609,7 @@ static int isst_if_open(struct inode *inode, struct file *file)
3288 + int i, ret = 0;
3289 +
3290 + /* Fail open, if a module is going away */
3291 +- mutex_lock(&punit_misc_dev_lock);
3292 ++ mutex_lock(&punit_misc_dev_open_lock);
3293 + for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
3294 + struct isst_if_cmd_cb *cb = &punit_callbacks[i];
3295 +
3296 +@@ -628,7 +631,7 @@ static int isst_if_open(struct inode *inode, struct file *file)
3297 + } else {
3298 + misc_device_open++;
3299 + }
3300 +- mutex_unlock(&punit_misc_dev_lock);
3301 ++ mutex_unlock(&punit_misc_dev_open_lock);
3302 +
3303 + return ret;
3304 + }
3305 +@@ -637,7 +640,7 @@ static int isst_if_relase(struct inode *inode, struct file *f)
3306 + {
3307 + int i;
3308 +
3309 +- mutex_lock(&punit_misc_dev_lock);
3310 ++ mutex_lock(&punit_misc_dev_open_lock);
3311 + misc_device_open--;
3312 + for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
3313 + struct isst_if_cmd_cb *cb = &punit_callbacks[i];
3314 +@@ -645,7 +648,7 @@ static int isst_if_relase(struct inode *inode, struct file *f)
3315 + if (cb->registered)
3316 + module_put(cb->owner);
3317 + }
3318 +- mutex_unlock(&punit_misc_dev_lock);
3319 ++ mutex_unlock(&punit_misc_dev_open_lock);
3320 +
3321 + return 0;
3322 + }
3323 +@@ -662,6 +665,43 @@ static struct miscdevice isst_if_char_driver = {
3324 + .fops = &isst_if_char_driver_ops,
3325 + };
3326 +
3327 ++static int isst_misc_reg(void)
3328 ++{
3329 ++ mutex_lock(&punit_misc_dev_reg_lock);
3330 ++ if (misc_device_ret)
3331 ++ goto unlock_exit;
3332 ++
3333 ++ if (!misc_usage_count) {
3334 ++ misc_device_ret = isst_if_cpu_info_init();
3335 ++ if (misc_device_ret)
3336 ++ goto unlock_exit;
3337 ++
3338 ++ misc_device_ret = misc_register(&isst_if_char_driver);
3339 ++ if (misc_device_ret) {
3340 ++ isst_if_cpu_info_exit();
3341 ++ goto unlock_exit;
3342 ++ }
3343 ++ }
3344 ++ misc_usage_count++;
3345 ++
3346 ++unlock_exit:
3347 ++ mutex_unlock(&punit_misc_dev_reg_lock);
3348 ++
3349 ++ return misc_device_ret;
3350 ++}
3351 ++
3352 ++static void isst_misc_unreg(void)
3353 ++{
3354 ++ mutex_lock(&punit_misc_dev_reg_lock);
3355 ++ if (misc_usage_count)
3356 ++ misc_usage_count--;
3357 ++ if (!misc_usage_count && !misc_device_ret) {
3358 ++ misc_deregister(&isst_if_char_driver);
3359 ++ isst_if_cpu_info_exit();
3360 ++ }
3361 ++ mutex_unlock(&punit_misc_dev_reg_lock);
3362 ++}
3363 ++
3364 + /**
3365 + * isst_if_cdev_register() - Register callback for IOCTL
3366 + * @device_type: The device type this callback handles.
3367 +@@ -679,38 +719,31 @@ static struct miscdevice isst_if_char_driver = {
3368 + */
3369 + int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
3370 + {
3371 +- if (misc_device_ret)
3372 +- return misc_device_ret;
3373 ++ int ret;
3374 +
3375 + if (device_type >= ISST_IF_DEV_MAX)
3376 + return -EINVAL;
3377 +
3378 +- mutex_lock(&punit_misc_dev_lock);
3379 ++ mutex_lock(&punit_misc_dev_open_lock);
3380 ++ /* Device is already open; don't add new callbacks */
3381 + if (misc_device_open) {
3382 +- mutex_unlock(&punit_misc_dev_lock);
3383 ++ mutex_unlock(&punit_misc_dev_open_lock);
3384 + return -EAGAIN;
3385 + }
3386 +- if (!misc_usage_count) {
3387 +- int ret;
3388 +-
3389 +- misc_device_ret = misc_register(&isst_if_char_driver);
3390 +- if (misc_device_ret)
3391 +- goto unlock_exit;
3392 +-
3393 +- ret = isst_if_cpu_info_init();
3394 +- if (ret) {
3395 +- misc_deregister(&isst_if_char_driver);
3396 +- misc_device_ret = ret;
3397 +- goto unlock_exit;
3398 +- }
3399 +- }
3400 + memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
3401 + punit_callbacks[device_type].registered = 1;
3402 +- misc_usage_count++;
3403 +-unlock_exit:
3404 +- mutex_unlock(&punit_misc_dev_lock);
3405 ++ mutex_unlock(&punit_misc_dev_open_lock);
3406 +
3407 +- return misc_device_ret;
3408 ++ ret = isst_misc_reg();
3409 ++ if (ret) {
3410 ++ /*
3411 ++ * No need for the mutex: the misc device registration failed,
3412 ++ * so no one can open the device yet. Hence no contention.
3413 ++ */
3414 ++ punit_callbacks[device_type].registered = 0;
3415 ++ return ret;
3416 ++ }
3417 ++ return 0;
3418 + }
3419 + EXPORT_SYMBOL_GPL(isst_if_cdev_register);
3420 +
3421 +@@ -725,16 +758,12 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register);
3422 + */
3423 + void isst_if_cdev_unregister(int device_type)
3424 + {
3425 +- mutex_lock(&punit_misc_dev_lock);
3426 +- misc_usage_count--;
3427 ++ isst_misc_unreg();
3428 ++ mutex_lock(&punit_misc_dev_open_lock);
3429 + punit_callbacks[device_type].registered = 0;
3430 + if (device_type == ISST_IF_DEV_MBOX)
3431 + isst_delete_hash();
3432 +- if (!misc_usage_count && !misc_device_ret) {
3433 +- misc_deregister(&isst_if_char_driver);
3434 +- isst_if_cpu_info_exit();
3435 +- }
3436 +- mutex_unlock(&punit_misc_dev_lock);
3437 ++ mutex_unlock(&punit_misc_dev_open_lock);
3438 + }
3439 + EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
3440 +
3441 +diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
3442 +index 033f797861d8a..c608078538a79 100644
3443 +--- a/drivers/platform/x86/touchscreen_dmi.c
3444 ++++ b/drivers/platform/x86/touchscreen_dmi.c
3445 +@@ -773,6 +773,21 @@ static const struct ts_dmi_data predia_basic_data = {
3446 + .properties = predia_basic_props,
3447 + };
3448 +
3449 ++static const struct property_entry rwc_nanote_p8_props[] = {
3450 ++ PROPERTY_ENTRY_U32("touchscreen-min-y", 46),
3451 ++ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
3452 ++ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
3453 ++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
3454 ++ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"),
3455 ++ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
3456 ++ { }
3457 ++};
3458 ++
3459 ++static const struct ts_dmi_data rwc_nanote_p8_data = {
3460 ++ .acpi_name = "MSSL1680:00",
3461 ++ .properties = rwc_nanote_p8_props,
3462 ++};
3463 ++
3464 + static const struct property_entry schneider_sct101ctm_props[] = {
3465 + PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
3466 + PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
3467 +@@ -1379,6 +1394,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
3468 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
3469 + },
3470 + },
3471 ++ {
3472 ++ /* RWC NANOTE P8 */
3473 ++ .driver_data = (void *)&rwc_nanote_p8_data,
3474 ++ .matches = {
3475 ++ DMI_MATCH(DMI_BOARD_VENDOR, "Default string"),
3476 ++ DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"),
3477 ++ DMI_MATCH(DMI_PRODUCT_SKU, "0001")
3478 ++ },
3479 ++ },
3480 + {
3481 + /* Schneider SCT101CTM */
3482 + .driver_data = (void *)&schneider_sct101ctm_data,
3483 +diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
3484 +index f66ba64080a31..1044832b60549 100644
3485 +--- a/drivers/scsi/lpfc/lpfc.h
3486 ++++ b/drivers/scsi/lpfc/lpfc.h
3487 +@@ -593,6 +593,7 @@ struct lpfc_vport {
3488 + #define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
3489 + #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
3490 + #define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
3491 ++#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */
3492 + #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
3493 + #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
3494 + #define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
3495 +diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
3496 +index 632b9cdabd14e..9f3f7805f1f95 100644
3497 +--- a/drivers/scsi/lpfc/lpfc_attr.c
3498 ++++ b/drivers/scsi/lpfc/lpfc_attr.c
3499 +@@ -1315,6 +1315,9 @@ lpfc_issue_lip(struct Scsi_Host *shost)
3500 + pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
3501 + pmboxq->u.mb.mbxOwner = OWN_HOST;
3502 +
3503 ++ if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
3504 ++ vport->fc_flag &= ~FC_PT2PT_NO_NVME;
3505 ++
3506 + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
3507 +
3508 + if ((mbxstatus == MBX_SUCCESS) &&
3509 +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
3510 +index f08ab8269f441..886006ad12a29 100644
3511 +--- a/drivers/scsi/lpfc/lpfc_els.c
3512 ++++ b/drivers/scsi/lpfc/lpfc_els.c
3513 +@@ -1072,7 +1072,8 @@ stop_rr_fcf_flogi:
3514 +
3515 + /* FLOGI failed, so there is no fabric */
3516 + spin_lock_irq(shost->host_lock);
3517 +- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3518 ++ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
3519 ++ FC_PT2PT_NO_NVME);
3520 + spin_unlock_irq(shost->host_lock);
3521 +
3522 + /* If private loop, then allow max outstanding els to be
3523 +@@ -4587,6 +4588,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3524 + /* Added for Vendor specific support
3525 + * Just keep retrying for these Rsn / Exp codes
3526 + */
3527 ++ if ((vport->fc_flag & FC_PT2PT) &&
3528 ++ cmd == ELS_CMD_NVMEPRLI) {
3529 ++ switch (stat.un.b.lsRjtRsnCode) {
3530 ++ case LSRJT_UNABLE_TPC:
3531 ++ case LSRJT_INVALID_CMD:
3532 ++ case LSRJT_LOGICAL_ERR:
3533 ++ case LSRJT_CMD_UNSUPPORTED:
3534 ++ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
3535 ++ "0168 NVME PRLI LS_RJT "
3536 ++ "reason %x port doesn't "
3537 ++ "support NVME, disabling NVME\n",
3538 ++ stat.un.b.lsRjtRsnCode);
3539 ++ retry = 0;
3540 ++ vport->fc_flag |= FC_PT2PT_NO_NVME;
3541 ++ goto out_retry;
3542 ++ }
3543 ++ }
3544 + switch (stat.un.b.lsRjtRsnCode) {
3545 + case LSRJT_UNABLE_TPC:
3546 + /* The driver has a VALID PLOGI but the rport has
3547 +diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
3548 +index 7d717a4ac14d1..fdf5e777bf113 100644
3549 +--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
3550 ++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
3551 +@@ -1961,8 +1961,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
3552 + * is configured try it.
3553 + */
3554 + ndlp->nlp_fc4_type |= NLP_FC4_FCP;
3555 +- if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3556 +- (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
3557 ++ if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
3558 ++ (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
3559 ++ vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
3560 + ndlp->nlp_fc4_type |= NLP_FC4_NVME;
3561 + /* We need to update the localport also */
3562 + lpfc_nvme_update_localport(vport);
3563 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
3564 +index 2978c61dc5863..68d8e55c1205c 100644
3565 +--- a/drivers/scsi/lpfc/lpfc_sli.c
3566 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
3567 +@@ -8147,6 +8147,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
3568 + struct lpfc_vport *vport = phba->pport;
3569 + struct lpfc_dmabuf *mp;
3570 + struct lpfc_rqb *rqbp;
3571 ++ u32 flg;
3572 +
3573 + /* Perform a PCI function reset to start from clean */
3574 + rc = lpfc_pci_function_reset(phba);
3575 +@@ -8160,7 +8161,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
3576 + else {
3577 + spin_lock_irq(&phba->hbalock);
3578 + phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
3579 ++ flg = phba->sli.sli_flag;
3580 + spin_unlock_irq(&phba->hbalock);
3581 ++ /* Allow a little time after setting SLI_ACTIVE for any polled
3582 ++ * MBX commands to complete via BSG.
3583 ++ */
3584 ++ for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
3585 ++ msleep(20);
3586 ++ spin_lock_irq(&phba->hbalock);
3587 ++ flg = phba->sli.sli_flag;
3588 ++ spin_unlock_irq(&phba->hbalock);
3589 ++ }
3590 + }
3591 +
3592 + lpfc_sli4_dip(phba);
3593 +@@ -9744,7 +9755,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
3594 + "(%d):2541 Mailbox command x%x "
3595 + "(x%x/x%x) failure: "
3596 + "mqe_sta: x%x mcqe_sta: x%x/x%x "
3597 +- "Data: x%x x%x\n,",
3598 ++ "Data: x%x x%x\n",
3599 + mboxq->vport ? mboxq->vport->vpi : 0,
3600 + mboxq->u.mb.mbxCommand,
3601 + lpfc_sli_config_mbox_subsys_get(phba,
3602 +@@ -9778,7 +9789,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
3603 + "(%d):2597 Sync Mailbox command "
3604 + "x%x (x%x/x%x) failure: "
3605 + "mqe_sta: x%x mcqe_sta: x%x/x%x "
3606 +- "Data: x%x x%x\n,",
3607 ++ "Data: x%x x%x\n",
3608 + mboxq->vport ? mboxq->vport->vpi : 0,
3609 + mboxq->u.mb.mbxCommand,
3610 + lpfc_sli_config_mbox_subsys_get(phba,
3611 +diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
3612 +index 880e1f356defc..5e6b23da4157c 100644
3613 +--- a/drivers/scsi/pm8001/pm8001_hwi.c
3614 ++++ b/drivers/scsi/pm8001/pm8001_hwi.c
3615 +@@ -2695,7 +2695,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
3616 + u32 tag = le32_to_cpu(psataPayload->tag);
3617 + u32 port_id = le32_to_cpu(psataPayload->port_id);
3618 + u32 dev_id = le32_to_cpu(psataPayload->device_id);
3619 +- unsigned long flags;
3620 +
3621 + ccb = &pm8001_ha->ccb_info[tag];
3622 +
3623 +@@ -2735,8 +2734,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
3624 + ts->resp = SAS_TASK_COMPLETE;
3625 + ts->stat = SAS_DATA_OVERRUN;
3626 + ts->residual = 0;
3627 +- if (pm8001_dev)
3628 +- atomic_dec(&pm8001_dev->running_req);
3629 + break;
3630 + case IO_XFER_ERROR_BREAK:
3631 + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
3632 +@@ -2778,7 +2775,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
3633 + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
3634 + ts->resp = SAS_TASK_COMPLETE;
3635 + ts->stat = SAS_QUEUE_FULL;
3636 +- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
3637 + return;
3638 + }
3639 + break;
3640 +@@ -2864,20 +2860,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
3641 + ts->stat = SAS_OPEN_TO;
3642 + break;
3643 + }
3644 +- spin_lock_irqsave(&t->task_state_lock, flags);
3645 +- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
3646 +- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
3647 +- t->task_state_flags |= SAS_TASK_STATE_DONE;
3648 +- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
3649 +- spin_unlock_irqrestore(&t->task_state_lock, flags);
3650 +- pm8001_dbg(pm8001_ha, FAIL,
3651 +- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
3652 +- t, event, ts->resp, ts->stat);
3653 +- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3654 +- } else {
3655 +- spin_unlock_irqrestore(&t->task_state_lock, flags);
3656 +- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
3657 +- }
3658 + }
3659 +
3660 + /*See the comments for mpi_ssp_completion */
3661 +diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
3662 +index 32e60f0c3b148..491cecbbe1aa7 100644
3663 +--- a/drivers/scsi/pm8001/pm8001_sas.c
3664 ++++ b/drivers/scsi/pm8001/pm8001_sas.c
3665 +@@ -753,8 +753,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
3666 + res = -TMF_RESP_FUNC_FAILED;
3667 + /* Even TMF timed out, return direct. */
3668 + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
3669 ++ struct pm8001_ccb_info *ccb = task->lldd_task;
3670 ++
3671 + pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
3672 + tmf->tmf);
3673 ++
3674 ++ if (ccb)
3675 ++ ccb->task = NULL;
3676 + goto ex_err;
3677 + }
3678 +
3679 +diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
3680 +index ed13e0e044b74..3056f3615ab8a 100644
3681 +--- a/drivers/scsi/pm8001/pm80xx_hwi.c
3682 ++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
3683 +@@ -2184,9 +2184,9 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
3684 + pm8001_dbg(pm8001_ha, FAIL,
3685 + "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
3686 + t, status, ts->resp, ts->stat);
3687 ++ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3688 + if (t->slow_task)
3689 + complete(&t->slow_task->completion);
3690 +- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3691 + } else {
3692 + spin_unlock_irqrestore(&t->task_state_lock, flags);
3693 + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3694 +@@ -2801,9 +2801,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
3695 + pm8001_dbg(pm8001_ha, FAIL,
3696 + "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
3697 + t, status, ts->resp, ts->stat);
3698 ++ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3699 + if (t->slow_task)
3700 + complete(&t->slow_task->completion);
3701 +- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3702 + } else {
3703 + spin_unlock_irqrestore(&t->task_state_lock, flags);
3704 + spin_unlock_irqrestore(&circularQ->oq_lock,
3705 +@@ -2828,7 +2828,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
3706 + u32 tag = le32_to_cpu(psataPayload->tag);
3707 + u32 port_id = le32_to_cpu(psataPayload->port_id);
3708 + u32 dev_id = le32_to_cpu(psataPayload->device_id);
3709 +- unsigned long flags;
3710 +
3711 + ccb = &pm8001_ha->ccb_info[tag];
3712 +
3713 +@@ -2866,8 +2865,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
3714 + ts->resp = SAS_TASK_COMPLETE;
3715 + ts->stat = SAS_DATA_OVERRUN;
3716 + ts->residual = 0;
3717 +- if (pm8001_dev)
3718 +- atomic_dec(&pm8001_dev->running_req);
3719 + break;
3720 + case IO_XFER_ERROR_BREAK:
3721 + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
3722 +@@ -2916,11 +2913,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
3723 + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
3724 + ts->resp = SAS_TASK_COMPLETE;
3725 + ts->stat = SAS_QUEUE_FULL;
3726 +- spin_unlock_irqrestore(&circularQ->oq_lock,
3727 +- circularQ->lock_flags);
3728 +- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
3729 +- spin_lock_irqsave(&circularQ->oq_lock,
3730 +- circularQ->lock_flags);
3731 + return;
3732 + }
3733 + break;
3734 +@@ -3020,24 +3012,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
3735 + ts->stat = SAS_OPEN_TO;
3736 + break;
3737 + }
3738 +- spin_lock_irqsave(&t->task_state_lock, flags);
3739 +- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
3740 +- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
3741 +- t->task_state_flags |= SAS_TASK_STATE_DONE;
3742 +- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
3743 +- spin_unlock_irqrestore(&t->task_state_lock, flags);
3744 +- pm8001_dbg(pm8001_ha, FAIL,
3745 +- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
3746 +- t, event, ts->resp, ts->stat);
3747 +- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3748 +- } else {
3749 +- spin_unlock_irqrestore(&t->task_state_lock, flags);
3750 +- spin_unlock_irqrestore(&circularQ->oq_lock,
3751 +- circularQ->lock_flags);
3752 +- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
3753 +- spin_lock_irqsave(&circularQ->oq_lock,
3754 +- circularQ->lock_flags);
3755 +- }
3756 + }
3757 +
3758 + /*See the comments for mpi_ssp_completion */
3759 +diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
3760 +index d01cd829ef975..df9ce6ed52bf9 100644
3761 +--- a/drivers/scsi/qedi/qedi_fw.c
3762 ++++ b/drivers/scsi/qedi/qedi_fw.c
3763 +@@ -772,11 +772,10 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
3764 + qedi_cmd->list_tmf_work = NULL;
3765 + }
3766 + }
3767 ++ spin_unlock_bh(&qedi_conn->tmf_work_lock);
3768 +
3769 +- if (!found) {
3770 +- spin_unlock_bh(&qedi_conn->tmf_work_lock);
3771 ++ if (!found)
3772 + goto check_cleanup_reqs;
3773 +- }
3774 +
3775 + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
3776 + "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
3777 +@@ -807,7 +806,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
3778 + qedi_cmd->state = CLEANUP_RECV;
3779 + unlock:
3780 + spin_unlock_bh(&conn->session->back_lock);
3781 +- spin_unlock_bh(&qedi_conn->tmf_work_lock);
3782 + wake_up_interruptible(&qedi_conn->wait_queue);
3783 + return;
3784 +
3785 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
3786 +index fe22191522a3b..7266880c70c21 100644
3787 +--- a/drivers/scsi/scsi_scan.c
3788 ++++ b/drivers/scsi/scsi_scan.c
3789 +@@ -198,6 +198,48 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
3790 + SCSI_TIMEOUT, 3, NULL);
3791 + }
3792 +
3793 ++static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
3794 ++ unsigned int depth)
3795 ++{
3796 ++ int new_shift = sbitmap_calculate_shift(depth);
3797 ++ bool need_alloc = !sdev->budget_map.map;
3798 ++ bool need_free = false;
3799 ++ int ret;
3800 ++ struct sbitmap sb_backup;
3801 ++
3802 ++ /*
3803 ++ * Reallocate if a new shift is calculated, which happens when a
3804 ++ * new default queue depth is set up after calling ->slave_configure
3805 ++ */
3806 ++ if (!need_alloc && new_shift != sdev->budget_map.shift)
3807 ++ need_alloc = need_free = true;
3808 ++
3809 ++ if (!need_alloc)
3810 ++ return 0;
3811 ++
3812 ++ /*
3813 ++ * The request queue has to be frozen while reallocating the budget
3814 ++ * map; the disk isn't added yet here, so freezing is pretty fast
3815 ++ */
3816 ++ if (need_free) {
3817 ++ blk_mq_freeze_queue(sdev->request_queue);
3818 ++ sb_backup = sdev->budget_map;
3819 ++ }
3820 ++ ret = sbitmap_init_node(&sdev->budget_map,
3821 ++ scsi_device_max_queue_depth(sdev),
3822 ++ new_shift, GFP_KERNEL,
3823 ++ sdev->request_queue->node, false, true);
3824 ++ if (need_free) {
3825 ++ if (ret)
3826 ++ sdev->budget_map = sb_backup;
3827 ++ else
3828 ++ sbitmap_free(&sb_backup);
3829 ++ ret = 0;
3830 ++ blk_mq_unfreeze_queue(sdev->request_queue);
3831 ++ }
3832 ++ return ret;
3833 ++}
3834 ++
3835 + /**
3836 + * scsi_alloc_sdev - allocate and setup a scsi_Device
3837 + * @starget: which target to allocate a &scsi_device for
3838 +@@ -291,11 +333,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
3839 + * default device queue depth to figure out sbitmap shift
3840 + * since we use this queue depth most of times.
3841 + */
3842 +- if (sbitmap_init_node(&sdev->budget_map,
3843 +- scsi_device_max_queue_depth(sdev),
3844 +- sbitmap_calculate_shift(depth),
3845 +- GFP_KERNEL, sdev->request_queue->node,
3846 +- false, true)) {
3847 ++ if (scsi_realloc_sdev_budget_map(sdev, depth)) {
3848 + put_device(&starget->dev);
3849 + kfree(sdev);
3850 + goto out;
3851 +@@ -1001,6 +1039,13 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
3852 + }
3853 + return SCSI_SCAN_NO_RESPONSE;
3854 + }
3855 ++
3856 ++ /*
3857 ++ * The queue_depth is often changed in ->slave_configure.
3858 ++ * Set up the budget map again since the memory consumption of
3859 ++ * the map depends on the actual queue depth.
3860 ++ */
3861 ++ scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
3862 + }
3863 +
3864 + if (sdev->scsi_level >= SCSI_3)
3865 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
3866 +index f489954e46321..cdec85bcc4ccc 100644
3867 +--- a/drivers/scsi/ufs/ufshcd.c
3868 ++++ b/drivers/scsi/ufs/ufshcd.c
3869 +@@ -125,8 +125,9 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
3870 + enum {
3871 + UFSHCD_MAX_CHANNEL = 0,
3872 + UFSHCD_MAX_ID = 1,
3873 +- UFSHCD_CMD_PER_LUN = 32,
3874 +- UFSHCD_CAN_QUEUE = 32,
3875 ++ UFSHCD_NUM_RESERVED = 1,
3876 ++ UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
3877 ++ UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
3878 + };
3879 +
3880 + /* UFSHCD error handling flags */
3881 +@@ -2185,6 +2186,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
3882 + hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
3883 + hba->nutmrs =
3884 + ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
3885 ++ hba->reserved_slot = hba->nutrs - 1;
3886 +
3887 + /* Read crypto capabilities */
3888 + err = ufshcd_hba_init_crypto_capabilities(hba);
3889 +@@ -2910,30 +2912,15 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3890 + static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3891 + enum dev_cmd_type cmd_type, int timeout)
3892 + {
3893 +- struct request_queue *q = hba->cmd_queue;
3894 + DECLARE_COMPLETION_ONSTACK(wait);
3895 +- struct request *req;
3896 ++ const u32 tag = hba->reserved_slot;
3897 + struct ufshcd_lrb *lrbp;
3898 + int err;
3899 +- int tag;
3900 +
3901 +- down_read(&hba->clk_scaling_lock);
3902 ++ /* Protects use of hba->reserved_slot. */
3903 ++ lockdep_assert_held(&hba->dev_cmd.lock);
3904 +
3905 +- /*
3906 +- * Get free slot, sleep if slots are unavailable.
3907 +- * Even though we use wait_event() which sleeps indefinitely,
3908 +- * the maximum wait time is bounded by SCSI request timeout.
3909 +- */
3910 +- req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
3911 +- if (IS_ERR(req)) {
3912 +- err = PTR_ERR(req);
3913 +- goto out_unlock;
3914 +- }
3915 +- tag = req->tag;
3916 +- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
3917 +- /* Set the timeout such that the SCSI error handler is not activated. */
3918 +- req->timeout = msecs_to_jiffies(2 * timeout);
3919 +- blk_mq_start_request(req);
3920 ++ down_read(&hba->clk_scaling_lock);
3921 +
3922 + lrbp = &hba->lrb[tag];
3923 + WARN_ON(lrbp->cmd);
3924 +@@ -2951,8 +2938,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3925 + (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
3926 +
3927 + out:
3928 +- blk_put_request(req);
3929 +-out_unlock:
3930 + up_read(&hba->clk_scaling_lock);
3931 + return err;
3932 + }
3933 +@@ -6640,28 +6625,16 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
3934 + enum dev_cmd_type cmd_type,
3935 + enum query_opcode desc_op)
3936 + {
3937 +- struct request_queue *q = hba->cmd_queue;
3938 + DECLARE_COMPLETION_ONSTACK(wait);
3939 +- struct request *req;
3940 ++ const u32 tag = hba->reserved_slot;
3941 + struct ufshcd_lrb *lrbp;
3942 + int err = 0;
3943 +- int tag;
3944 + u8 upiu_flags;
3945 +
3946 +- down_read(&hba->clk_scaling_lock);
3947 +-
3948 +- req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
3949 +- if (IS_ERR(req)) {
3950 +- err = PTR_ERR(req);
3951 +- goto out_unlock;
3952 +- }
3953 +- tag = req->tag;
3954 +- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
3955 ++ /* Protects use of hba->reserved_slot. */
3956 ++ lockdep_assert_held(&hba->dev_cmd.lock);
3957 +
3958 +- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
3959 +- err = -EBUSY;
3960 +- goto out;
3961 +- }
3962 ++ down_read(&hba->clk_scaling_lock);
3963 +
3964 + lrbp = &hba->lrb[tag];
3965 + WARN_ON(lrbp->cmd);
3966 +@@ -6730,9 +6703,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
3967 + ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
3968 + (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
3969 +
3970 +-out:
3971 +- blk_put_request(req);
3972 +-out_unlock:
3973 + up_read(&hba->clk_scaling_lock);
3974 + return err;
3975 + }
3976 +@@ -9423,8 +9393,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
3977 + /* Configure LRB */
3978 + ufshcd_host_memory_configure(hba);
3979 +
3980 +- host->can_queue = hba->nutrs;
3981 +- host->cmd_per_lun = hba->nutrs;
3982 ++ host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
3983 ++ host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
3984 + host->max_id = UFSHCD_MAX_ID;
3985 + host->max_lun = UFS_MAX_LUNS;
3986 + host->max_channel = UFSHCD_MAX_CHANNEL;
3987 +diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
3988 +index 07ada6676c3b4..d470a52ff24c3 100644
3989 +--- a/drivers/scsi/ufs/ufshcd.h
3990 ++++ b/drivers/scsi/ufs/ufshcd.h
3991 +@@ -725,6 +725,7 @@ struct ufs_hba_monitor {
3992 + * @capabilities: UFS Controller Capabilities
3993 + * @nutrs: Transfer Request Queue depth supported by controller
3994 + * @nutmrs: Task Management Queue depth supported by controller
3995 ++ * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
3996 + * @ufs_version: UFS Version to which controller complies
3997 + * @vops: pointer to variant specific operations
3998 + * @priv: pointer to variant specific private data
3999 +@@ -813,6 +814,7 @@ struct ufs_hba {
4000 + u32 capabilities;
4001 + int nutrs;
4002 + int nutmrs;
4003 ++ u32 reserved_slot;
4004 + u32 ufs_version;
4005 + const struct ufs_hba_variant_ops *vops;
4006 + struct ufs_hba_variant_params *vps;
4007 +diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
4008 +index 72771e018c42e..258894ed234b3 100644
4009 +--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
4010 ++++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
4011 +@@ -306,10 +306,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
4012 + }
4013 +
4014 + lpc_ctrl->clk = devm_clk_get(dev, NULL);
4015 +- if (IS_ERR(lpc_ctrl->clk)) {
4016 +- dev_err(dev, "couldn't get clock\n");
4017 +- return PTR_ERR(lpc_ctrl->clk);
4018 +- }
4019 ++ if (IS_ERR(lpc_ctrl->clk))
4020 ++ return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk),
4021 ++ "couldn't get clock\n");
4022 + rc = clk_prepare_enable(lpc_ctrl->clk);
4023 + if (rc) {
4024 + dev_err(dev, "couldn't enable clock\n");
4025 +diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
4026 +index 967f10b9582a8..ea9a53bdb4174 100644
4027 +--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
4028 ++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
4029 +@@ -1033,15 +1033,27 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
4030 +
4031 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
4032 +
4033 ++ rcu_read_lock();
4034 + service = handle_to_service(handle);
4035 +- if (WARN_ON(!service))
4036 ++ if (WARN_ON(!service)) {
4037 ++ rcu_read_unlock();
4038 + return VCHIQ_SUCCESS;
4039 ++ }
4040 +
4041 + user_service = (struct user_service *)service->base.userdata;
4042 + instance = user_service->instance;
4043 +
4044 +- if (!instance || instance->closing)
4045 ++ if (!instance || instance->closing) {
4046 ++ rcu_read_unlock();
4047 + return VCHIQ_SUCCESS;
4048 ++ }
4049 ++
4050 ++ /*
4051 ++ * Since we are hopping between different synchronization mechanisms,
4052 ++ * taking an extra reference results in a simpler implementation.
4053 ++ */
4054 ++ vchiq_service_get(service);
4055 ++ rcu_read_unlock();
4056 +
4057 + vchiq_log_trace(vchiq_arm_log_level,
4058 + "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
4059 +@@ -1074,6 +1086,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
4060 + NULL, user_service, bulk_userdata);
4061 + if (status != VCHIQ_SUCCESS) {
4062 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
4063 ++ vchiq_service_put(service);
4064 + return status;
4065 + }
4066 + }
4067 +@@ -1084,11 +1097,13 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
4068 + vchiq_log_info(vchiq_arm_log_level,
4069 + "%s interrupted", __func__);
4070 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
4071 ++ vchiq_service_put(service);
4072 + return VCHIQ_RETRY;
4073 + } else if (instance->closing) {
4074 + vchiq_log_info(vchiq_arm_log_level,
4075 + "%s closing", __func__);
4076 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
4077 ++ vchiq_service_put(service);
4078 + return VCHIQ_ERROR;
4079 + }
4080 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
4081 +@@ -1117,6 +1132,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
4082 + header = NULL;
4083 + }
4084 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
4085 ++ vchiq_service_put(service);
4086 +
4087 + if (skip_completion)
4088 + return VCHIQ_SUCCESS;
4089 +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
4090 +index 451e02cd06377..de5b45de50402 100644
4091 +--- a/drivers/tty/n_tty.c
4092 ++++ b/drivers/tty/n_tty.c
4093 +@@ -1963,7 +1963,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
4094 + return false;
4095 +
4096 + canon_head = smp_load_acquire(&ldata->canon_head);
4097 +- n = min(*nr + 1, canon_head - ldata->read_tail);
4098 ++ n = min(*nr, canon_head - ldata->read_tail);
4099 +
4100 + tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
4101 + size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
4102 +@@ -1985,10 +1985,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
4103 + n += N_TTY_BUF_SIZE;
4104 + c = n + found;
4105 +
4106 +- if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
4107 +- c = min(*nr, c);
4108 ++ if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
4109 + n = c;
4110 +- }
4111 +
4112 + n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
4113 + __func__, eol, found, n, c, tail, more);
4114 +diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
4115 +index 673cda3d011d0..948d0a1c6ae8e 100644
4116 +--- a/drivers/tty/serial/8250/8250_gsc.c
4117 ++++ b/drivers/tty/serial/8250/8250_gsc.c
4118 +@@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
4119 + unsigned long address;
4120 + int err;
4121 +
4122 +-#ifdef CONFIG_64BIT
4123 ++#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC)
4124 + if (!dev->irq && (dev->id.sversion == 0xad))
4125 + dev->irq = iosapic_serial_irq(dev);
4126 + #endif
4127 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
4128 +index d029be40ea6f0..bdbc310a8f8c5 100644
4129 +--- a/fs/btrfs/dev-replace.c
4130 ++++ b/fs/btrfs/dev-replace.c
4131 +@@ -325,7 +325,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
4132 + set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
4133 + device->fs_devices = fs_info->fs_devices;
4134 +
4135 +- ret = btrfs_get_dev_zone_info(device);
4136 ++ ret = btrfs_get_dev_zone_info(device, false);
4137 + if (ret)
4138 + goto error;
4139 +
4140 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
4141 +index e1a262120e021..2c3e106a02704 100644
4142 +--- a/fs/btrfs/disk-io.c
4143 ++++ b/fs/btrfs/disk-io.c
4144 +@@ -3565,6 +3565,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
4145 + goto fail_sysfs;
4146 + }
4147 +
4148 ++ btrfs_free_zone_cache(fs_info);
4149 ++
4150 + if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
4151 + !btrfs_check_rw_degradable(fs_info, NULL)) {
4152 + btrfs_warn(fs_info,
4153 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
4154 +index 72f9b865e8479..5612e8bf2ace4 100644
4155 +--- a/fs/btrfs/send.c
4156 ++++ b/fs/btrfs/send.c
4157 +@@ -4978,6 +4978,10 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
4158 + lock_page(page);
4159 + if (!PageUptodate(page)) {
4160 + unlock_page(page);
4161 ++ btrfs_err(fs_info,
4162 ++ "send: IO error at offset %llu for inode %llu root %llu",
4163 ++ page_offset(page), sctx->cur_ino,
4164 ++ sctx->send_root->root_key.objectid);
4165 + put_page(page);
4166 + ret = -EIO;
4167 + break;
4168 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
4169 +index c34efdc1ecddb..06a1a7c2254ce 100644
4170 +--- a/fs/btrfs/volumes.c
4171 ++++ b/fs/btrfs/volumes.c
4172 +@@ -2596,7 +2596,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
4173 + device->fs_info = fs_info;
4174 + device->bdev = bdev;
4175 +
4176 +- ret = btrfs_get_dev_zone_info(device);
4177 ++ ret = btrfs_get_dev_zone_info(device, false);
4178 + if (ret)
4179 + goto error_free_device;
4180 +
4181 +diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
4182 +index 5672c24a2d582..596b2148807d4 100644
4183 +--- a/fs/btrfs/zoned.c
4184 ++++ b/fs/btrfs/zoned.c
4185 +@@ -4,6 +4,7 @@
4186 + #include <linux/slab.h>
4187 + #include <linux/blkdev.h>
4188 + #include <linux/sched/mm.h>
4189 ++#include <linux/vmalloc.h>
4190 + #include "ctree.h"
4191 + #include "volumes.h"
4192 + #include "zoned.h"
4193 +@@ -195,6 +196,8 @@ static int emulate_report_zones(struct btrfs_device *device, u64 pos,
4194 + static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
4195 + struct blk_zone *zones, unsigned int *nr_zones)
4196 + {
4197 ++ struct btrfs_zoned_device_info *zinfo = device->zone_info;
4198 ++ u32 zno;
4199 + int ret;
4200 +
4201 + if (!*nr_zones)
4202 +@@ -206,6 +209,34 @@ static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
4203 + return 0;
4204 + }
4205 +
4206 ++ /* Check cache */
4207 ++ if (zinfo->zone_cache) {
4208 ++ unsigned int i;
4209 ++
4210 ++ ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
4211 ++ zno = pos >> zinfo->zone_size_shift;
4212 ++ /*
4213 ++ * We cannot report zones beyond the zone end, so it is OK to
4214 ++ * cap *nr_zones at the zone end.
4215 ++ */
4216 ++ *nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);
4217 ++
4218 ++ for (i = 0; i < *nr_zones; i++) {
4219 ++ struct blk_zone *zone_info;
4220 ++
4221 ++ zone_info = &zinfo->zone_cache[zno + i];
4222 ++ if (!zone_info->len)
4223 ++ break;
4224 ++ }
4225 ++
4226 ++ if (i == *nr_zones) {
4227 ++ /* Cache hit on all the zones */
4228 ++ memcpy(zones, zinfo->zone_cache + zno,
4229 ++ sizeof(*zinfo->zone_cache) * *nr_zones);
4230 ++ return 0;
4231 ++ }
4232 ++ }
4233 ++
4234 + ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
4235 + copy_zone_info_cb, zones);
4236 + if (ret < 0) {
4237 +@@ -219,6 +250,11 @@ static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
4238 + if (!ret)
4239 + return -EIO;
4240 +
4241 ++ /* Populate cache */
4242 ++ if (zinfo->zone_cache)
4243 ++ memcpy(zinfo->zone_cache + zno, zones,
4244 ++ sizeof(*zinfo->zone_cache) * *nr_zones);
4245 ++
4246 + return 0;
4247 + }
4248 +
4249 +@@ -282,7 +318,7 @@ int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
4250 + if (!device->bdev)
4251 + continue;
4252 +
4253 +- ret = btrfs_get_dev_zone_info(device);
4254 ++ ret = btrfs_get_dev_zone_info(device, true);
4255 + if (ret)
4256 + break;
4257 + }
4258 +@@ -291,7 +327,7 @@ int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
4259 + return ret;
4260 + }
4261 +
4262 +-int btrfs_get_dev_zone_info(struct btrfs_device *device)
4263 ++int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
4264 + {
4265 + struct btrfs_fs_info *fs_info = device->fs_info;
4266 + struct btrfs_zoned_device_info *zone_info = NULL;
4267 +@@ -318,6 +354,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
4268 + if (!zone_info)
4269 + return -ENOMEM;
4270 +
4271 ++ device->zone_info = zone_info;
4272 ++
4273 + if (!bdev_is_zoned(bdev)) {
4274 + if (!fs_info->zone_size) {
4275 + ret = calculate_emulated_zone_size(fs_info);
4276 +@@ -369,6 +407,23 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
4277 + goto out;
4278 + }
4279 +
4280 ++ /*
4281 ++ * Enable zone cache only for a zoned device. On a non-zoned device, we
4282 ++ * fill the zone info with emulated CONVENTIONAL zones, so no need to
4283 ++ * use the cache.
4284 ++ */
4285 ++ if (populate_cache && bdev_is_zoned(device->bdev)) {
4286 ++ zone_info->zone_cache = vzalloc(sizeof(struct blk_zone) *
4287 ++ zone_info->nr_zones);
4288 ++ if (!zone_info->zone_cache) {
4289 ++ btrfs_err_in_rcu(device->fs_info,
4290 ++ "zoned: failed to allocate zone cache for %s",
4291 ++ rcu_str_deref(device->name));
4292 ++ ret = -ENOMEM;
4293 ++ goto out;
4294 ++ }
4295 ++ }
4296 ++
4297 + /* Get zones type */
4298 + while (sector < nr_sectors) {
4299 + nr_zones = BTRFS_REPORT_NR_ZONES;
4300 +@@ -444,8 +499,6 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
4301 +
4302 + kfree(zones);
4303 +
4304 +- device->zone_info = zone_info;
4305 +-
4306 + switch (bdev_zoned_model(bdev)) {
4307 + case BLK_ZONED_HM:
4308 + model = "host-managed zoned";
4309 +@@ -478,10 +531,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
4310 + out:
4311 + kfree(zones);
4312 + out_free_zone_info:
4313 +- bitmap_free(zone_info->empty_zones);
4314 +- bitmap_free(zone_info->seq_zones);
4315 +- kfree(zone_info);
4316 +- device->zone_info = NULL;
4317 ++ btrfs_destroy_dev_zone_info(device);
4318 +
4319 + return ret;
4320 + }
4321 +@@ -495,6 +545,7 @@ void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
4322 +
4323 + bitmap_free(zone_info->seq_zones);
4324 + bitmap_free(zone_info->empty_zones);
4325 ++ vfree(zone_info->zone_cache);
4326 + kfree(zone_info);
4327 + device->zone_info = NULL;
4328 + }
4329 +@@ -1551,3 +1602,21 @@ void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
4330 + fs_info->data_reloc_bg = 0;
4331 + spin_unlock(&fs_info->relocation_bg_lock);
4332 + }
4333 ++
4334 ++void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
4335 ++{
4336 ++ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4337 ++ struct btrfs_device *device;
4338 ++
4339 ++ if (!btrfs_is_zoned(fs_info))
4340 ++ return;
4341 ++
4342 ++ mutex_lock(&fs_devices->device_list_mutex);
4343 ++ list_for_each_entry(device, &fs_devices->devices, dev_list) {
4344 ++ if (device->zone_info) {
4345 ++ vfree(device->zone_info->zone_cache);
4346 ++ device->zone_info->zone_cache = NULL;
4347 ++ }
4348 ++ }
4349 ++ mutex_unlock(&fs_devices->device_list_mutex);
4350 ++}
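
The zoned.c hunks above add a per-device cache of blk_zone records: btrfs_get_dev_zone_info() now vzalloc()s one slot per zone when asked to populate the cache, btrfs_get_dev_zones() serves a report from the cache when every requested slot is already filled (and fills the slots after a real device report otherwise), and btrfs_free_zone_cache() lets the caller drop the caches once they are no longer needed. A minimal userspace sketch of the same populate-on-miss pattern (the types and the report_zone() stub are illustrative, not kernel API):

    #include <stdint.h>
    #include <stdlib.h>

    struct zone { uint64_t start, len; };  /* stand-in for struct blk_zone */

    struct zone_cache {
        struct zone *zones;                /* one slot per zone; len == 0 means empty */
        uint32_t nr_zones;
    };

    /* Illustrative stand-in for blkdev_report_zones(): pretend to query the device. */
    static int report_zone(uint32_t zno, struct zone *out)
    {
        out->start = (uint64_t)zno << 28;  /* fake 256 MiB zones */
        out->len = 1ull << 28;
        return 0;
    }

    /* Serve from the cache when the slot is populated; otherwise query and fill it. */
    static int get_zone(struct zone_cache *zc, uint32_t zno, struct zone *out)
    {
        if (zno >= zc->nr_zones)
            return -1;
        if (zc->zones[zno].len) {          /* cache hit */
            *out = zc->zones[zno];
            return 0;
        }
        if (report_zone(zno, out))         /* cache miss: ask the device */
            return -1;
        zc->zones[zno] = *out;             /* populate for the next caller */
        return 0;
    }

    int main(void)
    {
        struct zone_cache zc = { calloc(1024, sizeof(struct zone)), 1024 };
        struct zone z;
        int ret = get_zone(&zc, 7, &z);    /* first call queries and fills the slot */

        ret |= get_zone(&zc, 7, &z);       /* second call is a pure cache hit */
        free(zc.zones);
        return ret;
    }
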
4351 +diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
4352 +index 70b3be517599f..813aa3cddc11f 100644
4353 +--- a/fs/btrfs/zoned.h
4354 ++++ b/fs/btrfs/zoned.h
4355 +@@ -25,6 +25,7 @@ struct btrfs_zoned_device_info {
4356 + u32 nr_zones;
4357 + unsigned long *seq_zones;
4358 + unsigned long *empty_zones;
4359 ++ struct blk_zone *zone_cache;
4360 + struct blk_zone sb_zones[2 * BTRFS_SUPER_MIRROR_MAX];
4361 + };
4362 +
4363 +@@ -32,7 +33,7 @@ struct btrfs_zoned_device_info {
4364 + int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
4365 + struct blk_zone *zone);
4366 + int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info);
4367 +-int btrfs_get_dev_zone_info(struct btrfs_device *device);
4368 ++int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache);
4369 + void btrfs_destroy_dev_zone_info(struct btrfs_device *device);
4370 + int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
4371 + int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info);
4372 +@@ -67,6 +68,7 @@ int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
4373 + struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
4374 + u64 logical, u64 length);
4375 + void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg);
4376 ++void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info);
4377 + #else /* CONFIG_BLK_DEV_ZONED */
4378 + static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
4379 + struct blk_zone *zone)
4380 +@@ -79,7 +81,8 @@ static inline int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_i
4381 + return 0;
4382 + }
4383 +
4384 +-static inline int btrfs_get_dev_zone_info(struct btrfs_device *device)
4385 ++static inline int btrfs_get_dev_zone_info(struct btrfs_device *device,
4386 ++ bool populate_cache)
4387 + {
4388 + return 0;
4389 + }
4390 +@@ -202,6 +205,7 @@ static inline struct btrfs_device *btrfs_zoned_get_device(
4391 +
4392 + static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { }
4393 +
4394 ++static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { }
4395 + #endif
4396 +
4397 + static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
4398 +diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
4399 +index 0a2542286552f..3b8ed36b37113 100644
4400 +--- a/fs/cifs/fs_context.c
4401 ++++ b/fs/cifs/fs_context.c
4402 +@@ -146,7 +146,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
4403 + fsparam_u32("echo_interval", Opt_echo_interval),
4404 + fsparam_u32("max_credits", Opt_max_credits),
4405 + fsparam_u32("handletimeout", Opt_handletimeout),
4406 +- fsparam_u32("snapshot", Opt_snapshot),
4407 ++ fsparam_u64("snapshot", Opt_snapshot),
4408 + fsparam_u32("max_channels", Opt_max_channels),
4409 +
4410 + /* Mount options which take string value */
4411 +@@ -1062,7 +1062,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
4412 + ctx->echo_interval = result.uint_32;
4413 + break;
4414 + case Opt_snapshot:
4415 +- ctx->snapshot_time = result.uint_32;
4416 ++ ctx->snapshot_time = result.uint_64;
4417 + break;
4418 + case Opt_max_credits:
4419 + if (result.uint_32 < 20 || result.uint_32 > 60000) {
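
The fs_context.c change above widens the cifs "snapshot" mount option from a 32-bit to a 64-bit parse; the option carries a 64-bit timestamp, so storing result.uint_32 into ctx->snapshot_time silently dropped the high half. A standalone demonstration of the truncation (the sample value is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* A made-up 64-bit snapshot timestamp. */
        uint64_t snapshot_time = 0x01d7c4f2a1b2c3d4ULL;
        uint32_t as_u32 = (uint32_t)snapshot_time;   /* the old fsparam_u32 path */

        printf("u64 value: %#018llx\n", (unsigned long long)snapshot_time);
        printf("u32 value: %#010x  (high 32 bits lost)\n", as_u32);
        return 0;
    }
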
4420 +diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
4421 +index 7d8b72d67c803..9d486fbbfbbde 100644
4422 +--- a/fs/cifs/xattr.c
4423 ++++ b/fs/cifs/xattr.c
4424 +@@ -175,11 +175,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
4425 + switch (handler->flags) {
4426 + case XATTR_CIFS_NTSD_FULL:
4427 + aclflags = (CIFS_ACL_OWNER |
4428 ++ CIFS_ACL_GROUP |
4429 + CIFS_ACL_DACL |
4430 + CIFS_ACL_SACL);
4431 + break;
4432 + case XATTR_CIFS_NTSD:
4433 + aclflags = (CIFS_ACL_OWNER |
4434 ++ CIFS_ACL_GROUP |
4435 + CIFS_ACL_DACL);
4436 + break;
4437 + case XATTR_CIFS_ACL:
4438 +diff --git a/fs/io_uring.c b/fs/io_uring.c
4439 +index 993913c585fbf..21fc8ce9405d3 100644
4440 +--- a/fs/io_uring.c
4441 ++++ b/fs/io_uring.c
4442 +@@ -8820,10 +8820,9 @@ static void io_mem_free(void *ptr)
4443 +
4444 + static void *io_mem_alloc(size_t size)
4445 + {
4446 +- gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
4447 +- __GFP_NORETRY | __GFP_ACCOUNT;
4448 ++ gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
4449 +
4450 +- return (void *) __get_free_pages(gfp_flags, get_order(size));
4451 ++ return (void *) __get_free_pages(gfp, get_order(size));
4452 + }
4453 +
4454 + static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
4455 +diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
4456 +index 70685cbbec8c0..192d8308afc27 100644
4457 +--- a/fs/ksmbd/smb2pdu.c
4458 ++++ b/fs/ksmbd/smb2pdu.c
4459 +@@ -3422,9 +3422,9 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
4460 + goto free_conv_name;
4461 + }
4462 +
4463 +- struct_sz = readdir_info_level_struct_sz(info_level);
4464 +- next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
4465 +- KSMBD_DIR_INFO_ALIGNMENT);
4466 ++ struct_sz = readdir_info_level_struct_sz(info_level) - 1 + conv_len;
4467 ++ next_entry_offset = ALIGN(struct_sz, KSMBD_DIR_INFO_ALIGNMENT);
4468 ++ d_info->last_entry_off_align = next_entry_offset - struct_sz;
4469 +
4470 + if (next_entry_offset > d_info->out_buf_len) {
4471 + d_info->out_buf_len = 0;
4472 +@@ -3976,6 +3976,7 @@ int smb2_query_dir(struct ksmbd_work *work)
4473 + ((struct file_directory_info *)
4474 + ((char *)rsp->Buffer + d_info.last_entry_offset))
4475 + ->NextEntryOffset = 0;
4476 ++ d_info.data_count -= d_info.last_entry_off_align;
4477 +
4478 + rsp->StructureSize = cpu_to_le16(9);
4479 + rsp->OutputBufferOffset = cpu_to_le16(72);
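
The smb2pdu.c hunks above align the true entry size (the fixed struct size minus one, plus the converted name length) and record how many pad bytes ALIGN() added, so that smb2_query_dir() can subtract that padding from data_count for the final entry, which carries no trailing pad on the wire. The arithmetic in isolation, with ALIGN() defined as in the kernel for power-of-two alignments:

    #include <stdio.h>
    #include <stddef.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))
    #define KSMBD_DIR_INFO_ALIGNMENT 8

    int main(void)
    {
        size_t struct_sz = 105;     /* illustrative: struct size - 1 + name bytes */
        size_t next_entry_offset = ALIGN(struct_sz, KSMBD_DIR_INFO_ALIGNMENT);
        size_t last_entry_off_align = next_entry_offset - struct_sz;

        /* For the last entry the pad bytes are never sent, so the response's
         * data_count must not include them. */
        printf("aligned=%zu pad=%zu\n", next_entry_offset, last_entry_off_align);
        return 0;
    }
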
4480 +diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
4481 +index 707490ab1f4c4..f2e7e3a654b34 100644
4482 +--- a/fs/ksmbd/smb_common.c
4483 ++++ b/fs/ksmbd/smb_common.c
4484 +@@ -308,14 +308,17 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
4485 + for (i = 0; i < 2; i++) {
4486 + struct kstat kstat;
4487 + struct ksmbd_kstat ksmbd_kstat;
4488 ++ struct dentry *dentry;
4489 +
4490 + if (!dir->dot_dotdot[i]) { /* fill dot entry info */
4491 + if (i == 0) {
4492 + d_info->name = ".";
4493 + d_info->name_len = 1;
4494 ++ dentry = dir->filp->f_path.dentry;
4495 + } else {
4496 + d_info->name = "..";
4497 + d_info->name_len = 2;
4498 ++ dentry = dir->filp->f_path.dentry->d_parent;
4499 + }
4500 +
4501 + if (!match_pattern(d_info->name, d_info->name_len,
4502 +@@ -327,7 +330,7 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
4503 + ksmbd_kstat.kstat = &kstat;
4504 + ksmbd_vfs_fill_dentry_attrs(work,
4505 + user_ns,
4506 +- dir->filp->f_path.dentry->d_parent,
4507 ++ dentry,
4508 + &ksmbd_kstat);
4509 + rc = fn(conn, info_level, d_info, &ksmbd_kstat);
4510 + if (rc)
4511 +diff --git a/fs/ksmbd/vfs.h b/fs/ksmbd/vfs.h
4512 +index b0d5b8feb4a36..432c947731779 100644
4513 +--- a/fs/ksmbd/vfs.h
4514 ++++ b/fs/ksmbd/vfs.h
4515 +@@ -86,6 +86,7 @@ struct ksmbd_dir_info {
4516 + int last_entry_offset;
4517 + bool hide_dot_file;
4518 + int flags;
4519 ++ int last_entry_off_align;
4520 + };
4521 +
4522 + struct ksmbd_readdir_data {
4523 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
4524 +index f6381c675cbe9..9adc6f57a0083 100644
4525 +--- a/fs/nfs/dir.c
4526 ++++ b/fs/nfs/dir.c
4527 +@@ -1987,14 +1987,14 @@ no_open:
4528 + if (!res) {
4529 + inode = d_inode(dentry);
4530 + if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
4531 +- !S_ISDIR(inode->i_mode))
4532 ++ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
4533 + res = ERR_PTR(-ENOTDIR);
4534 + else if (inode && S_ISREG(inode->i_mode))
4535 + res = ERR_PTR(-EOPENSTALE);
4536 + } else if (!IS_ERR(res)) {
4537 + inode = d_inode(res);
4538 + if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
4539 +- !S_ISDIR(inode->i_mode)) {
4540 ++ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
4541 + dput(res);
4542 + res = ERR_PTR(-ENOTDIR);
4543 + } else if (inode && S_ISREG(inode->i_mode)) {
4544 +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
4545 +index f9d3ad3acf114..410f87bc48cca 100644
4546 +--- a/fs/nfs/inode.c
4547 ++++ b/fs/nfs/inode.c
4548 +@@ -840,12 +840,9 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
4549 + }
4550 +
4551 + /* Flush out writes to the server in order to update c/mtime. */
4552 +- if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
4553 +- S_ISREG(inode->i_mode)) {
4554 +- err = filemap_write_and_wait(inode->i_mapping);
4555 +- if (err)
4556 +- goto out;
4557 +- }
4558 ++ if ((request_mask & (STATX_CTIME | STATX_MTIME)) &&
4559 ++ S_ISREG(inode->i_mode))
4560 ++ filemap_write_and_wait(inode->i_mapping);
4561 +
4562 + /*
4563 + * We may force a getattr if the user cares about atime.
4564 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4565 +index 389fa72d4ca98..53be03681f69e 100644
4566 +--- a/fs/nfs/nfs4proc.c
4567 ++++ b/fs/nfs/nfs4proc.c
4568 +@@ -1232,8 +1232,7 @@ nfs4_update_changeattr_locked(struct inode *inode,
4569 + NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
4570 + NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
4571 + NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
4572 +- NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR |
4573 +- NFS_INO_REVAL_PAGECACHE;
4574 ++ NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
4575 + nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
4576 + }
4577 + nfsi->attrtimeo_timestamp = jiffies;
4578 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
4579 +index cf25be3e03212..958fce7aee635 100644
4580 +--- a/fs/proc/task_mmu.c
4581 ++++ b/fs/proc/task_mmu.c
4582 +@@ -430,7 +430,8 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
4583 + }
4584 +
4585 + static void smaps_account(struct mem_size_stats *mss, struct page *page,
4586 +- bool compound, bool young, bool dirty, bool locked)
4587 ++ bool compound, bool young, bool dirty, bool locked,
4588 ++ bool migration)
4589 + {
4590 + int i, nr = compound ? compound_nr(page) : 1;
4591 + unsigned long size = nr * PAGE_SIZE;
4592 +@@ -457,8 +458,15 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
4593 + * page_count(page) == 1 guarantees the page is mapped exactly once.
4594 + * If any subpage of the compound page mapped with PTE it would elevate
4595 + * page_count().
4596 ++ *
4597 ++	 * page_mapcount() is called to get a snapshot of the mapcount.
4598 ++ * Without holding the page lock this snapshot can be slightly wrong as
4599 ++ * we cannot always read the mapcount atomically. It is not safe to
4600 ++ * call page_mapcount() even with PTL held if the page is not mapped,
4601 ++ * especially for migration entries. Treat regular migration entries
4602 ++ * as mapcount == 1.
4603 + */
4604 +- if (page_count(page) == 1) {
4605 ++ if ((page_count(page) == 1) || migration) {
4606 + smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
4607 + locked, true);
4608 + return;
4609 +@@ -495,6 +503,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
4610 + struct vm_area_struct *vma = walk->vma;
4611 + bool locked = !!(vma->vm_flags & VM_LOCKED);
4612 + struct page *page = NULL;
4613 ++ bool migration = false;
4614 +
4615 + if (pte_present(*pte)) {
4616 + page = vm_normal_page(vma, addr, *pte);
4617 +@@ -514,8 +523,11 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
4618 + } else {
4619 + mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
4620 + }
4621 +- } else if (is_pfn_swap_entry(swpent))
4622 ++ } else if (is_pfn_swap_entry(swpent)) {
4623 ++ if (is_migration_entry(swpent))
4624 ++ migration = true;
4625 + page = pfn_swap_entry_to_page(swpent);
4626 ++ }
4627 + } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
4628 + && pte_none(*pte))) {
4629 + page = xa_load(&vma->vm_file->f_mapping->i_pages,
4630 +@@ -528,7 +540,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
4631 + if (!page)
4632 + return;
4633 +
4634 +- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
4635 ++ smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
4636 ++ locked, migration);
4637 + }
4638 +
4639 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4640 +@@ -539,6 +552,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
4641 + struct vm_area_struct *vma = walk->vma;
4642 + bool locked = !!(vma->vm_flags & VM_LOCKED);
4643 + struct page *page = NULL;
4644 ++ bool migration = false;
4645 +
4646 + if (pmd_present(*pmd)) {
4647 + /* FOLL_DUMP will return -EFAULT on huge zero page */
4648 +@@ -546,8 +560,10 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
4649 + } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
4650 + swp_entry_t entry = pmd_to_swp_entry(*pmd);
4651 +
4652 +- if (is_migration_entry(entry))
4653 ++ if (is_migration_entry(entry)) {
4654 ++ migration = true;
4655 + page = pfn_swap_entry_to_page(entry);
4656 ++ }
4657 + }
4658 + if (IS_ERR_OR_NULL(page))
4659 + return;
4660 +@@ -559,7 +575,9 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
4661 + /* pass */;
4662 + else
4663 + mss->file_thp += HPAGE_PMD_SIZE;
4664 +- smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
4665 ++
4666 ++ smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
4667 ++ locked, migration);
4668 + }
4669 + #else
4670 + static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
4671 +@@ -1363,6 +1381,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
4672 + {
4673 + u64 frame = 0, flags = 0;
4674 + struct page *page = NULL;
4675 ++ bool migration = false;
4676 +
4677 + if (pte_present(pte)) {
4678 + if (pm->show_pfn)
4679 +@@ -1384,13 +1403,14 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
4680 + frame = swp_type(entry) |
4681 + (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
4682 + flags |= PM_SWAP;
4683 ++ migration = is_migration_entry(entry);
4684 + if (is_pfn_swap_entry(entry))
4685 + page = pfn_swap_entry_to_page(entry);
4686 + }
4687 +
4688 + if (page && !PageAnon(page))
4689 + flags |= PM_FILE;
4690 +- if (page && page_mapcount(page) == 1)
4691 ++ if (page && !migration && page_mapcount(page) == 1)
4692 + flags |= PM_MMAP_EXCLUSIVE;
4693 + if (vma->vm_flags & VM_SOFTDIRTY)
4694 + flags |= PM_SOFT_DIRTY;
4695 +@@ -1406,8 +1426,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
4696 + spinlock_t *ptl;
4697 + pte_t *pte, *orig_pte;
4698 + int err = 0;
4699 +-
4700 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4701 ++ bool migration = false;
4702 ++
4703 + ptl = pmd_trans_huge_lock(pmdp, vma);
4704 + if (ptl) {
4705 + u64 flags = 0, frame = 0;
4706 +@@ -1446,11 +1467,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
4707 + if (pmd_swp_uffd_wp(pmd))
4708 + flags |= PM_UFFD_WP;
4709 + VM_BUG_ON(!is_pmd_migration_entry(pmd));
4710 ++ migration = is_migration_entry(entry);
4711 + page = pfn_swap_entry_to_page(entry);
4712 + }
4713 + #endif
4714 +
4715 +- if (page && page_mapcount(page) == 1)
4716 ++ if (page && !migration && page_mapcount(page) == 1)
4717 + flags |= PM_MMAP_EXCLUSIVE;
4718 +
4719 + for (; addr != end; addr += PAGE_SIZE) {
4720 +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
4721 +index 22d904bde6ab9..a74aef99bd3d6 100644
4722 +--- a/fs/quota/dquot.c
4723 ++++ b/fs/quota/dquot.c
4724 +@@ -690,9 +690,14 @@ int dquot_quota_sync(struct super_block *sb, int type)
4725 + /* This is not very clever (and fast) but currently I don't know about
4726 + * any other simple way of getting quota data to disk and we must get
4727 + * them there for userspace to be visible... */
4728 +- if (sb->s_op->sync_fs)
4729 +- sb->s_op->sync_fs(sb, 1);
4730 +- sync_blockdev(sb->s_bdev);
4731 ++ if (sb->s_op->sync_fs) {
4732 ++ ret = sb->s_op->sync_fs(sb, 1);
4733 ++ if (ret)
4734 ++ return ret;
4735 ++ }
4736 ++ ret = sync_blockdev(sb->s_bdev);
4737 ++ if (ret)
4738 ++ return ret;
4739 +
4740 + /*
4741 + * Now when everything is written we can discard the pagecache so
4742 +diff --git a/fs/super.c b/fs/super.c
4743 +index a1f82dfd1b39a..87379bb1f7a30 100644
4744 +--- a/fs/super.c
4745 ++++ b/fs/super.c
4746 +@@ -1616,11 +1616,9 @@ static void lockdep_sb_freeze_acquire(struct super_block *sb)
4747 + percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
4748 + }
4749 +
4750 +-static void sb_freeze_unlock(struct super_block *sb)
4751 ++static void sb_freeze_unlock(struct super_block *sb, int level)
4752 + {
4753 +- int level;
4754 +-
4755 +- for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
4756 ++ for (level--; level >= 0; level--)
4757 + percpu_up_write(sb->s_writers.rw_sem + level);
4758 + }
4759 +
4760 +@@ -1691,7 +1689,14 @@ int freeze_super(struct super_block *sb)
4761 + sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
4762 +
4763 + /* All writers are done so after syncing there won't be dirty data */
4764 +- sync_filesystem(sb);
4765 ++ ret = sync_filesystem(sb);
4766 ++ if (ret) {
4767 ++ sb->s_writers.frozen = SB_UNFROZEN;
4768 ++ sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
4769 ++ wake_up(&sb->s_writers.wait_unfrozen);
4770 ++ deactivate_locked_super(sb);
4771 ++ return ret;
4772 ++ }
4773 +
4774 + /* Now wait for internal filesystem counter */
4775 + sb->s_writers.frozen = SB_FREEZE_FS;
4776 +@@ -1703,7 +1708,7 @@ int freeze_super(struct super_block *sb)
4777 + printk(KERN_ERR
4778 + "VFS:Filesystem freeze failed\n");
4779 + sb->s_writers.frozen = SB_UNFROZEN;
4780 +- sb_freeze_unlock(sb);
4781 ++ sb_freeze_unlock(sb, SB_FREEZE_FS);
4782 + wake_up(&sb->s_writers.wait_unfrozen);
4783 + deactivate_locked_super(sb);
4784 + return ret;
4785 +@@ -1748,7 +1753,7 @@ static int thaw_super_locked(struct super_block *sb)
4786 + }
4787 +
4788 + sb->s_writers.frozen = SB_UNFROZEN;
4789 +- sb_freeze_unlock(sb);
4790 ++ sb_freeze_unlock(sb, SB_FREEZE_FS);
4791 + out:
4792 + wake_up(&sb->s_writers.wait_unfrozen);
4793 + deactivate_locked_super(sb);
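
sb_freeze_unlock() now takes the highest freeze level that was actually acquired and unwinds only the levels below it, which is what lets freeze_super() bail out cleanly when sync_filesystem() fails after SB_FREEZE_PAGEFAULT is held but before SB_FREEZE_FS is taken. The unwind idiom on its own, a sketch with pthread rwlocks standing in for the per-level s_writers semaphores:

    #include <pthread.h>

    #define SB_FREEZE_LEVELS 3

    static pthread_rwlock_t sem[SB_FREEZE_LEVELS] = {
        PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
        PTHREAD_RWLOCK_INITIALIZER,
    };

    /* Release every level strictly below 'level', as sb_freeze_unlock() does. */
    static void freeze_unlock(int level)
    {
        for (level--; level >= 0; level--)
            pthread_rwlock_unlock(&sem[level]);
    }

    int main(void)
    {
        int level;

        for (level = 0; level < 2; level++)    /* only the first two levels taken */
            pthread_rwlock_wrlock(&sem[level]);

        freeze_unlock(2);                      /* unwinds levels 1 and 0, not 2 */
        return 0;
    }
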
4794 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
4795 +index be8e7a55d803c..413c0148c0ce5 100644
4796 +--- a/include/linux/blkdev.h
4797 ++++ b/include/linux/blkdev.h
4798 +@@ -1184,7 +1184,8 @@ extern void blk_dump_rq_flags(struct request *, char *);
4799 +
4800 + bool __must_check blk_get_queue(struct request_queue *);
4801 + extern void blk_put_queue(struct request_queue *);
4802 +-extern void blk_set_queue_dying(struct request_queue *);
4803 ++
4804 ++void blk_mark_disk_dead(struct gendisk *disk);
4805 +
4806 + #ifdef CONFIG_BLOCK
4807 + /*
4808 +diff --git a/include/linux/compiler.h b/include/linux/compiler.h
4809 +index 429dcebe2b992..0f7fd205ab7ea 100644
4810 +--- a/include/linux/compiler.h
4811 ++++ b/include/linux/compiler.h
4812 +@@ -117,14 +117,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
4813 + */
4814 + #define __stringify_label(n) #n
4815 +
4816 +-#define __annotate_reachable(c) ({ \
4817 +- asm volatile(__stringify_label(c) ":\n\t" \
4818 +- ".pushsection .discard.reachable\n\t" \
4819 +- ".long " __stringify_label(c) "b - .\n\t" \
4820 +- ".popsection\n\t" : : "i" (c)); \
4821 +-})
4822 +-#define annotate_reachable() __annotate_reachable(__COUNTER__)
4823 +-
4824 + #define __annotate_unreachable(c) ({ \
4825 + asm volatile(__stringify_label(c) ":\n\t" \
4826 + ".pushsection .discard.unreachable\n\t" \
4827 +@@ -133,24 +125,21 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
4828 + })
4829 + #define annotate_unreachable() __annotate_unreachable(__COUNTER__)
4830 +
4831 +-#define ASM_UNREACHABLE \
4832 +- "999:\n\t" \
4833 +- ".pushsection .discard.unreachable\n\t" \
4834 +- ".long 999b - .\n\t" \
4835 ++#define ASM_REACHABLE \
4836 ++ "998:\n\t" \
4837 ++ ".pushsection .discard.reachable\n\t" \
4838 ++ ".long 998b - .\n\t" \
4839 + ".popsection\n\t"
4840 +
4841 + /* Annotate a C jump table to allow objtool to follow the code flow */
4842 + #define __annotate_jump_table __section(".rodata..c_jump_table")
4843 +
4844 + #else
4845 +-#define annotate_reachable()
4846 + #define annotate_unreachable()
4847 ++# define ASM_REACHABLE
4848 + #define __annotate_jump_table
4849 + #endif
4850 +
4851 +-#ifndef ASM_UNREACHABLE
4852 +-# define ASM_UNREACHABLE
4853 +-#endif
4854 + #ifndef unreachable
4855 + # define unreachable() do { \
4856 + annotate_unreachable(); \
4857 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
4858 +index fba54624191a2..62ff094677762 100644
4859 +--- a/include/linux/netdevice.h
4860 ++++ b/include/linux/netdevice.h
4861 +@@ -2149,7 +2149,7 @@ struct net_device {
4862 + struct netdev_queue *_tx ____cacheline_aligned_in_smp;
4863 + unsigned int num_tx_queues;
4864 + unsigned int real_num_tx_queues;
4865 +- struct Qdisc *qdisc;
4866 ++ struct Qdisc __rcu *qdisc;
4867 + unsigned int tx_queue_len;
4868 + spinlock_t tx_global_lock;
4869 +
4870 +diff --git a/include/linux/sched.h b/include/linux/sched.h
4871 +index c1a927ddec646..76e8695506465 100644
4872 +--- a/include/linux/sched.h
4873 ++++ b/include/linux/sched.h
4874 +@@ -1675,7 +1675,6 @@ extern struct pid *cad_pid;
4875 + #define PF_MEMALLOC 0x00000800 /* Allocating memory */
4876 + #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
4877 + #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
4878 +-#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
4879 + #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
4880 + #define PF_FROZEN 0x00010000 /* Frozen for system suspend */
4881 + #define PF_KSWAPD 0x00020000 /* I am kswapd */
4882 +diff --git a/include/net/addrconf.h b/include/net/addrconf.h
4883 +index e7ce719838b5e..59940e230b782 100644
4884 +--- a/include/net/addrconf.h
4885 ++++ b/include/net/addrconf.h
4886 +@@ -109,8 +109,6 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
4887 + int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
4888 + const struct in6_addr *daddr, unsigned int srcprefs,
4889 + struct in6_addr *saddr);
4890 +-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
4891 +- u32 banned_flags);
4892 + int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
4893 + u32 banned_flags);
4894 + bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
4895 +diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
4896 +index 38785d48baff9..184105d682942 100644
4897 +--- a/include/net/bond_3ad.h
4898 ++++ b/include/net/bond_3ad.h
4899 +@@ -262,7 +262,7 @@ struct ad_system {
4900 + struct ad_bond_info {
4901 + struct ad_system system; /* 802.3ad system structure */
4902 + struct bond_3ad_stats stats;
4903 +- u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
4904 ++	atomic_t agg_select_timer;	/* Timer to select aggregator after all adapters' handshakes */
4905 + u16 aggregator_identifier;
4906 + };
4907 +
4908 +diff --git a/include/net/dsa.h b/include/net/dsa.h
4909 +index d784e76113b8d..49e5ece9361c6 100644
4910 +--- a/include/net/dsa.h
4911 ++++ b/include/net/dsa.h
4912 +@@ -1056,6 +1056,7 @@ void dsa_unregister_switch(struct dsa_switch *ds);
4913 + int dsa_register_switch(struct dsa_switch *ds);
4914 + void dsa_switch_shutdown(struct dsa_switch *ds);
4915 + struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
4916 ++void dsa_flush_workqueue(void);
4917 + #ifdef CONFIG_PM_SLEEP
4918 + int dsa_switch_suspend(struct dsa_switch *ds);
4919 + int dsa_switch_resume(struct dsa_switch *ds);
4920 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
4921 +index c85b040728d7e..bbb27639f2933 100644
4922 +--- a/include/net/ip6_fib.h
4923 ++++ b/include/net/ip6_fib.h
4924 +@@ -189,14 +189,16 @@ struct fib6_info {
4925 + u32 fib6_metric;
4926 + u8 fib6_protocol;
4927 + u8 fib6_type;
4928 ++
4929 ++ u8 offload;
4930 ++ u8 trap;
4931 ++ u8 offload_failed;
4932 ++
4933 + u8 should_flush:1,
4934 + dst_nocount:1,
4935 + dst_nopolicy:1,
4936 + fib6_destroying:1,
4937 +- offload:1,
4938 +- trap:1,
4939 +- offload_failed:1,
4940 +- unused:1;
4941 ++ unused:4;
4942 +
4943 + struct rcu_head rcu;
4944 + struct nexthop *nh;
4945 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
4946 +index f2d0ecc257bb2..359540dfc0339 100644
4947 +--- a/include/net/ipv6.h
4948 ++++ b/include/net/ipv6.h
4949 +@@ -391,17 +391,20 @@ static inline void txopt_put(struct ipv6_txoptions *opt)
4950 + kfree_rcu(opt, rcu);
4951 + }
4952 +
4953 ++#if IS_ENABLED(CONFIG_IPV6)
4954 + struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);
4955 +
4956 + extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
4957 + static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
4958 + __be32 label)
4959 + {
4960 +- if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key))
4961 ++ if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) &&
4962 ++ READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl))
4963 + return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);
4964 +
4965 + return NULL;
4966 + }
4967 ++#endif
4968 +
4969 + struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
4970 + struct ip6_flowlabel *fl,
4971 +diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
4972 +index a4b5503803165..6bd7e5a85ce76 100644
4973 +--- a/include/net/netns/ipv6.h
4974 ++++ b/include/net/netns/ipv6.h
4975 +@@ -77,9 +77,10 @@ struct netns_ipv6 {
4976 + spinlock_t fib6_gc_lock;
4977 + unsigned int ip6_rt_gc_expire;
4978 + unsigned long ip6_rt_last_gc;
4979 ++ unsigned char flowlabel_has_excl;
4980 + #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4981 +- unsigned int fib6_rules_require_fldissect;
4982 + bool fib6_has_custom_rules;
4983 ++ unsigned int fib6_rules_require_fldissect;
4984 + #ifdef CONFIG_IPV6_SUBTREES
4985 + unsigned int fib6_routes_require_src;
4986 + #endif
4987 +diff --git a/kernel/async.c b/kernel/async.c
4988 +index b8d7a663497f9..b2c4ba5686ee4 100644
4989 +--- a/kernel/async.c
4990 ++++ b/kernel/async.c
4991 +@@ -205,9 +205,6 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
4992 + atomic_inc(&entry_count);
4993 + spin_unlock_irqrestore(&async_lock, flags);
4994 +
4995 +- /* mark that this task has queued an async job, used by module init */
4996 +- current->flags |= PF_USED_ASYNC;
4997 +-
4998 + /* schedule for execution */
4999 + queue_work_node(node, system_unbound_wq, &entry->work);
5000 +
5001 +diff --git a/kernel/cred.c b/kernel/cred.c
5002 +index 1ae0b4948a5a8..933155c969227 100644
5003 +--- a/kernel/cred.c
5004 ++++ b/kernel/cred.c
5005 +@@ -665,26 +665,20 @@ EXPORT_SYMBOL(cred_fscmp);
5006 +
5007 + int set_cred_ucounts(struct cred *new)
5008 + {
5009 +- struct task_struct *task = current;
5010 +- const struct cred *old = task->real_cred;
5011 + struct ucounts *new_ucounts, *old_ucounts = new->ucounts;
5012 +
5013 +- if (new->user == old->user && new->user_ns == old->user_ns)
5014 +- return 0;
5015 +-
5016 + /*
5017 + * This optimization is needed because alloc_ucounts() uses locks
5018 + * for table lookups.
5019 + */
5020 +- if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
5021 ++ if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->uid))
5022 + return 0;
5023 +
5024 +- if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid)))
5025 ++ if (!(new_ucounts = alloc_ucounts(new->user_ns, new->uid)))
5026 + return -EAGAIN;
5027 +
5028 + new->ucounts = new_ucounts;
5029 +- if (old_ucounts)
5030 +- put_ucounts(old_ucounts);
5031 ++ put_ucounts(old_ucounts);
5032 +
5033 + return 0;
5034 + }
5035 +diff --git a/kernel/fork.c b/kernel/fork.c
5036 +index 10885c649ca42..28aee1a8875bc 100644
5037 +--- a/kernel/fork.c
5038 ++++ b/kernel/fork.c
5039 +@@ -2055,18 +2055,18 @@ static __latent_entropy struct task_struct *copy_process(
5040 + #ifdef CONFIG_PROVE_LOCKING
5041 + DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
5042 + #endif
5043 ++ retval = copy_creds(p, clone_flags);
5044 ++ if (retval < 0)
5045 ++ goto bad_fork_free;
5046 ++
5047 + retval = -EAGAIN;
5048 + if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
5049 + if (p->real_cred->user != INIT_USER &&
5050 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
5051 +- goto bad_fork_free;
5052 ++ goto bad_fork_cleanup_count;
5053 + }
5054 + current->flags &= ~PF_NPROC_EXCEEDED;
5055 +
5056 +- retval = copy_creds(p, clone_flags);
5057 +- if (retval < 0)
5058 +- goto bad_fork_free;
5059 +-
5060 + /*
5061 + * If multiple threads are within copy_process(), then this check
5062 + * triggers too late. This doesn't hurt, the check is only there
5063 +@@ -2353,10 +2353,6 @@ static __latent_entropy struct task_struct *copy_process(
5064 + goto bad_fork_cancel_cgroup;
5065 + }
5066 +
5067 +- /* past the last point of failure */
5068 +- if (pidfile)
5069 +- fd_install(pidfd, pidfile);
5070 +-
5071 + init_task_pid_links(p);
5072 + if (likely(p->pid)) {
5073 + ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
5074 +@@ -2405,6 +2401,9 @@ static __latent_entropy struct task_struct *copy_process(
5075 + syscall_tracepoint_update(p);
5076 + write_unlock_irq(&tasklist_lock);
5077 +
5078 ++ if (pidfile)
5079 ++ fd_install(pidfd, pidfile);
5080 ++
5081 + proc_fork_connector(p);
5082 + sched_post_fork(p, args);
5083 + cgroup_post_fork(p, args);
5084 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
5085 +index d624231eab2bb..92127296cf2bf 100644
5086 +--- a/kernel/locking/lockdep.c
5087 ++++ b/kernel/locking/lockdep.c
5088 +@@ -3450,7 +3450,7 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
5089 + u16 chain_hlock = chain_hlocks[chain->base + i];
5090 + unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
5091 +
5092 +- return lock_classes + class_idx - 1;
5093 ++ return lock_classes + class_idx;
5094 + }
5095 +
5096 + /*
5097 +@@ -3518,7 +3518,7 @@ static void print_chain_keys_chain(struct lock_chain *chain)
5098 + hlock_id = chain_hlocks[chain->base + i];
5099 + chain_key = print_chain_key_iteration(hlock_id, chain_key);
5100 +
5101 +- print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id) - 1);
5102 ++ print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
5103 + printk("\n");
5104 + }
5105 + }
5106 +diff --git a/kernel/module.c b/kernel/module.c
5107 +index 5c26a76e800b5..83991c2d5af9e 100644
5108 +--- a/kernel/module.c
5109 ++++ b/kernel/module.c
5110 +@@ -3683,12 +3683,6 @@ static noinline int do_init_module(struct module *mod)
5111 + }
5112 + freeinit->module_init = mod->init_layout.base;
5113 +
5114 +- /*
5115 +- * We want to find out whether @mod uses async during init. Clear
5116 +- * PF_USED_ASYNC. async_schedule*() will set it.
5117 +- */
5118 +- current->flags &= ~PF_USED_ASYNC;
5119 +-
5120 + do_mod_ctors(mod);
5121 + /* Start the module */
5122 + if (mod->init != NULL)
5123 +@@ -3714,22 +3708,13 @@ static noinline int do_init_module(struct module *mod)
5124 +
5125 + /*
5126 + * We need to finish all async code before the module init sequence
5127 +- * is done. This has potential to deadlock. For example, a newly
5128 +- * detected block device can trigger request_module() of the
5129 +- * default iosched from async probing task. Once userland helper
5130 +- * reaches here, async_synchronize_full() will wait on the async
5131 +- * task waiting on request_module() and deadlock.
5132 +- *
5133 +- * This deadlock is avoided by perfomring async_synchronize_full()
5134 +- * iff module init queued any async jobs. This isn't a full
5135 +- * solution as it will deadlock the same if module loading from
5136 +- * async jobs nests more than once; however, due to the various
5137 +- * constraints, this hack seems to be the best option for now.
5138 +- * Please refer to the following thread for details.
5139 ++ * is done. This has potential to deadlock if synchronous module
5140 ++ * loading is requested from async (which is not allowed!).
5141 + *
5142 +- * http://thread.gmane.org/gmane.linux.kernel/1420814
5143 ++ * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
5144 ++ * request_module() from async workers") for more details.
5145 + */
5146 +- if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
5147 ++ if (!mod->async_probe_requested)
5148 + async_synchronize_full();
5149 +
5150 + ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
5151 +diff --git a/kernel/stackleak.c b/kernel/stackleak.c
5152 +index ce161a8e8d975..dd07239ddff9f 100644
5153 +--- a/kernel/stackleak.c
5154 ++++ b/kernel/stackleak.c
5155 +@@ -48,7 +48,7 @@ int stack_erasing_sysctl(struct ctl_table *table, int write,
5156 + #define skip_erasing() false
5157 + #endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
5158 +
5159 +-asmlinkage void notrace stackleak_erase(void)
5160 ++asmlinkage void noinstr stackleak_erase(void)
5161 + {
5162 + /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
5163 + unsigned long kstack_ptr = current->lowest_stack;
5164 +@@ -102,9 +102,8 @@ asmlinkage void notrace stackleak_erase(void)
5165 + /* Reset the 'lowest_stack' value for the next syscall */
5166 + current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
5167 + }
5168 +-NOKPROBE_SYMBOL(stackleak_erase);
5169 +
5170 +-void __used __no_caller_saved_registers notrace stackleak_track_stack(void)
5171 ++void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
5172 + {
5173 + unsigned long sp = current_stack_pointer;
5174 +
5175 +diff --git a/kernel/sys.c b/kernel/sys.c
5176 +index 8fdac0d90504a..3e4e8930fafc6 100644
5177 +--- a/kernel/sys.c
5178 ++++ b/kernel/sys.c
5179 +@@ -472,6 +472,16 @@ static int set_user(struct cred *new)
5180 + if (!new_user)
5181 + return -EAGAIN;
5182 +
5183 ++ free_uid(new->user);
5184 ++ new->user = new_user;
5185 ++ return 0;
5186 ++}
5187 ++
5188 ++static void flag_nproc_exceeded(struct cred *new)
5189 ++{
5190 ++ if (new->ucounts == current_ucounts())
5191 ++ return;
5192 ++
5193 + /*
5194 + * We don't fail in case of NPROC limit excess here because too many
5195 + * poorly written programs don't check set*uid() return code, assuming
5196 +@@ -480,15 +490,10 @@ static int set_user(struct cred *new)
5197 + * failure to the execve() stage.
5198 + */
5199 + if (is_ucounts_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
5200 +- new_user != INIT_USER &&
5201 +- !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
5202 ++ new->user != INIT_USER)
5203 + current->flags |= PF_NPROC_EXCEEDED;
5204 + else
5205 + current->flags &= ~PF_NPROC_EXCEEDED;
5206 +-
5207 +- free_uid(new->user);
5208 +- new->user = new_user;
5209 +- return 0;
5210 + }
5211 +
5212 + /*
5213 +@@ -563,6 +568,7 @@ long __sys_setreuid(uid_t ruid, uid_t euid)
5214 + if (retval < 0)
5215 + goto error;
5216 +
5217 ++ flag_nproc_exceeded(new);
5218 + return commit_creds(new);
5219 +
5220 + error:
5221 +@@ -625,6 +631,7 @@ long __sys_setuid(uid_t uid)
5222 + if (retval < 0)
5223 + goto error;
5224 +
5225 ++ flag_nproc_exceeded(new);
5226 + return commit_creds(new);
5227 +
5228 + error:
5229 +@@ -704,6 +711,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
5230 + if (retval < 0)
5231 + goto error;
5232 +
5233 ++ flag_nproc_exceeded(new);
5234 + return commit_creds(new);
5235 +
5236 + error:
5237 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
5238 +index 51a87a67e2abe..618c20ce2479d 100644
5239 +--- a/kernel/trace/trace.c
5240 ++++ b/kernel/trace/trace.c
5241 +@@ -252,6 +252,10 @@ __setup("trace_clock=", set_trace_boot_clock);
5242 +
5243 + static int __init set_tracepoint_printk(char *str)
5244 + {
5245 ++ /* Ignore the "tp_printk_stop_on_boot" param */
5246 ++ if (*str == '_')
5247 ++ return 0;
5248 ++
5249 + if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
5250 + tracepoint_printk = 1;
5251 + return 1;
5252 +diff --git a/kernel/ucount.c b/kernel/ucount.c
5253 +index 804f64799fc14..a1d67261501a6 100644
5254 +--- a/kernel/ucount.c
5255 ++++ b/kernel/ucount.c
5256 +@@ -344,7 +344,8 @@ bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsign
5257 + if (rlimit > LONG_MAX)
5258 + max = LONG_MAX;
5259 + for (iter = ucounts; iter; iter = iter->ns->ucounts) {
5260 +- if (get_ucounts_value(iter, type) > max)
5261 ++ long val = get_ucounts_value(iter, type);
5262 ++ if (val < 0 || val > max)
5263 + return true;
5264 + max = READ_ONCE(iter->ns->ucount_max[type]);
5265 + }
5266 +diff --git a/lib/iov_iter.c b/lib/iov_iter.c
5267 +index 60b5e6edfbaa7..c5b2f0f4b8a84 100644
5268 +--- a/lib/iov_iter.c
5269 ++++ b/lib/iov_iter.c
5270 +@@ -416,6 +416,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
5271 + return 0;
5272 +
5273 + buf->ops = &page_cache_pipe_buf_ops;
5274 ++ buf->flags = 0;
5275 + get_page(page);
5276 + buf->page = page;
5277 + buf->offset = offset;
5278 +@@ -532,6 +533,7 @@ static size_t push_pipe(struct iov_iter *i, size_t size,
5279 + break;
5280 +
5281 + buf->ops = &default_pipe_buf_ops;
5282 ++ buf->flags = 0;
5283 + buf->page = page;
5284 + buf->offset = 0;
5285 + buf->len = min_t(ssize_t, left, PAGE_SIZE);
5286 +diff --git a/mm/mprotect.c b/mm/mprotect.c
5287 +index 883e2cc85cad8..ed18dc49533f6 100644
5288 +--- a/mm/mprotect.c
5289 ++++ b/mm/mprotect.c
5290 +@@ -94,7 +94,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
5291 +
5292 + /* Also skip shared copy-on-write pages */
5293 + if (is_cow_mapping(vma->vm_flags) &&
5294 +- page_mapcount(page) != 1)
5295 ++ page_count(page) != 1)
5296 + continue;
5297 +
5298 + /*
5299 +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
5300 +index 7473e0cc6d469..ea3431ac46a14 100644
5301 +--- a/net/ax25/af_ax25.c
5302 ++++ b/net/ax25/af_ax25.c
5303 +@@ -77,6 +77,7 @@ static void ax25_kill_by_device(struct net_device *dev)
5304 + {
5305 + ax25_dev *ax25_dev;
5306 + ax25_cb *s;
5307 ++ struct sock *sk;
5308 +
5309 + if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
5310 + return;
5311 +@@ -85,13 +86,15 @@ static void ax25_kill_by_device(struct net_device *dev)
5312 + again:
5313 + ax25_for_each(s, &ax25_list) {
5314 + if (s->ax25_dev == ax25_dev) {
5315 ++ sk = s->sk;
5316 ++ sock_hold(sk);
5317 + spin_unlock_bh(&ax25_list_lock);
5318 +- lock_sock(s->sk);
5319 ++ lock_sock(sk);
5320 + s->ax25_dev = NULL;
5321 +- release_sock(s->sk);
5322 ++ release_sock(sk);
5323 + ax25_disconnect(s, ENETUNREACH);
5324 + spin_lock_bh(&ax25_list_lock);
5325 +-
5326 ++ sock_put(sk);
5327 + /* The entry could have been deleted from the
5328 + * list meanwhile and thus the next pointer is
5329 + * no longer valid. Play it safe and restart
5330 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
5331 +index de24098894897..db4f2641d1cd1 100644
5332 +--- a/net/bridge/br_multicast.c
5333 ++++ b/net/bridge/br_multicast.c
5334 +@@ -82,6 +82,9 @@ static void br_multicast_find_del_pg(struct net_bridge *br,
5335 + struct net_bridge_port_group *pg);
5336 + static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
5337 +
5338 ++static int br_mc_disabled_update(struct net_device *dev, bool value,
5339 ++ struct netlink_ext_ack *extack);
5340 ++
5341 + static struct net_bridge_port_group *
5342 + br_sg_port_find(struct net_bridge *br,
5343 + struct net_bridge_port_group_sg_key *sg_p)
5344 +@@ -1156,6 +1159,7 @@ struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
5345 + return mp;
5346 +
5347 + if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
5348 ++ br_mc_disabled_update(br->dev, false, NULL);
5349 + br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
5350 + return ERR_PTR(-E2BIG);
5351 + }
5352 +diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
5353 +index 49442cae6f69d..1d99b731e5b21 100644
5354 +--- a/net/core/drop_monitor.c
5355 ++++ b/net/core/drop_monitor.c
5356 +@@ -280,13 +280,17 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
5357 +
5358 + rcu_read_lock();
5359 + list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
5360 ++ struct net_device *dev;
5361 ++
5362 + /*
5363 + * only add a note to our monitor buffer if:
5364 + * 1) this is the dev we received on
5365 + * 2) its after the last_rx delta
5366 + * 3) our rx_dropped count has gone up
5367 + */
5368 +- if ((new_stat->dev == napi->dev) &&
5369 ++ /* Paired with WRITE_ONCE() in dropmon_net_event() */
5370 ++ dev = READ_ONCE(new_stat->dev);
5371 ++ if ((dev == napi->dev) &&
5372 + (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
5373 + (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
5374 + trace_drop_common(NULL, NULL);
5375 +@@ -1572,7 +1576,10 @@ static int dropmon_net_event(struct notifier_block *ev_block,
5376 + mutex_lock(&net_dm_mutex);
5377 + list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
5378 + if (new_stat->dev == dev) {
5379 +- new_stat->dev = NULL;
5380 ++
5381 ++ /* Paired with READ_ONCE() in trace_napi_poll_hit() */
5382 ++ WRITE_ONCE(new_stat->dev, NULL);
5383 ++
5384 + if (trace_state == TRACE_OFF) {
5385 + list_del_rcu(&new_stat->list);
5386 + kfree_rcu(new_stat, rcu);
5387 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
5388 +index 198cc8b74dc3e..91d7a5a5a08d0 100644
5389 +--- a/net/core/rtnetlink.c
5390 ++++ b/net/core/rtnetlink.c
5391 +@@ -1698,6 +1698,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
5392 + {
5393 + struct ifinfomsg *ifm;
5394 + struct nlmsghdr *nlh;
5395 ++ struct Qdisc *qdisc;
5396 +
5397 + ASSERT_RTNL();
5398 + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
5399 +@@ -1715,6 +1716,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
5400 + if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
5401 + goto nla_put_failure;
5402 +
5403 ++ qdisc = rtnl_dereference(dev->qdisc);
5404 + if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5405 + nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
5406 + nla_put_u8(skb, IFLA_OPERSTATE,
5407 +@@ -1733,8 +1735,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
5408 + #endif
5409 + put_master_ifindex(skb, dev) ||
5410 + nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
5411 +- (dev->qdisc &&
5412 +- nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
5413 ++ (qdisc &&
5414 ++ nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
5415 + nla_put_ifalias(skb, dev) ||
5416 + nla_put_u32(skb, IFLA_CARRIER_CHANGES,
5417 + atomic_read(&dev->carrier_up_count) +
5418 +diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
5419 +index 41f36ad8b0ec6..4ff03fb262e02 100644
5420 +--- a/net/dsa/dsa.c
5421 ++++ b/net/dsa/dsa.c
5422 +@@ -349,6 +349,7 @@ void dsa_flush_workqueue(void)
5423 + {
5424 + flush_workqueue(dsa_owq);
5425 + }
5426 ++EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
5427 +
5428 + int dsa_devlink_param_get(struct devlink *dl, u32 id,
5429 + struct devlink_param_gset_ctx *ctx)
5430 +diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
5431 +index a5c9bc7b66c6e..33ab7d7af9eb4 100644
5432 +--- a/net/dsa/dsa_priv.h
5433 ++++ b/net/dsa/dsa_priv.h
5434 +@@ -170,7 +170,6 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
5435 + const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
5436 +
5437 + bool dsa_schedule_work(struct work_struct *work);
5438 +-void dsa_flush_workqueue(void);
5439 + const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
5440 +
5441 + static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
5442 +diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
5443 +index cb548188f8134..98d7d7120bab2 100644
5444 +--- a/net/dsa/tag_lan9303.c
5445 ++++ b/net/dsa/tag_lan9303.c
5446 +@@ -77,7 +77,6 @@ static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
5447 +
5448 + static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
5449 + {
5450 +- __be16 *lan9303_tag;
5451 + u16 lan9303_tag1;
5452 + unsigned int source_port;
5453 +
5454 +@@ -87,14 +86,15 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
5455 + return NULL;
5456 + }
5457 +
5458 +- lan9303_tag = dsa_etype_header_pos_rx(skb);
5459 +-
5460 +- if (lan9303_tag[0] != htons(ETH_P_8021Q)) {
5461 +- dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid VLAN marker\n");
5462 +- return NULL;
5463 ++ if (skb_vlan_tag_present(skb)) {
5464 ++ lan9303_tag1 = skb_vlan_tag_get(skb);
5465 ++ __vlan_hwaccel_clear_tag(skb);
5466 ++ } else {
5467 ++ skb_push_rcsum(skb, ETH_HLEN);
5468 ++ __skb_vlan_pop(skb, &lan9303_tag1);
5469 ++ skb_pull_rcsum(skb, ETH_HLEN);
5470 + }
5471 +
5472 +- lan9303_tag1 = ntohs(lan9303_tag[1]);
5473 + source_port = lan9303_tag1 & 0x3;
5474 +
5475 + skb->dev = dsa_master_find_slave(dev, 0, source_port);
5476 +@@ -103,13 +103,6 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
5477 + return NULL;
5478 + }
5479 +
5480 +- /* remove the special VLAN tag between the MAC addresses
5481 +- * and the current ethertype field.
5482 +- */
5483 +- skb_pull_rcsum(skb, 2 + 2);
5484 +-
5485 +- dsa_strip_etype_header(skb, LAN9303_TAG_LEN);
5486 +-
5487 + if (!(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU))
5488 + dsa_default_offload_fwd_mark(skb);
5489 +
5490 +diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
5491 +index e184bcb199434..78e40ea42e58d 100644
5492 +--- a/net/ipv4/fib_lookup.h
5493 ++++ b/net/ipv4/fib_lookup.h
5494 +@@ -16,10 +16,9 @@ struct fib_alias {
5495 + u8 fa_slen;
5496 + u32 tb_id;
5497 + s16 fa_default;
5498 +- u8 offload:1,
5499 +- trap:1,
5500 +- offload_failed:1,
5501 +- unused:5;
5502 ++ u8 offload;
5503 ++ u8 trap;
5504 ++ u8 offload_failed;
5505 + struct rcu_head rcu;
5506 + };
5507 +
5508 +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
5509 +index 5dfb94abe7b10..d244c57b73031 100644
5510 +--- a/net/ipv4/fib_semantics.c
5511 ++++ b/net/ipv4/fib_semantics.c
5512 +@@ -524,9 +524,9 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
5513 + fri.dst_len = dst_len;
5514 + fri.tos = fa->fa_tos;
5515 + fri.type = fa->fa_type;
5516 +- fri.offload = fa->offload;
5517 +- fri.trap = fa->trap;
5518 +- fri.offload_failed = fa->offload_failed;
5519 ++ fri.offload = READ_ONCE(fa->offload);
5520 ++ fri.trap = READ_ONCE(fa->trap);
5521 ++ fri.offload_failed = READ_ONCE(fa->offload_failed);
5522 + err = fib_dump_info(skb, info->portid, seq, event, &fri, nlm_flags);
5523 + if (err < 0) {
5524 + /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
5525 +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
5526 +index 8060524f42566..f7f74d5c14da6 100644
5527 +--- a/net/ipv4/fib_trie.c
5528 ++++ b/net/ipv4/fib_trie.c
5529 +@@ -1047,19 +1047,23 @@ void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
5530 + if (!fa_match)
5531 + goto out;
5532 +
5533 +- if (fa_match->offload == fri->offload && fa_match->trap == fri->trap &&
5534 +- fa_match->offload_failed == fri->offload_failed)
5535 ++ /* These are paired with the WRITE_ONCE() happening in this function.
5536 ++ * The reason is that we are only protected by RCU at this point.
5537 ++ */
5538 ++ if (READ_ONCE(fa_match->offload) == fri->offload &&
5539 ++ READ_ONCE(fa_match->trap) == fri->trap &&
5540 ++ READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
5541 + goto out;
5542 +
5543 +- fa_match->offload = fri->offload;
5544 +- fa_match->trap = fri->trap;
5545 ++ WRITE_ONCE(fa_match->offload, fri->offload);
5546 ++ WRITE_ONCE(fa_match->trap, fri->trap);
5547 +
5548 + /* 2 means send notifications only if offload_failed was changed. */
5549 + if (net->ipv4.sysctl_fib_notify_on_flag_change == 2 &&
5550 +- fa_match->offload_failed == fri->offload_failed)
5551 ++ READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
5552 + goto out;
5553 +
5554 +- fa_match->offload_failed = fri->offload_failed;
5555 ++ WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);
5556 +
5557 + if (!net->ipv4.sysctl_fib_notify_on_flag_change)
5558 + goto out;
5559 +@@ -2297,9 +2301,9 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
5560 + fri.dst_len = KEYLENGTH - fa->fa_slen;
5561 + fri.tos = fa->fa_tos;
5562 + fri.type = fa->fa_type;
5563 +- fri.offload = fa->offload;
5564 +- fri.trap = fa->trap;
5565 +- fri.offload_failed = fa->offload_failed;
5566 ++ fri.offload = READ_ONCE(fa->offload);
5567 ++ fri.trap = READ_ONCE(fa->trap);
5568 ++ fri.offload_failed = READ_ONCE(fa->offload_failed);
5569 + err = fib_dump_info(skb,
5570 + NETLINK_CB(cb->skb).portid,
5571 + cb->nlh->nlmsg_seq,
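
The fib_alias (fib_lookup.h) and fib6_info (ip6_fib.h) changes in this patch turn 1-bit flag bitfields into whole u8 fields and convert their users, as in the fib_trie.c hunks above, to READ_ONCE()/WRITE_ONCE(). Writing a bitfield compiles to a read-modify-write of the byte it shares with its neighbours, which can tear under concurrent RCU readers; a byte per flag permits single-copy-atomic loads and stores. A compilable userspace illustration (READ_ONCE/WRITE_ONCE here are simplified stand-ins for the kernel macros):

    #include <stdint.h>
    #include <stdio.h>

    /* Before: one shared byte -- updating 'offload' rewrites its neighbours too. */
    struct fib_flags_packed {
        uint8_t offload:1, trap:1, offload_failed:1, unused:5;
    };

    /* After: each flag in its own byte, safe for lockless readers. */
    struct fib_flags_split {
        uint8_t offload, trap, offload_failed;
    };

    /* Simplified userspace stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
    #define READ_ONCE(x)     (*(volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    int main(void)
    {
        struct fib_flags_split f = { 0 };

        WRITE_ONCE(f.offload, 1);    /* a plain single-byte store, no RMW */
        printf("offload=%u trap=%u (packed struct occupies %zu byte)\n",
               (unsigned)READ_ONCE(f.offload), (unsigned)READ_ONCE(f.trap),
               sizeof(struct fib_flags_packed));
        return 0;
    }
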
5572 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
5573 +index 086822cb1cc96..e3a159c8f231e 100644
5574 +--- a/net/ipv4/ping.c
5575 ++++ b/net/ipv4/ping.c
5576 +@@ -172,16 +172,23 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
5577 + struct sock *sk = NULL;
5578 + struct inet_sock *isk;
5579 + struct hlist_nulls_node *hnode;
5580 +- int dif = skb->dev->ifindex;
5581 ++ int dif, sdif;
5582 +
5583 + if (skb->protocol == htons(ETH_P_IP)) {
5584 ++ dif = inet_iif(skb);
5585 ++ sdif = inet_sdif(skb);
5586 + pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
5587 + (int)ident, &ip_hdr(skb)->daddr, dif);
5588 + #if IS_ENABLED(CONFIG_IPV6)
5589 + } else if (skb->protocol == htons(ETH_P_IPV6)) {
5590 ++ dif = inet6_iif(skb);
5591 ++ sdif = inet6_sdif(skb);
5592 + pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
5593 + (int)ident, &ipv6_hdr(skb)->daddr, dif);
5594 + #endif
5595 ++ } else {
5596 ++ pr_err("ping: protocol(%x) is not supported\n", ntohs(skb->protocol));
5597 ++ return NULL;
5598 + }
5599 +
5600 + read_lock_bh(&ping_table.lock);
5601 +@@ -221,7 +228,7 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
5602 + }
5603 +
5604 + if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
5605 +- sk->sk_bound_dev_if != inet_sdif(skb))
5606 ++ sk->sk_bound_dev_if != sdif)
5607 + continue;
5608 +
5609 + sock_hold(sk);
5610 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
5611 +index d6899ab5fb39b..23833660584df 100644
5612 +--- a/net/ipv4/route.c
5613 ++++ b/net/ipv4/route.c
5614 +@@ -3401,8 +3401,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5615 + fa->fa_tos == fri.tos &&
5616 + fa->fa_info == res.fi &&
5617 + fa->fa_type == fri.type) {
5618 +- fri.offload = fa->offload;
5619 +- fri.trap = fa->trap;
5620 ++ fri.offload = READ_ONCE(fa->offload);
5621 ++ fri.trap = READ_ONCE(fa->trap);
5622 + break;
5623 + }
5624 + }
5625 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
5626 +index bf13865426340..c6e1989ab2ed9 100644
5627 +--- a/net/ipv6/addrconf.c
5628 ++++ b/net/ipv6/addrconf.c
5629 +@@ -1837,8 +1837,8 @@ out:
5630 + }
5631 + EXPORT_SYMBOL(ipv6_dev_get_saddr);
5632 +
5633 +-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
5634 +- u32 banned_flags)
5635 ++static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
5636 ++ u32 banned_flags)
5637 + {
5638 + struct inet6_ifaddr *ifp;
5639 + int err = -EADDRNOTAVAIL;
5640 +diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
5641 +index aa673a6a7e432..ceb85c67ce395 100644
5642 +--- a/net/ipv6/ip6_flowlabel.c
5643 ++++ b/net/ipv6/ip6_flowlabel.c
5644 +@@ -450,8 +450,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
5645 + err = -EINVAL;
5646 + goto done;
5647 + }
5648 +- if (fl_shared_exclusive(fl) || fl->opt)
5649 ++ if (fl_shared_exclusive(fl) || fl->opt) {
5650 ++ WRITE_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl, 1);
5651 + static_branch_deferred_inc(&ipv6_flowlabel_exclusive);
5652 ++ }
5653 + return fl;
5654 +
5655 + done:
5656 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
5657 +index bed8155508c85..a8861db52c187 100644
5658 +--- a/net/ipv6/mcast.c
5659 ++++ b/net/ipv6/mcast.c
5660 +@@ -1759,7 +1759,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
5661 + skb_reserve(skb, hlen);
5662 + skb_tailroom_reserve(skb, mtu, tlen);
5663 +
5664 +- if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
5665 ++ if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
5666 + /* <draft-ietf-magma-mld-source-05.txt>:
5667 + * use unspecified address as the source address
5668 + * when a valid link-local address is not available.
5669 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
5670 +index 3c5bb49692206..e0766bdf20e7d 100644
5671 +--- a/net/ipv6/route.c
5672 ++++ b/net/ipv6/route.c
5673 +@@ -5767,11 +5767,11 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5674 + }
5675 +
5676 + if (!dst) {
5677 +- if (rt->offload)
5678 ++ if (READ_ONCE(rt->offload))
5679 + rtm->rtm_flags |= RTM_F_OFFLOAD;
5680 +- if (rt->trap)
5681 ++ if (READ_ONCE(rt->trap))
5682 + rtm->rtm_flags |= RTM_F_TRAP;
5683 +- if (rt->offload_failed)
5684 ++ if (READ_ONCE(rt->offload_failed))
5685 + rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
5686 + }
5687 +
5688 +@@ -6229,19 +6229,20 @@ void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
5689 + struct sk_buff *skb;
5690 + int err;
5691 +
5692 +- if (f6i->offload == offload && f6i->trap == trap &&
5693 +- f6i->offload_failed == offload_failed)
5694 ++ if (READ_ONCE(f6i->offload) == offload &&
5695 ++ READ_ONCE(f6i->trap) == trap &&
5696 ++ READ_ONCE(f6i->offload_failed) == offload_failed)
5697 + return;
5698 +
5699 +- f6i->offload = offload;
5700 +- f6i->trap = trap;
5701 ++ WRITE_ONCE(f6i->offload, offload);
5702 ++ WRITE_ONCE(f6i->trap, trap);
5703 +
5704 + /* 2 means send notifications only if offload_failed was changed. */
5705 + if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
5706 +- f6i->offload_failed == offload_failed)
5707 ++ READ_ONCE(f6i->offload_failed) == offload_failed)
5708 + return;
5709 +
5710 +- f6i->offload_failed = offload_failed;
5711 ++ WRITE_ONCE(f6i->offload_failed, offload_failed);
5712 +
5713 + if (!rcu_access_pointer(f6i->fib6_node))
5714 + /* The route was removed from the tree, do not send
5715 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
5716 +index 89c648b035b9a..215948fb0d35e 100644
5717 +--- a/net/mac80211/mlme.c
5718 ++++ b/net/mac80211/mlme.c
5719 +@@ -664,7 +664,7 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
5720 + ieee80211_ie_build_he_6ghz_cap(sdata, skb);
5721 + }
5722 +
5723 +-static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
5724 ++static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
5725 + {
5726 + struct ieee80211_local *local = sdata->local;
5727 + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
5728 +@@ -684,6 +684,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
5729 + enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
5730 + const struct ieee80211_sband_iftype_data *iftd;
5731 + struct ieee80211_prep_tx_info info = {};
5732 ++ int ret;
5733 +
5734 + /* we know it's writable, cast away the const */
5735 + if (assoc_data->ie_len)
5736 +@@ -697,7 +698,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
5737 + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
5738 + if (WARN_ON(!chanctx_conf)) {
5739 + rcu_read_unlock();
5740 +- return;
5741 ++ return -EINVAL;
5742 + }
5743 + chan = chanctx_conf->def.chan;
5744 + rcu_read_unlock();
5745 +@@ -748,7 +749,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
5746 + (iftd ? iftd->vendor_elems.len : 0),
5747 + GFP_KERNEL);
5748 + if (!skb)
5749 +- return;
5750 ++ return -ENOMEM;
5751 +
5752 + skb_reserve(skb, local->hw.extra_tx_headroom);
5753 +
5754 +@@ -1029,15 +1030,22 @@ skip_rates:
5755 + skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
5756 + }
5757 +
5758 +- if (assoc_data->fils_kek_len &&
5759 +- fils_encrypt_assoc_req(skb, assoc_data) < 0) {
5760 +- dev_kfree_skb(skb);
5761 +- return;
5762 ++ if (assoc_data->fils_kek_len) {
5763 ++ ret = fils_encrypt_assoc_req(skb, assoc_data);
5764 ++ if (ret < 0) {
5765 ++ dev_kfree_skb(skb);
5766 ++ return ret;
5767 ++ }
5768 + }
5769 +
5770 + pos = skb_tail_pointer(skb);
5771 + kfree(ifmgd->assoc_req_ies);
5772 + ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC);
5773 ++ if (!ifmgd->assoc_req_ies) {
5774 ++ dev_kfree_skb(skb);
5775 ++ return -ENOMEM;
5776 ++ }
5777 ++
5778 + ifmgd->assoc_req_ies_len = pos - ie_start;
5779 +
5780 + drv_mgd_prepare_tx(local, sdata, &info);
5781 +@@ -1047,6 +1055,8 @@ skip_rates:
5782 + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
5783 + IEEE80211_TX_INTFL_MLME_CONN_TX;
5784 + ieee80211_tx_skb(sdata, skb);
5785 ++
5786 ++ return 0;
5787 + }
5788 +
5789 + void ieee80211_send_pspoll(struct ieee80211_local *local,
5790 +@@ -4451,6 +4461,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
5791 + {
5792 + struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
5793 + struct ieee80211_local *local = sdata->local;
5794 ++ int ret;
5795 +
5796 + sdata_assert_lock(sdata);
5797 +
5798 +@@ -4471,7 +4482,9 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
5799 + sdata_info(sdata, "associate with %pM (try %d/%d)\n",
5800 + assoc_data->bss->bssid, assoc_data->tries,
5801 + IEEE80211_ASSOC_MAX_TRIES);
5802 +- ieee80211_send_assoc(sdata);
5803 ++ ret = ieee80211_send_assoc(sdata);
5804 ++ if (ret)
5805 ++ return ret;
5806 +
5807 + if (!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
5808 + assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
5809 +diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
5810 +index 2394238d01c91..5a936334b517a 100644
5811 +--- a/net/netfilter/nf_conntrack_proto_sctp.c
5812 ++++ b/net/netfilter/nf_conntrack_proto_sctp.c
5813 +@@ -489,6 +489,15 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
5814 + pr_debug("Setting vtag %x for dir %d\n",
5815 + ih->init_tag, !dir);
5816 + ct->proto.sctp.vtag[!dir] = ih->init_tag;
5817 ++
5818 ++ /* don't renew timeout on init retransmit so
5819 ++ * port reuse by client or NAT middlebox cannot
5820 ++ * keep entry alive indefinitely (incl. nat info).
5821 ++ */
5822 ++ if (new_state == SCTP_CONNTRACK_CLOSED &&
5823 ++ old_state == SCTP_CONNTRACK_CLOSED &&
5824 ++ nf_ct_is_confirmed(ct))
5825 ++ ignore = true;
5826 + }
5827 +
5828 + ct->proto.sctp.state = new_state;
5829 +diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
5830 +index a0109fa1e92d0..1133e06f3c40e 100644
5831 +--- a/net/netfilter/nft_synproxy.c
5832 ++++ b/net/netfilter/nft_synproxy.c
5833 +@@ -191,8 +191,10 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
5834 + if (err)
5835 + goto nf_ct_failure;
5836 + err = nf_synproxy_ipv6_init(snet, ctx->net);
5837 +- if (err)
5838 ++ if (err) {
5839 ++ nf_synproxy_ipv4_fini(snet, ctx->net);
5840 + goto nf_ct_failure;
5841 ++ }
5842 + break;
5843 + }
5844 +
5845 +diff --git a/net/sched/act_api.c b/net/sched/act_api.c
5846 +index 7dd3a2dc5fa40..7d53272727bfa 100644
5847 +--- a/net/sched/act_api.c
5848 ++++ b/net/sched/act_api.c
5849 +@@ -728,15 +728,24 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
5850 + restart_act_graph:
5851 + for (i = 0; i < nr_actions; i++) {
5852 + const struct tc_action *a = actions[i];
5853 ++ int repeat_ttl;
5854 +
5855 + if (jmp_prgcnt > 0) {
5856 + jmp_prgcnt -= 1;
5857 + continue;
5858 + }
5859 ++
5860 ++ repeat_ttl = 32;
5861 + repeat:
5862 + ret = a->ops->act(skb, a, res);
5863 +- if (ret == TC_ACT_REPEAT)
5864 +- goto repeat; /* we need a ttl - JHS */
5865 ++
5866 ++ if (unlikely(ret == TC_ACT_REPEAT)) {
5867 ++ if (--repeat_ttl != 0)
5868 ++ goto repeat;
5869 ++ /* suspicious opcode, stop pipeline */
5870 ++ net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
5871 ++ return TC_ACT_OK;
5872 ++ }
5873 +
5874 + if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
5875 + jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
5876 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
5877 +index 56dba8519d7c3..cd44cac7fbcf9 100644
5878 +--- a/net/sched/cls_api.c
5879 ++++ b/net/sched/cls_api.c
5880 +@@ -1044,7 +1044,7 @@ static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
5881 +
5882 + /* Find qdisc */
5883 + if (!*parent) {
5884 +- *q = dev->qdisc;
5885 ++ *q = rcu_dereference(dev->qdisc);
5886 + *parent = (*q)->handle;
5887 + } else {
5888 + *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
5889 +@@ -2587,7 +2587,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
5890 +
5891 + parent = tcm->tcm_parent;
5892 + if (!parent)
5893 +- q = dev->qdisc;
5894 ++ q = rtnl_dereference(dev->qdisc);
5895 + else
5896 + q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
5897 + if (!q)
5898 +@@ -2962,7 +2962,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
5899 + return skb->len;
5900 +
5901 + if (!tcm->tcm_parent)
5902 +- q = dev->qdisc;
5903 ++ q = rtnl_dereference(dev->qdisc);
5904 + else
5905 + q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
5906 +
5907 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
5908 +index 8e629c356e693..0fb387c9d706d 100644
5909 +--- a/net/sched/sch_api.c
5910 ++++ b/net/sched/sch_api.c
5911 +@@ -301,7 +301,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
5912 +
5913 + if (!handle)
5914 + return NULL;
5915 +- q = qdisc_match_from_root(dev->qdisc, handle);
5916 ++ q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
5917 + if (q)
5918 + goto out;
5919 +
5920 +@@ -320,7 +320,7 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
5921 +
5922 + if (!handle)
5923 + return NULL;
5924 +- q = qdisc_match_from_root(dev->qdisc, handle);
5925 ++ q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
5926 + if (q)
5927 + goto out;
5928 +
5929 +@@ -1082,10 +1082,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
5930 + skip:
5931 + if (!ingress) {
5932 + notify_and_destroy(net, skb, n, classid,
5933 +- dev->qdisc, new);
5934 ++ rtnl_dereference(dev->qdisc), new);
5935 + if (new && !new->ops->attach)
5936 + qdisc_refcount_inc(new);
5937 +- dev->qdisc = new ? : &noop_qdisc;
5938 ++ rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
5939 +
5940 + if (new && new->ops->attach)
5941 + new->ops->attach(new);
5942 +@@ -1460,7 +1460,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
5943 + q = dev_ingress_queue(dev)->qdisc_sleeping;
5944 + }
5945 + } else {
5946 +- q = dev->qdisc;
5947 ++ q = rtnl_dereference(dev->qdisc);
5948 + }
5949 + if (!q) {
5950 + NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
5951 +@@ -1549,7 +1549,7 @@ replay:
5952 + q = dev_ingress_queue(dev)->qdisc_sleeping;
5953 + }
5954 + } else {
5955 +- q = dev->qdisc;
5956 ++ q = rtnl_dereference(dev->qdisc);
5957 + }
5958 +
5959 + /* It may be default qdisc, ignore it */
5960 +@@ -1771,7 +1771,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
5961 + s_q_idx = 0;
5962 + q_idx = 0;
5963 +
5964 +- if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
5965 ++ if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
5966 ++ skb, cb, &q_idx, s_q_idx,
5967 + true, tca[TCA_DUMP_INVISIBLE]) < 0)
5968 + goto done;
5969 +
5970 +@@ -2042,7 +2043,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
5971 + } else if (qid1) {
5972 + qid = qid1;
5973 + } else if (qid == 0)
5974 +- qid = dev->qdisc->handle;
5975 ++ qid = rtnl_dereference(dev->qdisc)->handle;
5976 +
5977 + /* Now qid is genuine qdisc handle consistent
5978 + * both with parent and child.
5979 +@@ -2053,7 +2054,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
5980 + portid = TC_H_MAKE(qid, portid);
5981 + } else {
5982 + if (qid == 0)
5983 +- qid = dev->qdisc->handle;
5984 ++ qid = rtnl_dereference(dev->qdisc)->handle;
5985 + }
5986 +
5987 + /* OK. Locate qdisc */
5988 +@@ -2214,7 +2215,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
5989 + s_t = cb->args[0];
5990 + t = 0;
5991 +
5992 +- if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
5993 ++ if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
5994 ++ skb, tcm, cb, &t, s_t, true) < 0)
5995 + goto done;
5996 +
5997 + dev_queue = dev_ingress_queue(dev);
5998 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
5999 +index 47ca76ba7ffa8..30c29a9a2efd2 100644
6000 +--- a/net/sched/sch_generic.c
6001 ++++ b/net/sched/sch_generic.c
6002 +@@ -1114,30 +1114,33 @@ static void attach_default_qdiscs(struct net_device *dev)
6003 + if (!netif_is_multiqueue(dev) ||
6004 + dev->priv_flags & IFF_NO_QUEUE) {
6005 + netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
6006 +- dev->qdisc = txq->qdisc_sleeping;
6007 +- qdisc_refcount_inc(dev->qdisc);
6008 ++ qdisc = txq->qdisc_sleeping;
6009 ++ rcu_assign_pointer(dev->qdisc, qdisc);
6010 ++ qdisc_refcount_inc(qdisc);
6011 + } else {
6012 + qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
6013 + if (qdisc) {
6014 +- dev->qdisc = qdisc;
6015 ++ rcu_assign_pointer(dev->qdisc, qdisc);
6016 + qdisc->ops->attach(qdisc);
6017 + }
6018 + }
6019 ++ qdisc = rtnl_dereference(dev->qdisc);
6020 +
6021 + /* Detect default qdisc setup/init failed and fallback to "noqueue" */
6022 +- if (dev->qdisc == &noop_qdisc) {
6023 ++ if (qdisc == &noop_qdisc) {
6024 + netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
6025 + default_qdisc_ops->id, noqueue_qdisc_ops.id);
6026 + dev->priv_flags |= IFF_NO_QUEUE;
6027 + netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
6028 +- dev->qdisc = txq->qdisc_sleeping;
6029 +- qdisc_refcount_inc(dev->qdisc);
6030 ++ qdisc = txq->qdisc_sleeping;
6031 ++ rcu_assign_pointer(dev->qdisc, qdisc);
6032 ++ qdisc_refcount_inc(qdisc);
6033 + dev->priv_flags ^= IFF_NO_QUEUE;
6034 + }
6035 +
6036 + #ifdef CONFIG_NET_SCHED
6037 +- if (dev->qdisc != &noop_qdisc)
6038 +- qdisc_hash_add(dev->qdisc, false);
6039 ++ if (qdisc != &noop_qdisc)
6040 ++ qdisc_hash_add(qdisc, false);
6041 + #endif
6042 + }
6043 +
6044 +@@ -1167,7 +1170,7 @@ void dev_activate(struct net_device *dev)
6045 + * and noqueue_qdisc for virtual interfaces
6046 + */
6047 +
6048 +- if (dev->qdisc == &noop_qdisc)
6049 ++ if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
6050 + attach_default_qdiscs(dev);
6051 +
6052 + if (!netif_carrier_ok(dev))
6053 +@@ -1333,7 +1336,7 @@ static int qdisc_change_tx_queue_len(struct net_device *dev,
6054 + void dev_qdisc_change_real_num_tx(struct net_device *dev,
6055 + unsigned int new_real_tx)
6056 + {
6057 +- struct Qdisc *qdisc = dev->qdisc;
6058 ++ struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
6059 +
6060 + if (qdisc->ops->change_real_num_tx)
6061 + qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
6062 +@@ -1373,7 +1376,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
6063 +
6064 + void dev_init_scheduler(struct net_device *dev)
6065 + {
6066 +- dev->qdisc = &noop_qdisc;
6067 ++ rcu_assign_pointer(dev->qdisc, &noop_qdisc);
6068 + netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
6069 + if (dev_ingress_queue(dev))
6070 + dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
6071 +@@ -1401,8 +1404,8 @@ void dev_shutdown(struct net_device *dev)
6072 + netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
6073 + if (dev_ingress_queue(dev))
6074 + shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
6075 +- qdisc_put(dev->qdisc);
6076 +- dev->qdisc = &noop_qdisc;
6077 ++ qdisc_put(rtnl_dereference(dev->qdisc));
6078 ++ rcu_assign_pointer(dev->qdisc, &noop_qdisc);
6079 +
6080 + WARN_ON(timer_pending(&dev->watchdog_timer));
6081 + }
6082 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
6083 +index 96dee4a62385f..eff22065e1977 100644
6084 +--- a/net/smc/af_smc.c
6085 ++++ b/net/smc/af_smc.c
6086 +@@ -649,14 +649,17 @@ static void smc_fback_error_report(struct sock *clcsk)
6087 + static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
6088 + {
6089 + struct sock *clcsk;
6090 ++ int rc = 0;
6091 +
6092 + mutex_lock(&smc->clcsock_release_lock);
6093 + if (!smc->clcsock) {
6094 +- mutex_unlock(&smc->clcsock_release_lock);
6095 +- return -EBADF;
6096 ++ rc = -EBADF;
6097 ++ goto out;
6098 + }
6099 + clcsk = smc->clcsock->sk;
6100 +
6101 ++ if (smc->use_fallback)
6102 ++ goto out;
6103 + smc->use_fallback = true;
6104 + smc->fallback_rsn = reason_code;
6105 + smc_stat_fallback(smc);
6106 +@@ -683,8 +686,9 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
6107 + smc->clcsock->sk->sk_user_data =
6108 + (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
6109 + }
6110 ++out:
6111 + mutex_unlock(&smc->clcsock_release_lock);
6112 +- return 0;
6113 ++ return rc;
6114 + }
6115 +
6116 + /* fall back during connect */
6117 +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
6118 +index aaec3c9be8db6..1295f9ab839fd 100644
6119 +--- a/net/sunrpc/xprtrdma/verbs.c
6120 ++++ b/net/sunrpc/xprtrdma/verbs.c
6121 +@@ -438,6 +438,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
6122 + IB_POLL_WORKQUEUE);
6123 + if (IS_ERR(ep->re_attr.send_cq)) {
6124 + rc = PTR_ERR(ep->re_attr.send_cq);
6125 ++ ep->re_attr.send_cq = NULL;
6126 + goto out_destroy;
6127 + }
6128 +
6129 +@@ -446,6 +447,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
6130 + IB_POLL_WORKQUEUE);
6131 + if (IS_ERR(ep->re_attr.recv_cq)) {
6132 + rc = PTR_ERR(ep->re_attr.recv_cq);
6133 ++ ep->re_attr.recv_cq = NULL;
6134 + goto out_destroy;
6135 + }
6136 + ep->re_receive_count = 0;
6137 +@@ -484,6 +486,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
6138 + ep->re_pd = ib_alloc_pd(device, 0);
6139 + if (IS_ERR(ep->re_pd)) {
6140 + rc = PTR_ERR(ep->re_pd);
6141 ++ ep->re_pd = NULL;
6142 + goto out_destroy;
6143 + }
6144 +
6145 +diff --git a/net/tipc/node.c b/net/tipc/node.c
6146 +index 9947b7dfe1d2d..6ef95ce565bd3 100644
6147 +--- a/net/tipc/node.c
6148 ++++ b/net/tipc/node.c
6149 +@@ -403,7 +403,7 @@ static void tipc_node_write_unlock(struct tipc_node *n)
6150 + u32 flags = n->action_flags;
6151 + struct list_head *publ_list;
6152 + struct tipc_uaddr ua;
6153 +- u32 bearer_id;
6154 ++ u32 bearer_id, node;
6155 +
6156 + if (likely(!flags)) {
6157 + write_unlock_bh(&n->lock);
6158 +@@ -413,7 +413,8 @@ static void tipc_node_write_unlock(struct tipc_node *n)
6159 + tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
6160 + TIPC_LINK_STATE, n->addr, n->addr);
6161 + sk.ref = n->link_id;
6162 +- sk.node = n->addr;
6163 ++ sk.node = tipc_own_addr(net);
6164 ++ node = n->addr;
6165 + bearer_id = n->link_id & 0xffff;
6166 + publ_list = &n->publ_list;
6167 +
6168 +@@ -423,17 +424,17 @@ static void tipc_node_write_unlock(struct tipc_node *n)
6169 + write_unlock_bh(&n->lock);
6170 +
6171 + if (flags & TIPC_NOTIFY_NODE_DOWN)
6172 +- tipc_publ_notify(net, publ_list, sk.node, n->capabilities);
6173 ++ tipc_publ_notify(net, publ_list, node, n->capabilities);
6174 +
6175 + if (flags & TIPC_NOTIFY_NODE_UP)
6176 +- tipc_named_node_up(net, sk.node, n->capabilities);
6177 ++ tipc_named_node_up(net, node, n->capabilities);
6178 +
6179 + if (flags & TIPC_NOTIFY_LINK_UP) {
6180 +- tipc_mon_peer_up(net, sk.node, bearer_id);
6181 ++ tipc_mon_peer_up(net, node, bearer_id);
6182 + tipc_nametbl_publish(net, &ua, &sk, sk.ref);
6183 + }
6184 + if (flags & TIPC_NOTIFY_LINK_DOWN) {
6185 +- tipc_mon_peer_down(net, sk.node, bearer_id);
6186 ++ tipc_mon_peer_down(net, node, bearer_id);
6187 + tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
6188 + }
6189 + }
6190 +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
6191 +index fa8c1b623fa21..91a5c65707ba1 100644
6192 +--- a/net/vmw_vsock/af_vsock.c
6193 ++++ b/net/vmw_vsock/af_vsock.c
6194 +@@ -1400,6 +1400,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
6195 + sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
6196 + sock->state = SS_UNCONNECTED;
6197 + vsock_transport_cancel_pkt(vsk);
6198 ++ vsock_remove_connected(vsk);
6199 + goto out_wait;
6200 + } else if (timeout == 0) {
6201 + err = -ETIMEDOUT;
6202 +diff --git a/net/wireless/core.c b/net/wireless/core.c
6203 +index eb297e1015e05..441136646f89a 100644
6204 +--- a/net/wireless/core.c
6205 ++++ b/net/wireless/core.c
6206 +@@ -5,7 +5,7 @@
6207 + * Copyright 2006-2010 Johannes Berg <johannes@××××××××××××.net>
6208 + * Copyright 2013-2014 Intel Mobile Communications GmbH
6209 + * Copyright 2015-2017 Intel Deutschland GmbH
6210 +- * Copyright (C) 2018-2021 Intel Corporation
6211 ++ * Copyright (C) 2018-2022 Intel Corporation
6212 + */
6213 +
6214 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6215 +@@ -332,29 +332,20 @@ static void cfg80211_event_work(struct work_struct *work)
6216 + void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
6217 + {
6218 + struct wireless_dev *wdev, *tmp;
6219 +- bool found = false;
6220 +
6221 + ASSERT_RTNL();
6222 +
6223 +- list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
6224 ++ list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
6225 + if (wdev->nl_owner_dead) {
6226 + if (wdev->netdev)
6227 + dev_close(wdev->netdev);
6228 +- found = true;
6229 +- }
6230 +- }
6231 +-
6232 +- if (!found)
6233 +- return;
6234 +
6235 +- wiphy_lock(&rdev->wiphy);
6236 +- list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
6237 +- if (wdev->nl_owner_dead) {
6238 ++ wiphy_lock(&rdev->wiphy);
6239 + cfg80211_leave(rdev, wdev);
6240 + rdev_del_virtual_intf(rdev, wdev);
6241 ++ wiphy_unlock(&rdev->wiphy);
6242 + }
6243 + }
6244 +- wiphy_unlock(&rdev->wiphy);
6245 + }
6246 +
6247 + static void cfg80211_destroy_iface_wk(struct work_struct *work)
6248 +diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
6249 +index cf72680cd7692..4a828bca071e8 100644
6250 +--- a/scripts/kconfig/confdata.c
6251 ++++ b/scripts/kconfig/confdata.c
6252 +@@ -983,14 +983,19 @@ static int conf_write_dep(const char *name)
6253 +
6254 + static int conf_touch_deps(void)
6255 + {
6256 +- const char *name;
6257 ++ const char *name, *tmp;
6258 + struct symbol *sym;
6259 + int res, i;
6260 +
6261 +- strcpy(depfile_path, "include/config/");
6262 +- depfile_prefix_len = strlen(depfile_path);
6263 +-
6264 + name = conf_get_autoconfig_name();
6265 ++ tmp = strrchr(name, '/');
6266 ++ depfile_prefix_len = tmp ? tmp - name + 1 : 0;
6267 ++ if (depfile_prefix_len + 1 > sizeof(depfile_path))
6268 ++ return -1;
6269 ++
6270 ++ strncpy(depfile_path, name, depfile_prefix_len);
6271 ++ depfile_path[depfile_prefix_len] = 0;
6272 ++
6273 + conf_read_simple(name, S_DEF_AUTO);
6274 + sym_calc_value(modules_sym);
6275 +
6276 +diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
6277 +index 0590f86df6e40..748da578b418c 100644
6278 +--- a/scripts/kconfig/preprocess.c
6279 ++++ b/scripts/kconfig/preprocess.c
6280 +@@ -141,7 +141,7 @@ static char *do_lineno(int argc, char *argv[])
6281 + static char *do_shell(int argc, char *argv[])
6282 + {
6283 + FILE *p;
6284 +- char buf[256];
6285 ++ char buf[4096];
6286 + char *cmd;
6287 + size_t nread;
6288 + int i;
6289 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6290 +index 21fec82489bd7..9e36f992605ab 100644
6291 +--- a/sound/pci/hda/hda_intel.c
6292 ++++ b/sound/pci/hda/hda_intel.c
6293 +@@ -1611,6 +1611,7 @@ static const struct snd_pci_quirk probe_mask_list[] = {
6294 + /* forced codec slots */
6295 + SND_PCI_QUIRK(0x1043, 0x1262, "ASUS W5Fm", 0x103),
6296 + SND_PCI_QUIRK(0x1046, 0x1262, "ASUS W5F", 0x103),
6297 ++ SND_PCI_QUIRK(0x1558, 0x0351, "Schenker Dock 15", 0x105),
6298 + /* WinFast VP200 H (Teradici) user reported broken communication */
6299 + SND_PCI_QUIRK(0x3a21, 0x040d, "WinFast VP200 H", 0x101),
6300 + {}
6301 +@@ -1794,8 +1795,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
6302 +
6303 + assign_position_fix(chip, check_position_fix(chip, position_fix[dev]));
6304 +
6305 +- check_probe_mask(chip, dev);
6306 +-
6307 + if (single_cmd < 0) /* allow fallback to single_cmd at errors */
6308 + chip->fallback_to_single_cmd = 1;
6309 + else /* explicitly set to single_cmd or not */
6310 +@@ -1821,6 +1820,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
6311 + chip->bus.core.needs_damn_long_delay = 1;
6312 + }
6313 +
6314 ++ check_probe_mask(chip, dev);
6315 ++
6316 + err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
6317 + if (err < 0) {
6318 + dev_err(card->dev, "Error creating device [card]!\n");
6319 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6320 +index 18f04137f61cf..83b56c1ba3996 100644
6321 +--- a/sound/pci/hda/patch_realtek.c
6322 ++++ b/sound/pci/hda/patch_realtek.c
6323 +@@ -133,6 +133,22 @@ struct alc_spec {
6324 + * COEF access helper functions
6325 + */
6326 +
6327 ++static void coef_mutex_lock(struct hda_codec *codec)
6328 ++{
6329 ++ struct alc_spec *spec = codec->spec;
6330 ++
6331 ++ snd_hda_power_up_pm(codec);
6332 ++ mutex_lock(&spec->coef_mutex);
6333 ++}
6334 ++
6335 ++static void coef_mutex_unlock(struct hda_codec *codec)
6336 ++{
6337 ++ struct alc_spec *spec = codec->spec;
6338 ++
6339 ++ mutex_unlock(&spec->coef_mutex);
6340 ++ snd_hda_power_down_pm(codec);
6341 ++}
6342 ++
6343 + static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
6344 + unsigned int coef_idx)
6345 + {
6346 +@@ -146,12 +162,11 @@ static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
6347 + static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
6348 + unsigned int coef_idx)
6349 + {
6350 +- struct alc_spec *spec = codec->spec;
6351 + unsigned int val;
6352 +
6353 +- mutex_lock(&spec->coef_mutex);
6354 ++ coef_mutex_lock(codec);
6355 + val = __alc_read_coefex_idx(codec, nid, coef_idx);
6356 +- mutex_unlock(&spec->coef_mutex);
6357 ++ coef_mutex_unlock(codec);
6358 + return val;
6359 + }
6360 +
6361 +@@ -168,11 +183,9 @@ static void __alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
6362 + static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
6363 + unsigned int coef_idx, unsigned int coef_val)
6364 + {
6365 +- struct alc_spec *spec = codec->spec;
6366 +-
6367 +- mutex_lock(&spec->coef_mutex);
6368 ++ coef_mutex_lock(codec);
6369 + __alc_write_coefex_idx(codec, nid, coef_idx, coef_val);
6370 +- mutex_unlock(&spec->coef_mutex);
6371 ++ coef_mutex_unlock(codec);
6372 + }
6373 +
6374 + #define alc_write_coef_idx(codec, coef_idx, coef_val) \
6375 +@@ -193,11 +206,9 @@ static void alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
6376 + unsigned int coef_idx, unsigned int mask,
6377 + unsigned int bits_set)
6378 + {
6379 +- struct alc_spec *spec = codec->spec;
6380 +-
6381 +- mutex_lock(&spec->coef_mutex);
6382 ++ coef_mutex_lock(codec);
6383 + __alc_update_coefex_idx(codec, nid, coef_idx, mask, bits_set);
6384 +- mutex_unlock(&spec->coef_mutex);
6385 ++ coef_mutex_unlock(codec);
6386 + }
6387 +
6388 + #define alc_update_coef_idx(codec, coef_idx, mask, bits_set) \
6389 +@@ -230,9 +241,7 @@ struct coef_fw {
6390 + static void alc_process_coef_fw(struct hda_codec *codec,
6391 + const struct coef_fw *fw)
6392 + {
6393 +- struct alc_spec *spec = codec->spec;
6394 +-
6395 +- mutex_lock(&spec->coef_mutex);
6396 ++ coef_mutex_lock(codec);
6397 + for (; fw->nid; fw++) {
6398 + if (fw->mask == (unsigned short)-1)
6399 + __alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val);
6400 +@@ -240,7 +249,7 @@ static void alc_process_coef_fw(struct hda_codec *codec,
6401 + __alc_update_coefex_idx(codec, fw->nid, fw->idx,
6402 + fw->mask, fw->val);
6403 + }
6404 +- mutex_unlock(&spec->coef_mutex);
6405 ++ coef_mutex_unlock(codec);
6406 + }
6407 +
6408 + /*
6409 +@@ -9013,6 +9022,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6410 + SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
6411 + SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
6412 + SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
6413 ++ SND_PCI_QUIRK(0x17aa, 0x383d, "Legion Y9000X 2019", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
6414 + SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
6415 + SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
6416 + SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
6417 +diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
6418 +index 6549e7fef3e32..c5ea3b115966b 100644
6419 +--- a/sound/soc/codecs/tas2770.c
6420 ++++ b/sound/soc/codecs/tas2770.c
6421 +@@ -38,10 +38,12 @@ static void tas2770_reset(struct tas2770_priv *tas2770)
6422 + gpiod_set_value_cansleep(tas2770->reset_gpio, 0);
6423 + msleep(20);
6424 + gpiod_set_value_cansleep(tas2770->reset_gpio, 1);
6425 ++ usleep_range(1000, 2000);
6426 + }
6427 +
6428 + snd_soc_component_write(tas2770->component, TAS2770_SW_RST,
6429 + TAS2770_RST);
6430 ++ usleep_range(1000, 2000);
6431 + }
6432 +
6433 + static int tas2770_set_bias_level(struct snd_soc_component *component,
6434 +@@ -110,6 +112,7 @@ static int tas2770_codec_resume(struct snd_soc_component *component)
6435 +
6436 + if (tas2770->sdz_gpio) {
6437 + gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
6438 ++ usleep_range(1000, 2000);
6439 + } else {
6440 + ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
6441 + TAS2770_PWR_CTRL_MASK,
6442 +@@ -510,8 +513,10 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
6443 +
6444 + tas2770->component = component;
6445 +
6446 +- if (tas2770->sdz_gpio)
6447 ++ if (tas2770->sdz_gpio) {
6448 + gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
6449 ++ usleep_range(1000, 2000);
6450 ++ }
6451 +
6452 + tas2770_reset(tas2770);
6453 +
6454 +diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
6455 +index a59e9d20cb46b..4b1773c1fb95f 100644
6456 +--- a/sound/soc/qcom/lpass-platform.c
6457 ++++ b/sound/soc/qcom/lpass-platform.c
6458 +@@ -524,7 +524,7 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
6459 + return -EINVAL;
6460 + }
6461 +
6462 +- ret = regmap_update_bits(map, reg_irqclr, val_irqclr, val_irqclr);
6463 ++ ret = regmap_write_bits(map, reg_irqclr, val_irqclr, val_irqclr);
6464 + if (ret) {
6465 + dev_err(soc_runtime->dev, "error writing to irqclear reg: %d\n", ret);
6466 + return ret;
6467 +@@ -665,7 +665,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
6468 + return -EINVAL;
6469 + }
6470 + if (interrupts & LPAIF_IRQ_PER(chan)) {
6471 +- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
6472 ++ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
6473 + if (rv) {
6474 + dev_err(soc_runtime->dev,
6475 + "error writing to irqclear reg: %d\n", rv);
6476 +@@ -676,7 +676,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
6477 + }
6478 +
6479 + if (interrupts & LPAIF_IRQ_XRUN(chan)) {
6480 +- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
6481 ++ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
6482 + if (rv) {
6483 + dev_err(soc_runtime->dev,
6484 + "error writing to irqclear reg: %d\n", rv);
6485 +@@ -688,7 +688,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
6486 + }
6487 +
6488 + if (interrupts & LPAIF_IRQ_ERR(chan)) {
6489 +- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
6490 ++ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
6491 + if (rv) {
6492 + dev_err(soc_runtime->dev,
6493 + "error writing to irqclear reg: %d\n", rv);
6494 +diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
6495 +index dc0e7c8d31f37..53457a0d466d3 100644
6496 +--- a/sound/soc/soc-ops.c
6497 ++++ b/sound/soc/soc-ops.c
6498 +@@ -308,7 +308,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
6499 + unsigned int sign_bit = mc->sign_bit;
6500 + unsigned int mask = (1 << fls(max)) - 1;
6501 + unsigned int invert = mc->invert;
6502 +- int err;
6503 ++ int err, ret;
6504 + bool type_2r = false;
6505 + unsigned int val2 = 0;
6506 + unsigned int val, val_mask;
6507 +@@ -350,12 +350,18 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
6508 + err = snd_soc_component_update_bits(component, reg, val_mask, val);
6509 + if (err < 0)
6510 + return err;
6511 ++ ret = err;
6512 +
6513 +- if (type_2r)
6514 ++ if (type_2r) {
6515 + err = snd_soc_component_update_bits(component, reg2, val_mask,
6516 +- val2);
6517 ++ val2);
6518 ++ /* Don't discard any error code or drop change flag */
6519 ++ if (ret == 0 || err < 0) {
6520 ++ ret = err;
6521 ++ }
6522 ++ }
6523 +
6524 +- return err;
6525 ++ return ret;
6526 + }
6527 + EXPORT_SYMBOL_GPL(snd_soc_put_volsw);
6528 +
6529 +@@ -421,6 +427,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
6530 + int min = mc->min;
6531 + unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
6532 + int err = 0;
6533 ++ int ret;
6534 + unsigned int val, val_mask;
6535 +
6536 + val = ucontrol->value.integer.value[0];
6537 +@@ -437,6 +444,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
6538 + err = snd_soc_component_update_bits(component, reg, val_mask, val);
6539 + if (err < 0)
6540 + return err;
6541 ++ ret = err;
6542 +
6543 + if (snd_soc_volsw_is_stereo(mc)) {
6544 + unsigned int val2;
6545 +@@ -447,6 +455,11 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
6546 +
6547 + err = snd_soc_component_update_bits(component, reg2, val_mask,
6548 + val2);
6549 ++
6550 ++ /* Don't discard any error code or drop change flag */
6551 ++ if (ret == 0 || err < 0) {
6552 ++ ret = err;
6553 ++ }
6554 + }
6555 + return err;
6556 + }
6557 +@@ -506,7 +519,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
6558 + unsigned int mask = (1 << fls(max)) - 1;
6559 + unsigned int invert = mc->invert;
6560 + unsigned int val, val_mask;
6561 +- int ret;
6562 ++ int err, ret;
6563 +
6564 + if (invert)
6565 + val = (max - ucontrol->value.integer.value[0]) & mask;
6566 +@@ -515,9 +528,10 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
6567 + val_mask = mask << shift;
6568 + val = val << shift;
6569 +
6570 +- ret = snd_soc_component_update_bits(component, reg, val_mask, val);
6571 +- if (ret < 0)
6572 +- return ret;
6573 ++ err = snd_soc_component_update_bits(component, reg, val_mask, val);
6574 ++ if (err < 0)
6575 ++ return err;
6576 ++ ret = err;
6577 +
6578 + if (snd_soc_volsw_is_stereo(mc)) {
6579 + if (invert)
6580 +@@ -527,8 +541,12 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
6581 + val_mask = mask << shift;
6582 + val = val << shift;
6583 +
6584 +- ret = snd_soc_component_update_bits(component, rreg, val_mask,
6585 ++ err = snd_soc_component_update_bits(component, rreg, val_mask,
6586 + val);
6587 ++ /* Don't discard any error code or drop change flag */
6588 ++ if (ret == 0 || err < 0) {
6589 ++ ret = err;
6590 ++ }
6591 + }
6592 +
6593 + return ret;
6594 +@@ -877,6 +895,7 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
6595 + unsigned long mask = (1UL<<mc->nbits)-1;
6596 + long max = mc->max;
6597 + long val = ucontrol->value.integer.value[0];
6598 ++ int ret = 0;
6599 + unsigned int i;
6600 +
6601 + if (val < mc->min || val > mc->max)
6602 +@@ -891,9 +910,11 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
6603 + regmask, regval);
6604 + if (err < 0)
6605 + return err;
6606 ++ if (err > 0)
6607 ++ ret = err;
6608 + }
6609 +
6610 +- return 0;
6611 ++ return ret;
6612 + }
6613 + EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx);
6614 +
6615 +diff --git a/sound/usb/implicit.c b/sound/usb/implicit.c
6616 +index 70319c822c10b..2d444ec742029 100644
6617 +--- a/sound/usb/implicit.c
6618 ++++ b/sound/usb/implicit.c
6619 +@@ -47,13 +47,13 @@ struct snd_usb_implicit_fb_match {
6620 + static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
6621 + /* Generic matching */
6622 + IMPLICIT_FB_GENERIC_DEV(0x0499, 0x1509), /* Steinberg UR22 */
6623 +- IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2080), /* M-Audio FastTrack Ultra */
6624 +- IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2081), /* M-Audio FastTrack Ultra */
6625 + IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2030), /* M-Audio Fast Track C400 */
6626 + IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2031), /* M-Audio Fast Track C600 */
6627 +
6628 + /* Fixed EP */
6629 + /* FIXME: check the availability of generic matching */
6630 ++ IMPLICIT_FB_FIXED_DEV(0x0763, 0x2080, 0x81, 2), /* M-Audio FastTrack Ultra */
6631 ++ IMPLICIT_FB_FIXED_DEV(0x0763, 0x2081, 0x81, 2), /* M-Audio FastTrack Ultra */
6632 + IMPLICIT_FB_FIXED_DEV(0x2466, 0x8010, 0x81, 2), /* Fractal Audio Axe-Fx III */
6633 + IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0001, 0x81, 2), /* Solid State Logic SSL2 */
6634 + IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0002, 0x81, 2), /* Solid State Logic SSL2+ */
6635 +diff --git a/tools/lib/subcmd/subcmd-util.h b/tools/lib/subcmd/subcmd-util.h
6636 +index 794a375dad360..b2aec04fce8f6 100644
6637 +--- a/tools/lib/subcmd/subcmd-util.h
6638 ++++ b/tools/lib/subcmd/subcmd-util.h
6639 +@@ -50,15 +50,8 @@ static NORETURN inline void die(const char *err, ...)
6640 + static inline void *xrealloc(void *ptr, size_t size)
6641 + {
6642 + void *ret = realloc(ptr, size);
6643 +- if (!ret && !size)
6644 +- ret = realloc(ptr, 1);
6645 +- if (!ret) {
6646 +- ret = realloc(ptr, size);
6647 +- if (!ret && !size)
6648 +- ret = realloc(ptr, 1);
6649 +- if (!ret)
6650 +- die("Out of memory, realloc failed");
6651 +- }
6652 ++ if (!ret)
6653 ++ die("Out of memory, realloc failed");
6654 + return ret;
6655 + }
6656 +
6657 +diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
6658 +index fbb3c4057c302..71710a1da4472 100644
6659 +--- a/tools/perf/util/bpf-loader.c
6660 ++++ b/tools/perf/util/bpf-loader.c
6661 +@@ -1214,9 +1214,10 @@ bpf__obj_config_map(struct bpf_object *obj,
6662 + pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
6663 + err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
6664 + out:
6665 +- free(map_name);
6666 + if (!err)
6667 + *key_scan_pos += strlen(map_opt);
6668 ++
6669 ++ free(map_name);
6670 + return err;
6671 + }
6672 +
6673 +diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
6674 +index 2c6f916ccbafa..0874e512d109b 100644
6675 +--- a/tools/testing/kunit/kunit_kernel.py
6676 ++++ b/tools/testing/kunit/kunit_kernel.py
6677 +@@ -6,6 +6,7 @@
6678 + # Author: Felix Guo <felixguoxiuping@×××××.com>
6679 + # Author: Brendan Higgins <brendanhiggins@××××××.com>
6680 +
6681 ++import importlib.abc
6682 + import importlib.util
6683 + import logging
6684 + import subprocess
6685 +diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c
6686 +index 076cf4325f783..cd4582129c7d6 100644
6687 +--- a/tools/testing/selftests/clone3/clone3.c
6688 ++++ b/tools/testing/selftests/clone3/clone3.c
6689 +@@ -126,8 +126,6 @@ static void test_clone3(uint64_t flags, size_t size, int expected,
6690 +
6691 + int main(int argc, char *argv[])
6692 + {
6693 +- pid_t pid;
6694 +-
6695 + uid_t uid = getuid();
6696 +
6697 + ksft_print_header();
6698 +diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
6699 +index 12c5e27d32c16..2d7fca446c7f7 100644
6700 +--- a/tools/testing/selftests/exec/Makefile
6701 ++++ b/tools/testing/selftests/exec/Makefile
6702 +@@ -3,8 +3,8 @@ CFLAGS = -Wall
6703 + CFLAGS += -Wno-nonnull
6704 + CFLAGS += -D_GNU_SOURCE
6705 +
6706 +-TEST_PROGS := binfmt_script non-regular
6707 +-TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216
6708 ++TEST_PROGS := binfmt_script
6709 ++TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular
6710 + TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir
6711 + # Makefile is a run-time dependency, since it's accessed by the execveat test
6712 + TEST_FILES := Makefile
6713 +diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
6714 +index 79a182cfa43ad..78e59620d28de 100644
6715 +--- a/tools/testing/selftests/kselftest_harness.h
6716 ++++ b/tools/testing/selftests/kselftest_harness.h
6717 +@@ -875,7 +875,8 @@ static void __timeout_handler(int sig, siginfo_t *info, void *ucontext)
6718 + }
6719 +
6720 + t->timed_out = true;
6721 +- kill(t->pid, SIGKILL);
6722 ++ // signal process group
6723 ++ kill(-(t->pid), SIGKILL);
6724 + }
6725 +
6726 + void __wait_for_test(struct __test_metadata *t)
6727 +@@ -985,6 +986,7 @@ void __run_test(struct __fixture_metadata *f,
6728 + ksft_print_msg("ERROR SPAWNING TEST CHILD\n");
6729 + t->passed = 0;
6730 + } else if (t->pid == 0) {
6731 ++ setpgrp();
6732 + t->fn(t, variant);
6733 + if (t->skip)
6734 + _exit(255);
6735 +diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c
6736 +index e54106643337b..4c88238fc8f05 100644
6737 +--- a/tools/testing/selftests/mincore/mincore_selftest.c
6738 ++++ b/tools/testing/selftests/mincore/mincore_selftest.c
6739 +@@ -207,15 +207,21 @@ TEST(check_file_mmap)
6740 +
6741 + errno = 0;
6742 + fd = open(".", O_TMPFILE | O_RDWR, 0600);
6743 +- ASSERT_NE(-1, fd) {
6744 +- TH_LOG("Can't create temporary file: %s",
6745 +- strerror(errno));
6746 ++ if (fd < 0) {
6747 ++ ASSERT_EQ(errno, EOPNOTSUPP) {
6748 ++ TH_LOG("Can't create temporary file: %s",
6749 ++ strerror(errno));
6750 ++ }
6751 ++ SKIP(goto out_free, "O_TMPFILE not supported by filesystem.");
6752 + }
6753 + errno = 0;
6754 + retval = fallocate(fd, 0, 0, FILE_SIZE);
6755 +- ASSERT_EQ(0, retval) {
6756 +- TH_LOG("Error allocating space for the temporary file: %s",
6757 +- strerror(errno));
6758 ++ if (retval) {
6759 ++ ASSERT_EQ(errno, EOPNOTSUPP) {
6760 ++ TH_LOG("Error allocating space for the temporary file: %s",
6761 ++ strerror(errno));
6762 ++ }
6763 ++ SKIP(goto out_close, "fallocate not supported by filesystem.");
6764 + }
6765 +
6766 + /*
6767 +@@ -271,7 +277,9 @@ TEST(check_file_mmap)
6768 + }
6769 +
6770 + munmap(addr, FILE_SIZE);
6771 ++out_close:
6772 + close(fd);
6773 ++out_free:
6774 + free(vec);
6775 + }
6776 +
6777 +diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
6778 +index f31205f04ee05..8c5fea68ae677 100644
6779 +--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
6780 ++++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
6781 +@@ -1236,7 +1236,7 @@ static int get_userns_fd(unsigned long nsid, unsigned long hostid, unsigned long
6782 + }
6783 +
6784 + /**
6785 +- * Validate that an attached mount in our mount namespace can be idmapped.
6786 ++ * Validate that an attached mount in our mount namespace cannot be idmapped.
6787 + * (The kernel enforces that the mount's mount namespace and the caller's mount
6788 + * namespace match.)
6789 + */
6790 +@@ -1259,7 +1259,7 @@ TEST_F(mount_setattr_idmapped, attached_mount_inside_current_mount_namespace)
6791 +
6792 + attr.userns_fd = get_userns_fd(0, 10000, 10000);
6793 + ASSERT_GE(attr.userns_fd, 0);
6794 +- ASSERT_EQ(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
6795 ++ ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
6796 + ASSERT_EQ(close(attr.userns_fd), 0);
6797 + ASSERT_EQ(close(open_tree_fd), 0);
6798 + }
6799 +diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
6800 +index 9313fa32bef13..b5eef5ffb58e5 100755
6801 +--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
6802 ++++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
6803 +@@ -1583,4 +1583,4 @@ for name in ${TESTS}; do
6804 + done
6805 + done
6806 +
6807 +-[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP}
6808 ++[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP} || exit 0
6809 +diff --git a/tools/testing/selftests/netfilter/nft_fib.sh b/tools/testing/selftests/netfilter/nft_fib.sh
6810 +index 6caf6ac8c285f..695a1958723f5 100755
6811 +--- a/tools/testing/selftests/netfilter/nft_fib.sh
6812 ++++ b/tools/testing/selftests/netfilter/nft_fib.sh
6813 +@@ -174,6 +174,7 @@ test_ping() {
6814 + ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
6815 + ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
6816 + ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
6817 ++ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.rp_filter=0 > /dev/null
6818 +
6819 + sleep 3
6820 +
6821 +diff --git a/tools/testing/selftests/openat2/Makefile b/tools/testing/selftests/openat2/Makefile
6822 +index 4b93b1417b862..843ba56d8e49e 100644
6823 +--- a/tools/testing/selftests/openat2/Makefile
6824 ++++ b/tools/testing/selftests/openat2/Makefile
6825 +@@ -5,4 +5,4 @@ TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test
6826 +
6827 + include ../lib.mk
6828 +
6829 +-$(TEST_GEN_PROGS): helpers.c
6830 ++$(TEST_GEN_PROGS): helpers.c helpers.h
6831 +diff --git a/tools/testing/selftests/openat2/helpers.h b/tools/testing/selftests/openat2/helpers.h
6832 +index a6ea27344db2d..7056340b9339e 100644
6833 +--- a/tools/testing/selftests/openat2/helpers.h
6834 ++++ b/tools/testing/selftests/openat2/helpers.h
6835 +@@ -9,6 +9,7 @@
6836 +
6837 + #define _GNU_SOURCE
6838 + #include <stdint.h>
6839 ++#include <stdbool.h>
6840 + #include <errno.h>
6841 + #include <linux/types.h>
6842 + #include "../kselftest.h"
6843 +@@ -62,11 +63,12 @@ bool needs_openat2(const struct open_how *how);
6844 + (similar to chroot(2)). */
6845 + #endif /* RESOLVE_IN_ROOT */
6846 +
6847 +-#define E_func(func, ...) \
6848 +- do { \
6849 +- if (func(__VA_ARGS__) < 0) \
6850 +- ksft_exit_fail_msg("%s:%d %s failed\n", \
6851 +- __FILE__, __LINE__, #func);\
6852 ++#define E_func(func, ...) \
6853 ++ do { \
6854 ++ errno = 0; \
6855 ++ if (func(__VA_ARGS__) < 0) \
6856 ++ ksft_exit_fail_msg("%s:%d %s failed - errno:%d\n", \
6857 ++ __FILE__, __LINE__, #func, errno); \
6858 + } while (0)
6859 +
6860 + #define E_asprintf(...) E_func(asprintf, __VA_ARGS__)
6861 +diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
6862 +index 1bddbe934204c..7fb902099de45 100644
6863 +--- a/tools/testing/selftests/openat2/openat2_test.c
6864 ++++ b/tools/testing/selftests/openat2/openat2_test.c
6865 +@@ -259,6 +259,16 @@ void test_openat2_flags(void)
6866 + unlink(path);
6867 +
6868 + fd = sys_openat2(AT_FDCWD, path, &test->how);
6869 ++ if (fd < 0 && fd == -EOPNOTSUPP) {
6870 ++ /*
6871 ++ * Skip the testcase if it failed because not supported
6872 ++ * by FS. (e.g. a valid O_TMPFILE combination on NFS)
6873 ++ */
6874 ++ ksft_test_result_skip("openat2 with %s fails with %d (%s)\n",
6875 ++ test->name, fd, strerror(-fd));
6876 ++ goto next;
6877 ++ }
6878 ++
6879 + if (test->err >= 0)
6880 + failed = (fd < 0);
6881 + else
6882 +@@ -303,7 +313,7 @@ skip:
6883 + else
6884 + resultfn("openat2 with %s fails with %d (%s)\n",
6885 + test->name, test->err, strerror(-test->err));
6886 +-
6887 ++next:
6888 + free(fdpath);
6889 + fflush(stdout);
6890 + }
6891 +diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h
6892 +index 01f8d3c0cf2cb..6922d6417e1cf 100644
6893 +--- a/tools/testing/selftests/pidfd/pidfd.h
6894 ++++ b/tools/testing/selftests/pidfd/pidfd.h
6895 +@@ -68,7 +68,7 @@
6896 + #define PIDFD_SKIP 3
6897 + #define PIDFD_XFAIL 4
6898 +
6899 +-int wait_for_pid(pid_t pid)
6900 ++static inline int wait_for_pid(pid_t pid)
6901 + {
6902 + int status, ret;
6903 +
6904 +@@ -78,13 +78,20 @@ again:
6905 + if (errno == EINTR)
6906 + goto again;
6907 +
6908 ++ ksft_print_msg("waitpid returned -1, errno=%d\n", errno);
6909 + return -1;
6910 + }
6911 +
6912 +- if (!WIFEXITED(status))
6913 ++ if (!WIFEXITED(status)) {
6914 ++ ksft_print_msg(
6915 ++ "waitpid !WIFEXITED, WIFSIGNALED=%d, WTERMSIG=%d\n",
6916 ++ WIFSIGNALED(status), WTERMSIG(status));
6917 + return -1;
6918 ++ }
6919 +
6920 +- return WEXITSTATUS(status);
6921 ++ ret = WEXITSTATUS(status);
6922 ++ ksft_print_msg("waitpid WEXITSTATUS=%d\n", ret);
6923 ++ return ret;
6924 + }
6925 +
6926 + static inline int sys_pidfd_open(pid_t pid, unsigned int flags)
6927 +diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
6928 +index 22558524f71c3..3fd8e903118f5 100644
6929 +--- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
6930 ++++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
6931 +@@ -12,6 +12,7 @@
6932 + #include <string.h>
6933 + #include <syscall.h>
6934 + #include <sys/wait.h>
6935 ++#include <sys/mman.h>
6936 +
6937 + #include "pidfd.h"
6938 + #include "../kselftest.h"
6939 +@@ -80,7 +81,10 @@ static inline int error_check(struct error *err, const char *test_name)
6940 + return err->code;
6941 + }
6942 +
6943 ++#define CHILD_STACK_SIZE 8192
6944 ++
6945 + struct child {
6946 ++ char *stack;
6947 + pid_t pid;
6948 + int fd;
6949 + };
6950 +@@ -89,17 +93,22 @@ static struct child clone_newns(int (*fn)(void *), void *args,
6951 + struct error *err)
6952 + {
6953 + static int flags = CLONE_PIDFD | CLONE_NEWPID | CLONE_NEWNS | SIGCHLD;
6954 +- size_t stack_size = 1024;
6955 +- char *stack[1024] = { 0 };
6956 + struct child ret;
6957 +
6958 + if (!(flags & CLONE_NEWUSER) && geteuid() != 0)
6959 + flags |= CLONE_NEWUSER;
6960 +
6961 ++ ret.stack = mmap(NULL, CHILD_STACK_SIZE, PROT_READ | PROT_WRITE,
6962 ++ MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
6963 ++ if (ret.stack == MAP_FAILED) {
6964 ++ error_set(err, -1, "mmap of stack failed (errno %d)", errno);
6965 ++ return ret;
6966 ++ }
6967 ++
6968 + #ifdef __ia64__
6969 +- ret.pid = __clone2(fn, stack, stack_size, flags, args, &ret.fd);
6970 ++ ret.pid = __clone2(fn, ret.stack, CHILD_STACK_SIZE, flags, args, &ret.fd);
6971 + #else
6972 +- ret.pid = clone(fn, stack + stack_size, flags, args, &ret.fd);
6973 ++ ret.pid = clone(fn, ret.stack + CHILD_STACK_SIZE, flags, args, &ret.fd);
6974 + #endif
6975 +
6976 + if (ret.pid < 0) {
6977 +@@ -129,6 +138,11 @@ static inline int child_join(struct child *child, struct error *err)
6978 + else if (r > 0)
6979 + error_set(err, r, "child %d reported: %d", child->pid, r);
6980 +
6981 ++ if (munmap(child->stack, CHILD_STACK_SIZE)) {
6982 ++ error_set(err, -1, "munmap of child stack failed (errno %d)", errno);
6983 ++ r = -1;
6984 ++ }
6985 ++
6986 + return r;
6987 + }
6988 +
6989 +diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
6990 +index 529eb700ac26a..9a2d64901d591 100644
6991 +--- a/tools/testing/selftests/pidfd/pidfd_test.c
6992 ++++ b/tools/testing/selftests/pidfd/pidfd_test.c
6993 +@@ -441,7 +441,6 @@ static void test_pidfd_poll_exec(int use_waitpid)
6994 + {
6995 + int pid, pidfd = 0;
6996 + int status, ret;
6997 +- pthread_t t1;
6998 + time_t prog_start = time(NULL);
6999 + const char *test_name = "pidfd_poll check for premature notification on child thread exec";
7000 +
7001 +@@ -500,13 +499,14 @@ static int child_poll_leader_exit_test(void *args)
7002 + */
7003 + *child_exit_secs = time(NULL);
7004 + syscall(SYS_exit, 0);
7005 ++ /* Never reached, but appeases compiler thinking we should return. */
7006 ++ exit(0);
7007 + }
7008 +
7009 + static void test_pidfd_poll_leader_exit(int use_waitpid)
7010 + {
7011 + int pid, pidfd = 0;
7012 +- int status, ret;
7013 +- time_t prog_start = time(NULL);
7014 ++ int status, ret = 0;
7015 + const char *test_name = "pidfd_poll check for premature notification on non-empty"
7016 + "group leader exit";
7017 +
7018 +diff --git a/tools/testing/selftests/pidfd/pidfd_wait.c b/tools/testing/selftests/pidfd/pidfd_wait.c
7019 +index be2943f072f60..17999e082aa71 100644
7020 +--- a/tools/testing/selftests/pidfd/pidfd_wait.c
7021 ++++ b/tools/testing/selftests/pidfd/pidfd_wait.c
7022 +@@ -39,7 +39,7 @@ static int sys_waitid(int which, pid_t pid, siginfo_t *info, int options,
7023 +
7024 + TEST(wait_simple)
7025 + {
7026 +- int pidfd = -1, status = 0;
7027 ++ int pidfd = -1;
7028 + pid_t parent_tid = -1;
7029 + struct clone_args args = {
7030 + .parent_tid = ptr_to_u64(&parent_tid),
7031 +@@ -47,7 +47,6 @@ TEST(wait_simple)
7032 + .flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
7033 + .exit_signal = SIGCHLD,
7034 + };
7035 +- int ret;
7036 + pid_t pid;
7037 + siginfo_t info = {
7038 + .si_signo = 0,
7039 +@@ -88,7 +87,7 @@ TEST(wait_simple)
7040 +
7041 + TEST(wait_states)
7042 + {
7043 +- int pidfd = -1, status = 0;
7044 ++ int pidfd = -1;
7045 + pid_t parent_tid = -1;
7046 + struct clone_args args = {
7047 + .parent_tid = ptr_to_u64(&parent_tid),
7048 +diff --git a/tools/testing/selftests/rtc/settings b/tools/testing/selftests/rtc/settings
7049 +index ba4d85f74cd6b..a953c96aa16e1 100644
7050 +--- a/tools/testing/selftests/rtc/settings
7051 ++++ b/tools/testing/selftests/rtc/settings
7052 +@@ -1 +1 @@
7053 +-timeout=90
7054 ++timeout=180
7055 +diff --git a/tools/testing/selftests/vDSO/vdso_test_abi.c b/tools/testing/selftests/vDSO/vdso_test_abi.c
7056 +index 3d603f1394af4..883ca85424bc5 100644
7057 +--- a/tools/testing/selftests/vDSO/vdso_test_abi.c
7058 ++++ b/tools/testing/selftests/vDSO/vdso_test_abi.c
7059 +@@ -33,110 +33,114 @@ typedef long (*vdso_clock_gettime_t)(clockid_t clk_id, struct timespec *ts);
7060 + typedef long (*vdso_clock_getres_t)(clockid_t clk_id, struct timespec *ts);
7061 + typedef time_t (*vdso_time_t)(time_t *t);
7062 +
7063 +-static int vdso_test_gettimeofday(void)
7064 ++#define VDSO_TEST_PASS_MSG() "\n%s(): PASS\n", __func__
7065 ++#define VDSO_TEST_FAIL_MSG(x) "\n%s(): %s FAIL\n", __func__, x
7066 ++#define VDSO_TEST_SKIP_MSG(x) "\n%s(): SKIP: Could not find %s\n", __func__, x
7067 ++
7068 ++static void vdso_test_gettimeofday(void)
7069 + {
7070 + /* Find gettimeofday. */
7071 + vdso_gettimeofday_t vdso_gettimeofday =
7072 + (vdso_gettimeofday_t)vdso_sym(version, name[0]);
7073 +
7074 + if (!vdso_gettimeofday) {
7075 +- printf("Could not find %s\n", name[0]);
7076 +- return KSFT_SKIP;
7077 ++ ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[0]));
7078 ++ return;
7079 + }
7080 +
7081 + struct timeval tv;
7082 + long ret = vdso_gettimeofday(&tv, 0);
7083 +
7084 + if (ret == 0) {
7085 +- printf("The time is %lld.%06lld\n",
7086 +- (long long)tv.tv_sec, (long long)tv.tv_usec);
7087 ++ ksft_print_msg("The time is %lld.%06lld\n",
7088 ++ (long long)tv.tv_sec, (long long)tv.tv_usec);
7089 ++ ksft_test_result_pass(VDSO_TEST_PASS_MSG());
7090 + } else {
7091 +- printf("%s failed\n", name[0]);
7092 +- return KSFT_FAIL;
7093 ++ ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[0]));
7094 + }
7095 +-
7096 +- return KSFT_PASS;
7097 + }
7098 +
7099 +-static int vdso_test_clock_gettime(clockid_t clk_id)
7100 ++static void vdso_test_clock_gettime(clockid_t clk_id)
7101 + {
7102 + /* Find clock_gettime. */
7103 + vdso_clock_gettime_t vdso_clock_gettime =
7104 + (vdso_clock_gettime_t)vdso_sym(version, name[1]);
7105 +
7106 + if (!vdso_clock_gettime) {
7107 +- printf("Could not find %s\n", name[1]);
7108 +- return KSFT_SKIP;
7109 ++ ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[1]));
7110 ++ return;
7111 + }
7112 +
7113 + struct timespec ts;
7114 + long ret = vdso_clock_gettime(clk_id, &ts);
7115 +
7116 + if (ret == 0) {
7117 +- printf("The time is %lld.%06lld\n",
7118 +- (long long)ts.tv_sec, (long long)ts.tv_nsec);
7119 ++ ksft_print_msg("The time is %lld.%06lld\n",
7120 ++ (long long)ts.tv_sec, (long long)ts.tv_nsec);
7121 ++ ksft_test_result_pass(VDSO_TEST_PASS_MSG());
7122 + } else {
7123 +- printf("%s failed\n", name[1]);
7124 +- return KSFT_FAIL;
7125 ++ ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[1]));
7126 + }
7127 +-
7128 +- return KSFT_PASS;
7129 + }
7130 +
7131 +-static int vdso_test_time(void)
7132 ++static void vdso_test_time(void)
7133 + {
7134 + /* Find time. */
7135 + vdso_time_t vdso_time =
7136 + (vdso_time_t)vdso_sym(version, name[2]);
7137 +
7138 + if (!vdso_time) {
7139 +- printf("Could not find %s\n", name[2]);
7140 +- return KSFT_SKIP;
7141 ++ ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[2]));
7142 ++ return;
7143 + }
7144 +
7145 + long ret = vdso_time(NULL);
7146 +
7147 + if (ret > 0) {
7148 +- printf("The time in hours since January 1, 1970 is %lld\n",
7149 ++ ksft_print_msg("The time in hours since January 1, 1970 is %lld\n",
7150 + (long long)(ret / 3600));
7151 ++ ksft_test_result_pass(VDSO_TEST_PASS_MSG());
7152 + } else {
7153 +- printf("%s failed\n", name[2]);
7154 +- return KSFT_FAIL;
7155 ++ ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[2]));
7156 + }
7157 +-
7158 +- return KSFT_PASS;
7159 + }
7160 +
7161 +-static int vdso_test_clock_getres(clockid_t clk_id)
7162 ++static void vdso_test_clock_getres(clockid_t clk_id)
7163 + {
7164 ++ int clock_getres_fail = 0;
7165 ++
7166 + /* Find clock_getres. */
7167 + vdso_clock_getres_t vdso_clock_getres =
7168 + (vdso_clock_getres_t)vdso_sym(version, name[3]);
7169 +
7170 + if (!vdso_clock_getres) {
7171 +- printf("Could not find %s\n", name[3]);
7172 +- return KSFT_SKIP;
7173 ++ ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[3]));
7174 ++ return;
7175 + }
7176 +
7177 + struct timespec ts, sys_ts;
7178 + long ret = vdso_clock_getres(clk_id, &ts);
7179 +
7180 + if (ret == 0) {
7181 +- printf("The resolution is %lld %lld\n",
7182 +- (long long)ts.tv_sec, (long long)ts.tv_nsec);
7183 ++ ksft_print_msg("The vdso resolution is %lld %lld\n",
7184 ++ (long long)ts.tv_sec, (long long)ts.tv_nsec);
7185 + } else {
7186 +- printf("%s failed\n", name[3]);
7187 +- return KSFT_FAIL;
7188 ++ clock_getres_fail++;
7189 + }
7190 +
7191 + ret = syscall(SYS_clock_getres, clk_id, &sys_ts);
7192 +
7193 +- if ((sys_ts.tv_sec != ts.tv_sec) || (sys_ts.tv_nsec != ts.tv_nsec)) {
7194 +- printf("%s failed\n", name[3]);
7195 +- return KSFT_FAIL;
7196 +- }
7197 ++ ksft_print_msg("The syscall resolution is %lld %lld\n",
7198 ++ (long long)sys_ts.tv_sec, (long long)sys_ts.tv_nsec);
7199 +
7200 +- return KSFT_PASS;
7201 ++ if ((sys_ts.tv_sec != ts.tv_sec) || (sys_ts.tv_nsec != ts.tv_nsec))
7202 ++ clock_getres_fail++;
7203 ++
7204 ++ if (clock_getres_fail > 0) {
7205 ++ ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[3]));
7206 ++ } else {
7207 ++ ksft_test_result_pass(VDSO_TEST_PASS_MSG());
7208 ++ }
7209 + }
7210 +
7211 + const char *vdso_clock_name[12] = {
7212 +@@ -158,36 +162,23 @@ const char *vdso_clock_name[12] = {
7213 + * This function calls vdso_test_clock_gettime and vdso_test_clock_getres
7214 + * with different values for clock_id.
7215 + */
7216 +-static inline int vdso_test_clock(clockid_t clock_id)
7217 ++static inline void vdso_test_clock(clockid_t clock_id)
7218 + {
7219 +- int ret0, ret1;
7220 +-
7221 +- ret0 = vdso_test_clock_gettime(clock_id);
7222 +- /* A skipped test is considered passed */
7223 +- if (ret0 == KSFT_SKIP)
7224 +- ret0 = KSFT_PASS;
7225 +-
7226 +- ret1 = vdso_test_clock_getres(clock_id);
7227 +- /* A skipped test is considered passed */
7228 +- if (ret1 == KSFT_SKIP)
7229 +- ret1 = KSFT_PASS;
7230 ++ ksft_print_msg("\nclock_id: %s\n", vdso_clock_name[clock_id]);
7231 +
7232 +- ret0 += ret1;
7233 ++ vdso_test_clock_gettime(clock_id);
7234 +
7235 +- printf("clock_id: %s", vdso_clock_name[clock_id]);
7236 +-
7237 +- if (ret0 > 0)
7238 +- printf(" [FAIL]\n");
7239 +- else
7240 +- printf(" [PASS]\n");
7241 +-
7242 +- return ret0;
7243 ++ vdso_test_clock_getres(clock_id);
7244 + }
7245 +
7246 ++#define VDSO_TEST_PLAN 16
7247 ++
7248 + int main(int argc, char **argv)
7249 + {
7250 + unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
7251 +- int ret;
7252 ++
7253 ++ ksft_print_header();
7254 ++ ksft_set_plan(VDSO_TEST_PLAN);
7255 +
7256 + if (!sysinfo_ehdr) {
7257 + printf("AT_SYSINFO_EHDR is not present!\n");
7258 +@@ -201,44 +192,42 @@ int main(int argc, char **argv)
7259 +
7260 + vdso_init_from_sysinfo_ehdr(getauxval(AT_SYSINFO_EHDR));
7261 +
7262 +- ret = vdso_test_gettimeofday();
7263 ++ vdso_test_gettimeofday();
7264 +
7265 + #if _POSIX_TIMERS > 0
7266 +
7267 + #ifdef CLOCK_REALTIME
7268 +- ret += vdso_test_clock(CLOCK_REALTIME);
7269 ++ vdso_test_clock(CLOCK_REALTIME);
7270 + #endif
7271 +
7272 + #ifdef CLOCK_BOOTTIME
7273 +- ret += vdso_test_clock(CLOCK_BOOTTIME);
7274 ++ vdso_test_clock(CLOCK_BOOTTIME);
7275 + #endif
7276 +
7277 + #ifdef CLOCK_TAI
7278 +- ret += vdso_test_clock(CLOCK_TAI);
7279 ++ vdso_test_clock(CLOCK_TAI);
7280 + #endif
7281 +
7282 + #ifdef CLOCK_REALTIME_COARSE
7283 +- ret += vdso_test_clock(CLOCK_REALTIME_COARSE);
7284 ++ vdso_test_clock(CLOCK_REALTIME_COARSE);
7285 + #endif
7286 +
7287 + #ifdef CLOCK_MONOTONIC
7288 +- ret += vdso_test_clock(CLOCK_MONOTONIC);
7289 ++ vdso_test_clock(CLOCK_MONOTONIC);
7290 + #endif
7291 +
7292 + #ifdef CLOCK_MONOTONIC_RAW
7293 +- ret += vdso_test_clock(CLOCK_MONOTONIC_RAW);
7294 ++ vdso_test_clock(CLOCK_MONOTONIC_RAW);
7295 + #endif
7296 +
7297 + #ifdef CLOCK_MONOTONIC_COARSE
7298 +- ret += vdso_test_clock(CLOCK_MONOTONIC_COARSE);
7299 ++ vdso_test_clock(CLOCK_MONOTONIC_COARSE);
7300 + #endif
7301 +
7302 + #endif
7303 +
7304 +- ret += vdso_test_time();
7305 +-
7306 +- if (ret > 0)
7307 +- return KSFT_FAIL;
7308 ++ vdso_test_time();
7309 +
7310 +- return KSFT_PASS;
7311 ++ ksft_print_cnts();
7312 ++ return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
7313 + }
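
The hunks above convert vdso_test.c from ad-hoc KSFT return codes to the kselftest counting API: each probe reports its own result via ksft_test_result_pass/fail/skip, and main() declares a plan up front. The plan of 16 matches the results actually reported (gettimeofday + time + 7 clock ids x gettime/getres). A minimal standalone sketch of that pattern, assuming the usual tools/testing/selftests include layout:

/* Minimal sketch of the kselftest counting pattern used above.
 * Assumes this file sits one directory below kselftest.h, as the
 * in-tree selftests do. */
#include "../kselftest.h"

static void check_feature(int present)
{
	if (!present) {
		ksft_test_result_skip("feature not present\n");
		return;
	}
	ksft_test_result_pass("feature works\n");
}

int main(void)
{
	ksft_print_header();
	ksft_set_plan(1);	/* must equal the number of results reported */

	check_feature(1);

	ksft_print_cnts();
	return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}

If the number of reported results does not match the plan, the TAP output flags a plan mismatch, which is why VDSO_TEST_PLAN is a fixed constant above.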
7314 +diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
7315 +index 232e958ec4547..b0b91d9b0dc21 100755
7316 +--- a/tools/testing/selftests/zram/zram.sh
7317 ++++ b/tools/testing/selftests/zram/zram.sh
7318 +@@ -2,9 +2,6 @@
7319 + # SPDX-License-Identifier: GPL-2.0
7320 + TCID="zram.sh"
7321 +
7322 +-# Kselftest framework requirement - SKIP code is 4.
7323 +-ksft_skip=4
7324 +-
7325 + . ./zram_lib.sh
7326 +
7327 + run_zram () {
7328 +@@ -18,14 +15,4 @@ echo ""
7329 +
7330 + check_prereqs
7331 +
7332 +-# check zram module exists
7333 +-MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko
7334 +-if [ -f $MODULE_PATH ]; then
7335 +- run_zram
7336 +-elif [ -b /dev/zram0 ]; then
7337 +- run_zram
7338 +-else
7339 +- echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
7340 +- echo "$TCID : CONFIG_ZRAM is not set"
7341 +- exit $ksft_skip
7342 +-fi
7343 ++run_zram
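
With the module probing removed, zram.sh defers all device setup to zram_lib.sh, which now prefers the /sys/class/zram-control interface when it exists: reading hot_add allocates a fresh device and returns its id, and writing that id to hot_remove tears it down again. A rough C illustration of that interface (standard sysfs paths, minimal error handling; not part of the patch):

/* Rough sketch: allocate one zram device via zram-control.
 * Reading hot_add returns the id of the newly created device. */
#include <stdio.h>

int main(void)
{
	int id;
	FILE *f = fopen("/sys/class/zram-control/hot_add", "r");

	if (!f || fscanf(f, "%d", &id) != 1) {
		perror("hot_add");
		return 1;
	}
	fclose(f);
	printf("created /dev/zram%d\n", id);
	/* Cleanup would write the id back to
	 * /sys/class/zram-control/hot_remove. */
	return 0;
}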
7344 +diff --git a/tools/testing/selftests/zram/zram01.sh b/tools/testing/selftests/zram/zram01.sh
7345 +index 114863d9fb876..8f4affe34f3e4 100755
7346 +--- a/tools/testing/selftests/zram/zram01.sh
7347 ++++ b/tools/testing/selftests/zram/zram01.sh
7348 +@@ -33,9 +33,7 @@ zram_algs="lzo"
7349 +
7350 + zram_fill_fs()
7351 + {
7352 +- local mem_free0=$(free -m | awk 'NR==2 {print $4}')
7353 +-
7354 +- for i in $(seq 0 $(($dev_num - 1))); do
7355 ++ for i in $(seq $dev_start $dev_end); do
7356 + echo "fill zram$i..."
7357 + local b=0
7358 + while [ true ]; do
7359 +@@ -45,29 +43,17 @@ zram_fill_fs()
7360 + b=$(($b + 1))
7361 + done
7362 + echo "zram$i can be filled with '$b' KB"
7363 +- done
7364 +
7365 +- local mem_free1=$(free -m | awk 'NR==2 {print $4}')
7366 +- local used_mem=$(($mem_free0 - $mem_free1))
7367 ++ local mem_used_total=`awk '{print $3}' "/sys/block/zram$i/mm_stat"`
7368 ++ local v=$((100 * 1024 * $b / $mem_used_total))
7369 ++ if [ "$v" -lt 100 ]; then
7370 ++ echo "FAIL compression ratio: 0.$v:1"
7371 ++ ERR_CODE=-1
7372 ++ return
7373 ++ fi
7374 +
7375 +- local total_size=0
7376 +- for sm in $zram_sizes; do
7377 +- local s=$(echo $sm | sed 's/M//')
7378 +- total_size=$(($total_size + $s))
7379 ++ echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK"
7380 + done
7381 +-
7382 +- echo "zram used ${used_mem}M, zram disk sizes ${total_size}M"
7383 +-
7384 +- local v=$((100 * $total_size / $used_mem))
7385 +-
7386 +- if [ "$v" -lt 100 ]; then
7387 +- echo "FAIL compression ratio: 0.$v:1"
7388 +- ERR_CODE=-1
7389 +- zram_cleanup
7390 +- return
7391 +- fi
7392 +-
7393 +- echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK"
7394 + }
7395 +
7396 + check_prereqs
7397 +@@ -81,7 +67,6 @@ zram_mount
7398 +
7399 + zram_fill_fs
7400 + zram_cleanup
7401 +-zram_unload
7402 +
7403 + if [ $ERR_CODE -ne 0 ]; then
7404 + echo "$TCID : [FAIL]"
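
zram01.sh now computes the compression ratio from the per-device mm_stat file (third column, mem_used_total in bytes) rather than from free(1) deltas, so the check no longer depends on unrelated memory activity on the host. A rough C rendering of the same arithmetic, with a hypothetical device path and fill size:

/* Sketch of the ratio check: with b KiB written to the device,
 * v = 100 * 1024 * b / mem_used_total, and v < 100 means the data
 * occupied more memory compressed than it supplied (ratio below 1:1).
 * Device path and fill size here are illustrative only. */
#include <stdio.h>

int main(void)
{
	unsigned long long orig, compr, mem_used_total;
	unsigned long long b = 25600;	/* pretend 25600 KiB were written */
	FILE *f = fopen("/sys/block/zram0/mm_stat", "r");

	if (!f || fscanf(f, "%llu %llu %llu",
			 &orig, &compr, &mem_used_total) != 3) {
		perror("mm_stat");
		return 1;
	}
	fclose(f);

	unsigned long long v = 100ULL * 1024 * b / mem_used_total;
	printf("compression ratio %llu.%02llu:1 (%s)\n",
	       v / 100, v % 100, v < 100 ? "FAIL" : "OK");
	return v < 100;
}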
7405 +diff --git a/tools/testing/selftests/zram/zram02.sh b/tools/testing/selftests/zram/zram02.sh
7406 +index e83b404807c09..2418b0c4ed136 100755
7407 +--- a/tools/testing/selftests/zram/zram02.sh
7408 ++++ b/tools/testing/selftests/zram/zram02.sh
7409 +@@ -36,7 +36,6 @@ zram_set_memlimit
7410 + zram_makeswap
7411 + zram_swapoff
7412 + zram_cleanup
7413 +-zram_unload
7414 +
7415 + if [ $ERR_CODE -ne 0 ]; then
7416 + echo "$TCID : [FAIL]"
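
zram02.sh likewise drops the explicit zram_unload call; teardown is folded into zram_cleanup in the zram_lib.sh rewrite below, which also adds a kernel_gte helper so interfaces deprecated by kernel version (max_comp_streams, deprecated in 4.7) can be skipped. A minimal C equivalent of that version comparison, for illustration only:

/* Illustrative C counterpart of the kernel_gte shell helper added
 * below: compare the running kernel's major.minor to a threshold. */
#include <stdio.h>
#include <sys/utsname.h>

static int kernel_gte(int major, int minor)
{
	struct utsname u;
	int kmaj, kmin;

	if (uname(&u) != 0 || sscanf(u.release, "%d.%d", &kmaj, &kmin) != 2)
		return 0;
	return kmaj > major || (kmaj == major && kmin >= minor);
}

int main(void)
{
	/* max_comp_streams was deprecated in kernel 4.7. */
	printf("kernel >= 4.7: %s\n", kernel_gte(4, 7) ? "yes" : "no");
	return 0;
}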
7417 +diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
7418 +index 6f872f266fd11..21ec1966de76c 100755
7419 +--- a/tools/testing/selftests/zram/zram_lib.sh
7420 ++++ b/tools/testing/selftests/zram/zram_lib.sh
7421 +@@ -5,12 +5,17 @@
7422 + # Author: Alexey Kodanev <alexey.kodanev@××××××.com>
7423 + # Modified: Naresh Kamboju <naresh.kamboju@××××××.org>
7424 +
7425 +-MODULE=0
7426 + dev_makeswap=-1
7427 + dev_mounted=-1
7428 +-
7429 ++dev_start=0
7430 ++dev_end=-1
7431 ++module_load=-1
7432 ++sys_control=-1
7433 + # Kselftest framework requirement - SKIP code is 4.
7434 + ksft_skip=4
7435 ++kernel_version=`uname -r | cut -d'.' -f1,2`
7436 ++kernel_major=${kernel_version%.*}
7437 ++kernel_minor=${kernel_version#*.}
7438 +
7439 + trap INT
7440 +
7441 +@@ -25,68 +30,104 @@ check_prereqs()
7442 + fi
7443 + }
7444 +
7445 ++kernel_gte()
7446 ++{
7447 ++ major=${1%.*}
7448 ++ minor=${1#*.}
7449 ++
7450 ++ if [ $kernel_major -gt $major ]; then
7451 ++ return 0
7452 ++ elif [[ $kernel_major -eq $major && $kernel_minor -ge $minor ]]; then
7453 ++ return 0
7454 ++ fi
7455 ++
7456 ++ return 1
7457 ++}
7458 ++
7459 + zram_cleanup()
7460 + {
7461 + echo "zram cleanup"
7462 + local i=
7463 +- for i in $(seq 0 $dev_makeswap); do
7464 ++ for i in $(seq $dev_start $dev_makeswap); do
7465 + swapoff /dev/zram$i
7466 + done
7467 +
7468 +- for i in $(seq 0 $dev_mounted); do
7469 ++ for i in $(seq $dev_start $dev_mounted); do
7470 + umount /dev/zram$i
7471 + done
7472 +
7473 +- for i in $(seq 0 $(($dev_num - 1))); do
7474 ++ for i in $(seq $dev_start $dev_end); do
7475 + echo 1 > /sys/block/zram${i}/reset
7476 + rm -rf zram$i
7477 + done
7478 +
7479 +-}
7480 ++ if [ $sys_control -eq 1 ]; then
7481 ++ for i in $(seq $dev_start $dev_end); do
7482 ++ echo $i > /sys/class/zram-control/hot_remove
7483 ++ done
7484 ++ fi
7485 +
7486 +-zram_unload()
7487 +-{
7488 +- if [ $MODULE -ne 0 ] ; then
7489 +- echo "zram rmmod zram"
7490 ++ if [ $module_load -eq 1 ]; then
7491 + rmmod zram > /dev/null 2>&1
7492 + fi
7493 + }
7494 +
7495 + zram_load()
7496 + {
7497 +- # check zram module exists
7498 +- MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko
7499 +- if [ -f $MODULE_PATH ]; then
7500 +- MODULE=1
7501 +- echo "create '$dev_num' zram device(s)"
7502 +- modprobe zram num_devices=$dev_num
7503 +- if [ $? -ne 0 ]; then
7504 +- echo "failed to insert zram module"
7505 +- exit 1
7506 +- fi
7507 +-
7508 +- dev_num_created=$(ls /dev/zram* | wc -w)
7509 ++ echo "create '$dev_num' zram device(s)"
7510 ++
7511 ++ # zram module loaded, new kernel
7512 ++ if [ -d "/sys/class/zram-control" ]; then
7513 ++ echo "zram modules already loaded, kernel supports" \
7514 ++ "zram-control interface"
7515 ++ dev_start=$(ls /dev/zram* | wc -w)
7516 ++ dev_end=$(($dev_start + $dev_num - 1))
7517 ++ sys_control=1
7518 ++
7519 ++ for i in $(seq $dev_start $dev_end); do
7520 ++ cat /sys/class/zram-control/hot_add > /dev/null
7521 ++ done
7522 ++
7523 ++ echo "all zram devices (/dev/zram$dev_start~$dev_end)" \
7524 ++ "successfully created"
7525 ++ return 0
7526 ++ fi
7527 +
7528 +- if [ "$dev_num_created" -ne "$dev_num" ]; then
7529 +- echo "unexpected num of devices: $dev_num_created"
7530 +- ERR_CODE=-1
7531 ++ # detect old kernel or built-in
7532 ++ modprobe zram num_devices=$dev_num
7533 ++ if [ ! -d "/sys/class/zram-control" ]; then
7534 ++ if grep -q '^zram' /proc/modules; then
7535 ++ rmmod zram > /dev/null 2>&1
7536 ++ if [ $? -ne 0 ]; then
7537 ++ echo "zram module is being used on old kernel" \
7538 ++ "without zram-control interface"
7539 ++ exit $ksft_skip
7540 ++ fi
7541 + else
7542 +- echo "zram load module successful"
7543 ++ echo "test needs CONFIG_ZRAM=m on old kernel without" \
7544 ++ "zram-control interface"
7545 ++ exit $ksft_skip
7546 + fi
7547 +- elif [ -b /dev/zram0 ]; then
7548 +- echo "/dev/zram0 device file found: OK"
7549 +- else
7550 +- echo "ERROR: No zram.ko module or no /dev/zram0 device found"
7551 +- echo "$TCID : CONFIG_ZRAM is not set"
7552 +- exit 1
7553 ++ modprobe zram num_devices=$dev_num
7554 + fi
7555 ++
7556 ++ module_load=1
7557 ++ dev_end=$(($dev_num - 1))
7558 ++ echo "all zram devices (/dev/zram0~$dev_end) successfully created"
7559 + }
7560 +
7561 + zram_max_streams()
7562 + {
7563 + echo "set max_comp_streams to zram device(s)"
7564 +
7565 +- local i=0
7566 ++ kernel_gte 4.7
7567 ++ if [ $? -eq 0 ]; then
7568 ++ echo "The device attribute max_comp_streams was"\
7569 ++ "deprecated in 4.7"
7570 ++ return 0
7571 ++ fi
7572 ++
7573 ++ local i=$dev_start
7574 + for max_s in $zram_max_streams; do
7575 + local sys_path="/sys/block/zram${i}/max_comp_streams"
7576 + echo $max_s > $sys_path || \
7577 +@@ -98,7 +139,7 @@ zram_max_streams()
7578 + echo "FAIL can't set max_streams '$max_s', get $max_stream"
7579 +
7580 + i=$(($i + 1))
7581 +- echo "$sys_path = '$max_streams' ($i/$dev_num)"
7582 ++ echo "$sys_path = '$max_streams'"
7583 + done
7584 +
7585 + echo "zram max streams: OK"
7586 +@@ -108,15 +149,16 @@ zram_compress_alg()
7587 + {
7588 + echo "test that we can set compression algorithm"
7589 +
7590 +- local algs=$(cat /sys/block/zram0/comp_algorithm)
7591 ++ local i=$dev_start
7592 ++ local algs=$(cat /sys/block/zram${i}/comp_algorithm)
7593 + echo "supported algs: $algs"
7594 +- local i=0
7595 ++
7596 + for alg in $zram_algs; do
7597 + local sys_path="/sys/block/zram${i}/comp_algorithm"
7598 + echo "$alg" > $sys_path || \
7599 + echo "FAIL can't set '$alg' to $sys_path"
7600 + i=$(($i + 1))
7601 +- echo "$sys_path = '$alg' ($i/$dev_num)"
7602 ++ echo "$sys_path = '$alg'"
7603 + done
7604 +
7605 + echo "zram set compression algorithm: OK"
7606 +@@ -125,14 +167,14 @@ zram_compress_alg()
7607 + zram_set_disksizes()
7608 + {
7609 + echo "set disk size to zram device(s)"
7610 +- local i=0
7611 ++ local i=$dev_start
7612 + for ds in $zram_sizes; do
7613 + local sys_path="/sys/block/zram${i}/disksize"
7614 + echo "$ds" > $sys_path || \
7615 + echo "FAIL can't set '$ds' to $sys_path"
7616 +
7617 + i=$(($i + 1))
7618 +- echo "$sys_path = '$ds' ($i/$dev_num)"
7619 ++ echo "$sys_path = '$ds'"
7620 + done
7621 +
7622 + echo "zram set disksizes: OK"
7623 +@@ -142,14 +184,14 @@ zram_set_memlimit()
7624 + {
7625 + echo "set memory limit to zram device(s)"
7626 +
7627 +- local i=0
7628 ++ local i=$dev_start
7629 + for ds in $zram_mem_limits; do
7630 + local sys_path="/sys/block/zram${i}/mem_limit"
7631 + echo "$ds" > $sys_path || \
7632 + echo "FAIL can't set '$ds' to $sys_path"
7633 +
7634 + i=$(($i + 1))
7635 +- echo "$sys_path = '$ds' ($i/$dev_num)"
7636 ++ echo "$sys_path = '$ds'"
7637 + done
7638 +
7639 + echo "zram set memory limit: OK"
7640 +@@ -158,8 +200,8 @@ zram_set_memlimit()
7641 + zram_makeswap()
7642 + {
7643 + echo "make swap with zram device(s)"
7644 +- local i=0
7645 +- for i in $(seq 0 $(($dev_num - 1))); do
7646 ++ local i=$dev_start
7647 ++ for i in $(seq $dev_start $dev_end); do
7648 + mkswap /dev/zram$i > err.log 2>&1
7649 + if [ $? -ne 0 ]; then
7650 + cat err.log
7651 +@@ -182,7 +224,7 @@ zram_makeswap()
7652 + zram_swapoff()
7653 + {
7654 + local i=
7655 +- for i in $(seq 0 $dev_makeswap); do
7656 ++ for i in $(seq $dev_start $dev_end); do
7657 + swapoff /dev/zram$i > err.log 2>&1
7658 + if [ $? -ne 0 ]; then
7659 + cat err.log
7660 +@@ -196,7 +238,7 @@ zram_swapoff()
7661 +
7662 + zram_makefs()
7663 + {
7664 +- local i=0
7665 ++ local i=$dev_start
7666 + for fs in $zram_filesystems; do
7667 + # if requested fs not supported default it to ext2
7668 + which mkfs.$fs > /dev/null 2>&1 || fs=ext2
7669 +@@ -215,7 +257,7 @@ zram_makefs()
7670 + zram_mount()
7671 + {
7672 + local i=0
7673 +- for i in $(seq 0 $(($dev_num - 1))); do
7674 ++ for i in $(seq $dev_start $dev_end); do
7675 + echo "mount /dev/zram$i"
7676 + mkdir zram$i
7677 + mount /dev/zram$i zram$i > /dev/null || \