Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.16 commit in: /
Date: Wed, 23 Feb 2022 12:35:43
Message-Id: 1645619725.c33c02322aa211b7f4d87fc86b7d8ed27a0f5668.mpagano@gentoo
1 commit: c33c02322aa211b7f4d87fc86b7d8ed27a0f5668
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Feb 23 12:35:25 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Feb 23 12:35:25 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c33c0232
7
8 Linux patch 5.16.11
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1010_linux-5.16.11.patch | 10908 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 10912 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index fd9081b3..544fcf03 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -83,6 +83,10 @@ Patch: 1009_linux-5.16.10.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.16.10
23
24 +Patch: 1010_linux-5.16.11.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.16.11
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1010_linux-5.16.11.patch b/1010_linux-5.16.11.patch
33 new file mode 100644
34 index 00000000..4fde165d
35 --- /dev/null
36 +++ b/1010_linux-5.16.11.patch
37 @@ -0,0 +1,10908 @@
38 +diff --git a/.mailmap b/.mailmap
39 +index b344067e0acb6..3979fb166e0fd 100644
40 +--- a/.mailmap
41 ++++ b/.mailmap
42 +@@ -74,6 +74,9 @@ Chris Chiu <chris.chiu@×××××××××.com> <chiu@×××××××××.org>
43 + Christian Borntraeger <borntraeger@×××××××××.com> <borntraeger@××××××.com>
44 + Christian Borntraeger <borntraeger@×××××××××.com> <cborntra@××××××.com>
45 + Christian Borntraeger <borntraeger@×××××××××.com> <borntrae@××××××.com>
46 ++Christian Brauner <brauner@××××××.org> <christian@×××××××.io>
47 ++Christian Brauner <brauner@××××××.org> <christian.brauner@×××××××××.com>
48 ++Christian Brauner <brauner@××××××.org> <christian.brauner@××××××.com>
49 + Christophe Ricard <christophe.ricard@×××××.com>
50 + Christoph Hellwig <hch@×××.de>
51 + Colin Ian King <colin.king@×××××.com> <colin.king@×××××××××.com>
52 +diff --git a/Makefile b/Makefile
53 +index 36bbff16530ba..00ba75768af73 100644
54 +--- a/Makefile
55 ++++ b/Makefile
56 +@@ -1,7 +1,7 @@
57 + # SPDX-License-Identifier: GPL-2.0
58 + VERSION = 5
59 + PATCHLEVEL = 16
60 +-SUBLEVEL = 10
61 ++SUBLEVEL = 11
62 + EXTRAVERSION =
63 + NAME = Gobble Gobble
64 +
65 +diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
66 +index 6daaa645ae5d9..21413a9b7b6c6 100644
67 +--- a/arch/arm/mach-omap2/display.c
68 ++++ b/arch/arm/mach-omap2/display.c
69 +@@ -263,9 +263,9 @@ static int __init omapdss_init_of(void)
70 + }
71 +
72 + r = of_platform_populate(node, NULL, NULL, &pdev->dev);
73 ++ put_device(&pdev->dev);
74 + if (r) {
75 + pr_err("Unable to populate DSS submodule devices\n");
76 +- put_device(&pdev->dev);
77 + return r;
78 + }
79 +
80 +diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
81 +index ccb0e3732c0dc..31d1a21f60416 100644
82 +--- a/arch/arm/mach-omap2/omap_hwmod.c
83 ++++ b/arch/arm/mach-omap2/omap_hwmod.c
84 +@@ -752,8 +752,10 @@ static int __init _init_clkctrl_providers(void)
85 +
86 + for_each_matching_node(np, ti_clkctrl_match_table) {
87 + ret = _setup_clkctrl_provider(np);
88 +- if (ret)
89 ++ if (ret) {
90 ++ of_node_put(np);
91 + break;
92 ++ }
93 + }
94 +
95 + return ret;
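Both OMAP hunks above close the same class of leak: a reference obtained implicitly, whether from the of_find_device_by_node() lookup earlier in omapdss_init_of() or from the for_each_matching_node() iterator, must be released on every exit path. A condensed sketch of the corrected shape (kernel context assumed; illustrative wrapper names, not patch content):

    /* Lookup case: drop the device reference unconditionally, so the
     * error branch no longer leaks it. */
    static int populate_dss(struct device_node *node)
    {
            struct platform_device *pdev = of_find_device_by_node(node);
            int r;

            if (!pdev)
                    return -ENODEV;

            r = of_platform_populate(node, NULL, NULL, &pdev->dev);
            put_device(&pdev->dev);  /* success and failure alike */
            return r;
    }

    /* Iterator case: for_each_matching_node() drops the node reference
     * only when advancing, so an early break must drop it by hand. */
    static int setup_providers(void)
    {
            struct device_node *np;
            int ret = 0;

            for_each_matching_node(np, ti_clkctrl_match_table) {
                    ret = _setup_clkctrl_provider(np);
                    if (ret) {
                            of_node_put(np);
                            break;
                    }
            }
            return ret;
    }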
96 +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
97 +index 428449d98c0ae..a3a1ea0f21340 100644
98 +--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
99 ++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
100 +@@ -107,6 +107,12 @@
101 + no-map;
102 + };
103 +
104 ++ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
105 ++ secmon_reserved_bl32: secmon@5300000 {
106 ++ reg = <0x0 0x05300000 0x0 0x2000000>;
107 ++ no-map;
108 ++ };
109 ++
110 + linux,cma {
111 + compatible = "shared-dma-pool";
112 + reusable;
113 +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
114 +index d8838dde0f0f4..4fb31c2ba31c4 100644
115 +--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
116 ++++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
117 +@@ -157,14 +157,6 @@
118 + regulator-always-on;
119 + };
120 +
121 +- reserved-memory {
122 +- /* TEE Reserved Memory */
123 +- bl32_reserved: bl32@5000000 {
124 +- reg = <0x0 0x05300000 0x0 0x2000000>;
125 +- no-map;
126 +- };
127 +- };
128 +-
129 + sdio_pwrseq: sdio-pwrseq {
130 + compatible = "mmc-pwrseq-simple";
131 + reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
132 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
133 +index 6b457b2c30a4b..aa14ea017a613 100644
134 +--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
135 ++++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
136 +@@ -49,6 +49,12 @@
137 + no-map;
138 + };
139 +
140 ++ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
141 ++ secmon_reserved_bl32: secmon@5300000 {
142 ++ reg = <0x0 0x05300000 0x0 0x2000000>;
143 ++ no-map;
144 ++ };
145 ++
146 + linux,cma {
147 + compatible = "shared-dma-pool";
148 + reusable;
149 +diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
150 +index 427475846fc70..a5d79f2f7c196 100644
151 +--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
152 ++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
153 +@@ -203,14 +203,6 @@
154 + regulator-always-on;
155 + };
156 +
157 +- reserved-memory {
158 +- /* TEE Reserved Memory */
159 +- bl32_reserved: bl32@5000000 {
160 +- reg = <0x0 0x05300000 0x0 0x2000000>;
161 +- no-map;
162 +- };
163 +- };
164 +-
165 + sdio_pwrseq: sdio-pwrseq {
166 + compatible = "mmc-pwrseq-simple";
167 + reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
168 +diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
169 +index 3198acb2aad8c..7f3c87f7a0cec 100644
170 +--- a/arch/arm64/include/asm/el2_setup.h
171 ++++ b/arch/arm64/include/asm/el2_setup.h
172 +@@ -106,7 +106,7 @@
173 + msr_s SYS_ICC_SRE_EL2, x0
174 + isb // Make sure SRE is now set
175 + mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
176 +- tbz x0, #0, 1f // and check that it sticks
177 ++ tbz x0, #0, .Lskip_gicv3_\@ // and check that it sticks
178 + msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
179 + .Lskip_gicv3_\@:
180 + .endm
181 +diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
182 +index daa2afd974fbf..9efb18573fd81 100644
183 +--- a/arch/parisc/include/asm/bitops.h
184 ++++ b/arch/parisc/include/asm/bitops.h
185 +@@ -12,6 +12,14 @@
186 + #include <asm/barrier.h>
187 + #include <linux/atomic.h>
188 +
189 ++/* compiler build environment sanity checks: */
190 ++#if !defined(CONFIG_64BIT) && defined(__LP64__)
191 ++#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
192 ++#endif
193 ++#if defined(CONFIG_64BIT) && !defined(__LP64__)
194 ++#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
195 ++#endif
196 ++
197 + /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
198 + * on use of volatile and __*_bit() (set/clear/change):
199 + * *_bit() want use of volatile.
200 +diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
201 +index 367f6397bda7a..8603850580857 100644
202 +--- a/arch/parisc/lib/iomap.c
203 ++++ b/arch/parisc/lib/iomap.c
204 +@@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr)
205 + return *((u64 *)addr);
206 + }
207 +
208 ++u64 ioread64_lo_hi(const void __iomem *addr)
209 ++{
210 ++ u32 low, high;
211 ++
212 ++ low = ioread32(addr);
213 ++ high = ioread32(addr + sizeof(u32));
214 ++
215 ++ return low + ((u64)high << 32);
216 ++}
217 ++
218 + u64 ioread64_hi_lo(const void __iomem *addr)
219 + {
220 + u32 low, high;
221 +@@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem *addr)
222 + }
223 + }
224 +
225 ++void iowrite64_lo_hi(u64 val, void __iomem *addr)
226 ++{
227 ++ iowrite32(val, addr);
228 ++ iowrite32(val >> 32, addr + sizeof(u32));
229 ++}
230 ++
231 + void iowrite64_hi_lo(u64 val, void __iomem *addr)
232 + {
233 + iowrite32(val >> 32, addr + sizeof(u32));
234 +@@ -530,6 +546,7 @@ EXPORT_SYMBOL(ioread32);
235 + EXPORT_SYMBOL(ioread32be);
236 + EXPORT_SYMBOL(ioread64);
237 + EXPORT_SYMBOL(ioread64be);
238 ++EXPORT_SYMBOL(ioread64_lo_hi);
239 + EXPORT_SYMBOL(ioread64_hi_lo);
240 + EXPORT_SYMBOL(iowrite8);
241 + EXPORT_SYMBOL(iowrite16);
242 +@@ -538,6 +555,7 @@ EXPORT_SYMBOL(iowrite32);
243 + EXPORT_SYMBOL(iowrite32be);
244 + EXPORT_SYMBOL(iowrite64);
245 + EXPORT_SYMBOL(iowrite64be);
246 ++EXPORT_SYMBOL(iowrite64_lo_hi);
247 + EXPORT_SYMBOL(iowrite64_hi_lo);
248 + EXPORT_SYMBOL(ioread8_rep);
249 + EXPORT_SYMBOL(ioread16_rep);
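parisc previously exported only the _hi_lo variants; the patch adds the missing _lo_hi ones. Both compose a 64-bit value from two 32-bit MMIO accesses and differ only in which half is touched first, an ordering that matters for devices that latch a register when one half is accessed. A self-contained model of the composition, with plain memory standing in for __iomem and hypothetical names:

    #include <assert.h>
    #include <stdint.h>

    /* Model of ioread64_lo_hi(): low word at the base address, high
     * word at base + 4; value = low + (high << 32). */
    static uint64_t read64_lo_hi(const uint32_t *base)
    {
            uint32_t low  = base[0];   /* ioread32(addr) */
            uint32_t high = base[1];   /* ioread32(addr + sizeof(u32)) */

            return low + ((uint64_t)high << 32);
    }

    int main(void)
    {
            uint32_t regs[2] = { 0x89abcdef, 0x01234567 };

            assert(read64_lo_hi(regs) == 0x0123456789abcdefULL);
            return 0;
    }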
250 +diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
251 +index 1ae31db9988f5..1dc2e88e7b04f 100644
252 +--- a/arch/parisc/mm/init.c
253 ++++ b/arch/parisc/mm/init.c
254 +@@ -337,9 +337,9 @@ static void __init setup_bootmem(void)
255 +
256 + static bool kernel_set_to_readonly;
257 +
258 +-static void __init map_pages(unsigned long start_vaddr,
259 +- unsigned long start_paddr, unsigned long size,
260 +- pgprot_t pgprot, int force)
261 ++static void __ref map_pages(unsigned long start_vaddr,
262 ++ unsigned long start_paddr, unsigned long size,
263 ++ pgprot_t pgprot, int force)
264 + {
265 + pmd_t *pmd;
266 + pte_t *pg_table;
267 +@@ -449,7 +449,7 @@ void __init set_kernel_text_rw(int enable_read_write)
268 + flush_tlb_all();
269 + }
270 +
271 +-void __ref free_initmem(void)
272 ++void free_initmem(void)
273 + {
274 + unsigned long init_begin = (unsigned long)__init_begin;
275 + unsigned long init_end = (unsigned long)__init_end;
276 +@@ -463,7 +463,6 @@ void __ref free_initmem(void)
277 + /* The init text pages are marked R-X. We have to
278 + * flush the icache and mark them RW-
279 + *
280 +- * This is tricky, because map_pages is in the init section.
281 + * Do a dummy remap of the data section first (the data
282 + * section is already PAGE_KERNEL) to pull in the TLB entries
283 + * for map_kernel */
284 +diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
285 +index 68e5c0a7e99d1..2e2a8211b17be 100644
286 +--- a/arch/powerpc/kernel/head_book3s_32.S
287 ++++ b/arch/powerpc/kernel/head_book3s_32.S
288 +@@ -421,14 +421,14 @@ InstructionTLBMiss:
289 + */
290 + /* Get PTE (linux-style) and check access */
291 + mfspr r3,SPRN_IMISS
292 +-#ifdef CONFIG_MODULES
293 ++#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
294 + lis r1, TASK_SIZE@h /* check if kernel address */
295 + cmplw 0,r1,r3
296 + #endif
297 + mfspr r2, SPRN_SDR1
298 + li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
299 + rlwinm r2, r2, 28, 0xfffff000
300 +-#ifdef CONFIG_MODULES
301 ++#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
302 + bgt- 112f
303 + lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
304 + li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
305 +diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
306 +index 86f49e3e7cf56..b042fcae39137 100644
307 +--- a/arch/powerpc/lib/sstep.c
308 ++++ b/arch/powerpc/lib/sstep.c
309 +@@ -3264,12 +3264,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
310 + case BARRIER_EIEIO:
311 + eieio();
312 + break;
313 ++#ifdef CONFIG_PPC64
314 + case BARRIER_LWSYNC:
315 + asm volatile("lwsync" : : : "memory");
316 + break;
317 + case BARRIER_PTESYNC:
318 + asm volatile("ptesync" : : : "memory");
319 + break;
320 ++#endif
321 + }
322 + break;
323 +
324 +diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
325 +index 84b87538a15de..bab883c0b6fee 100644
326 +--- a/arch/x86/include/asm/bug.h
327 ++++ b/arch/x86/include/asm/bug.h
328 +@@ -22,7 +22,7 @@
329 +
330 + #ifdef CONFIG_DEBUG_BUGVERBOSE
331 +
332 +-#define _BUG_FLAGS(ins, flags) \
333 ++#define _BUG_FLAGS(ins, flags, extra) \
334 + do { \
335 + asm_inline volatile("1:\t" ins "\n" \
336 + ".pushsection __bug_table,\"aw\"\n" \
337 +@@ -31,7 +31,8 @@ do { \
338 + "\t.word %c1" "\t# bug_entry::line\n" \
339 + "\t.word %c2" "\t# bug_entry::flags\n" \
340 + "\t.org 2b+%c3\n" \
341 +- ".popsection" \
342 ++ ".popsection\n" \
343 ++ extra \
344 + : : "i" (__FILE__), "i" (__LINE__), \
345 + "i" (flags), \
346 + "i" (sizeof(struct bug_entry))); \
347 +@@ -39,14 +40,15 @@ do { \
348 +
349 + #else /* !CONFIG_DEBUG_BUGVERBOSE */
350 +
351 +-#define _BUG_FLAGS(ins, flags) \
352 ++#define _BUG_FLAGS(ins, flags, extra) \
353 + do { \
354 + asm_inline volatile("1:\t" ins "\n" \
355 + ".pushsection __bug_table,\"aw\"\n" \
356 + "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
357 + "\t.word %c0" "\t# bug_entry::flags\n" \
358 + "\t.org 2b+%c1\n" \
359 +- ".popsection" \
360 ++ ".popsection\n" \
361 ++ extra \
362 + : : "i" (flags), \
363 + "i" (sizeof(struct bug_entry))); \
364 + } while (0)
365 +@@ -55,7 +57,7 @@ do { \
366 +
367 + #else
368 +
369 +-#define _BUG_FLAGS(ins, flags) asm volatile(ins)
370 ++#define _BUG_FLAGS(ins, flags, extra) asm volatile(ins)
371 +
372 + #endif /* CONFIG_GENERIC_BUG */
373 +
374 +@@ -63,8 +65,8 @@ do { \
375 + #define BUG() \
376 + do { \
377 + instrumentation_begin(); \
378 +- _BUG_FLAGS(ASM_UD2, 0); \
379 +- unreachable(); \
380 ++ _BUG_FLAGS(ASM_UD2, 0, ""); \
381 ++ __builtin_unreachable(); \
382 + } while (0)
383 +
384 + /*
385 +@@ -75,9 +77,9 @@ do { \
386 + */
387 + #define __WARN_FLAGS(flags) \
388 + do { \
389 ++ __auto_type f = BUGFLAG_WARNING|(flags); \
390 + instrumentation_begin(); \
391 +- _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
392 +- annotate_reachable(); \
393 ++ _BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE); \
394 + instrumentation_end(); \
395 + } while (0)
396 +
397 +diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
398 +index 437d7c930c0bd..75ffaef8c2991 100644
399 +--- a/arch/x86/kernel/fpu/regset.c
400 ++++ b/arch/x86/kernel/fpu/regset.c
401 +@@ -91,11 +91,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
402 + const void *kbuf, const void __user *ubuf)
403 + {
404 + struct fpu *fpu = &target->thread.fpu;
405 +- struct user32_fxsr_struct newstate;
406 ++ struct fxregs_state newstate;
407 + int ret;
408 +
409 +- BUILD_BUG_ON(sizeof(newstate) != sizeof(struct fxregs_state));
410 +-
411 + if (!cpu_feature_enabled(X86_FEATURE_FXSR))
412 + return -ENODEV;
413 +
414 +@@ -116,9 +114,10 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
415 + /* Copy the state */
416 + memcpy(&fpu->fpstate->regs.fxsave, &newstate, sizeof(newstate));
417 +
418 +- /* Clear xmm8..15 */
419 ++ /* Clear xmm8..15 for 32-bit callers */
420 + BUILD_BUG_ON(sizeof(fpu->__fpstate.regs.fxsave.xmm_space) != 16 * 16);
421 +- memset(&fpu->fpstate->regs.fxsave.xmm_space[8], 0, 8 * 16);
422 ++ if (in_ia32_syscall())
423 ++ memset(&fpu->fpstate->regs.fxsave.xmm_space[8*4], 0, 8 * 16);
424 +
425 + /* Mark FP and SSE as in use when XSAVE is enabled */
426 + if (use_xsave())
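The regset fix swaps struct user32_fxsr_struct for struct fxregs_state, gates the clearing on in_ia32_syscall(), and, crucially, moves the memset start from xmm_space[8] to xmm_space[8*4]: xmm_space is an array of u32, so each 16-byte XMM register spans four elements and XMM8 begins at element 32, not 8. A compilable sketch of that indexing (layout assumed from the 512-byte FXSAVE format):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* xmm_space mirrors the FXSAVE area: 16 XMM registers x 16 bytes,
     * stored as 64 u32 words. */
    struct fxregs_like {
            uint32_t xmm_space[64];
    };

    int main(void)
    {
            struct fxregs_like fx;

            memset(&fx, 0xff, sizeof(fx));
            /* XMM8 starts at byte 8*16, i.e. word 8*4; clear XMM8..XMM15 */
            memset(&fx.xmm_space[8 * 4], 0, 8 * 16);

            assert(fx.xmm_space[31] == 0xffffffffu);   /* XMM7 untouched */
            assert(fx.xmm_space[32] == 0);             /* XMM8 cleared */
            return 0;
    }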
427 +diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
428 +index 6d2244c94799c..8d2f2f995539d 100644
429 +--- a/arch/x86/kernel/ptrace.c
430 ++++ b/arch/x86/kernel/ptrace.c
431 +@@ -1224,7 +1224,7 @@ static struct user_regset x86_64_regsets[] __ro_after_init = {
432 + },
433 + [REGSET_FP] = {
434 + .core_note_type = NT_PRFPREG,
435 +- .n = sizeof(struct user_i387_struct) / sizeof(long),
436 ++ .n = sizeof(struct fxregs_state) / sizeof(long),
437 + .size = sizeof(long), .align = sizeof(long),
438 + .active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
439 + },
440 +@@ -1271,7 +1271,7 @@ static struct user_regset x86_32_regsets[] __ro_after_init = {
441 + },
442 + [REGSET_XFP] = {
443 + .core_note_type = NT_PRXFPREG,
444 +- .n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
445 ++ .n = sizeof(struct fxregs_state) / sizeof(u32),
446 + .size = sizeof(u32), .align = sizeof(u32),
447 + .active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
448 + },
449 +diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
450 +index 09873f6488f7c..de955ca58d17c 100644
451 +--- a/arch/x86/kvm/pmu.c
452 ++++ b/arch/x86/kvm/pmu.c
453 +@@ -95,7 +95,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
454 + }
455 +
456 + static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
457 +- unsigned config, bool exclude_user,
458 ++ u64 config, bool exclude_user,
459 + bool exclude_kernel, bool intr,
460 + bool in_tx, bool in_tx_cp)
461 + {
462 +@@ -173,8 +173,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
463 +
464 + void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
465 + {
466 +- unsigned config, type = PERF_TYPE_RAW;
467 +- u8 event_select, unit_mask;
468 ++ u64 config;
469 ++ u32 type = PERF_TYPE_RAW;
470 + struct kvm *kvm = pmc->vcpu->kvm;
471 + struct kvm_pmu_event_filter *filter;
472 + int i;
473 +@@ -206,23 +206,18 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
474 + if (!allow_event)
475 + return;
476 +
477 +- event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
478 +- unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
479 +-
480 + if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
481 + ARCH_PERFMON_EVENTSEL_INV |
482 + ARCH_PERFMON_EVENTSEL_CMASK |
483 + HSW_IN_TX |
484 + HSW_IN_TX_CHECKPOINTED))) {
485 +- config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
486 +- event_select,
487 +- unit_mask);
488 ++ config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
489 + if (config != PERF_COUNT_HW_MAX)
490 + type = PERF_TYPE_HARDWARE;
491 + }
492 +
493 + if (type == PERF_TYPE_RAW)
494 +- config = eventsel & X86_RAW_EVENT_MASK;
495 ++ config = eventsel & AMD64_RAW_EVENT_MASK;
496 +
497 + if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
498 + return;
499 +diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
500 +index 59d6b76203d5b..dd7dbb1c5048d 100644
501 +--- a/arch/x86/kvm/pmu.h
502 ++++ b/arch/x86/kvm/pmu.h
503 +@@ -24,8 +24,7 @@ struct kvm_event_hw_type_mapping {
504 + };
505 +
506 + struct kvm_pmu_ops {
507 +- unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
508 +- u8 unit_mask);
509 ++ unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
510 + unsigned (*find_fixed_event)(int idx);
511 + bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
512 + struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
513 +diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
514 +index 8f9af7b7dbbe4..212af871ca746 100644
515 +--- a/arch/x86/kvm/svm/avic.c
516 ++++ b/arch/x86/kvm/svm/avic.c
517 +@@ -342,8 +342,6 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
518 + avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
519 + break;
520 + case AVIC_IPI_FAILURE_INVALID_TARGET:
521 +- WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
522 +- index, vcpu->vcpu_id, icrh, icrl);
523 + break;
524 + case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
525 + WARN_ONCE(1, "Invalid backing page\n");
526 +diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
527 +index a67f8bee3adc3..f70d90b4402e8 100644
528 +--- a/arch/x86/kvm/svm/nested.c
529 ++++ b/arch/x86/kvm/svm/nested.c
530 +@@ -1389,18 +1389,6 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
531 + !nested_vmcb_valid_sregs(vcpu, save))
532 + goto out_free;
533 +
534 +- /*
535 +- * While the nested guest CR3 is already checked and set by
536 +- * KVM_SET_SREGS, it was set when nested state was yet loaded,
537 +- * thus MMU might not be initialized correctly.
538 +- * Set it again to fix this.
539 +- */
540 +-
541 +- ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
542 +- nested_npt_enabled(svm), false);
543 +- if (WARN_ON_ONCE(ret))
544 +- goto out_free;
545 +-
546 +
547 + /*
548 + * All checks done, we can enter guest mode. Userspace provides
549 +@@ -1426,6 +1414,20 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
550 +
551 + svm_switch_vmcb(svm, &svm->nested.vmcb02);
552 + nested_vmcb02_prepare_control(svm);
553 ++
554 ++ /*
555 ++ * While the nested guest CR3 is already checked and set by
556 ++ * KVM_SET_SREGS, it was set when nested state was yet loaded,
557 ++ * thus MMU might not be initialized correctly.
558 ++ * Set it again to fix this.
559 ++ */
560 ++
561 ++ ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
562 ++ nested_npt_enabled(svm), false);
563 ++ if (WARN_ON_ONCE(ret))
564 ++ goto out_free;
565 ++
566 ++
567 + kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
568 + ret = 0;
569 + out_free:
570 +diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
571 +index b4095dfeeee62..7fadfe3c67e73 100644
572 +--- a/arch/x86/kvm/svm/pmu.c
573 ++++ b/arch/x86/kvm/svm/pmu.c
574 +@@ -134,10 +134,10 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
575 + return &pmu->gp_counters[msr_to_index(msr)];
576 + }
577 +
578 +-static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
579 +- u8 event_select,
580 +- u8 unit_mask)
581 ++static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
582 + {
583 ++ u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
584 ++ u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
585 + int i;
586 +
587 + for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
588 +@@ -319,7 +319,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
589 + }
590 +
591 + struct kvm_pmu_ops amd_pmu_ops = {
592 +- .find_arch_event = amd_find_arch_event,
593 ++ .pmc_perf_hw_id = amd_pmc_perf_hw_id,
594 + .find_fixed_event = amd_find_fixed_event,
595 + .pmc_is_enabled = amd_pmc_is_enabled,
596 + .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
597 +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
598 +index d6a4acaa65742..57e2a55e46175 100644
599 +--- a/arch/x86/kvm/svm/svm.c
600 ++++ b/arch/x86/kvm/svm/svm.c
601 +@@ -1795,6 +1795,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
602 + {
603 + struct vcpu_svm *svm = to_svm(vcpu);
604 + u64 hcr0 = cr0;
605 ++ bool old_paging = is_paging(vcpu);
606 +
607 + #ifdef CONFIG_X86_64
608 + if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
609 +@@ -1811,8 +1812,11 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
610 + #endif
611 + vcpu->arch.cr0 = cr0;
612 +
613 +- if (!npt_enabled)
614 ++ if (!npt_enabled) {
615 + hcr0 |= X86_CR0_PG | X86_CR0_WP;
616 ++ if (old_paging != is_paging(vcpu))
617 ++ svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
618 ++ }
619 +
620 + /*
621 + * re-enable caching here because the QEMU bios
622 +@@ -1856,8 +1860,12 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
623 + svm_flush_tlb(vcpu);
624 +
625 + vcpu->arch.cr4 = cr4;
626 +- if (!npt_enabled)
627 ++ if (!npt_enabled) {
628 + cr4 |= X86_CR4_PAE;
629 ++
630 ++ if (!is_paging(vcpu))
631 ++ cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
632 ++ }
633 + cr4 |= host_cr4_mce;
634 + to_svm(vcpu)->vmcb->save.cr4 = cr4;
635 + vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
636 +@@ -4441,10 +4449,17 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
637 + * Enter the nested guest now
638 + */
639 +
640 ++ vmcb_mark_all_dirty(svm->vmcb01.ptr);
641 ++
642 + vmcb12 = map.hva;
643 + nested_load_control_from_vmcb12(svm, &vmcb12->control);
644 + ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
645 +
646 ++ if (ret)
647 ++ goto unmap_save;
648 ++
649 ++ svm->nested.nested_run_pending = 1;
650 ++
651 + unmap_save:
652 + kvm_vcpu_unmap(vcpu, &map_save, true);
653 + unmap_map:
654 +diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
655 +index 1b7456b2177b9..60563a45f3eb8 100644
656 +--- a/arch/x86/kvm/vmx/pmu_intel.c
657 ++++ b/arch/x86/kvm/vmx/pmu_intel.c
658 +@@ -68,10 +68,11 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
659 + reprogram_counter(pmu, bit);
660 + }
661 +
662 +-static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
663 +- u8 event_select,
664 +- u8 unit_mask)
665 ++static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
666 + {
667 ++ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
668 ++ u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
669 ++ u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
670 + int i;
671 +
672 + for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
673 +@@ -703,7 +704,7 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
674 + }
675 +
676 + struct kvm_pmu_ops intel_pmu_ops = {
677 +- .find_arch_event = intel_find_arch_event,
678 ++ .pmc_perf_hw_id = intel_pmc_perf_hw_id,
679 + .find_fixed_event = intel_find_fixed_event,
680 + .pmc_is_enabled = intel_pmc_is_enabled,
681 + .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
682 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
683 +index fe4a36c984460..4b356ae175cc9 100644
684 +--- a/arch/x86/kvm/vmx/vmx.c
685 ++++ b/arch/x86/kvm/vmx/vmx.c
686 +@@ -7534,6 +7534,7 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
687 + if (ret)
688 + return ret;
689 +
690 ++ vmx->nested.nested_run_pending = 1;
691 + vmx->nested.smm.guest_mode = false;
692 + }
693 + return 0;
694 +diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
695 +index dff2bdf9507a8..94fce17f0f5a3 100644
696 +--- a/arch/x86/kvm/xen.c
697 ++++ b/arch/x86/kvm/xen.c
698 +@@ -93,32 +93,57 @@ static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
699 + void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
700 + {
701 + struct kvm_vcpu_xen *vx = &v->arch.xen;
702 ++ struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
703 ++ struct kvm_memslots *slots = kvm_memslots(v->kvm);
704 ++ bool atomic = (state == RUNSTATE_runnable);
705 + uint64_t state_entry_time;
706 +- unsigned int offset;
707 ++ int __user *user_state;
708 ++ uint64_t __user *user_times;
709 +
710 + kvm_xen_update_runstate(v, state);
711 +
712 + if (!vx->runstate_set)
713 + return;
714 +
715 +- BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
716 ++ if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
717 ++ kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
718 ++ return;
719 ++
720 ++ /* We made sure it fits in a single page */
721 ++ BUG_ON(!ghc->memslot);
722 ++
723 ++ if (atomic)
724 ++ pagefault_disable();
725 +
726 +- offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
727 +-#ifdef CONFIG_X86_64
728 + /*
729 +- * The only difference is alignment of uint64_t in 32-bit.
730 +- * So the first field 'state' is accessed directly using
731 +- * offsetof() (where its offset happens to be zero), while the
732 +- * remaining fields which are all uint64_t, start at 'offset'
733 +- * which we tweak here by adding 4.
734 ++ * The only difference between 32-bit and 64-bit versions of the
735 ++ * runstate struct is the alignment of uint64_t in 32-bit, which
736 ++ * means that the 64-bit version has an additional 4 bytes of
737 ++ * padding after the first field 'state'.
738 ++ *
739 ++ * So we use 'int __user *user_state' to point to the state field,
740 ++ * and 'uint64_t __user *user_times' for runstate_entry_time. So
741 ++ * the actual array of time[] in each state starts at user_times[1].
742 + */
743 ++ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
744 ++ BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
745 ++ user_state = (int __user *)ghc->hva;
746 ++
747 ++ BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
748 ++
749 ++ user_times = (uint64_t __user *)(ghc->hva +
750 ++ offsetof(struct compat_vcpu_runstate_info,
751 ++ state_entry_time));
752 ++#ifdef CONFIG_X86_64
753 + BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
754 + offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
755 + BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
756 + offsetof(struct compat_vcpu_runstate_info, time) + 4);
757 +
758 + if (v->kvm->arch.xen.long_mode)
759 +- offset = offsetof(struct vcpu_runstate_info, state_entry_time);
760 ++ user_times = (uint64_t __user *)(ghc->hva +
761 ++ offsetof(struct vcpu_runstate_info,
762 ++ state_entry_time));
763 + #endif
764 + /*
765 + * First write the updated state_entry_time at the appropriate
766 +@@ -132,10 +157,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
767 + BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
768 + sizeof(state_entry_time));
769 +
770 +- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
771 +- &state_entry_time, offset,
772 +- sizeof(state_entry_time)))
773 +- return;
774 ++ if (__put_user(state_entry_time, user_times))
775 ++ goto out;
776 + smp_wmb();
777 +
778 + /*
779 +@@ -149,11 +172,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
780 + BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
781 + sizeof(vx->current_runstate));
782 +
783 +- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
784 +- &vx->current_runstate,
785 +- offsetof(struct vcpu_runstate_info, state),
786 +- sizeof(vx->current_runstate)))
787 +- return;
788 ++ if (__put_user(vx->current_runstate, user_state))
789 ++ goto out;
790 +
791 + /*
792 + * Write the actual runstate times immediately after the
793 +@@ -168,24 +188,23 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
794 + BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
795 + sizeof(vx->runstate_times));
796 +
797 +- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
798 +- &vx->runstate_times[0],
799 +- offset + sizeof(u64),
800 +- sizeof(vx->runstate_times)))
801 +- return;
802 +-
803 ++ if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
804 ++ goto out;
805 + smp_wmb();
806 +
807 + /*
808 + * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
809 + * runstate_entry_time field.
810 + */
811 +-
812 + state_entry_time &= ~XEN_RUNSTATE_UPDATE;
813 +- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
814 +- &state_entry_time, offset,
815 +- sizeof(state_entry_time)))
816 +- return;
817 ++ __put_user(state_entry_time, user_times);
818 ++ smp_wmb();
819 ++
820 ++ out:
821 ++ mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
822 ++
823 ++ if (atomic)
824 ++ pagefault_enable();
825 + }
826 +
827 + int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
828 +@@ -337,6 +356,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
829 + break;
830 + }
831 +
832 ++ /* It must fit within a single page */
833 ++ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_info) > PAGE_SIZE) {
834 ++ r = -EINVAL;
835 ++ break;
836 ++ }
837 ++
838 + r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
839 + &vcpu->arch.xen.vcpu_info_cache,
840 + data->u.gpa,
841 +@@ -354,6 +379,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
842 + break;
843 + }
844 +
845 ++ /* It must fit within a single page */
846 ++ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) {
847 ++ r = -EINVAL;
848 ++ break;
849 ++ }
850 ++
851 + r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
852 + &vcpu->arch.xen.vcpu_time_info_cache,
853 + data->u.gpa,
854 +@@ -375,6 +406,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
855 + break;
856 + }
857 +
858 ++ /* It must fit within a single page */
859 ++ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
860 ++ r = -EINVAL;
861 ++ break;
862 ++ }
863 ++
864 + r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
865 + &vcpu->arch.xen.runstate_cache,
866 + data->u.gpa,
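The rewritten runstate update relies on one layout fact spelled out in the new comment: the compat (32-bit) and native (64-bit) runstate structs differ only in 4 bytes of padding after the int 'state' field, since uint64_t is 8-byte aligned in the 64-bit ABI but only 4-byte aligned in the 32-bit one. That is why user_times starts at the compat offset and shifts by 4 in long mode. The hypothetical stand-ins below make the difference checkable at compile time (assumes an LP64 host where uint64_t is 8-byte aligned):

    #include <stddef.h>
    #include <stdint.h>

    struct runstate64 {                     /* natural 64-bit layout */
            int32_t  state;                 /* 4 bytes + 4 bytes padding */
            uint64_t state_entry_time;      /* offset 8 */
            uint64_t time[4];
    };

    struct runstate32 {                     /* 32-bit ABI: uint64_t at 4 */
            int32_t  state;
            uint64_t state_entry_time;      /* offset 4 */
            uint64_t time[4];
    } __attribute__((packed, aligned(4)));

    _Static_assert(offsetof(struct runstate64, state_entry_time) ==
                   offsetof(struct runstate32, state_entry_time) + 4,
                   "64-bit layout carries 4 extra bytes of padding");
    _Static_assert(sizeof(struct runstate32) == 0x2c,
                   "matches the BUILD_BUG_ON in the hunk above");

    int main(void) { return 0; }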
867 +diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
868 +index 5004feb16783d..d47c3d176ae4b 100644
869 +--- a/arch/x86/xen/enlighten_pv.c
870 ++++ b/arch/x86/xen/enlighten_pv.c
871 +@@ -1341,10 +1341,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
872 +
873 + xen_acpi_sleep_register();
874 +
875 +- /* Avoid searching for BIOS MP tables */
876 +- x86_init.mpparse.find_smp_config = x86_init_noop;
877 +- x86_init.mpparse.get_smp_config = x86_init_uint_noop;
878 +-
879 + xen_boot_params_init_edd();
880 +
881 + #ifdef CONFIG_ACPI
882 +diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
883 +index 6a8f3b53ab834..4a6019238ee7d 100644
884 +--- a/arch/x86/xen/smp_pv.c
885 ++++ b/arch/x86/xen/smp_pv.c
886 +@@ -148,28 +148,12 @@ int xen_smp_intr_init_pv(unsigned int cpu)
887 + return rc;
888 + }
889 +
890 +-static void __init xen_fill_possible_map(void)
891 +-{
892 +- int i, rc;
893 +-
894 +- if (xen_initial_domain())
895 +- return;
896 +-
897 +- for (i = 0; i < nr_cpu_ids; i++) {
898 +- rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
899 +- if (rc >= 0) {
900 +- num_processors++;
901 +- set_cpu_possible(i, true);
902 +- }
903 +- }
904 +-}
905 +-
906 +-static void __init xen_filter_cpu_maps(void)
907 ++static void __init _get_smp_config(unsigned int early)
908 + {
909 + int i, rc;
910 + unsigned int subtract = 0;
911 +
912 +- if (!xen_initial_domain())
913 ++ if (early)
914 + return;
915 +
916 + num_processors = 0;
917 +@@ -210,7 +194,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void)
918 + * sure the old memory can be recycled. */
919 + make_lowmem_page_readwrite(xen_initial_gdt);
920 +
921 +- xen_filter_cpu_maps();
922 + xen_setup_vcpu_info_placement();
923 +
924 + /*
925 +@@ -476,5 +459,8 @@ static const struct smp_ops xen_smp_ops __initconst = {
926 + void __init xen_smp_init(void)
927 + {
928 + smp_ops = xen_smp_ops;
929 +- xen_fill_possible_map();
930 ++
931 ++ /* Avoid searching for BIOS MP tables */
932 ++ x86_init.mpparse.find_smp_config = x86_init_noop;
933 ++ x86_init.mpparse.get_smp_config = _get_smp_config;
934 + }
935 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
936 +index 30918b0e81c02..8c0950c9a1a2f 100644
937 +--- a/block/bfq-iosched.c
938 ++++ b/block/bfq-iosched.c
939 +@@ -6878,6 +6878,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
940 + spin_unlock_irq(&bfqd->lock);
941 + #endif
942 +
943 ++ wbt_enable_default(bfqd->queue);
944 ++
945 + kfree(bfqd);
946 + }
947 +
948 +diff --git a/block/blk-core.c b/block/blk-core.c
949 +index 9ebeb9bdf5832..5adca3a9cebea 100644
950 +--- a/block/blk-core.c
951 ++++ b/block/blk-core.c
952 +@@ -324,13 +324,6 @@ void blk_queue_start_drain(struct request_queue *q)
953 + wake_up_all(&q->mq_freeze_wq);
954 + }
955 +
956 +-void blk_set_queue_dying(struct request_queue *q)
957 +-{
958 +- blk_queue_flag_set(QUEUE_FLAG_DYING, q);
959 +- blk_queue_start_drain(q);
960 +-}
961 +-EXPORT_SYMBOL_GPL(blk_set_queue_dying);
962 +-
963 + /**
964 + * blk_cleanup_queue - shutdown a request queue
965 + * @q: request queue to shutdown
966 +@@ -348,7 +341,8 @@ void blk_cleanup_queue(struct request_queue *q)
967 + WARN_ON_ONCE(blk_queue_registered(q));
968 +
969 + /* mark @q DYING, no new request or merges will be allowed afterwards */
970 +- blk_set_queue_dying(q);
971 ++ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
972 ++ blk_queue_start_drain(q);
973 +
974 + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
975 + blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
976 +diff --git a/block/elevator.c b/block/elevator.c
977 +index 19a78d5516ba7..42cb7af57b3ed 100644
978 +--- a/block/elevator.c
979 ++++ b/block/elevator.c
980 +@@ -523,8 +523,6 @@ void elv_unregister_queue(struct request_queue *q)
981 + kobject_del(&e->kobj);
982 +
983 + e->registered = 0;
984 +- /* Re-enable throttling in case elevator disabled it */
985 +- wbt_enable_default(q);
986 + }
987 + }
988 +
989 +diff --git a/block/genhd.c b/block/genhd.c
990 +index 5308e0920fa6f..f6a698f3252b5 100644
991 +--- a/block/genhd.c
992 ++++ b/block/genhd.c
993 +@@ -549,6 +549,20 @@ out_free_ext_minor:
994 + }
995 + EXPORT_SYMBOL(device_add_disk);
996 +
997 ++/**
998 ++ * blk_mark_disk_dead - mark a disk as dead
999 ++ * @disk: disk to mark as dead
1000 ++ *
1001 ++ * Mark a disk as dead (e.g. surprise removed) and don't accept any new I/O
1002 ++ * to this disk.
1003 ++ */
1004 ++void blk_mark_disk_dead(struct gendisk *disk)
1005 ++{
1006 ++ set_bit(GD_DEAD, &disk->state);
1007 ++ blk_queue_start_drain(disk->queue);
1008 ++}
1009 ++EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
1010 ++
1011 + /**
1012 + * del_gendisk - remove the gendisk
1013 + * @disk: the struct gendisk to remove
1014 +diff --git a/crypto/af_alg.c b/crypto/af_alg.c
1015 +index 3dd5a773c320b..17dc136d4538f 100644
1016 +--- a/crypto/af_alg.c
1017 ++++ b/crypto/af_alg.c
1018 +@@ -25,12 +25,9 @@ struct alg_type_list {
1019 + struct list_head list;
1020 + };
1021 +
1022 +-static atomic_long_t alg_memory_allocated;
1023 +-
1024 + static struct proto alg_proto = {
1025 + .name = "ALG",
1026 + .owner = THIS_MODULE,
1027 +- .memory_allocated = &alg_memory_allocated,
1028 + .obj_size = sizeof(struct alg_sock),
1029 + };
1030 +
1031 +diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
1032 +index 76ef1bcc88480..a26f8094cc1c1 100644
1033 +--- a/drivers/acpi/processor_idle.c
1034 ++++ b/drivers/acpi/processor_idle.c
1035 +@@ -95,6 +95,11 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
1036 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
1037 + DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
1038 + (void *)1},
1039 ++ /* T40 cannot handle C3 idle state */
1040 ++ { set_max_cstate, "IBM ThinkPad T40", {
1041 ++ DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
1042 ++ DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
1043 ++ (void *)2},
1044 + {},
1045 + };
1046 +
1047 +diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
1048 +index 1c48358b43ba3..e0185e841b2a3 100644
1049 +--- a/drivers/acpi/x86/s2idle.c
1050 ++++ b/drivers/acpi/x86/s2idle.c
1051 +@@ -424,15 +424,11 @@ static int lps0_device_attach(struct acpi_device *adev,
1052 + mem_sleep_current = PM_SUSPEND_TO_IDLE;
1053 +
1054 + /*
1055 +- * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
1056 +- * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
1057 +- * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
1058 +- *
1059 +- * Only enable on !AMD as enabling this universally causes problems for a number
1060 +- * of AMD based systems.
1061 ++ * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
1062 ++ * EC GPE to be enabled while suspended for certain wakeup devices to
1063 ++ * work, so mark it as wakeup-capable.
1064 + */
1065 +- if (!acpi_s2idle_vendor_amd())
1066 +- acpi_ec_mark_gpe_for_wake();
1067 ++ acpi_ec_mark_gpe_for_wake();
1068 +
1069 + return 0;
1070 + }
1071 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1072 +index 94bc5dbb31e1e..63666ee9de175 100644
1073 +--- a/drivers/ata/libata-core.c
1074 ++++ b/drivers/ata/libata-core.c
1075 +@@ -4079,6 +4079,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1076 +
1077 + /* devices that don't properly handle TRIM commands */
1078 + { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
1079 ++ { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, },
1080 +
1081 + /*
1082 + * As defined, the DRAT (Deterministic Read After Trim) and RZAT
1083 +diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
1084 +index c91b9010c1a6d..53489562fa36b 100644
1085 +--- a/drivers/block/mtip32xx/mtip32xx.c
1086 ++++ b/drivers/block/mtip32xx/mtip32xx.c
1087 +@@ -4113,7 +4113,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
1088 + "Completion workers still active!\n");
1089 + }
1090 +
1091 +- blk_set_queue_dying(dd->queue);
1092 ++ blk_mark_disk_dead(dd->disk);
1093 + set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
1094 +
1095 + /* Clean up the block layer. */
1096 +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
1097 +index 953fa134cd3db..7cc6871fd8e52 100644
1098 +--- a/drivers/block/rbd.c
1099 ++++ b/drivers/block/rbd.c
1100 +@@ -7186,7 +7186,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
1101 + * IO to complete/fail.
1102 + */
1103 + blk_mq_freeze_queue(rbd_dev->disk->queue);
1104 +- blk_set_queue_dying(rbd_dev->disk->queue);
1105 ++ blk_mark_disk_dead(rbd_dev->disk);
1106 + }
1107 +
1108 + del_gendisk(rbd_dev->disk);
1109 +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
1110 +index 286cf1afad781..2a5b14230986a 100644
1111 +--- a/drivers/block/xen-blkfront.c
1112 ++++ b/drivers/block/xen-blkfront.c
1113 +@@ -2129,7 +2129,7 @@ static void blkfront_closing(struct blkfront_info *info)
1114 +
1115 + /* No more blkif_request(). */
1116 + blk_mq_stop_hw_queues(info->rq);
1117 +- blk_set_queue_dying(info->rq);
1118 ++ blk_mark_disk_dead(info->gd);
1119 + set_capacity(info->gd, 0);
1120 +
1121 + for_each_rinfo(info, rinfo, i) {
1122 +diff --git a/drivers/char/random.c b/drivers/char/random.c
1123 +index a27ae3999ff32..ebe86de9d0acc 100644
1124 +--- a/drivers/char/random.c
1125 ++++ b/drivers/char/random.c
1126 +@@ -1963,7 +1963,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1127 + */
1128 + if (!capable(CAP_SYS_ADMIN))
1129 + return -EPERM;
1130 +- input_pool.entropy_count = 0;
1131 ++ if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
1132 ++ wake_up_interruptible(&random_write_wait);
1133 ++ kill_fasync(&fasync, SIGIO, POLL_OUT);
1134 ++ }
1135 + return 0;
1136 + case RNDRESEEDCRNG:
1137 + if (!capable(CAP_SYS_ADMIN))
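The RNDZAPENTCNT/RNDCLEARPOOL change replaces a blind store with xchg(), so the ioctl atomically learns whether the count was actually nonzero and only then wakes writers blocked on the pool and delivers SIGIO. The same snapshot-and-reset shape in portable C11, with the kernel-only wakeup calls shown as a comment:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int entropy_count;

    /* Atomically zero the count and report whether a wakeup is
     * warranted; a plain "entropy_count = 0" could race and gives no
     * way to tell whether waiters should be notified. */
    static bool zap_entropy_count(void)
    {
            if (atomic_exchange(&entropy_count, 0) != 0) {
                    /* kernel equivalent:
                     * wake_up_interruptible(&random_write_wait);
                     * kill_fasync(&fasync, SIGIO, POLL_OUT); */
                    return true;
            }
            return false;
    }

    int main(void)
    {
            atomic_store(&entropy_count, 128);
            return zap_entropy_count() ? 0 : 1;
    }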
1138 +diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
1139 +index 8a6bf291a73fe..daafea5bc35d9 100644
1140 +--- a/drivers/dma/ptdma/ptdma-dev.c
1141 ++++ b/drivers/dma/ptdma/ptdma-dev.c
1142 +@@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt)
1143 + if (!cmd_q->qbase) {
1144 + dev_err(dev, "unable to allocate command queue\n");
1145 + ret = -ENOMEM;
1146 +- goto e_dma_alloc;
1147 ++ goto e_destroy_pool;
1148 + }
1149 +
1150 + cmd_q->qidx = 0;
1151 +@@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt)
1152 +
1153 + /* Request an irq */
1154 + ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
1155 +- if (ret)
1156 +- goto e_pool;
1157 ++ if (ret) {
1158 ++ dev_err(dev, "unable to allocate an IRQ\n");
1159 ++ goto e_free_dma;
1160 ++ }
1161 +
1162 + /* Update the device registers with queue information. */
1163 + cmd_q->qcontrol &= ~CMD_Q_SIZE;
1164 +@@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt)
1165 + /* Register the DMA engine support */
1166 + ret = pt_dmaengine_register(pt);
1167 + if (ret)
1168 +- goto e_dmaengine;
1169 ++ goto e_free_irq;
1170 +
1171 + /* Set up debugfs entries */
1172 + ptdma_debugfs_setup(pt);
1173 +
1174 + return 0;
1175 +
1176 +-e_dmaengine:
1177 ++e_free_irq:
1178 + free_irq(pt->pt_irq, pt);
1179 +
1180 +-e_dma_alloc:
1181 ++e_free_dma:
1182 + dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
1183 +
1184 +-e_pool:
1185 +- dev_err(dev, "unable to allocate an IRQ\n");
1186 ++e_destroy_pool:
1187 + dma_pool_destroy(pt->cmd_q.dma_pool);
1188 +
1189 + return ret;
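The ptdma relabeling matters because the old labels fell out of step with the allocations: a request_irq() failure jumped to e_pool and skipped freeing the already-allocated command queue buffer. The fix restores the usual convention that each error label releases exactly what was set up before the failing step, unwinding in reverse order. A minimal compilable skeleton of that convention (all names hypothetical):

    #include <stdio.h>

    /* Hypothetical stand-ins for the three ptdma setup steps. */
    static int  alloc_pool(void)        { return 0; }
    static int  alloc_queue(void)       { return 0; }
    static int  request_interrupt(void) { return -1; } /* failing step */
    static void free_queue(void)        { puts("free queue"); }
    static void destroy_pool(void)      { puts("destroy pool"); }

    static int core_init(void)
    {
            int ret;

            ret = alloc_pool();
            if (ret)
                    return ret;
            ret = alloc_queue();
            if (ret)
                    goto e_destroy_pool;
            ret = request_interrupt();
            if (ret)
                    goto e_free_queue;  /* frees everything set up so far */
            return 0;

    e_free_queue:
            free_queue();
    e_destroy_pool:
            destroy_pool();
            return ret;
    }

    int main(void)
    {
            return core_init() ? 1 : 0;
    }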
1190 +diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
1191 +index 5c7716fd6bc56..02fbb85a1aaf3 100644
1192 +--- a/drivers/dma/sh/rcar-dmac.c
1193 ++++ b/drivers/dma/sh/rcar-dmac.c
1194 +@@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1195 +
1196 + dmac->dev = &pdev->dev;
1197 + platform_set_drvdata(pdev, dmac);
1198 +- dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
1199 +- dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
1200 ++ ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
1201 ++ if (ret)
1202 ++ return ret;
1203 ++
1204 ++ ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
1205 ++ if (ret)
1206 ++ return ret;
1207 +
1208 + ret = rcar_dmac_parse_of(&pdev->dev, dmac);
1209 + if (ret < 0)
1210 +diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
1211 +index a42164389ebc2..d5d55732adba1 100644
1212 +--- a/drivers/dma/stm32-dmamux.c
1213 ++++ b/drivers/dma/stm32-dmamux.c
1214 +@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
1215 + ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
1216 + &stm32_dmamux->dmarouter);
1217 + if (ret)
1218 +- goto err_clk;
1219 ++ goto pm_disable;
1220 +
1221 + return 0;
1222 +
1223 ++pm_disable:
1224 ++ pm_runtime_disable(&pdev->dev);
1225 + err_clk:
1226 + clk_disable_unprepare(stm32_dmamux->clk);
1227 +
1228 +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
1229 +index 9f82ca2953530..9dcca5b90b804 100644
1230 +--- a/drivers/edac/edac_mc.c
1231 ++++ b/drivers/edac/edac_mc.c
1232 +@@ -213,7 +213,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems)
1233 + else
1234 + return (char *)ptr;
1235 +
1236 +- r = (unsigned long)p % align;
1237 ++ r = (unsigned long)ptr % align;
1238 +
1239 + if (r == 0)
1240 + return (char *)ptr;
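The edac fix is a single identifier: the alignment remainder must be computed from ptr, the address about to be handed out, not from p, which is the address of the caller's pointer variable and says nothing about the buffer. A small compilable illustration (names hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Round 'ptr' up to an 'align'-byte boundary. Taking the remainder
     * of the wrong variable (the address *of* a pointer variable) would
     * hand out misaligned storage while appearing to work in testing. */
    static void *align_up(void *ptr, uintptr_t align)
    {
            uintptr_t r = (uintptr_t)ptr % align;  /* the fix: ptr, not &ptr */

            return r ? (char *)ptr + (align - r) : ptr;
    }

    int main(void)
    {
            char buf[32];
            void *p = align_up(buf + 3, 8);

            printf("%s\n", ((uintptr_t)p % 8) == 0 ? "aligned" : "misaligned");
            return 0;
    }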
1241 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1242 +index 7d67aec6f4a2b..f59121ec26485 100644
1243 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1244 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1245 +@@ -1406,12 +1406,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
1246 + int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
1247 +
1248 + void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
1249 +-bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
1250 + void amdgpu_acpi_detect(void);
1251 + #else
1252 + static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
1253 + static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
1254 +-static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
1255 + static inline void amdgpu_acpi_detect(void) { }
1256 + static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
1257 + static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
1258 +@@ -1420,6 +1418,14 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
1259 + enum amdgpu_ss ss_state) { return 0; }
1260 + #endif
1261 +
1262 ++#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
1263 ++bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
1264 ++bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
1265 ++#else
1266 ++static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
1267 ++static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
1268 ++#endif
1269 ++
1270 + int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1271 + uint64_t addr, struct amdgpu_bo **bo,
1272 + struct amdgpu_bo_va_mapping **mapping);
1273 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
1274 +index 4811b0faafd9a..0e12315fa0cb8 100644
1275 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
1276 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
1277 +@@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void)
1278 + }
1279 + }
1280 +
1281 ++#if IS_ENABLED(CONFIG_SUSPEND)
1282 ++/**
1283 ++ * amdgpu_acpi_is_s3_active
1284 ++ *
1285 ++ * @adev: amdgpu device pointer
1286 ++ *
1287 ++ * returns true if supported, false if not.
1288 ++ */
1289 ++bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
1290 ++{
1291 ++ return !(adev->flags & AMD_IS_APU) ||
1292 ++ (pm_suspend_target_state == PM_SUSPEND_MEM);
1293 ++}
1294 ++
1295 + /**
1296 + * amdgpu_acpi_is_s0ix_active
1297 + *
1298 +@@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void)
1299 + */
1300 + bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
1301 + {
1302 +-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
1303 +- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
1304 +- if (adev->flags & AMD_IS_APU)
1305 +- return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
1306 ++ if (!(adev->flags & AMD_IS_APU) ||
1307 ++ (pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
1308 ++ return false;
1309 ++
1310 ++ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
1311 ++ dev_warn_once(adev->dev,
1312 ++ "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
1313 ++ "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
1314 ++ return false;
1315 + }
1316 +-#endif
1317 ++
1318 ++#if !IS_ENABLED(CONFIG_AMD_PMC)
1319 ++ dev_warn_once(adev->dev,
1320 ++ "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
1321 + return false;
1322 ++#else
1323 ++ return true;
1324 ++#endif /* CONFIG_AMD_PMC */
1325 + }
1326 ++
1327 ++#endif /* CONFIG_SUSPEND */
1328 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
1329 +index c811161ce9f09..ab3851c26f71c 100644
1330 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
1331 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
1332 +@@ -2236,6 +2236,7 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
1333 + static int amdgpu_pmops_prepare(struct device *dev)
1334 + {
1335 + struct drm_device *drm_dev = dev_get_drvdata(dev);
1336 ++ struct amdgpu_device *adev = drm_to_adev(drm_dev);
1337 +
1338 + /* Return a positive number here so
1339 + * DPM_FLAG_SMART_SUSPEND works properly
1340 +@@ -2243,6 +2244,13 @@ static int amdgpu_pmops_prepare(struct device *dev)
1341 + if (amdgpu_device_supports_boco(drm_dev))
1342 + return pm_runtime_suspended(dev);
1343 +
1344 ++ /* If the device will support neither S3 nor s2idle,
1345 ++ * then skip suspend.
1346 ++ */
1347 ++ if (!amdgpu_acpi_is_s0ix_active(adev) &&
1348 ++ !amdgpu_acpi_is_s3_active(adev))
1349 ++ return 1;
1350 ++
1351 + return 0;
1352 + }
1353 +
1354 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1355 +index c875f1cdd2af7..ffc3ce0004e99 100644
1356 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1357 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1358 +@@ -1913,7 +1913,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1359 + unsigned i;
1360 + int r;
1361 +
1362 +- if (direct_submit && !ring->sched.ready) {
1363 ++ if (!direct_submit && !ring->sched.ready) {
1364 + DRM_ERROR("Trying to move memory with ring turned off.\n");
1365 + return -EINVAL;
1366 + }
1367 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
1368 +index b4eddf6e98a6a..ff738e9725ee8 100644
1369 +--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
1370 ++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
1371 +@@ -543,7 +543,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
1372 + adev->gfx.config.max_sh_per_se *
1373 + adev->gfx.config.max_shader_engines);
1374 +
1375 +- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) {
1376 ++ switch (adev->ip_versions[GC_HWIP][0]) {
1377 ++ case IP_VERSION(10, 3, 1):
1378 ++ case IP_VERSION(10, 3, 3):
1379 + /* Get SA disabled bitmap from eFuse setting */
1380 + efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
1381 + efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
1382 +@@ -566,6 +568,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
1383 + disabled_sa = tmp;
1384 +
1385 + WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa);
1386 ++ break;
1387 ++ default:
1388 ++ break;
1389 + }
1390 + }
1391 +
1392 +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
1393 +index e8e4749e9c797..f0638db57111d 100644
1394 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
1395 ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
1396 +@@ -2057,6 +2057,10 @@ static int sdma_v4_0_suspend(void *handle)
1397 + {
1398 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1399 +
1400 ++ /* SMU saves SDMA state for us */
1401 ++ if (adev->in_s0ix)
1402 ++ return 0;
1403 ++
1404 + return sdma_v4_0_hw_fini(adev);
1405 + }
1406 +
1407 +@@ -2064,6 +2068,10 @@ static int sdma_v4_0_resume(void *handle)
1408 + {
1409 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1410 +
1411 ++ /* SMU restores SDMA state for us */
1412 ++ if (adev->in_s0ix)
1413 ++ return 0;
1414 ++
1415 + return sdma_v4_0_hw_init(adev);
1416 + }
1417 +
1418 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1419 +index efcb25ef1809a..0117b00b4ed83 100644
1420 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1421 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1422 +@@ -3629,7 +3629,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1423 +
1424 + /* Use GRPH_PFLIP interrupt */
1425 + for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1426 +- i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1427 ++ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
1428 + i++) {
1429 + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1430 + if (r) {
1431 +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
1432 +index 162ae71861247..21d2cbc3cbb20 100644
1433 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
1434 ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
1435 +@@ -120,7 +120,11 @@ int dcn31_smu_send_msg_with_param(
1436 + result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
1437 +
1438 + if (result == VBIOSSMC_Result_Failed) {
1439 +- ASSERT(0);
1440 ++ if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
1441 ++ param == TABLE_WATERMARKS)
1442 ++ DC_LOG_WARNING("Watermarks table not configured properly by SMU");
1443 ++ else
1444 ++ ASSERT(0);
1445 + REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
1446 + return -1;
1447 + }
1448 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
1449 +index f0fbd8ad56229..e890e063cde31 100644
1450 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
1451 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
1452 +@@ -1237,6 +1237,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
1453 +
1454 + dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1455 +
1456 ++ dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
1457 ++
1458 + if (dc->res_pool->dmcu != NULL)
1459 + dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1460 + }
1461 +diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
1462 +index 618e7989176fc..14864763a1881 100644
1463 +--- a/drivers/gpu/drm/amd/display/dc/dc.h
1464 ++++ b/drivers/gpu/drm/amd/display/dc/dc.h
1465 +@@ -190,6 +190,7 @@ struct dc_caps {
1466 + #endif
1467 + bool vbios_lttpr_aware;
1468 + bool vbios_lttpr_enable;
1469 ++ uint32_t max_otg_num;
1470 + };
1471 +
1472 + struct dc_bug_wa {
1473 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
1474 +index 90c73a1cb9861..5e3bcaf12cac4 100644
1475 +--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
1476 ++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
1477 +@@ -138,8 +138,11 @@ static uint32_t convert_and_clamp(
1478 + ret_val = wm_ns * refclk_mhz;
1479 + ret_val /= 1000;
1480 +
1481 +- if (ret_val > clamp_value)
1482 ++ if (ret_val > clamp_value) {
1483 ++ /* clamping WMs is abnormal, unexpected and may lead to underflow */
1484 ++ ASSERT(0);
1485 + ret_val = clamp_value;
1486 ++ }
1487 +
1488 + return ret_val;
1489 + }
1490 +@@ -159,7 +162,7 @@ static bool hubbub31_program_urgent_watermarks(
1491 + if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
1492 + hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
1493 + prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
1494 +- refclk_mhz, 0x1fffff);
1495 ++ refclk_mhz, 0x3fff);
1496 + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
1497 + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
1498 +
1499 +@@ -193,7 +196,7 @@ static bool hubbub31_program_urgent_watermarks(
1500 + if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
1501 + hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
1502 + prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
1503 +- refclk_mhz, 0x1fffff);
1504 ++ refclk_mhz, 0x3fff);
1505 + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
1506 + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
1507 + } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
1508 +@@ -203,7 +206,7 @@ static bool hubbub31_program_urgent_watermarks(
1509 + if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
1510 + hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
1511 + prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
1512 +- refclk_mhz, 0x1fffff);
1513 ++ refclk_mhz, 0x3fff);
1514 + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
1515 + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
1516 +
1517 +@@ -237,7 +240,7 @@ static bool hubbub31_program_urgent_watermarks(
1518 + if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
1519 + hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
1520 + prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
1521 +- refclk_mhz, 0x1fffff);
1522 ++ refclk_mhz, 0x3fff);
1523 + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
1524 + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
1525 + } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
1526 +@@ -247,7 +250,7 @@ static bool hubbub31_program_urgent_watermarks(
1527 + if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
1528 + hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
1529 + prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
1530 +- refclk_mhz, 0x1fffff);
1531 ++ refclk_mhz, 0x3fff);
1532 + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
1533 + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
1534 +
1535 +@@ -281,7 +284,7 @@ static bool hubbub31_program_urgent_watermarks(
1536 + if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
1537 + hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
1538 + prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
1539 +- refclk_mhz, 0x1fffff);
1540 ++ refclk_mhz, 0x3fff);
1541 + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
1542 + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
1543 + } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
1544 +@@ -291,7 +294,7 @@ static bool hubbub31_program_urgent_watermarks(
1545 + if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
1546 + hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
1547 + prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
1548 +- refclk_mhz, 0x1fffff);
1549 ++ refclk_mhz, 0x3fff);
1550 + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
1551 + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
1552 +
1553 +@@ -325,7 +328,7 @@ static bool hubbub31_program_urgent_watermarks(
1554 + if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
1555 + hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
1556 + prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
1557 +- refclk_mhz, 0x1fffff);
1558 ++ refclk_mhz, 0x3fff);
1559 + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
1560 + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
1561 + } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
1562 +@@ -351,7 +354,7 @@ static bool hubbub31_program_stutter_watermarks(
1563 + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
1564 + prog_wm_value = convert_and_clamp(
1565 + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
1566 +- refclk_mhz, 0x1fffff);
1567 ++ refclk_mhz, 0xffff);
1568 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
1569 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
1570 + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
1571 +@@ -367,7 +370,7 @@ static bool hubbub31_program_stutter_watermarks(
1572 + watermarks->a.cstate_pstate.cstate_exit_ns;
1573 + prog_wm_value = convert_and_clamp(
1574 + watermarks->a.cstate_pstate.cstate_exit_ns,
1575 +- refclk_mhz, 0x1fffff);
1576 ++ refclk_mhz, 0xffff);
1577 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
1578 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
1579 + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
1580 +@@ -383,7 +386,7 @@ static bool hubbub31_program_stutter_watermarks(
1581 + watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
1582 + prog_wm_value = convert_and_clamp(
1583 + watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
1584 +- refclk_mhz, 0x1fffff);
1585 ++ refclk_mhz, 0xffff);
1586 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
1587 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
1588 + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
1589 +@@ -399,7 +402,7 @@ static bool hubbub31_program_stutter_watermarks(
1590 + watermarks->a.cstate_pstate.cstate_exit_z8_ns;
1591 + prog_wm_value = convert_and_clamp(
1592 + watermarks->a.cstate_pstate.cstate_exit_z8_ns,
1593 +- refclk_mhz, 0x1fffff);
1594 ++ refclk_mhz, 0xffff);
1595 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
1596 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
1597 + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
1598 +@@ -416,7 +419,7 @@ static bool hubbub31_program_stutter_watermarks(
1599 + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
1600 + prog_wm_value = convert_and_clamp(
1601 + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
1602 +- refclk_mhz, 0x1fffff);
1603 ++ refclk_mhz, 0xffff);
1604 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
1605 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
1606 + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
1607 +@@ -432,7 +435,7 @@ static bool hubbub31_program_stutter_watermarks(
1608 + watermarks->b.cstate_pstate.cstate_exit_ns;
1609 + prog_wm_value = convert_and_clamp(
1610 + watermarks->b.cstate_pstate.cstate_exit_ns,
1611 +- refclk_mhz, 0x1fffff);
1612 ++ refclk_mhz, 0xffff);
1613 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
1614 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
1615 + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
1616 +@@ -448,7 +451,7 @@ static bool hubbub31_program_stutter_watermarks(
1617 + watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
1618 + prog_wm_value = convert_and_clamp(
1619 + watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
1620 +- refclk_mhz, 0x1fffff);
1621 ++ refclk_mhz, 0xffff);
1622 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
1623 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
1624 + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
1625 +@@ -464,7 +467,7 @@ static bool hubbub31_program_stutter_watermarks(
1626 + watermarks->b.cstate_pstate.cstate_exit_z8_ns;
1627 + prog_wm_value = convert_and_clamp(
1628 + watermarks->b.cstate_pstate.cstate_exit_z8_ns,
1629 +- refclk_mhz, 0x1fffff);
1630 ++ refclk_mhz, 0xffff);
1631 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
1632 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
1633 + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
1634 +@@ -481,7 +484,7 @@ static bool hubbub31_program_stutter_watermarks(
1635 + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
1636 + prog_wm_value = convert_and_clamp(
1637 + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
1638 +- refclk_mhz, 0x1fffff);
1639 ++ refclk_mhz, 0xffff);
1640 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
1641 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
1642 + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
1643 +@@ -497,7 +500,7 @@ static bool hubbub31_program_stutter_watermarks(
1644 + watermarks->c.cstate_pstate.cstate_exit_ns;
1645 + prog_wm_value = convert_and_clamp(
1646 + watermarks->c.cstate_pstate.cstate_exit_ns,
1647 +- refclk_mhz, 0x1fffff);
1648 ++ refclk_mhz, 0xffff);
1649 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
1650 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
1651 + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
1652 +@@ -513,7 +516,7 @@ static bool hubbub31_program_stutter_watermarks(
1653 + watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
1654 + prog_wm_value = convert_and_clamp(
1655 + watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
1656 +- refclk_mhz, 0x1fffff);
1657 ++ refclk_mhz, 0xffff);
1658 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
1659 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
1660 + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
1661 +@@ -529,7 +532,7 @@ static bool hubbub31_program_stutter_watermarks(
1662 + watermarks->c.cstate_pstate.cstate_exit_z8_ns;
1663 + prog_wm_value = convert_and_clamp(
1664 + watermarks->c.cstate_pstate.cstate_exit_z8_ns,
1665 +- refclk_mhz, 0x1fffff);
1666 ++ refclk_mhz, 0xffff);
1667 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
1668 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
1669 + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
1670 +@@ -546,7 +549,7 @@ static bool hubbub31_program_stutter_watermarks(
1671 + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
1672 + prog_wm_value = convert_and_clamp(
1673 + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
1674 +- refclk_mhz, 0x1fffff);
1675 ++ refclk_mhz, 0xffff);
1676 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
1677 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
1678 + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
1679 +@@ -562,7 +565,7 @@ static bool hubbub31_program_stutter_watermarks(
1680 + watermarks->d.cstate_pstate.cstate_exit_ns;
1681 + prog_wm_value = convert_and_clamp(
1682 + watermarks->d.cstate_pstate.cstate_exit_ns,
1683 +- refclk_mhz, 0x1fffff);
1684 ++ refclk_mhz, 0xffff);
1685 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
1686 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
1687 + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
1688 +@@ -578,7 +581,7 @@ static bool hubbub31_program_stutter_watermarks(
1689 + watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
1690 + prog_wm_value = convert_and_clamp(
1691 + watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
1692 +- refclk_mhz, 0x1fffff);
1693 ++ refclk_mhz, 0xffff);
1694 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
1695 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
1696 + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
1697 +@@ -594,7 +597,7 @@ static bool hubbub31_program_stutter_watermarks(
1698 + watermarks->d.cstate_pstate.cstate_exit_z8_ns;
1699 + prog_wm_value = convert_and_clamp(
1700 + watermarks->d.cstate_pstate.cstate_exit_z8_ns,
1701 +- refclk_mhz, 0x1fffff);
1702 ++ refclk_mhz, 0xffff);
1703 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
1704 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
1705 + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
1706 +@@ -625,7 +628,7 @@ static bool hubbub31_program_pstate_watermarks(
1707 + watermarks->a.cstate_pstate.pstate_change_ns;
1708 + prog_wm_value = convert_and_clamp(
1709 + watermarks->a.cstate_pstate.pstate_change_ns,
1710 +- refclk_mhz, 0x1fffff);
1711 ++ refclk_mhz, 0xffff);
1712 + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
1713 + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
1714 + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
1715 +@@ -642,7 +645,7 @@ static bool hubbub31_program_pstate_watermarks(
1716 + watermarks->b.cstate_pstate.pstate_change_ns;
1717 + prog_wm_value = convert_and_clamp(
1718 + watermarks->b.cstate_pstate.pstate_change_ns,
1719 +- refclk_mhz, 0x1fffff);
1720 ++ refclk_mhz, 0xffff);
1721 + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
1722 + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
1723 + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
1724 +@@ -659,7 +662,7 @@ static bool hubbub31_program_pstate_watermarks(
1725 + watermarks->c.cstate_pstate.pstate_change_ns;
1726 + prog_wm_value = convert_and_clamp(
1727 + watermarks->c.cstate_pstate.pstate_change_ns,
1728 +- refclk_mhz, 0x1fffff);
1729 ++ refclk_mhz, 0xffff);
1730 + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
1731 + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
1732 + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
1733 +@@ -676,7 +679,7 @@ static bool hubbub31_program_pstate_watermarks(
1734 + watermarks->d.cstate_pstate.pstate_change_ns;
1735 + prog_wm_value = convert_and_clamp(
1736 + watermarks->d.cstate_pstate.pstate_change_ns,
1737 +- refclk_mhz, 0x1fffff);
1738 ++ refclk_mhz, 0xffff);
1739 + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
1740 + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
1741 + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
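[The repeated 0x1fffff -> 0x3fff / 0xffff changes above bring the clamp ceiling down to what the respective watermark register fields can actually hold, and convert_and_clamp() now asserts when it clamps, since a clamped watermark can underflow the display pipeline. A compilable sketch of the helper; the field widths come from the hunk, the rest is illustrative:]

    #include <assert.h>
    #include <stdint.h>

    /* wm_ns * refclk_mhz / 1000 gives refclk cycles; clamp_value is the
     * register field width (0x3fff for urgent, 0xffff for stutter/pstate). */
    static uint32_t convert_and_clamp(uint32_t wm_ns, uint32_t refclk_mhz,
                                      uint32_t clamp_value)
    {
            uint64_t ret_val = (uint64_t)wm_ns * refclk_mhz / 1000;

            if (ret_val > clamp_value) {
                    /* clamping is abnormal and may lead to underflow */
                    assert(0);
                    ret_val = clamp_value;
            }
            return (uint32_t)ret_val;
    }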
1742 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
1743 +index caf1775d48ef6..0bc84b709a935 100644
1744 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
1745 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
1746 +@@ -282,14 +282,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu)
1747 +
1748 + static int yellow_carp_mode_reset(struct smu_context *smu, int type)
1749 + {
1750 +- int ret = 0, index = 0;
1751 +-
1752 +- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
1753 +- SMU_MSG_GfxDeviceDriverReset);
1754 +- if (index < 0)
1755 +- return index == -EACCES ? 0 : index;
1756 ++ int ret = 0;
1757 +
1758 +- ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
1759 ++ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
1760 + if (ret)
1761 + dev_err(smu->adev->dev, "Failed to mode reset!\n");
1762 +
1763 +diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
1764 +index 909f318331816..f195c70131373 100644
1765 +--- a/drivers/gpu/drm/drm_atomic_uapi.c
1766 ++++ b/drivers/gpu/drm/drm_atomic_uapi.c
1767 +@@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
1768 + state->mode_blob = NULL;
1769 +
1770 + if (mode) {
1771 ++ struct drm_property_blob *blob;
1772 ++
1773 + drm_mode_convert_to_umode(&umode, mode);
1774 +- state->mode_blob =
1775 +- drm_property_create_blob(state->crtc->dev,
1776 +- sizeof(umode),
1777 +- &umode);
1778 +- if (IS_ERR(state->mode_blob))
1779 +- return PTR_ERR(state->mode_blob);
1780 ++ blob = drm_property_create_blob(crtc->dev,
1781 ++ sizeof(umode), &umode);
1782 ++ if (IS_ERR(blob))
1783 ++ return PTR_ERR(blob);
1784 +
1785 + drm_mode_copy(&state->mode, mode);
1786 ++
1787 ++ state->mode_blob = blob;
1788 + state->enable = true;
1789 + drm_dbg_atomic(crtc->dev,
1790 + "Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
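[Building the blob into a local first means state->mode_blob only ever receives a valid pointer; on allocation failure the state no longer holds an ERR_PTR that a later put would trip over. A generic sketch of the commit-on-success pattern (types and helpers are stand-ins):]

    #include <stddef.h>

    struct blob;
    struct state { struct blob *mode_blob; };

    extern struct blob *create_blob(void);  /* assumed: NULL on failure */

    static int set_mode(struct state *state)
    {
            struct blob *b = create_blob(); /* build into a local first */

            if (!b)
                    return -1;              /* state->mode_blob untouched */
            state->mode_blob = b;           /* commit only a valid pointer */
            return 0;
    }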
1791 +diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
1792 +index 1e7e8cd64cb58..9338a342027a5 100644
1793 +--- a/drivers/gpu/drm/drm_gem_cma_helper.c
1794 ++++ b/drivers/gpu/drm/drm_gem_cma_helper.c
1795 +@@ -518,6 +518,7 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1796 + */
1797 + vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
1798 + vma->vm_flags &= ~VM_PFNMAP;
1799 ++ vma->vm_flags |= VM_DONTEXPAND;
1800 +
1801 + cma_obj = to_drm_gem_cma_obj(obj);
1802 +
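[CMA-backed GEM mappings now also set VM_DONTEXPAND, so userspace cannot mremap() the VMA beyond the buffer that actually backs it. A tiny standalone demo of the flag manipulation; the bit positions are illustrative, not taken from kernel headers:]

    #include <stdint.h>
    #include <stdio.h>

    #define VM_PFNMAP     (1u << 10) /* illustrative bit positions only */
    #define VM_DONTEXPAND (1u << 6)

    int main(void)
    {
            uint32_t flags = VM_PFNMAP;

            flags &= ~VM_PFNMAP;      /* pages are refcounted, not raw PFNs */
            flags |= VM_DONTEXPAND;   /* mremap() may not grow this mapping */
            printf("flags=%#x\n", flags);
            return 0;
    }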
1803 +diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
1804 +index 84b6fc70cbf52..0bddb75fa7e01 100644
1805 +--- a/drivers/gpu/drm/i915/Kconfig
1806 ++++ b/drivers/gpu/drm/i915/Kconfig
1807 +@@ -101,6 +101,7 @@ config DRM_I915_USERPTR
1808 + config DRM_I915_GVT
1809 + bool "Enable Intel GVT-g graphics virtualization host support"
1810 + depends on DRM_I915
1811 ++ depends on X86
1812 + depends on 64BIT
1813 + default n
1814 + help
1815 +diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
1816 +index 0065111593a60..4a2662838cd8d 100644
1817 +--- a/drivers/gpu/drm/i915/display/intel_opregion.c
1818 ++++ b/drivers/gpu/drm/i915/display/intel_opregion.c
1819 +@@ -360,6 +360,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
1820 + port++;
1821 + }
1822 +
1823 ++ /*
1824 ++ * The port numbering and mapping here is bizarre. The now-obsolete
1825 ++ * swsci spec supports ports numbered [0..4]. Port E is handled as a
1826 ++ * special case, but port F and beyond are not. The functionality is
1827 ++ * supposed to be obsolete for new platforms. Just bail out if the port
1828 ++ * number is out of bounds after mapping.
1829 ++ */
1830 ++ if (port > 4) {
1831 ++ drm_dbg_kms(&dev_priv->drm,
1832 ++ "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
1833 ++ intel_encoder->base.base.id, intel_encoder->base.name,
1834 ++ port_name(intel_encoder->port), port);
1835 ++ return -EINVAL;
1836 ++ }
1837 ++
1838 + if (!enable)
1839 + parm |= 4 << 8;
1840 +
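[As the new comment explains, the SWSCI display-power notification only defines ports 0..4, so the mapped index is now range-checked before being encoded into the parameter word. A hedged sketch of the check, with an invented encoding:]

    #include <errno.h>

    static int encode_port_parm(unsigned int port, int enable, unsigned int *parm)
    {
            if (port > 4)              /* the swsci spec stops at port 4 */
                    return -EINVAL;
            *parm = 1u << port;        /* illustrative encoding only */
            if (!enable)
                    *parm |= 4u << 8;
            return 0;
    }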
1841 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
1842 +index 74a1ffd0d7ddb..dcb184d9d0b80 100644
1843 +--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
1844 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
1845 +@@ -787,11 +787,9 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
1846 + if (obj->mm.madv != I915_MADV_WILLNEED) {
1847 + bo->priority = I915_TTM_PRIO_PURGE;
1848 + } else if (!i915_gem_object_has_pages(obj)) {
1849 +- if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
1850 +- bo->priority = I915_TTM_PRIO_HAS_PAGES;
1851 ++ bo->priority = I915_TTM_PRIO_NO_PAGES;
1852 + } else {
1853 +- if (bo->priority > I915_TTM_PRIO_NO_PAGES)
1854 +- bo->priority = I915_TTM_PRIO_NO_PAGES;
1855 ++ bo->priority = I915_TTM_PRIO_HAS_PAGES;
1856 + }
1857 +
1858 + ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
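[The old code effectively had the two priorities swapped: objects without pages could be kept at HAS_PAGES priority and vice versa. The fix assigns the priority unconditionally from the object's actual state, as in this compilable sketch (enum names are illustrative):]

    enum prio { PRIO_PURGE, PRIO_NO_PAGES, PRIO_HAS_PAGES };

    static enum prio object_prio(int willneed, int has_pages)
    {
            if (!willneed)
                    return PRIO_PURGE;  /* madvise says we may drop it */
            return has_pages ? PRIO_HAS_PAGES : PRIO_NO_PAGES;
    }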
1859 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1860 +index d4ca29755e647..75c1522fdae8c 100644
1861 +--- a/drivers/gpu/drm/i915/intel_pm.c
1862 ++++ b/drivers/gpu/drm/i915/intel_pm.c
1863 +@@ -4843,7 +4843,7 @@ static bool check_mbus_joined(u8 active_pipes,
1864 + {
1865 + int i;
1866 +
1867 +- for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
1868 ++ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1869 + if (dbuf_slices[i].active_pipes == active_pipes)
1870 + return dbuf_slices[i].join_mbus;
1871 + }
1872 +@@ -4860,7 +4860,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
1873 + {
1874 + int i;
1875 +
1876 +- for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
1877 ++ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1878 + if (dbuf_slices[i].active_pipes == active_pipes &&
1879 + dbuf_slices[i].join_mbus == join_mbus)
1880 + return dbuf_slices[i].dbuf_mask[pipe];
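[Both loops above walk a sentinel-terminated table, but the old condition "i < dbuf_slices[i].active_pipes" compared the index against the entry's value instead of testing for the zero sentinel, so iteration stopped at essentially arbitrary points. A runnable sketch of the corrected idiom with an invented table:]

    #include <stdio.h>

    struct slice { unsigned int active_pipes; int join_mbus; };

    static const struct slice table[] = {
            { 0x1, 0 }, { 0x3, 1 },
            { 0 }                       /* zero sentinel terminates the table */
    };

    static int lookup_join_mbus(unsigned int active_pipes)
    {
            int i;

            for (i = 0; table[i].active_pipes != 0; i++) /* stop at sentinel */
                    if (table[i].active_pipes == active_pipes)
                            return table[i].join_mbus;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", lookup_join_mbus(0x3));
            return 0;
    }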
1881 +diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
1882 +index 5d90d2eb00193..bced4c7d668e3 100644
1883 +--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
1884 ++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
1885 +@@ -786,18 +786,101 @@ void mtk_dsi_ddp_stop(struct device *dev)
1886 + mtk_dsi_poweroff(dsi);
1887 + }
1888 +
1889 ++static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
1890 ++{
1891 ++ int ret;
1892 ++
1893 ++ ret = drm_simple_encoder_init(drm, &dsi->encoder,
1894 ++ DRM_MODE_ENCODER_DSI);
1895 ++ if (ret) {
1896 ++ DRM_ERROR("Failed to encoder init to drm\n");
1897 ++ return ret;
1898 ++ }
1899 ++
1900 ++ dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
1901 ++
1902 ++ ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
1903 ++ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
1904 ++ if (ret)
1905 ++ goto err_cleanup_encoder;
1906 ++
1907 ++ dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
1908 ++ if (IS_ERR(dsi->connector)) {
1909 ++ DRM_ERROR("Unable to create bridge connector\n");
1910 ++ ret = PTR_ERR(dsi->connector);
1911 ++ goto err_cleanup_encoder;
1912 ++ }
1913 ++ drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
1914 ++
1915 ++ return 0;
1916 ++
1917 ++err_cleanup_encoder:
1918 ++ drm_encoder_cleanup(&dsi->encoder);
1919 ++ return ret;
1920 ++}
1921 ++
1922 ++static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
1923 ++{
1924 ++ int ret;
1925 ++ struct drm_device *drm = data;
1926 ++ struct mtk_dsi *dsi = dev_get_drvdata(dev);
1927 ++
1928 ++ ret = mtk_dsi_encoder_init(drm, dsi);
1929 ++ if (ret)
1930 ++ return ret;
1931 ++
1932 ++ return device_reset_optional(dev);
1933 ++}
1934 ++
1935 ++static void mtk_dsi_unbind(struct device *dev, struct device *master,
1936 ++ void *data)
1937 ++{
1938 ++ struct mtk_dsi *dsi = dev_get_drvdata(dev);
1939 ++
1940 ++ drm_encoder_cleanup(&dsi->encoder);
1941 ++}
1942 ++
1943 ++static const struct component_ops mtk_dsi_component_ops = {
1944 ++ .bind = mtk_dsi_bind,
1945 ++ .unbind = mtk_dsi_unbind,
1946 ++};
1947 ++
1948 + static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
1949 + struct mipi_dsi_device *device)
1950 + {
1951 + struct mtk_dsi *dsi = host_to_dsi(host);
1952 ++ struct device *dev = host->dev;
1953 ++ int ret;
1954 +
1955 + dsi->lanes = device->lanes;
1956 + dsi->format = device->format;
1957 + dsi->mode_flags = device->mode_flags;
1958 ++ dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
1959 ++ if (IS_ERR(dsi->next_bridge))
1960 ++ return PTR_ERR(dsi->next_bridge);
1961 ++
1962 ++ drm_bridge_add(&dsi->bridge);
1963 ++
1964 ++ ret = component_add(host->dev, &mtk_dsi_component_ops);
1965 ++ if (ret) {
1966 ++ DRM_ERROR("failed to add dsi_host component: %d\n", ret);
1967 ++ drm_bridge_remove(&dsi->bridge);
1968 ++ return ret;
1969 ++ }
1970 +
1971 + return 0;
1972 + }
1973 +
1974 ++static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
1975 ++ struct mipi_dsi_device *device)
1976 ++{
1977 ++ struct mtk_dsi *dsi = host_to_dsi(host);
1978 ++
1979 ++ component_del(host->dev, &mtk_dsi_component_ops);
1980 ++ drm_bridge_remove(&dsi->bridge);
1981 ++ return 0;
1982 ++}
1983 ++
1984 + static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
1985 + {
1986 + int ret;
1987 +@@ -938,73 +1021,14 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
1988 +
1989 + static const struct mipi_dsi_host_ops mtk_dsi_ops = {
1990 + .attach = mtk_dsi_host_attach,
1991 ++ .detach = mtk_dsi_host_detach,
1992 + .transfer = mtk_dsi_host_transfer,
1993 + };
1994 +
1995 +-static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
1996 +-{
1997 +- int ret;
1998 +-
1999 +- ret = drm_simple_encoder_init(drm, &dsi->encoder,
2000 +- DRM_MODE_ENCODER_DSI);
2001 +- if (ret) {
2002 +- DRM_ERROR("Failed to encoder init to drm\n");
2003 +- return ret;
2004 +- }
2005 +-
2006 +- dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
2007 +-
2008 +- ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
2009 +- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
2010 +- if (ret)
2011 +- goto err_cleanup_encoder;
2012 +-
2013 +- dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
2014 +- if (IS_ERR(dsi->connector)) {
2015 +- DRM_ERROR("Unable to create bridge connector\n");
2016 +- ret = PTR_ERR(dsi->connector);
2017 +- goto err_cleanup_encoder;
2018 +- }
2019 +- drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
2020 +-
2021 +- return 0;
2022 +-
2023 +-err_cleanup_encoder:
2024 +- drm_encoder_cleanup(&dsi->encoder);
2025 +- return ret;
2026 +-}
2027 +-
2028 +-static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
2029 +-{
2030 +- int ret;
2031 +- struct drm_device *drm = data;
2032 +- struct mtk_dsi *dsi = dev_get_drvdata(dev);
2033 +-
2034 +- ret = mtk_dsi_encoder_init(drm, dsi);
2035 +- if (ret)
2036 +- return ret;
2037 +-
2038 +- return device_reset_optional(dev);
2039 +-}
2040 +-
2041 +-static void mtk_dsi_unbind(struct device *dev, struct device *master,
2042 +- void *data)
2043 +-{
2044 +- struct mtk_dsi *dsi = dev_get_drvdata(dev);
2045 +-
2046 +- drm_encoder_cleanup(&dsi->encoder);
2047 +-}
2048 +-
2049 +-static const struct component_ops mtk_dsi_component_ops = {
2050 +- .bind = mtk_dsi_bind,
2051 +- .unbind = mtk_dsi_unbind,
2052 +-};
2053 +-
2054 + static int mtk_dsi_probe(struct platform_device *pdev)
2055 + {
2056 + struct mtk_dsi *dsi;
2057 + struct device *dev = &pdev->dev;
2058 +- struct drm_panel *panel;
2059 + struct resource *regs;
2060 + int irq_num;
2061 + int ret;
2062 +@@ -1021,19 +1045,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
2063 + return ret;
2064 + }
2065 +
2066 +- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
2067 +- &panel, &dsi->next_bridge);
2068 +- if (ret)
2069 +- goto err_unregister_host;
2070 +-
2071 +- if (panel) {
2072 +- dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel);
2073 +- if (IS_ERR(dsi->next_bridge)) {
2074 +- ret = PTR_ERR(dsi->next_bridge);
2075 +- goto err_unregister_host;
2076 +- }
2077 +- }
2078 +-
2079 + dsi->driver_data = of_device_get_match_data(dev);
2080 +
2081 + dsi->engine_clk = devm_clk_get(dev, "engine");
2082 +@@ -1098,14 +1109,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
2083 + dsi->bridge.of_node = dev->of_node;
2084 + dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
2085 +
2086 +- drm_bridge_add(&dsi->bridge);
2087 +-
2088 +- ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
2089 +- if (ret) {
2090 +- dev_err(&pdev->dev, "failed to add component: %d\n", ret);
2091 +- goto err_unregister_host;
2092 +- }
2093 +-
2094 + return 0;
2095 +
2096 + err_unregister_host:
2097 +@@ -1118,8 +1121,6 @@ static int mtk_dsi_remove(struct platform_device *pdev)
2098 + struct mtk_dsi *dsi = platform_get_drvdata(pdev);
2099 +
2100 + mtk_output_dsi_disable(dsi);
2101 +- drm_bridge_remove(&dsi->bridge);
2102 +- component_del(&pdev->dev, &mtk_dsi_component_ops);
2103 + mipi_dsi_host_unregister(&dsi->host);
2104 +
2105 + return 0;
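[This refactor moves the encoder/bridge/component setup from probe time into the DSI host attach callback, so the DRM component (and the downstream bridge lookup) only comes into existence once a device actually attaches, and detach tears it down symmetrically. A hedged outline of the attach/detach pairing, with stand-in helpers:]

    struct host;
    extern int lookup_bridge(struct host *h);   /* may return -EPROBE_DEFER */
    extern void add_bridge(struct host *h);
    extern void remove_bridge(struct host *h);
    extern int add_component(struct host *h);
    extern void del_component(struct host *h);

    static int host_attach(struct host *h)
    {
            int ret = lookup_bridge(h);
            if (ret)
                    return ret;
            add_bridge(h);
            ret = add_component(h);
            if (ret)
                    remove_bridge(h);           /* unwind in reverse order */
            return ret;
    }

    static int host_detach(struct host *h)
    {
            del_component(h);                   /* mirrors attach exactly */
            remove_bridge(h);
            return 0;
    }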
2106 +diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
2107 +index 262641a014b06..c91130a6be2a1 100644
2108 +--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
2109 ++++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
2110 +@@ -117,8 +117,12 @@ nvkm_falcon_disable(struct nvkm_falcon *falcon)
2111 + int
2112 + nvkm_falcon_reset(struct nvkm_falcon *falcon)
2113 + {
2114 +- nvkm_falcon_disable(falcon);
2115 +- return nvkm_falcon_enable(falcon);
2116 ++ if (!falcon->func->reset) {
2117 ++ nvkm_falcon_disable(falcon);
2118 ++ return nvkm_falcon_enable(falcon);
2119 ++ }
2120 ++
2121 ++ return falcon->func->reset(falcon);
2122 + }
2123 +
2124 + int
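[nvkm_falcon_reset() now prefers a per-implementation reset hook and only falls back to the generic disable/enable sequence, the standard "optional virtual method with default" pattern in C. A compilable sketch with simplified types:]

    struct falcon;
    struct falcon_func { int (*reset)(struct falcon *); };
    struct falcon { const struct falcon_func *func; };

    extern void falcon_disable(struct falcon *f);
    extern int falcon_enable(struct falcon *f);

    static int falcon_reset(struct falcon *f)
    {
            if (!f->func->reset) {        /* no override: generic sequence */
                    falcon_disable(f);
                    return falcon_enable(f);
            }
            return f->func->reset(f);     /* chip-specific reset */
    }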
2125 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
2126 +index 5968c7696596c..40439e329aa9f 100644
2127 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
2128 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
2129 +@@ -23,9 +23,38 @@
2130 + */
2131 + #include "priv.h"
2132 +
2133 ++static int
2134 ++gm200_pmu_flcn_reset(struct nvkm_falcon *falcon)
2135 ++{
2136 ++ struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
2137 ++
2138 ++ nvkm_falcon_wr32(falcon, 0x014, 0x0000ffff);
2139 ++ pmu->func->reset(pmu);
2140 ++ return nvkm_falcon_enable(falcon);
2141 ++}
2142 ++
2143 ++const struct nvkm_falcon_func
2144 ++gm200_pmu_flcn = {
2145 ++ .debug = 0xc08,
2146 ++ .fbif = 0xe00,
2147 ++ .load_imem = nvkm_falcon_v1_load_imem,
2148 ++ .load_dmem = nvkm_falcon_v1_load_dmem,
2149 ++ .read_dmem = nvkm_falcon_v1_read_dmem,
2150 ++ .bind_context = nvkm_falcon_v1_bind_context,
2151 ++ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
2152 ++ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
2153 ++ .set_start_addr = nvkm_falcon_v1_set_start_addr,
2154 ++ .start = nvkm_falcon_v1_start,
2155 ++ .enable = nvkm_falcon_v1_enable,
2156 ++ .disable = nvkm_falcon_v1_disable,
2157 ++ .reset = gm200_pmu_flcn_reset,
2158 ++ .cmdq = { 0x4a0, 0x4b0, 4 },
2159 ++ .msgq = { 0x4c8, 0x4cc, 0 },
2160 ++};
2161 ++
2162 + static const struct nvkm_pmu_func
2163 + gm200_pmu = {
2164 +- .flcn = &gt215_pmu_flcn,
2165 ++ .flcn = &gm200_pmu_flcn,
2166 + .enabled = gf100_pmu_enabled,
2167 + .reset = gf100_pmu_reset,
2168 + };
2169 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
2170 +index 148706977eec7..e1772211b0a4b 100644
2171 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
2172 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
2173 +@@ -211,7 +211,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu)
2174 +
2175 + static const struct nvkm_pmu_func
2176 + gm20b_pmu = {
2177 +- .flcn = &gt215_pmu_flcn,
2178 ++ .flcn = &gm200_pmu_flcn,
2179 + .enabled = gf100_pmu_enabled,
2180 + .intr = gt215_pmu_intr,
2181 + .recv = gm20b_pmu_recv,
2182 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
2183 +index 00da1b873ce81..6bf7fc1bd1e3b 100644
2184 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
2185 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
2186 +@@ -39,7 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu)
2187 +
2188 + static const struct nvkm_pmu_func
2189 + gp102_pmu = {
2190 +- .flcn = &gt215_pmu_flcn,
2191 ++ .flcn = &gm200_pmu_flcn,
2192 + .enabled = gp102_pmu_enabled,
2193 + .reset = gp102_pmu_reset,
2194 + };
2195 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
2196 +index 461f722656e24..ba1583bb618b2 100644
2197 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
2198 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
2199 +@@ -78,7 +78,7 @@ gp10b_pmu_acr = {
2200 +
2201 + static const struct nvkm_pmu_func
2202 + gp10b_pmu = {
2203 +- .flcn = &gt215_pmu_flcn,
2204 ++ .flcn = &gm200_pmu_flcn,
2205 + .enabled = gf100_pmu_enabled,
2206 + .intr = gt215_pmu_intr,
2207 + .recv = gm20b_pmu_recv,
2208 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
2209 +index e7860d1773539..bcaade758ff72 100644
2210 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
2211 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
2212 +@@ -44,6 +44,8 @@ void gf100_pmu_reset(struct nvkm_pmu *);
2213 +
2214 + void gk110_pmu_pgob(struct nvkm_pmu *, bool);
2215 +
2216 ++extern const struct nvkm_falcon_func gm200_pmu_flcn;
2217 ++
2218 + void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64);
2219 + void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
2220 + int gm20b_pmu_acr_boot(struct nvkm_falcon *);
2221 +diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
2222 +index 0fce73b9a6469..70bd84b7ef2b0 100644
2223 +--- a/drivers/gpu/drm/radeon/atombios_encoders.c
2224 ++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
2225 +@@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
2226 + * so don't register a backlight device
2227 + */
2228 + if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
2229 +- (rdev->pdev->device == 0x6741))
2230 ++ (rdev->pdev->device == 0x6741) &&
2231 ++ !dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
2232 + return;
2233 +
2234 + if (!radeon_encoder->enc_priv)
2235 +diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
2236 +index 830bdd5e9b7ce..8677c82716784 100644
2237 +--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
2238 ++++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
2239 +@@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
2240 + return ret;
2241 + }
2242 +
2243 +- ret = clk_prepare_enable(hdmi->vpll_clk);
2244 +- if (ret) {
2245 +- DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
2246 +- ret);
2247 +- return ret;
2248 +- }
2249 +-
2250 + hdmi->phy = devm_phy_optional_get(dev, "hdmi");
2251 + if (IS_ERR(hdmi->phy)) {
2252 + ret = PTR_ERR(hdmi->phy);
2253 +@@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
2254 + return ret;
2255 + }
2256 +
2257 ++ ret = clk_prepare_enable(hdmi->vpll_clk);
2258 ++ if (ret) {
2259 ++ DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
2260 ++ ret);
2261 ++ return ret;
2262 ++ }
2263 ++
2264 + drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
2265 + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
2266 +
2267 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
2268 +index 2503be0253d3e..d3f32ffe299a8 100644
2269 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
2270 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
2271 +@@ -37,11 +37,11 @@ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_
2272 + {
2273 + union cmd_response cmd_resp;
2274 +
2275 +- /* Get response with status within a max of 800 ms timeout */
2276 ++ /* Get response with status within a max of 1600 ms timeout */
2277 + if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
2278 + (cmd_resp.response_v2.response == sensor_sts &&
2279 + cmd_resp.response_v2.status == 0 && (sid == 0xff ||
2280 +- cmd_resp.response_v2.sensor_id == sid)), 500, 800000))
2281 ++ cmd_resp.response_v2.sensor_id == sid)), 500, 1600000))
2282 + return cmd_resp.response_v2.response;
2283 +
2284 + return SENSOR_DISABLED;
2285 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
2286 +index ae30e059f8475..8a9c544c27aef 100644
2287 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
2288 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
2289 +@@ -49,7 +49,7 @@ union sfh_cmd_base {
2290 + } s;
2291 + struct {
2292 + u32 cmd_id : 4;
2293 +- u32 intr_enable : 1;
2294 ++ u32 intr_disable : 1;
2295 + u32 rsvd1 : 3;
2296 + u32 length : 7;
2297 + u32 mem_type : 1;
2298 +diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
2299 +index be41f83b0289c..76095bd53c655 100644
2300 +--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
2301 ++++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
2302 +@@ -27,6 +27,7 @@
2303 + #define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02
2304 + #define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05
2305 + #define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04
2306 ++#define ILLUMINANCE_MASK GENMASK(14, 0)
2307 +
2308 + int get_report_descriptor(int sensor_idx, u8 *rep_desc)
2309 + {
2310 +@@ -246,7 +247,8 @@ u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_
2311 + get_common_inputs(&als_input.common_property, report_id);
2312 + /* For ALS, V2 platforms use the C2P_MSG5 register instead of the DRAM access method */
2313 + if (supported_input == V2_STATUS)
2314 +- als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5));
2315 ++ als_input.illuminance_value =
2316 ++ readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK;
2317 + else
2318 + als_input.illuminance_value =
2319 + (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
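[The V2 register carries more than the lux value, so the read is now masked to its low 15 bits (GENMASK(14, 0)) instead of being cast wholesale. A runnable demo of the masking with an invented raw value:]

    #include <stdint.h>
    #include <stdio.h>

    #define ILLUMINANCE_MASK ((1u << 15) - 1)   /* same bits as GENMASK(14, 0) */

    int main(void)
    {
            uint32_t reg = 0xABCD1234;  /* illustrative raw register value */

            printf("lux = %u\n", reg & ILLUMINANCE_MASK);
            return 0;
    }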
2320 +diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
2321 +index a4ca5ed00e5f5..a050dbcfc60e0 100644
2322 +--- a/drivers/hid/hid-apple.c
2323 ++++ b/drivers/hid/hid-apple.c
2324 +@@ -580,49 +580,49 @@ static const struct hid_device_id apple_devices[] = {
2325 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
2326 + .driver_data = APPLE_HAS_FN },
2327 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
2328 +- .driver_data = APPLE_HAS_FN },
2329 ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
2330 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
2331 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
2332 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
2333 + .driver_data = APPLE_HAS_FN },
2334 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
2335 +- .driver_data = APPLE_HAS_FN },
2336 ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
2337 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
2338 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
2339 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
2340 + .driver_data = APPLE_HAS_FN },
2341 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
2342 +- .driver_data = APPLE_HAS_FN },
2343 ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
2344 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
2345 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
2346 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
2347 + .driver_data = APPLE_HAS_FN },
2348 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
2349 +- .driver_data = APPLE_HAS_FN },
2350 ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
2351 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
2352 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
2353 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
2354 + .driver_data = APPLE_HAS_FN },
2355 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
2356 +- .driver_data = APPLE_HAS_FN },
2357 ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
2358 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
2359 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
2360 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
2361 + .driver_data = APPLE_HAS_FN },
2362 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
2363 +- .driver_data = APPLE_HAS_FN },
2364 ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
2365 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
2366 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
2367 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
2368 + .driver_data = APPLE_HAS_FN },
2369 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
2370 +- .driver_data = APPLE_HAS_FN },
2371 ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
2372 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
2373 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
2374 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
2375 + .driver_data = APPLE_HAS_FN },
2376 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
2377 +- .driver_data = APPLE_HAS_FN },
2378 ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
2379 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
2380 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
2381 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
2382 +diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
2383 +index 8e960d7b233b3..9b42b0cdeef06 100644
2384 +--- a/drivers/hid/hid-elo.c
2385 ++++ b/drivers/hid/hid-elo.c
2386 +@@ -262,6 +262,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
2387 +
2388 + return 0;
2389 + err_free:
2390 ++ usb_put_dev(udev);
2391 + kfree(priv);
2392 + return ret;
2393 + }
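[elo_probe() takes a USB device reference earlier in the function, and the error path now drops it. The general rule: every reference acquired before a failure point needs a matching release on the unwind path. A hedged sketch with stand-in types:]

    struct dev;
    extern struct dev *get_ref(void);
    extern void put_ref(struct dev *d);
    extern int do_setup(struct dev *d);

    static int probe(void)
    {
            struct dev *d = get_ref();   /* reference taken up front */
            int ret = do_setup(d);

            if (ret)
                    put_ref(d);          /* release on every failure path */
            return ret;
    }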
2394 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2395 +index a5a5a64c7abc4..fd224222d95f2 100644
2396 +--- a/drivers/hid/hid-ids.h
2397 ++++ b/drivers/hid/hid-ids.h
2398 +@@ -1365,6 +1365,7 @@
2399 + #define USB_VENDOR_ID_UGTIZER 0x2179
2400 + #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053
2401 + #define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077
2402 ++#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004
2403 +
2404 + #define USB_VENDOR_ID_VIEWSONIC 0x0543
2405 + #define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621
2406 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
2407 +index ee7e504e7279f..451baa9b0fbe2 100644
2408 +--- a/drivers/hid/hid-quirks.c
2409 ++++ b/drivers/hid/hid-quirks.c
2410 +@@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = {
2411 + { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
2412 + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
2413 + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
2414 ++ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT },
2415 + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
2416 + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
2417 + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
2418 +diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
2419 +index b4dad66fa954d..ec6c73f75ffe0 100644
2420 +--- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
2421 ++++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
2422 +@@ -27,7 +27,6 @@ struct i2c_hid_of_goodix {
2423 +
2424 + struct regulator *vdd;
2425 + struct notifier_block nb;
2426 +- struct mutex regulator_mutex;
2427 + struct gpio_desc *reset_gpio;
2428 + const struct goodix_i2c_hid_timing_data *timings;
2429 + };
2430 +@@ -67,8 +66,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
2431 + container_of(nb, struct i2c_hid_of_goodix, nb);
2432 + int ret = NOTIFY_OK;
2433 +
2434 +- mutex_lock(&ihid_goodix->regulator_mutex);
2435 +-
2436 + switch (event) {
2437 + case REGULATOR_EVENT_PRE_DISABLE:
2438 + gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
2439 +@@ -87,8 +84,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
2440 + break;
2441 + }
2442 +
2443 +- mutex_unlock(&ihid_goodix->regulator_mutex);
2444 +-
2445 + return ret;
2446 + }
2447 +
2448 +@@ -102,8 +97,6 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
2449 + if (!ihid_goodix)
2450 + return -ENOMEM;
2451 +
2452 +- mutex_init(&ihid_goodix->regulator_mutex);
2453 +-
2454 + ihid_goodix->ops.power_up = goodix_i2c_hid_power_up;
2455 + ihid_goodix->ops.power_down = goodix_i2c_hid_power_down;
2456 +
2457 +@@ -130,25 +123,28 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
2458 + * long. Holding the controller in reset apparently draws extra
2459 + * power.
2460 + */
2461 +- mutex_lock(&ihid_goodix->regulator_mutex);
2462 + ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify;
2463 + ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb);
2464 +- if (ret) {
2465 +- mutex_unlock(&ihid_goodix->regulator_mutex);
2466 ++ if (ret)
2467 + return dev_err_probe(&client->dev, ret,
2468 + "regulator notifier request failed\n");
2469 +- }
2470 +
2471 + /*
2472 + * If someone else is holding the regulator on (or the regulator is
2473 + * an always-on one) we might never be told to deassert reset. Do it
2474 +- * now. Here we'll assume that someone else might have _just
2475 +- * barely_ turned the regulator on so we'll do the full
2476 +- * "post_power_delay" just in case.
2477 ++ * now... and temporarily bump the regulator reference count just to
2478 ++ * make sure it is impossible for this to race with our own notifier!
2479 ++ * We also assume that someone else might have _just barely_ turned
2480 ++ * the regulator on so we'll do the full "post_power_delay" just in
2481 ++ * case.
2482 + */
2483 +- if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd))
2484 ++ if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) {
2485 ++ ret = regulator_enable(ihid_goodix->vdd);
2486 ++ if (ret)
2487 ++ return ret;
2488 + goodix_i2c_hid_deassert_reset(ihid_goodix, true);
2489 +- mutex_unlock(&ihid_goodix->regulator_mutex);
2490 ++ regulator_disable(ihid_goodix->vdd);
2491 ++ }
2492 +
2493 + return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0);
2494 + }
2495 +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
2496 +index 392c1ac4f8193..44bd0b6ff5059 100644
2497 +--- a/drivers/hv/vmbus_drv.c
2498 ++++ b/drivers/hv/vmbus_drv.c
2499 +@@ -2027,8 +2027,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
2500 + kobj->kset = dev->channels_kset;
2501 + ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
2502 + "%u", relid);
2503 +- if (ret)
2504 ++ if (ret) {
2505 ++ kobject_put(kobj);
2506 + return ret;
2507 ++ }
2508 +
2509 + ret = sysfs_create_group(kobj, &vmbus_chan_group);
2510 +
2511 +@@ -2037,6 +2039,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
2512 + * The calling functions' error handling paths will cleanup the
2513 + * empty channel directory.
2514 + */
2515 ++ kobject_put(kobj);
2516 + dev_err(device, "Unable to set up channel sysfs files\n");
2517 + return ret;
2518 + }
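[kobject_init_and_add() initializes the kobject even when it fails, so cleanup must go through kobject_put() (which invokes the ktype release), never a bare kfree(); the same applies after sysfs group creation fails. A kernel-style sketch of the contract, not a standalone program:]

    ret = kobject_init_and_add(kobj, ktype, parent, "%u", relid);
    if (ret) {
            kobject_put(kobj);   /* drops the initial reference */
            return ret;
    }

    ret = sysfs_create_group(kobj, group);
    if (ret) {
            kobject_put(kobj);   /* undo init_and_add on later failures too */
            return ret;
    }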
2519 +diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
2520 +index 490ee3962645d..b00f35c0b0662 100644
2521 +--- a/drivers/i2c/busses/i2c-brcmstb.c
2522 ++++ b/drivers/i2c/busses/i2c-brcmstb.c
2523 +@@ -673,7 +673,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
2524 +
2525 + /* set the data in/out register size for compatible SoCs */
2526 + if (of_device_is_compatible(dev->device->of_node,
2527 +- "brcmstb,brcmper-i2c"))
2528 ++ "brcm,brcmper-i2c"))
2529 + dev->data_regsz = sizeof(u8);
2530 + else
2531 + dev->data_regsz = sizeof(u32);
2532 +diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
2533 +index c1de8eb66169f..cf54f1cb4c57a 100644
2534 +--- a/drivers/i2c/busses/i2c-qcom-cci.c
2535 ++++ b/drivers/i2c/busses/i2c-qcom-cci.c
2536 +@@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev)
2537 + cci->master[idx].adap.quirks = &cci->data->quirks;
2538 + cci->master[idx].adap.algo = &cci_algo;
2539 + cci->master[idx].adap.dev.parent = dev;
2540 +- cci->master[idx].adap.dev.of_node = child;
2541 ++ cci->master[idx].adap.dev.of_node = of_node_get(child);
2542 + cci->master[idx].master = idx;
2543 + cci->master[idx].cci = cci;
2544 +
2545 +@@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev)
2546 + continue;
2547 +
2548 + ret = i2c_add_adapter(&cci->master[i].adap);
2549 +- if (ret < 0)
2550 ++ if (ret < 0) {
2551 ++ of_node_put(cci->master[i].adap.dev.of_node);
2552 + goto error_i2c;
2553 ++ }
2554 + }
2555 +
2556 + pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
2557 +@@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev)
2558 + return 0;
2559 +
2560 + error_i2c:
2561 +- for (; i >= 0; i--) {
2562 +- if (cci->master[i].cci)
2563 ++ for (--i ; i >= 0; i--) {
2564 ++ if (cci->master[i].cci) {
2565 + i2c_del_adapter(&cci->master[i].adap);
2566 ++ of_node_put(cci->master[i].adap.dev.of_node);
2567 ++ }
2568 + }
2569 + error:
2570 + disable_irq(cci->irq);
2571 +@@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev)
2572 + int i;
2573 +
2574 + for (i = 0; i < cci->data->num_masters; i++) {
2575 +- if (cci->master[i].cci)
2576 ++ if (cci->master[i].cci) {
2577 + i2c_del_adapter(&cci->master[i].adap);
2578 ++ of_node_put(cci->master[i].adap.dev.of_node);
2579 ++ }
2580 + cci_halt(cci, i);
2581 + }
2582 +
2583 +diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
2584 +index 259065d271ef0..09cc98266d30f 100644
2585 +--- a/drivers/irqchip/irq-sifive-plic.c
2586 ++++ b/drivers/irqchip/irq-sifive-plic.c
2587 +@@ -398,3 +398,4 @@ out_free_priv:
2588 +
2589 + IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
2590 + IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
2591 ++IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */
2592 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
2593 +index bd814fa0b7216..356a0183e1ad1 100644
2594 +--- a/drivers/md/dm.c
2595 ++++ b/drivers/md/dm.c
2596 +@@ -2140,7 +2140,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2597 + set_bit(DMF_FREEING, &md->flags);
2598 + spin_unlock(&_minor_lock);
2599 +
2600 +- blk_set_queue_dying(md->queue);
2601 ++ blk_mark_disk_dead(md->disk);
2602 +
2603 + /*
2604 + * Take suspend_lock so that presuspend and postsuspend methods
2605 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
2606 +index 90e1bcd03b46c..92713cd440651 100644
2607 +--- a/drivers/mmc/core/block.c
2608 ++++ b/drivers/mmc/core/block.c
2609 +@@ -1682,31 +1682,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
2610 + struct mmc_card *card = mq->card;
2611 + struct mmc_host *host = card->host;
2612 + blk_status_t error = BLK_STS_OK;
2613 +- int retries = 0;
2614 +
2615 + do {
2616 + u32 status;
2617 + int err;
2618 ++ int retries = 0;
2619 +
2620 +- mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
2621 ++ while (retries++ <= MMC_READ_SINGLE_RETRIES) {
2622 ++ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
2623 +
2624 +- mmc_wait_for_req(host, mrq);
2625 ++ mmc_wait_for_req(host, mrq);
2626 +
2627 +- err = mmc_send_status(card, &status);
2628 +- if (err)
2629 +- goto error_exit;
2630 +-
2631 +- if (!mmc_host_is_spi(host) &&
2632 +- !mmc_ready_for_data(status)) {
2633 +- err = mmc_blk_fix_state(card, req);
2634 ++ err = mmc_send_status(card, &status);
2635 + if (err)
2636 + goto error_exit;
2637 +- }
2638 +
2639 +- if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
2640 +- continue;
2641 ++ if (!mmc_host_is_spi(host) &&
2642 ++ !mmc_ready_for_data(status)) {
2643 ++ err = mmc_blk_fix_state(card, req);
2644 ++ if (err)
2645 ++ goto error_exit;
2646 ++ }
2647 +
2648 +- retries = 0;
2649 ++ if (!mrq->cmd->error)
2650 ++ break;
2651 ++ }
2652 +
2653 + if (mrq->cmd->error ||
2654 + mrq->data->error ||
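[The retry logic moves into an explicit inner loop: each sector gets up to MMC_READ_SINGLE_RETRIES fresh attempts, re-preparing the request every time, and the loop exits as soon as the command succeeds. A minimal sketch of the loop shape with a stand-in I/O helper:]

    #define MAX_RETRIES 3          /* stands in for MMC_READ_SINGLE_RETRIES */

    extern int issue_read(void);   /* 0 on success, nonzero on error */

    static int read_one_sector(void)
    {
            int err = -1, retries = 0;

            while (retries++ <= MAX_RETRIES) {
                    err = issue_read();   /* re-prepare and resend each try */
                    if (!err)
                            break;        /* success: stop retrying */
            }
            return err;
    }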
2655 +diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
2656 +index 6ed6c51fac69e..d503821a3e606 100644
2657 +--- a/drivers/mtd/devices/phram.c
2658 ++++ b/drivers/mtd/devices/phram.c
2659 +@@ -264,16 +264,20 @@ static int phram_setup(const char *val)
2660 + }
2661 + }
2662 +
2663 +- if (erasesize)
2664 +- div_u64_rem(len, (uint32_t)erasesize, &rem);
2665 +-
2666 + if (len == 0 || erasesize == 0 || erasesize > len
2667 +- || erasesize > UINT_MAX || rem) {
2668 ++ || erasesize > UINT_MAX) {
2669 + parse_err("illegal erasesize or len\n");
2670 + ret = -EINVAL;
2671 + goto error;
2672 + }
2673 +
2674 ++ div_u64_rem(len, (uint32_t)erasesize, &rem);
2675 ++ if (rem) {
2676 ++ parse_err("len is not multiple of erasesize\n");
2677 ++ ret = -EINVAL;
2678 ++ goto error;
2679 ++ }
2680 ++
2681 + ret = register_device(name, start, len, (uint32_t)erasesize);
2682 + if (ret)
2683 + goto error;
2684 +diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
2685 +index f75929783b941..aee78f5f4f156 100644
2686 +--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
2687 ++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
2688 +@@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
2689 + mtd->oobsize / trans,
2690 + host->hwcfg.sector_size_1k);
2691 +
2692 +- if (!ret) {
2693 ++ if (ret != -EBADMSG) {
2694 + *err_addr = brcmnand_get_uncorrecc_addr(ctrl);
2695 +
2696 + if (*err_addr)
2697 +diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
2698 +index 65bcd1c548d2e..5eb20dfe4186e 100644
2699 +--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
2700 ++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
2701 +@@ -2291,7 +2291,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
2702 + this->hw.must_apply_timings = false;
2703 + ret = gpmi_nfc_apply_timings(this);
2704 + if (ret)
2705 +- return ret;
2706 ++ goto out_pm;
2707 + }
2708 +
2709 + dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
2710 +@@ -2420,6 +2420,7 @@ unmap:
2711 +
2712 + this->bch = false;
2713 +
2714 ++out_pm:
2715 + pm_runtime_mark_last_busy(this->dev);
2716 + pm_runtime_put_autosuspend(this->dev);
2717 +
2718 +diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
2719 +index efe0ffe4f1abc..9054559e52dda 100644
2720 +--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
2721 ++++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
2722 +@@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
2723 + struct ingenic_ecc *ecc;
2724 +
2725 + pdev = of_find_device_by_node(np);
2726 +- if (!pdev || !platform_get_drvdata(pdev))
2727 ++ if (!pdev)
2728 + return ERR_PTR(-EPROBE_DEFER);
2729 +
2730 ++ if (!platform_get_drvdata(pdev)) {
2731 ++ put_device(&pdev->dev);
2732 ++ return ERR_PTR(-EPROBE_DEFER);
2733 ++ }
2734 ++
2735 + ecc = platform_get_drvdata(pdev);
2736 + clk_prepare_enable(ecc->clk);
2737 +
2738 +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
2739 +index 04e6f7b267064..0f41a9a421575 100644
2740 +--- a/drivers/mtd/nand/raw/qcom_nandc.c
2741 ++++ b/drivers/mtd/nand/raw/qcom_nandc.c
2742 +@@ -2,7 +2,6 @@
2743 + /*
2744 + * Copyright (c) 2016, The Linux Foundation. All rights reserved.
2745 + */
2746 +-
2747 + #include <linux/clk.h>
2748 + #include <linux/slab.h>
2749 + #include <linux/bitops.h>
2750 +@@ -3063,10 +3062,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
2751 + if (dma_mapping_error(dev, nandc->base_dma))
2752 + return -ENXIO;
2753 +
2754 +- ret = qcom_nandc_alloc(nandc);
2755 +- if (ret)
2756 +- goto err_nandc_alloc;
2757 +-
2758 + ret = clk_prepare_enable(nandc->core_clk);
2759 + if (ret)
2760 + goto err_core_clk;
2761 +@@ -3075,6 +3070,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
2762 + if (ret)
2763 + goto err_aon_clk;
2764 +
2765 ++ ret = qcom_nandc_alloc(nandc);
2766 ++ if (ret)
2767 ++ goto err_nandc_alloc;
2768 ++
2769 + ret = qcom_nandc_setup(nandc);
2770 + if (ret)
2771 + goto err_setup;
2772 +@@ -3086,15 +3085,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
2773 + return 0;
2774 +
2775 + err_setup:
2776 ++ qcom_nandc_unalloc(nandc);
2777 ++err_nandc_alloc:
2778 + clk_disable_unprepare(nandc->aon_clk);
2779 + err_aon_clk:
2780 + clk_disable_unprepare(nandc->core_clk);
2781 + err_core_clk:
2782 +- qcom_nandc_unalloc(nandc);
2783 +-err_nandc_alloc:
2784 + dma_unmap_resource(dev, res->start, resource_size(res),
2785 + DMA_BIDIRECTIONAL, 0);
2786 +-
2787 + return ret;
2788 + }
2789 +
2790 +diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
2791 +index 06a818cd2433f..32ddfea701423 100644
2792 +--- a/drivers/mtd/parsers/qcomsmempart.c
2793 ++++ b/drivers/mtd/parsers/qcomsmempart.c
2794 +@@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
2795 + const struct mtd_partition **pparts,
2796 + struct mtd_part_parser_data *data)
2797 + {
2798 ++ size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
2799 ++ int ret, i, j, tmpparts, numparts = 0;
2800 + struct smem_flash_pentry *pentry;
2801 + struct smem_flash_ptable *ptable;
2802 +- size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
2803 + struct mtd_partition *parts;
2804 +- int ret, i, numparts;
2805 + char *name, *c;
2806 +
2807 + if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
2808 +@@ -87,8 +87,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
2809 + }
2810 +
2811 + /* Ensure that # of partitions is less than the max we have allocated */
2812 +- numparts = le32_to_cpu(ptable->numparts);
2813 +- if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
2814 ++ tmpparts = le32_to_cpu(ptable->numparts);
2815 ++ if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
2816 + pr_err("Partition numbers exceed the max limit\n");
2817 + return -EINVAL;
2818 + }
2819 +@@ -116,11 +116,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
2820 + return PTR_ERR(ptable);
2821 + }
2822 +
2823 ++ for (i = 0; i < tmpparts; i++) {
2824 ++ pentry = &ptable->pentry[i];
2825 ++ if (pentry->name[0] != '\0')
2826 ++ numparts++;
2827 ++ }
2828 ++
2829 + parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
2830 + if (!parts)
2831 + return -ENOMEM;
2832 +
2833 +- for (i = 0; i < numparts; i++) {
2834 ++ for (i = 0, j = 0; i < tmpparts; i++) {
2835 + pentry = &ptable->pentry[i];
2836 + if (pentry->name[0] == '\0')
2837 + continue;
2838 +@@ -135,24 +141,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
2839 + for (c = name; *c != '\0'; c++)
2840 + *c = tolower(*c);
2841 +
2842 +- parts[i].name = name;
2843 +- parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
2844 +- parts[i].mask_flags = pentry->attr;
2845 +- parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize;
2846 ++ parts[j].name = name;
2847 ++ parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
2848 ++ parts[j].mask_flags = pentry->attr;
2849 ++ parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
2850 + pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
2851 + i, pentry->name, le32_to_cpu(pentry->offset),
2852 + le32_to_cpu(pentry->length), pentry->attr);
2853 ++ j++;
2854 + }
2855 +
2856 + pr_debug("SMEM partition table found: ver: %d len: %d\n",
2857 +- le32_to_cpu(ptable->version), numparts);
2858 ++ le32_to_cpu(ptable->version), tmpparts);
2859 + *pparts = parts;
2860 +
2861 + return numparts;
2862 +
2863 + out_free_parts:
2864 +- while (--i >= 0)
2865 +- kfree(parts[i].name);
2866 ++ while (--j >= 0)
2867 ++ kfree(parts[j].name);
2868 + kfree(parts);
2869 + *pparts = NULL;
2870 +
2871 +@@ -166,6 +173,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
2872 +
2873 + for (i = 0; i < nr_parts; i++)
2874 + kfree(pparts[i].name);
2875 ++
2876 ++ kfree(pparts);
2877 + }
2878 +
2879 + static const struct of_device_id qcomsmem_of_match_table[] = {
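[Note: the parser fix distinguishes the raw table length (tmpparts) from the count of usable entries (numparts): a first pass counts non-empty names so kcalloc() sizes the array exactly, and the copy loop keeps an independent output index j. Skeleton of the two-pass pattern (entry_valid(), tbl and out are hypothetical):

    int i, j, n = 0;

    for (i = 0; i < total; i++)             /* pass 1: count valid entries */
            if (entry_valid(&tbl[i]))
                    n++;

    out = kcalloc(n, sizeof(*out), GFP_KERNEL);
    if (!out)
            return -ENOMEM;

    for (i = 0, j = 0; i < total; i++) {    /* pass 2: compact copy */
            if (!entry_valid(&tbl[i]))
                    continue;
            out[j++] = tbl[i];
    }

The error path then unwinds with j, not i, since only j names were allocated.]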
2880 +diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
2881 +index 9fd1d6cba3cda..a86b1f71762ea 100644
2882 +--- a/drivers/net/bonding/bond_3ad.c
2883 ++++ b/drivers/net/bonding/bond_3ad.c
2884 +@@ -225,7 +225,7 @@ static inline int __check_agg_selection_timer(struct port *port)
2885 + if (bond == NULL)
2886 + return 0;
2887 +
2888 +- return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
2889 ++ return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
2890 + }
2891 +
2892 + /**
2893 +@@ -1995,7 +1995,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
2894 + */
2895 + void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
2896 + {
2897 +- BOND_AD_INFO(bond).agg_select_timer = timeout;
2898 ++ atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
2899 + }
2900 +
2901 + /**
2902 +@@ -2278,6 +2278,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
2903 + spin_unlock_bh(&bond->mode_lock);
2904 + }
2905 +
2906 ++/**
2907 ++ * bond_agg_timer_advance - advance agg_select_timer
2908 ++ * @bond: bonding structure
2909 ++ *
2910 ++ * Return true when agg_select_timer reaches 0.
2911 ++ */
2912 ++static bool bond_agg_timer_advance(struct bonding *bond)
2913 ++{
2914 ++ int val, nval;
2915 ++
2916 ++ while (1) {
2917 ++ val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
2918 ++ if (!val)
2919 ++ return false;
2920 ++ nval = val - 1;
2921 ++ if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
2922 ++ val, nval) == val)
2923 ++ break;
2924 ++ }
2925 ++ return nval == 0;
2926 ++}
2927 ++
2928 + /**
2929 + * bond_3ad_state_machine_handler - handle state machines timeout
2930 + * @work: work context to fetch bonding struct to work on from
2931 +@@ -2313,9 +2335,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2932 + if (!bond_has_slaves(bond))
2933 + goto re_arm;
2934 +
2935 +- /* check if agg_select_timer timer after initialize is timed out */
2936 +- if (BOND_AD_INFO(bond).agg_select_timer &&
2937 +- !(--BOND_AD_INFO(bond).agg_select_timer)) {
2938 ++ if (bond_agg_timer_advance(bond)) {
2939 + slave = bond_first_slave_rcu(bond);
2940 + port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
2941 +
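[Note on bond_agg_timer_advance() above: converting agg_select_timer to atomic_t fixes the unlocked read-modify-write in the state machine, and the cmpxchg loop, rather than a bare atomic_dec(), guarantees the counter is only decremented while it is still nonzero, so an already-expired timer cannot be pushed below zero and exactly one caller observes the 1 -> 0 transition that kicks aggregator selection.]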
2942 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2943 +index 1db5c7a172a71..92035571490bd 100644
2944 +--- a/drivers/net/bonding/bond_main.c
2945 ++++ b/drivers/net/bonding/bond_main.c
2946 +@@ -2377,10 +2377,9 @@ static int __bond_release_one(struct net_device *bond_dev,
2947 + bond_select_active_slave(bond);
2948 + }
2949 +
2950 +- if (!bond_has_slaves(bond)) {
2951 +- bond_set_carrier(bond);
2952 ++ bond_set_carrier(bond);
2953 ++ if (!bond_has_slaves(bond))
2954 + eth_hw_addr_random(bond_dev);
2955 +- }
2956 +
2957 + unblock_netpoll_tx();
2958 + synchronize_rcu();
2959 +diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
2960 +index c0c91440340ae..0029d279616fd 100644
2961 +--- a/drivers/net/dsa/Kconfig
2962 ++++ b/drivers/net/dsa/Kconfig
2963 +@@ -82,6 +82,7 @@ config NET_DSA_REALTEK_SMI
2964 +
2965 + config NET_DSA_SMSC_LAN9303
2966 + tristate
2967 ++ depends on VLAN_8021Q || VLAN_8021Q=n
2968 + select NET_DSA_TAG_LAN9303
2969 + select REGMAP
2970 + help
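[Note: "depends on VLAN_8021Q || VLAN_8021Q=n" is the conventional Kconfig guard for drivers that call into the VLAN core. Mechanically, it is satisfied when 802.1Q is fully disabled, but when VLAN_8021Q=m it caps this tristate at m as well, so a built-in lan9303 can never be combined with a weaker (modular) VLAN configuration whose vlan_vid_add()/vlan_vid_del() support it now depends on.]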
2971 +diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
2972 +index 89f920289ae21..0b6f29ee87b56 100644
2973 +--- a/drivers/net/dsa/lan9303-core.c
2974 ++++ b/drivers/net/dsa/lan9303-core.c
2975 +@@ -10,6 +10,7 @@
2976 + #include <linux/mii.h>
2977 + #include <linux/phy.h>
2978 + #include <linux/if_bridge.h>
2979 ++#include <linux/if_vlan.h>
2980 + #include <linux/etherdevice.h>
2981 +
2982 + #include "lan9303.h"
2983 +@@ -1083,21 +1084,27 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
2984 + static int lan9303_port_enable(struct dsa_switch *ds, int port,
2985 + struct phy_device *phy)
2986 + {
2987 ++ struct dsa_port *dp = dsa_to_port(ds, port);
2988 + struct lan9303 *chip = ds->priv;
2989 +
2990 +- if (!dsa_is_user_port(ds, port))
2991 ++ if (!dsa_port_is_user(dp))
2992 + return 0;
2993 +
2994 ++ vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
2995 ++
2996 + return lan9303_enable_processing_port(chip, port);
2997 + }
2998 +
2999 + static void lan9303_port_disable(struct dsa_switch *ds, int port)
3000 + {
3001 ++ struct dsa_port *dp = dsa_to_port(ds, port);
3002 + struct lan9303 *chip = ds->priv;
3003 +
3004 +- if (!dsa_is_user_port(ds, port))
3005 ++ if (!dsa_port_is_user(dp))
3006 + return;
3007 +
3008 ++ vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
3009 ++
3010 + lan9303_disable_processing_port(chip, port);
3011 + lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
3012 + }
3013 +@@ -1309,7 +1316,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
3014 + struct device_node *np)
3015 + {
3016 + chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
3017 +- GPIOD_OUT_LOW);
3018 ++ GPIOD_OUT_HIGH);
3019 + if (IS_ERR(chip->reset_gpio))
3020 + return PTR_ERR(chip->reset_gpio);
3021 +
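[Note: two independent lan9303 fixes land here. The VLAN hunks register the port VID on the CPU port's master device so tagged frames survive the master's VLAN filtering; the one-liner flips the reset GPIO request to GPIOD_OUT_HIGH. gpiod values are logical, 1 meaning asserted, so with the customary active-low reset line described in the device tree the core drives the physical pin low and holds the switch in reset from the moment the descriptor is acquired. Illustrative usage under that assumption:

    /* DT (illustrative): reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; */
    gpiod_set_value_cansleep(chip->reset_gpio, 1);  /* assert: pin driven low */
    usleep_range(1000, 2000);                       /* hypothetical pulse width */
    gpiod_set_value_cansleep(chip->reset_gpio, 0);  /* release: pin driven high */
]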
3022 +diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
3023 +index 0909b05d02133..ae91edcbfa8f6 100644
3024 +--- a/drivers/net/dsa/lantiq_gswip.c
3025 ++++ b/drivers/net/dsa/lantiq_gswip.c
3026 +@@ -2217,8 +2217,8 @@ static int gswip_remove(struct platform_device *pdev)
3027 +
3028 + if (priv->ds->slave_mii_bus) {
3029 + mdiobus_unregister(priv->ds->slave_mii_bus);
3030 +- mdiobus_free(priv->ds->slave_mii_bus);
3031 + of_node_put(priv->ds->slave_mii_bus->dev.of_node);
3032 ++ mdiobus_free(priv->ds->slave_mii_bus);
3033 + }
3034 +
3035 + for (i = 0; i < priv->num_gphy_fw; i++)
3036 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
3037 +index 70cea1b95298a..ec8b02f5459d2 100644
3038 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
3039 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
3040 +@@ -2290,6 +2290,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
3041 + if (!mv88e6xxx_max_vid(chip))
3042 + return -EOPNOTSUPP;
3043 +
3044 ++ /* The ATU removal procedure needs the FID to be mapped in the VTU,
3045 ++ * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
3046 ++ * switchdev workqueue to ensure that all FDB entries are deleted
3047 ++ * before we remove the VLAN.
3048 ++ */
3049 ++ dsa_flush_workqueue();
3050 ++
3051 + mv88e6xxx_reg_lock(chip);
3052 +
3053 + err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
3054 +diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
3055 +index da595242bc13a..f50604f3e541e 100644
3056 +--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
3057 ++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
3058 +@@ -900,7 +900,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
3059 + atl1c_clean_buffer(pdev, buffer_info);
3060 + }
3061 +
3062 +- netdev_reset_queue(adapter->netdev);
3063 ++ netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
3064 +
3065 + /* Zero out Tx-buffers */
3066 + memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
3067 +diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
3068 +index c6412c523637b..b4381cd419792 100644
3069 +--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
3070 ++++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
3071 +@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
3072 + {
3073 + struct device_node *np = pdev->dev.of_node;
3074 + struct bgmac *bgmac;
3075 ++ struct resource *regs;
3076 + int ret;
3077 +
3078 + bgmac = bgmac_alloc(&pdev->dev);
3079 +@@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev)
3080 + if (IS_ERR(bgmac->plat.base))
3081 + return PTR_ERR(bgmac->plat.base);
3082 +
3083 +- bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
3084 +- if (IS_ERR(bgmac->plat.idm_base))
3085 +- return PTR_ERR(bgmac->plat.idm_base);
3086 +- else
3087 ++ /* The idm_base resource is optional for some platforms */
3088 ++ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
3089 ++ if (regs) {
3090 ++ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
3091 ++ if (IS_ERR(bgmac->plat.idm_base))
3092 ++ return PTR_ERR(bgmac->plat.idm_base);
3093 + bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
3094 ++ }
3095 +
3096 +- bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
3097 +- if (IS_ERR(bgmac->plat.nicpm_base))
3098 +- return PTR_ERR(bgmac->plat.nicpm_base);
3099 ++ /* The nicpm_base resource is optional for some platforms */
3100 ++ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
3101 ++ if (regs) {
3102 ++ bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
3103 ++ regs);
3104 ++ if (IS_ERR(bgmac->plat.nicpm_base))
3105 ++ return PTR_ERR(bgmac->plat.nicpm_base);
3106 ++ }
3107 +
3108 + bgmac->read = platform_bgmac_read;
3109 + bgmac->write = platform_bgmac_write;
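[Note: devm_platform_ioremap_resource_byname() treats an absent resource as a hard error (and logs one), so it cannot express an optional register window. Probing with platform_get_resource_byname() first, as the hunk does, keeps idm_base and nicpm_base genuinely optional: they are mapped and used only when the platform actually declares them.]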
3110 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
3111 +index ffce528aa00e4..aac1b27bfc7bf 100644
3112 +--- a/drivers/net/ethernet/cadence/macb_main.c
3113 ++++ b/drivers/net/ethernet/cadence/macb_main.c
3114 +@@ -4749,7 +4749,7 @@ static int macb_probe(struct platform_device *pdev)
3115 +
3116 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3117 + if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
3118 +- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
3119 ++ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
3120 + bp->hw_dma_cap |= HW_DMA_CAP_64B;
3121 + }
3122 + #endif
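[Note: dma_set_mask() raises only the streaming-DMA mask; the coherent mask consulted by dma_alloc_coherent() is tracked separately and would have stayed at its 32-bit default. The combined helper is shorthand for the two calls:

    /* roughly what dma_set_mask_and_coherent() does */
    ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
    if (!ret)
            ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(44));

so descriptor rings and streaming buffers are both allowed into the full 44-bit range the GEM hardware can address.]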
3123 +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3124 +index 70c8dd6cf3508..118933efb1587 100644
3125 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3126 ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3127 +@@ -4338,7 +4338,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
3128 + }
3129 +
3130 + INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
3131 +-
3132 ++ mutex_init(&priv->onestep_tstamp_lock);
3133 + skb_queue_head_init(&priv->tx_skbs);
3134 +
3135 + priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
3136 +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
3137 +index d6eefbbf163fa..cacd454ac696c 100644
3138 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
3139 ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
3140 +@@ -532,6 +532,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
3141 + struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
3142 + struct flow_dissector *dissector = rule->match.dissector;
3143 + struct netlink_ext_ack *extack = cls->common.extack;
3144 ++ int ret = -EOPNOTSUPP;
3145 +
3146 + if (dissector->used_keys &
3147 + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
3148 +@@ -561,9 +562,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
3149 + }
3150 +
3151 + *vlan = (u16)match.key->vlan_id;
3152 ++ ret = 0;
3153 + }
3154 +
3155 +- return 0;
3156 ++ return ret;
3157 + }
3158 +
3159 + static int
3160 +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
3161 +index 09a3297cd63cd..edba96845baf7 100644
3162 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c
3163 ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
3164 +@@ -1641,6 +1641,12 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
3165 + if (status)
3166 + dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
3167 + vsi_num, ice_stat_str(status));
3168 ++
3169 ++ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
3170 ++ ICE_FLOW_SEG_HDR_ESP);
3171 ++ if (status)
3172 ++ dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
3173 ++ vsi_num, status);
3174 + }
3175 +
3176 + /**
3177 +diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
3178 +index dc7e5ea6ec158..148d431fcde42 100644
3179 +--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
3180 ++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
3181 +@@ -145,9 +145,9 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
3182 + skb_put(skb, byte_cnt - ETH_FCS_LEN);
3183 + eth_skb_pad(skb);
3184 + skb->protocol = eth_type_trans(skb, netdev);
3185 +- netif_rx(skb);
3186 + netdev->stats.rx_bytes += skb->len;
3187 + netdev->stats.rx_packets++;
3188 ++ netif_rx(skb);
3189 + }
3190 +
3191 + static int sparx5_inject(struct sparx5 *sparx5,
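[Note: the swap in sparx5_xtr_grp() is a use-after-free guard. netif_rx() hands the skb to the network stack, which is free to consume it immediately, so skb->len must feed the rx_bytes counter before the skb is passed up, not after.]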
3192 +diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
3193 +index 02edd383dea22..886daa29bfb02 100644
3194 +--- a/drivers/net/ethernet/mscc/ocelot.c
3195 ++++ b/drivers/net/ethernet/mscc/ocelot.c
3196 +@@ -480,14 +480,18 @@ EXPORT_SYMBOL(ocelot_vlan_add);
3197 + int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
3198 + {
3199 + struct ocelot_port *ocelot_port = ocelot->ports[port];
3200 ++ bool del_pvid = false;
3201 + int err;
3202 +
3203 ++ if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
3204 ++ del_pvid = true;
3205 ++
3206 + err = ocelot_vlan_member_del(ocelot, port, vid);
3207 + if (err)
3208 + return err;
3209 +
3210 + /* Ingress */
3211 +- if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
3212 ++ if (del_pvid)
3213 + ocelot_port_set_pvid(ocelot, port, NULL);
3214 +
3215 + /* Egress */
3216 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
3217 +index 784292b162907..1543e47456d57 100644
3218 +--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
3219 ++++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
3220 +@@ -723,6 +723,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
3221 + return true;
3222 + if (netif_is_gretap(netdev))
3223 + return true;
3224 ++ if (netif_is_ip6gretap(netdev))
3225 ++ return true;
3226 +
3227 + return false;
3228 + }
3229 +diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
3230 +index 7d67f41387f55..4f5ef8a9a9a87 100644
3231 +--- a/drivers/net/ieee802154/at86rf230.c
3232 ++++ b/drivers/net/ieee802154/at86rf230.c
3233 +@@ -100,6 +100,7 @@ struct at86rf230_local {
3234 + unsigned long cal_timeout;
3235 + bool is_tx;
3236 + bool is_tx_from_off;
3237 ++ bool was_tx;
3238 + u8 tx_retry;
3239 + struct sk_buff *tx_skb;
3240 + struct at86rf230_state_change tx;
3241 +@@ -343,7 +344,11 @@ at86rf230_async_error_recover_complete(void *context)
3242 + if (ctx->free)
3243 + kfree(ctx);
3244 +
3245 +- ieee802154_wake_queue(lp->hw);
3246 ++ if (lp->was_tx) {
3247 ++ lp->was_tx = 0;
3248 ++ dev_kfree_skb_any(lp->tx_skb);
3249 ++ ieee802154_wake_queue(lp->hw);
3250 ++ }
3251 + }
3252 +
3253 + static void
3254 +@@ -352,7 +357,11 @@ at86rf230_async_error_recover(void *context)
3255 + struct at86rf230_state_change *ctx = context;
3256 + struct at86rf230_local *lp = ctx->lp;
3257 +
3258 +- lp->is_tx = 0;
3259 ++ if (lp->is_tx) {
3260 ++ lp->was_tx = 1;
3261 ++ lp->is_tx = 0;
3262 ++ }
3263 ++
3264 + at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
3265 + at86rf230_async_error_recover_complete);
3266 + }
3267 +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
3268 +index f3438d3e104ac..2bc730fd260eb 100644
3269 +--- a/drivers/net/ieee802154/ca8210.c
3270 ++++ b/drivers/net/ieee802154/ca8210.c
3271 +@@ -2975,8 +2975,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
3272 + ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
3273 + ca8210_hw->phy->cca_ed_level = -9800;
3274 + ca8210_hw->phy->symbol_duration = 16;
3275 +- ca8210_hw->phy->lifs_period = 40;
3276 +- ca8210_hw->phy->sifs_period = 12;
3277 ++ ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration;
3278 ++ ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration;
3279 + ca8210_hw->flags =
3280 + IEEE802154_HW_AFILT |
3281 + IEEE802154_HW_OMIT_CKSUM |
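[Note: the cfg802154 lifs_period and sifs_period fields are expected in microseconds, while IEEE 802.15.4 specifies the interframe spacings as 40 and 12 symbols. With the 16 us symbol duration set just above, LIFS = 40 * 16 = 640 us and SIFS = 12 * 16 = 192 us; the driver had been programming the raw symbol counts.]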
3282 +diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
3283 +index 4300261e2f9e7..378ee779061c3 100644
3284 +--- a/drivers/net/netdevsim/fib.c
3285 ++++ b/drivers/net/netdevsim/fib.c
3286 +@@ -623,14 +623,14 @@ static int nsim_fib6_rt_append(struct nsim_fib_data *data,
3287 + if (err)
3288 + goto err_fib6_rt_nh_del;
3289 +
3290 +- fib6_event->rt_arr[i]->trap = true;
3291 ++ WRITE_ONCE(fib6_event->rt_arr[i]->trap, true);
3292 + }
3293 +
3294 + return 0;
3295 +
3296 + err_fib6_rt_nh_del:
3297 + for (i--; i >= 0; i--) {
3298 +- fib6_event->rt_arr[i]->trap = false;
3299 ++ WRITE_ONCE(fib6_event->rt_arr[i]->trap, false);
3300 + nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
3301 + }
3302 + return err;
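[Note: WRITE_ONCE() marks these stores for lockless readers; it keeps the compiler from tearing, fusing, or caching the write while another context polls the trap flag without the writer's lock. The matching consumer side would look roughly like (handler name hypothetical):

    if (READ_ONCE(rt->trap))                /* paired lockless read */
            handle_trapped_route(rt);       /* hypothetical consumer */
]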
3303 +diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c
3304 +index b7a5ae20edd53..68ee434f9dea3 100644
3305 +--- a/drivers/net/phy/mediatek-ge.c
3306 ++++ b/drivers/net/phy/mediatek-ge.c
3307 +@@ -55,9 +55,6 @@ static int mt7530_phy_config_init(struct phy_device *phydev)
3308 +
3309 + static int mt7531_phy_config_init(struct phy_device *phydev)
3310 + {
3311 +- if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
3312 +- return -EINVAL;
3313 +-
3314 + mtk_gephy_config_init(phydev);
3315 +
3316 + /* PHY link down power saving enable */
3317 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
3318 +index f510e82194705..2f2abc42cecea 100644
3319 +--- a/drivers/net/usb/qmi_wwan.c
3320 ++++ b/drivers/net/usb/qmi_wwan.c
3321 +@@ -1399,6 +1399,8 @@ static const struct usb_device_id products[] = {
3322 + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
3323 + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
3324 + {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
2325 ++ {QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support */
3326 ++ {QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */
3327 + {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
3328 + {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
3329 + {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
3330 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
3331 +index 0eb13e5df5177..d99140960a820 100644
3332 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
3333 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
3334 +@@ -693,7 +693,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
3335 + {
3336 + struct brcmf_fw_item *first = &req->items[0];
3337 + struct brcmf_fw *fwctx;
3338 +- char *alt_path;
3339 ++ char *alt_path = NULL;
3340 + int ret;
3341 +
3342 + brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
3343 +@@ -712,7 +712,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
3344 + fwctx->done = fw_cb;
3345 +
3346 + /* First try alternative board-specific path if any */
3347 +- alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type);
3348 ++ if (fwctx->req->board_type)
3349 ++ alt_path = brcm_alt_fw_path(first->path,
3350 ++ fwctx->req->board_type);
3351 + if (alt_path) {
3352 + ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
3353 + fwctx->dev, GFP_KERNEL, fwctx,
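[Note: two guards in one brcmfmac hunk. alt_path now starts out NULL, so the following if (alt_path) test is well-defined when no alternative path gets built, and brcm_alt_fw_path() is only invoked once a board_type is actually known, since composing the alternative firmware name from a NULL board string is what crashed.]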
3354 +diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
3355 +index 418ae4f870ab7..fcfd2bd0baa6d 100644
3356 +--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
3357 ++++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
3358 +@@ -79,19 +79,6 @@ config IWLWIFI_OPMODE_MODULAR
3359 + comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
3360 + depends on IWLDVM=n && IWLMVM=n
3361 +
3362 +-config IWLWIFI_BCAST_FILTERING
3363 +- bool "Enable broadcast filtering"
3364 +- depends on IWLMVM
3365 +- help
3366 +- Say Y here to enable default bcast filtering configuration.
3367 +-
3368 +- Enabling broadcast filtering will drop any incoming wireless
3369 +- broadcast frames, except some very specific predefined
3370 +- patterns (e.g. incoming arp requests).
3371 +-
3372 +- If unsure, don't enable this option, as some programs might
3373 +- expect incoming broadcasts for their normal operations.
3374 +-
3375 + menu "Debugging Options"
3376 +
3377 + config IWLWIFI_DEBUG
3378 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
3379 +index 2e4590876bc33..19d85760dfac3 100644
3380 +--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
3381 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
3382 +@@ -1,7 +1,7 @@
3383 + // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3384 + /*
3385 + * Copyright (C) 2017 Intel Deutschland GmbH
3386 +- * Copyright (C) 2019-2021 Intel Corporation
3387 ++ * Copyright (C) 2019-2022 Intel Corporation
3388 + */
3389 + #include <linux/uuid.h>
3390 + #include "iwl-drv.h"
3391 +@@ -873,10 +873,11 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
3392 + * only one using version 36, so skip this version entirely.
3393 + */
3394 + return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
3395 +- IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
3396 +- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
3397 +- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
3398 +- CSR_HW_REV_TYPE_7265D));
3399 ++ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
3400 ++ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
3401 ++ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
3402 ++ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
3403 ++ CSR_HW_REV_TYPE_7265D));
3404 + }
3405 + IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
3406 +
3407 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
3408 +index ee6b5844a871c..46ad5543a6cc8 100644
3409 +--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
3410 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
3411 +@@ -505,11 +505,6 @@ enum iwl_legacy_cmds {
3412 + */
3413 + DEBUG_LOG_MSG = 0xf7,
3414 +
3415 +- /**
3416 +- * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd
3417 +- */
3418 +- BCAST_FILTER_CMD = 0xcf,
3419 +-
3420 + /**
3421 + * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
3422 + */
3423 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
3424 +index dd62a63956b3b..e44c70b7c7907 100644
3425 +--- a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
3426 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
3427 +@@ -36,92 +36,4 @@ struct iwl_mcast_filter_cmd {
3428 + u8 addr_list[0];
3429 + } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
3430 +
3431 +-#define MAX_BCAST_FILTERS 8
3432 +-#define MAX_BCAST_FILTER_ATTRS 2
3433 +-
3434 +-/**
3435 +- * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
3436 +- * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
3437 +- * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
3438 +- * start of ip payload).
3439 +- */
3440 +-enum iwl_mvm_bcast_filter_attr_offset {
3441 +- BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
3442 +- BCAST_FILTER_OFFSET_IP_END = 1,
3443 +-};
3444 +-
3445 +-/**
3446 +- * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
3447 +- * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
3448 +- * @offset: starting offset of this pattern.
3449 +- * @reserved1: reserved
3450 +- * @val: value to match - big endian (MSB is the first
3451 +- * byte to match from offset pos).
3452 +- * @mask: mask to match (big endian).
3453 +- */
3454 +-struct iwl_fw_bcast_filter_attr {
3455 +- u8 offset_type;
3456 +- u8 offset;
3457 +- __le16 reserved1;
3458 +- __be32 val;
3459 +- __be32 mask;
3460 +-} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
3461 +-
3462 +-/**
3463 +- * enum iwl_mvm_bcast_filter_frame_type - filter frame type
3464 +- * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
3465 +- * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
3466 +- */
3467 +-enum iwl_mvm_bcast_filter_frame_type {
3468 +- BCAST_FILTER_FRAME_TYPE_ALL = 0,
3469 +- BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
3470 +-};
3471 +-
3472 +-/**
3473 +- * struct iwl_fw_bcast_filter - broadcast filter
3474 +- * @discard: discard frame (1) or let it pass (0).
3475 +- * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
3476 +- * @reserved1: reserved
3477 +- * @num_attrs: number of valid attributes in this filter.
3478 +- * @attrs: attributes of this filter. a filter is considered matched
3479 +- * only when all its attributes are matched (i.e. AND relationship)
3480 +- */
3481 +-struct iwl_fw_bcast_filter {
3482 +- u8 discard;
3483 +- u8 frame_type;
3484 +- u8 num_attrs;
3485 +- u8 reserved1;
3486 +- struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
3487 +-} __packed; /* BCAST_FILTER_S_VER_1 */
3488 +-
3489 +-/**
3490 +- * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
3491 +- * @default_discard: default action for this mac (discard (1) / pass (0)).
3492 +- * @reserved1: reserved
3493 +- * @attached_filters: bitmap of relevant filters for this mac.
3494 +- */
3495 +-struct iwl_fw_bcast_mac {
3496 +- u8 default_discard;
3497 +- u8 reserved1;
3498 +- __le16 attached_filters;
3499 +-} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
3500 +-
3501 +-/**
3502 +- * struct iwl_bcast_filter_cmd - broadcast filtering configuration
3503 +- * @disable: enable (0) / disable (1)
3504 +- * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
3505 +- * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
3506 +- * @reserved1: reserved
3507 +- * @filters: broadcast filters
3508 +- * @macs: broadcast filtering configuration per-mac
3509 +- */
3510 +-struct iwl_bcast_filter_cmd {
3511 +- u8 disable;
3512 +- u8 max_bcast_filters;
3513 +- u8 max_macs;
3514 +- u8 reserved1;
3515 +- struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
3516 +- struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
3517 +-} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
3518 +-
3519 + #endif /* __iwl_fw_api_filter_h__ */
3520 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
3521 +index a09081d7ed45e..f6301f898c7f5 100644
3522 +--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
3523 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
3524 +@@ -710,7 +710,6 @@ struct iwl_lq_cmd {
3525 +
3526 + u8 iwl_fw_rate_idx_to_plcp(int idx);
3527 + u32 iwl_new_rate_from_v1(u32 rate_v1);
3528 +-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags);
3529 + const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx);
3530 + const char *iwl_rs_pretty_ant(u8 ant);
3531 + const char *iwl_rs_pretty_bw(int bw);
3532 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
3533 +index 3d572f5024bbc..b44b869dd3704 100644
3534 +--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
3535 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
3536 +@@ -182,7 +182,6 @@ struct iwl_ucode_capa {
3537 + * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
3538 + * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
3539 + * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
3540 +- * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
3541 + * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
3542 + */
3543 + enum iwl_ucode_tlv_flag {
3544 +@@ -197,7 +196,6 @@ enum iwl_ucode_tlv_flag {
3545 + IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
3546 + IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
3547 + IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
3548 +- IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
3549 + };
3550 +
3551 + typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
3552 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
3553 +index a21c3befd93b5..a835214611ce5 100644
3554 +--- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c
3555 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
3556 +@@ -91,6 +91,20 @@ const char *iwl_rs_pretty_bw(int bw)
3557 + }
3558 + IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw);
3559 +
3560 ++static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
3561 ++{
3562 ++ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
3563 ++ int idx;
3564 ++ bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
3565 ++ int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
3566 ++ int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
3567 ++
3568 ++ for (idx = offset; idx < last; idx++)
3569 ++ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
3570 ++ return idx - offset;
3571 ++ return IWL_RATE_INVALID;
3572 ++}
3573 ++
3574 + u32 iwl_new_rate_from_v1(u32 rate_v1)
3575 + {
3576 + u32 rate_v2 = 0;
3577 +@@ -144,7 +158,10 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
3578 + } else {
3579 + u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
3580 +
3581 +- WARN_ON(legacy_rate < 0);
3582 ++ if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
3583 ++ legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
3584 ++ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
3585 ++
3586 + rate_v2 |= legacy_rate;
3587 + if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
3588 + rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK;
3589 +@@ -172,20 +189,6 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
3590 + }
3591 + IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1);
3592 +
3593 +-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
3594 +-{
3595 +- int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
3596 +- int idx;
3597 +- bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
3598 +- int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
3599 +- int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
3600 +-
3601 +- for (idx = offset; idx < last; idx++)
3602 +- if (iwl_fw_rate_idx_to_plcp(idx) == rate)
3603 +- return idx - offset;
3604 +- return -1;
3605 +-}
3606 +-
3607 + int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
3608 + {
3609 + char *type;
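[Note: beyond making iwl_legacy_rate_to_fw_idx() static and hoisting it above its only caller, this rs.c change retires an unsigned-comparison bug: the old helper returned -1 from a u32-typed function, so the caller's WARN_ON(legacy_rate < 0) could never fire. In miniature:

    u32 lookup(void) { return -1; }         /* silently becomes 0xffffffff */

    u32 v = lookup();
    WARN_ON(v < 0);                         /* always false: v is unsigned */
    if (v == IWL_RATE_INVALID)              /* named sentinel is testable */
            v = IWL_FIRST_OFDM_RATE;

The explicit sentinel plus WARN_ON_ONCE with a sane fallback replaces a check that was dead code.]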
3610 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
3611 +index 70f9dc7ecb0eb..078fd20285e6d 100644
3612 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
3613 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
3614 +@@ -1,6 +1,6 @@
3615 + /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
3616 + /*
3617 +- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
3618 ++ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
3619 + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
3620 + * Copyright (C) 2016 Intel Deutschland GmbH
3621 + */
3622 +@@ -326,6 +326,7 @@ enum {
3623 + #define CSR_HW_REV_TYPE_2x00 (0x0000100)
3624 + #define CSR_HW_REV_TYPE_105 (0x0000110)
3625 + #define CSR_HW_REV_TYPE_135 (0x0000120)
3626 ++#define CSR_HW_REV_TYPE_3160 (0x0000164)
3627 + #define CSR_HW_REV_TYPE_7265D (0x0000210)
3628 + #define CSR_HW_REV_TYPE_NONE (0x00001F0)
3629 + #define CSR_HW_REV_TYPE_QNJ (0x0000360)
3630 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
3631 +index f53ce9c086947..506d05953314d 100644
3632 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
3633 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
3634 +@@ -1656,6 +1656,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
3635 + out_unbind:
3636 + complete(&drv->request_firmware_complete);
3637 + device_release_driver(drv->trans->dev);
3638 ++ /* drv has just been freed by the release */
3639 ++ failure = false;
3640 + free:
3641 + if (failure)
3642 + iwl_dealloc_ucode(drv);
3643 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
3644 +index ff66001d507ef..64100e73b5bc6 100644
3645 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
3646 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
3647 +@@ -1361,189 +1361,6 @@ static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
3648 + return count;
3649 + }
3650 +
3651 +-#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
3652 +-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
3653 +-static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
3654 +- char __user *user_buf,
3655 +- size_t count, loff_t *ppos)
3656 +-{
3657 +- struct iwl_mvm *mvm = file->private_data;
3658 +- struct iwl_bcast_filter_cmd cmd;
3659 +- const struct iwl_fw_bcast_filter *filter;
3660 +- char *buf;
3661 +- int bufsz = 1024;
3662 +- int i, j, pos = 0;
3663 +- ssize_t ret;
3664 +-
3665 +- buf = kzalloc(bufsz, GFP_KERNEL);
3666 +- if (!buf)
3667 +- return -ENOMEM;
3668 +-
3669 +- mutex_lock(&mvm->mutex);
3670 +- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
3671 +- ADD_TEXT("None\n");
3672 +- mutex_unlock(&mvm->mutex);
3673 +- goto out;
3674 +- }
3675 +- mutex_unlock(&mvm->mutex);
3676 +-
3677 +- for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
3678 +- filter = &cmd.filters[i];
3679 +-
3680 +- ADD_TEXT("Filter [%d]:\n", i);
3681 +- ADD_TEXT("\tDiscard=%d\n", filter->discard);
3682 +- ADD_TEXT("\tFrame Type: %s\n",
3683 +- filter->frame_type ? "IPv4" : "Generic");
3684 +-
3685 +- for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
3686 +- const struct iwl_fw_bcast_filter_attr *attr;
3687 +-
3688 +- attr = &filter->attrs[j];
3689 +- if (!attr->mask)
3690 +- break;
3691 +-
3692 +- ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
3693 +- j, attr->offset,
3694 +- attr->offset_type ? "IP End" :
3695 +- "Payload Start",
3696 +- be32_to_cpu(attr->mask),
3697 +- be32_to_cpu(attr->val),
3698 +- le16_to_cpu(attr->reserved1));
3699 +- }
3700 +- }
3701 +-out:
3702 +- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
3703 +- kfree(buf);
3704 +- return ret;
3705 +-}
3706 +-
3707 +-static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
3708 +- size_t count, loff_t *ppos)
3709 +-{
3710 +- int pos, next_pos;
3711 +- struct iwl_fw_bcast_filter filter = {};
3712 +- struct iwl_bcast_filter_cmd cmd;
3713 +- u32 filter_id, attr_id, mask, value;
3714 +- int err = 0;
3715 +-
3716 +- if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
3717 +- &filter.frame_type, &pos) != 3)
3718 +- return -EINVAL;
3719 +-
3720 +- if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
3721 +- filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
3722 +- return -EINVAL;
3723 +-
3724 +- for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
3725 +- attr_id++) {
3726 +- struct iwl_fw_bcast_filter_attr *attr =
3727 +- &filter.attrs[attr_id];
3728 +-
3729 +- if (pos >= count)
3730 +- break;
3731 +-
3732 +- if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
3733 +- &attr->offset, &attr->offset_type,
3734 +- &mask, &value, &next_pos) != 4)
3735 +- return -EINVAL;
3736 +-
3737 +- attr->mask = cpu_to_be32(mask);
3738 +- attr->val = cpu_to_be32(value);
3739 +- if (mask)
3740 +- filter.num_attrs++;
3741 +-
3742 +- pos += next_pos;
3743 +- }
3744 +-
3745 +- mutex_lock(&mvm->mutex);
3746 +- memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
3747 +- &filter, sizeof(filter));
3748 +-
3749 +- /* send updated bcast filtering configuration */
3750 +- if (iwl_mvm_firmware_running(mvm) &&
3751 +- mvm->dbgfs_bcast_filtering.override &&
3752 +- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
3753 +- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
3754 +- sizeof(cmd), &cmd);
3755 +- mutex_unlock(&mvm->mutex);
3756 +-
3757 +- return err ?: count;
3758 +-}
3759 +-
3760 +-static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
3761 +- char __user *user_buf,
3762 +- size_t count, loff_t *ppos)
3763 +-{
3764 +- struct iwl_mvm *mvm = file->private_data;
3765 +- struct iwl_bcast_filter_cmd cmd;
3766 +- char *buf;
3767 +- int bufsz = 1024;
3768 +- int i, pos = 0;
3769 +- ssize_t ret;
3770 +-
3771 +- buf = kzalloc(bufsz, GFP_KERNEL);
3772 +- if (!buf)
3773 +- return -ENOMEM;
3774 +-
3775 +- mutex_lock(&mvm->mutex);
3776 +- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
3777 +- ADD_TEXT("None\n");
3778 +- mutex_unlock(&mvm->mutex);
3779 +- goto out;
3780 +- }
3781 +- mutex_unlock(&mvm->mutex);
3782 +-
3783 +- for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
3784 +- const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
3785 +-
3786 +- ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
3787 +- i, mac->default_discard, mac->attached_filters);
3788 +- }
3789 +-out:
3790 +- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
3791 +- kfree(buf);
3792 +- return ret;
3793 +-}
3794 +-
3795 +-static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
3796 +- char *buf, size_t count,
3797 +- loff_t *ppos)
3798 +-{
3799 +- struct iwl_bcast_filter_cmd cmd;
3800 +- struct iwl_fw_bcast_mac mac = {};
3801 +- u32 mac_id, attached_filters;
3802 +- int err = 0;
3803 +-
3804 +- if (!mvm->bcast_filters)
3805 +- return -ENOENT;
3806 +-
3807 +- if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
3808 +- &attached_filters) != 3)
3809 +- return -EINVAL;
3810 +-
3811 +- if (mac_id >= ARRAY_SIZE(cmd.macs) ||
3812 +- mac.default_discard > 1 ||
3813 +- attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
3814 +- return -EINVAL;
3815 +-
3816 +- mac.attached_filters = cpu_to_le16(attached_filters);
3817 +-
3818 +- mutex_lock(&mvm->mutex);
3819 +- memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
3820 +- &mac, sizeof(mac));
3821 +-
3822 +- /* send updated bcast filtering configuration */
3823 +- if (iwl_mvm_firmware_running(mvm) &&
3824 +- mvm->dbgfs_bcast_filtering.override &&
3825 +- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
3826 +- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
3827 +- sizeof(cmd), &cmd);
3828 +- mutex_unlock(&mvm->mutex);
3829 +-
3830 +- return err ?: count;
3831 +-}
3832 +-#endif
3833 +-
3834 + #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
3835 + _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
3836 + #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
3837 +@@ -1873,11 +1690,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
3838 +
3839 + MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
3840 +
3841 +-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
3842 +-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
3843 +-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
3844 +-#endif
3845 +-
3846 + #ifdef CONFIG_ACPI
3847 + MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
3848 + #endif
3849 +@@ -2088,21 +1900,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
3850 +
3851 + MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
3852 +
3853 +-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
3854 +- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
3855 +- bcast_dir = debugfs_create_dir("bcast_filtering",
3856 +- mvm->debugfs_dir);
3857 +-
3858 +- debugfs_create_bool("override", 0600, bcast_dir,
3859 +- &mvm->dbgfs_bcast_filtering.override);
3860 +-
3861 +- MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
3862 +- bcast_dir, 0600);
3863 +- MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
3864 +- bcast_dir, 0600);
3865 +- }
3866 +-#endif
3867 +-
3868 + #ifdef CONFIG_PM_SLEEP
3869 + MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
3870 + debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
3871 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
3872 +index 9eb78461f2800..58d5395acf73c 100644
3873 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
3874 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
3875 +@@ -1636,7 +1636,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
3876 + ret = iwl_mvm_sar_init(mvm);
3877 + if (ret == 0)
3878 + ret = iwl_mvm_sar_geo_init(mvm);
3879 +- else if (ret < 0)
3880 ++ if (ret < 0)
3881 + goto error;
3882 +
3883 + iwl_mvm_tas_init(mvm);
3884 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3885 +index 9c5c10908f013..cde3d2ce0b855 100644
3886 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3887 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3888 +@@ -55,79 +55,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
3889 + },
3890 + };
3891 +
3892 +-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
3893 +-/*
3894 +- * Use the reserved field to indicate magic values.
3895 +- * these values will only be used internally by the driver,
3896 +- * and won't make it to the fw (reserved will be 0).
3897 +- * BC_FILTER_MAGIC_IP - configure the val of this attribute to
3898 +- * be the vif's ip address. in case there is not a single
3899 +- * ip address (0, or more than 1), this attribute will
3900 +- * be skipped.
3901 +- * BC_FILTER_MAGIC_MAC - set the val of this attribute to
3902 +- * the LSB bytes of the vif's mac address
3903 +- */
3904 +-enum {
3905 +- BC_FILTER_MAGIC_NONE = 0,
3906 +- BC_FILTER_MAGIC_IP,
3907 +- BC_FILTER_MAGIC_MAC,
3908 +-};
3909 +-
3910 +-static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
3911 +- {
3912 +- /* arp */
3913 +- .discard = 0,
3914 +- .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
3915 +- .attrs = {
3916 +- {
3917 +- /* frame type - arp, hw type - ethernet */
3918 +- .offset_type =
3919 +- BCAST_FILTER_OFFSET_PAYLOAD_START,
3920 +- .offset = sizeof(rfc1042_header),
3921 +- .val = cpu_to_be32(0x08060001),
3922 +- .mask = cpu_to_be32(0xffffffff),
3923 +- },
3924 +- {
3925 +- /* arp dest ip */
3926 +- .offset_type =
3927 +- BCAST_FILTER_OFFSET_PAYLOAD_START,
3928 +- .offset = sizeof(rfc1042_header) + 2 +
3929 +- sizeof(struct arphdr) +
3930 +- ETH_ALEN + sizeof(__be32) +
3931 +- ETH_ALEN,
3932 +- .mask = cpu_to_be32(0xffffffff),
3933 +- /* mark it as special field */
3934 +- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
3935 +- },
3936 +- },
3937 +- },
3938 +- {
3939 +- /* dhcp offer bcast */
3940 +- .discard = 0,
3941 +- .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
3942 +- .attrs = {
3943 +- {
3944 +- /* udp dest port - 68 (bootp client)*/
3945 +- .offset_type = BCAST_FILTER_OFFSET_IP_END,
3946 +- .offset = offsetof(struct udphdr, dest),
3947 +- .val = cpu_to_be32(0x00440000),
3948 +- .mask = cpu_to_be32(0xffff0000),
3949 +- },
3950 +- {
3951 +- /* dhcp - lsb bytes of client hw address */
3952 +- .offset_type = BCAST_FILTER_OFFSET_IP_END,
3953 +- .offset = 38,
3954 +- .mask = cpu_to_be32(0xffffffff),
3955 +- /* mark it as special field */
3956 +- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
3957 +- },
3958 +- },
3959 +- },
3960 +- /* last filter must be empty */
3961 +- {},
3962 +-};
3963 +-#endif
3964 +-
3965 + static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
3966 + .max_peers = IWL_MVM_TOF_MAX_APS,
3967 + .report_ap_tsf = 1,
3968 +@@ -683,11 +610,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
3969 + }
3970 + #endif
3971 +
3972 +-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
3973 +- /* assign default bcast filtering configuration */
3974 +- mvm->bcast_filters = iwl_mvm_default_bcast_filters;
3975 +-#endif
3976 +-
3977 + ret = iwl_mvm_leds_init(mvm);
3978 + if (ret)
3979 + return ret;
3980 +@@ -1803,162 +1725,6 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
3981 + mutex_unlock(&mvm->mutex);
3982 + }
3983 +
3984 +-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
3985 +-struct iwl_bcast_iter_data {
3986 +- struct iwl_mvm *mvm;
3987 +- struct iwl_bcast_filter_cmd *cmd;
3988 +- u8 current_filter;
3989 +-};
3990 +-
3991 +-static void
3992 +-iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
3993 +- const struct iwl_fw_bcast_filter *in_filter,
3994 +- struct iwl_fw_bcast_filter *out_filter)
3995 +-{
3996 +- struct iwl_fw_bcast_filter_attr *attr;
3997 +- int i;
3998 +-
3999 +- memcpy(out_filter, in_filter, sizeof(*out_filter));
4000 +-
4001 +- for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
4002 +- attr = &out_filter->attrs[i];
4003 +-
4004 +- if (!attr->mask)
4005 +- break;
4006 +-
4007 +- switch (attr->reserved1) {
4008 +- case cpu_to_le16(BC_FILTER_MAGIC_IP):
4009 +- if (vif->bss_conf.arp_addr_cnt != 1) {
4010 +- attr->mask = 0;
4011 +- continue;
4012 +- }
4013 +-
4014 +- attr->val = vif->bss_conf.arp_addr_list[0];
4015 +- break;
4016 +- case cpu_to_le16(BC_FILTER_MAGIC_MAC):
4017 +- attr->val = *(__be32 *)&vif->addr[2];
4018 +- break;
4019 +- default:
4020 +- break;
4021 +- }
4022 +- attr->reserved1 = 0;
4023 +- out_filter->num_attrs++;
4024 +- }
4025 +-}
4026 +-
4027 +-static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
4028 +- struct ieee80211_vif *vif)
4029 +-{
4030 +- struct iwl_bcast_iter_data *data = _data;
4031 +- struct iwl_mvm *mvm = data->mvm;
4032 +- struct iwl_bcast_filter_cmd *cmd = data->cmd;
4033 +- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4034 +- struct iwl_fw_bcast_mac *bcast_mac;
4035 +- int i;
4036 +-
4037 +- if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
4038 +- return;
4039 +-
4040 +- bcast_mac = &cmd->macs[mvmvif->id];
4041 +-
4042 +- /*
4043 +- * enable filtering only for associated stations, but not for P2P
4044 +- * Clients
4045 +- */
4046 +- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
4047 +- !vif->bss_conf.assoc)
4048 +- return;
4049 +-
4050 +- bcast_mac->default_discard = 1;
4051 +-
4052 +- /* copy all configured filters */
4053 +- for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
4054 +- /*
4055 +- * Make sure we don't exceed our filters limit.
4056 +- * if there is still a valid filter to be configured,
4057 +- * be on the safe side and just allow bcast for this mac.
4058 +- */
4059 +- if (WARN_ON_ONCE(data->current_filter >=
4060 +- ARRAY_SIZE(cmd->filters))) {
4061 +- bcast_mac->default_discard = 0;
4062 +- bcast_mac->attached_filters = 0;
4063 +- break;
4064 +- }
4065 +-
4066 +- iwl_mvm_set_bcast_filter(vif,
4067 +- &mvm->bcast_filters[i],
4068 +- &cmd->filters[data->current_filter]);
4069 +-
4070 +- /* skip current filter if it contains no attributes */
4071 +- if (!cmd->filters[data->current_filter].num_attrs)
4072 +- continue;
4073 +-
4074 +- /* attach the filter to current mac */
4075 +- bcast_mac->attached_filters |=
4076 +- cpu_to_le16(BIT(data->current_filter));
4077 +-
4078 +- data->current_filter++;
4079 +- }
4080 +-}
4081 +-
4082 +-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
4083 +- struct iwl_bcast_filter_cmd *cmd)
4084 +-{
4085 +- struct iwl_bcast_iter_data iter_data = {
4086 +- .mvm = mvm,
4087 +- .cmd = cmd,
4088 +- };
4089 +-
4090 +- if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
4091 +- return false;
4092 +-
4093 +- memset(cmd, 0, sizeof(*cmd));
4094 +- cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
4095 +- cmd->max_macs = ARRAY_SIZE(cmd->macs);
4096 +-
4097 +-#ifdef CONFIG_IWLWIFI_DEBUGFS
4098 +- /* use debugfs filters/macs if override is configured */
4099 +- if (mvm->dbgfs_bcast_filtering.override) {
4100 +- memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
4101 +- sizeof(cmd->filters));
4102 +- memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
4103 +- sizeof(cmd->macs));
4104 +- return true;
4105 +- }
4106 +-#endif
4107 +-
4108 +- /* if no filters are configured, do nothing */
4109 +- if (!mvm->bcast_filters)
4110 +- return false;
4111 +-
4112 +- /* configure and attach these filters for each associated sta vif */
4113 +- ieee80211_iterate_active_interfaces(
4114 +- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
4115 +- iwl_mvm_bcast_filter_iterator, &iter_data);
4116 +-
4117 +- return true;
4118 +-}
4119 +-
4120 +-static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
4121 +-{
4122 +- struct iwl_bcast_filter_cmd cmd;
4123 +-
4124 +- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
4125 +- return 0;
4126 +-
4127 +- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
4128 +- return 0;
4129 +-
4130 +- return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
4131 +- sizeof(cmd), &cmd);
4132 +-}
4133 +-#else
4134 +-static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
4135 +-{
4136 +- return 0;
4137 +-}
4138 +-#endif
4139 +-
4140 + static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
4141 + struct ieee80211_vif *vif)
4142 + {
4143 +@@ -2469,7 +2235,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
4144 + }
4145 +
4146 + iwl_mvm_recalc_multicast(mvm);
4147 +- iwl_mvm_configure_bcast_filter(mvm);
4148 +
4149 + /* reset rssi values */
4150 + mvmvif->bf_data.ave_beacon_signal = 0;
4151 +@@ -2519,11 +2284,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
4152 + }
4153 + }
4154 +
4155 +- if (changes & BSS_CHANGED_ARP_FILTER) {
4156 +- IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
4157 +- iwl_mvm_configure_bcast_filter(mvm);
4158 +- }
4159 +-
4160 + if (changes & BSS_CHANGED_BANDWIDTH)
4161 + iwl_mvm_apply_fw_smps_request(vif);
4162 + }
4163 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
4164 +index a72d85086fe33..da8330b5e6d5f 100644
4165 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
4166 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
4167 +@@ -872,17 +872,6 @@ struct iwl_mvm {
4168 + /* rx chain antennas set through debugfs for the scan command */
4169 + u8 scan_rx_ant;
4170 +
4171 +-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
4172 +- /* broadcast filters to configure for each associated station */
4173 +- const struct iwl_fw_bcast_filter *bcast_filters;
4174 +-#ifdef CONFIG_IWLWIFI_DEBUGFS
4175 +- struct {
4176 +- bool override;
4177 +- struct iwl_bcast_filter_cmd cmd;
4178 +- } dbgfs_bcast_filtering;
4179 +-#endif
4180 +-#endif
4181 +-
4182 + /* Internal station */
4183 + struct iwl_mvm_int_sta aux_sta;
4184 + struct iwl_mvm_int_sta snif_sta;
4185 +@@ -1570,8 +1559,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
4186 + int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
4187 +
4188 + int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
4189 +-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
4190 +- struct iwl_bcast_filter_cmd *cmd);
4191 +
4192 + /*
4193 + * FW notifications / CMD responses handlers
4194 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
4195 +index cd08e289cd9a0..364f6aefae81d 100644
4196 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
4197 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
4198 +@@ -474,7 +474,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
4199 + HCMD_NAME(MCC_CHUB_UPDATE_CMD),
4200 + HCMD_NAME(MARKER_CMD),
4201 + HCMD_NAME(BT_PROFILE_NOTIFICATION),
4202 +- HCMD_NAME(BCAST_FILTER_CMD),
4203 + HCMD_NAME(MCAST_FILTER_CMD),
4204 + HCMD_NAME(REPLY_SF_CFG_CMD),
4205 + HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
4206 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
4207 +index 76e0b7b45980d..0f96d422d6e06 100644
4208 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
4209 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
4210 +@@ -1380,7 +1380,7 @@ static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
4211 + struct ieee80211_tx_rate *r = &info->status.rates[0];
4212 +
4213 + if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
4214 +- TX_CMD, 0) > 6)
4215 ++ TX_CMD, 0) <= 6)
4216 + rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
4217 +
4218 + info->status.antenna =
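[Note: the inverted comparison is the entire fix. Firmware whose TX_CMD notification version is 6 or lower still reports rates in the v1 layout and needs iwl_new_rate_from_v1(), while the old "> 6" test skipped conversion exactly where it was required and re-converted rates already in the new format.]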
4219 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
4220 +index 645cb4dd4e5a3..6642d85850734 100644
4221 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
4222 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
4223 +@@ -384,8 +384,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
4224 + /* This may fail if AMT took ownership of the device */
4225 + if (iwl_pcie_prepare_card_hw(trans)) {
4226 + IWL_WARN(trans, "Exit HW not ready\n");
4227 +- ret = -EIO;
4228 +- goto out;
4229 ++ return -EIO;
4230 + }
4231 +
4232 + iwl_enable_rfkill_int(trans);
4233 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
4234 +index 1efb53f78a62f..3b38c426575bc 100644
4235 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
4236 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
4237 +@@ -1303,8 +1303,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
4238 + /* This may fail if AMT took ownership of the device */
4239 + if (iwl_pcie_prepare_card_hw(trans)) {
4240 + IWL_WARN(trans, "Exit HW not ready\n");
4241 +- ret = -EIO;
4242 +- goto out;
4243 ++ return -EIO;
4244 + }
4245 +
4246 + iwl_enable_rfkill_int(trans);
4247 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
4248 +index 1af8a4513708a..352766aa3122e 100644
4249 +--- a/drivers/nvme/host/core.c
4250 ++++ b/drivers/nvme/host/core.c
4251 +@@ -4258,7 +4258,14 @@ static void nvme_async_event_work(struct work_struct *work)
4252 + container_of(work, struct nvme_ctrl, async_event_work);
4253 +
4254 + nvme_aen_uevent(ctrl);
4255 +- ctrl->ops->submit_async_event(ctrl);
4256 ++
4257 ++ /*
4258 ++ * The transport drivers must guarantee AER submission here is safe by
4259 ++ * flushing ctrl async_event_work after changing the controller state
4260 ++ * from LIVE and before freeing the admin queue.
4261 ++ */
4262 ++ if (ctrl->state == NVME_CTRL_LIVE)
4263 ++ ctrl->ops->submit_async_event(ctrl);
4264 + }
4265 +
4266 + static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
4267 +@@ -4571,7 +4578,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
4268 + if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
4269 + return;
4270 +
4271 +- blk_set_queue_dying(ns->queue);
4272 ++ blk_mark_disk_dead(ns->disk);
4273 + nvme_start_ns_queue(ns);
4274 +
4275 + set_capacity_and_notify(ns->disk, 0);
4276 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
4277 +index 13e5d503ed076..99c2307b04e2c 100644
4278 +--- a/drivers/nvme/host/multipath.c
4279 ++++ b/drivers/nvme/host/multipath.c
4280 +@@ -817,7 +817,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
4281 + {
4282 + if (!head->disk)
4283 + return;
4284 +- blk_set_queue_dying(head->disk->queue);
4285 ++ blk_mark_disk_dead(head->disk);
4286 + /* make sure all pending bios are cleaned up */
4287 + kblockd_schedule_work(&head->requeue_work);
4288 + flush_work(&head->requeue_work);
4289 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
4290 +index 850f84d204d05..9c55e4be8a398 100644
4291 +--- a/drivers/nvme/host/rdma.c
4292 ++++ b/drivers/nvme/host/rdma.c
4293 +@@ -1200,6 +1200,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
4294 + struct nvme_rdma_ctrl, err_work);
4295 +
4296 + nvme_stop_keep_alive(&ctrl->ctrl);
4297 ++ flush_work(&ctrl->ctrl.async_event_work);
4298 + nvme_rdma_teardown_io_queues(ctrl, false);
4299 + nvme_start_queues(&ctrl->ctrl);
4300 + nvme_rdma_teardown_admin_queue(ctrl, false);
4301 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
4302 +index 22046415a0942..891a36d02e7c7 100644
4303 +--- a/drivers/nvme/host/tcp.c
4304 ++++ b/drivers/nvme/host/tcp.c
4305 +@@ -2104,6 +2104,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
4306 + struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
4307 +
4308 + nvme_stop_keep_alive(ctrl);
4309 ++ flush_work(&ctrl->async_event_work);
4310 + nvme_tcp_teardown_io_queues(ctrl, false);
4311 + /* unquiesce to fail fast pending requests */
4312 + nvme_start_queues(ctrl);
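
The four NVMe hunks above close one race: nvme_async_event_work() now submits an AER only while the controller is LIVE, and the RDMA/TCP error-recovery paths flush async_event_work after stopping keep-alive and before tearing down the queues. A minimal user-space sketch of that guard-plus-flush pattern (illustrative names only, not the kernel API):

#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_RESETTING };

struct ctrl {
    enum ctrl_state state;
    void (*submit_async_event)(struct ctrl *c);
};

/* worker body: skip submission once recovery has moved us off LIVE */
static void async_event_work(struct ctrl *c)
{
    if (c->state == CTRL_LIVE)
        c->submit_async_event(c);
}

/* teardown: leave LIVE first, then flush the worker, then free queues */
static void error_recovery(struct ctrl *c)
{
    c->state = CTRL_RESETTING;
    /* in the kernel, flush_work(&async_event_work) runs here; after
     * the flush no submission can race the freed admin queue */
}

static void noop_submit(struct ctrl *c) { (void)c; puts("AER submitted"); }

int main(void)
{
    struct ctrl c = { CTRL_LIVE, noop_submit };

    async_event_work(&c);   /* submits: controller is LIVE */
    error_recovery(&c);
    async_event_work(&c);   /* no-op: recovery already left LIVE */
    return 0;
}
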
4313 +diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
4314 +index 059566f544291..9be007c9420f9 100644
4315 +--- a/drivers/parisc/ccio-dma.c
4316 ++++ b/drivers/parisc/ccio-dma.c
4317 +@@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
4318 + ioc->usg_calls++;
4319 + #endif
4320 +
4321 +- while(sg_dma_len(sglist) && nents--) {
4322 ++ while (nents && sg_dma_len(sglist)) {
4323 +
4324 + #ifdef CCIO_COLLECT_STATS
4325 + ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
4326 +@@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
4327 + ccio_unmap_page(dev, sg_dma_address(sglist),
4328 + sg_dma_len(sglist), direction, 0);
4329 + ++sglist;
4330 ++ nents--;
4331 + }
4332 +
4333 + DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
4334 +diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
4335 +index e60690d38d677..374b9199878d4 100644
4336 +--- a/drivers/parisc/sba_iommu.c
4337 ++++ b/drivers/parisc/sba_iommu.c
4338 +@@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
4339 + spin_unlock_irqrestore(&ioc->res_lock, flags);
4340 + #endif
4341 +
4342 +- while (sg_dma_len(sglist) && nents--) {
4343 ++ while (nents && sg_dma_len(sglist)) {
4344 +
4345 + sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
4346 + direction, 0);
4347 +@@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
4348 + ioc->usingle_calls--; /* kluge since call is unmap_sg() */
4349 + #endif
4350 + ++sglist;
4351 ++ nents--;
4352 + }
4353 +
4354 + DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
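
Both parisc hunks fix the same loop: the old condition `while (sg_dma_len(sglist) && nents--)` dereferenced the next scatterlist entry before checking whether any entries remained, reading one element past the end of the list, and the post-decrement also skewed the residual count printed afterwards. Testing the count first and decrementing in the body is the whole fix; reduced to a compilable demonstration (plain C, not the driver code):

#include <stdio.h>

struct seg { unsigned int len; };

int main(void)
{
    struct seg segs[] = { {4096}, {4096} };  /* no terminator entry */
    struct seg *s = segs;
    int nents = 2;

    /* old order -- while (s->len && nents--) -- reads s->len one
     * entry past the end of the list before noticing nents is 0 */
    while (nents && s->len) {   /* fixed: test the count first */
        /* ... unmap one segment ... */
        ++s;
        nents--;                /* decrement in the body, not the test */
    }
    printf("done (nents %d)\n", nents);  /* true residual: 0 */
    return 0;
}
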
4355 +diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
4356 +index 6733cb14e7753..c04636f52c1e9 100644
4357 +--- a/drivers/pci/controller/pci-hyperv.c
4358 ++++ b/drivers/pci/controller/pci-hyperv.c
4359 +@@ -1899,8 +1899,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
4360 + if (!hv_dev)
4361 + continue;
4362 +
4363 +- if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
4364 +- set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
4365 ++ if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
4366 ++ hv_dev->desc.virtual_numa_node < num_possible_nodes())
4367 ++ /*
4368 ++ * The kernel may boot with some NUMA nodes offline
4369 ++ * (e.g. in a KDUMP kernel) or with NUMA disabled via
4370 ++ * "numa=off". In those cases, adjust the host provided
4371 ++ * NUMA node to a valid NUMA node used by the kernel.
4372 ++ */
4373 ++ set_dev_node(&dev->dev,
4374 ++ numa_map_to_online_node(
4375 ++ hv_dev->desc.virtual_numa_node));
4376 +
4377 + put_pcichild(hv_dev);
4378 + }
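
The pci-hyperv hunk stops trusting the host-provided NUMA node blindly: when some nodes are offline (a kdump kernel, or numa=off) the value is range-checked and then mapped to a node the kernel actually has online. A reduced sketch of that clamp-and-map idea (hypothetical names, not the kernel helpers):

#include <stdio.h>
#include <stdbool.h>

/* map an externally supplied node id to an online node, or -1 */
static int map_to_online_node(int requested, const bool *online, int nr)
{
    if (requested >= 0 && requested < nr && online[requested])
        return requested;
    for (int n = 0; n < nr; n++)    /* fall back to any online node */
        if (online[n])
            return n;
    return -1;
}

int main(void)
{
    bool online[4] = { true, false, false, false };  /* kdump-like */
    printf("node %d\n", map_to_online_node(2, online, 4));  /* -> 0 */
    return 0;
}
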
4379 +diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
4380 +index 116fb23aebd99..0f1deb6e0eabf 100644
4381 +--- a/drivers/phy/broadcom/phy-brcm-usb.c
4382 ++++ b/drivers/phy/broadcom/phy-brcm-usb.c
4383 +@@ -18,6 +18,7 @@
4384 + #include <linux/soc/brcmstb/brcmstb.h>
4385 + #include <dt-bindings/phy/phy.h>
4386 + #include <linux/mfd/syscon.h>
4387 ++#include <linux/suspend.h>
4388 +
4389 + #include "phy-brcm-usb-init.h"
4390 +
4391 +@@ -70,12 +71,35 @@ struct brcm_usb_phy_data {
4392 + int init_count;
4393 + int wake_irq;
4394 + struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX];
4395 ++ struct notifier_block pm_notifier;
4396 ++ bool pm_active;
4397 + };
4398 +
4399 + static s8 *node_reg_names[BRCM_REGS_MAX] = {
4400 + "crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
4401 + };
4402 +
4403 ++static int brcm_pm_notifier(struct notifier_block *notifier,
4404 ++ unsigned long pm_event,
4405 ++ void *unused)
4406 ++{
4407 ++ struct brcm_usb_phy_data *priv =
4408 ++ container_of(notifier, struct brcm_usb_phy_data, pm_notifier);
4409 ++
4410 ++ switch (pm_event) {
4411 ++ case PM_HIBERNATION_PREPARE:
4412 ++ case PM_SUSPEND_PREPARE:
4413 ++ priv->pm_active = true;
4414 ++ break;
4415 ++ case PM_POST_RESTORE:
4416 ++ case PM_POST_HIBERNATION:
4417 ++ case PM_POST_SUSPEND:
4418 ++ priv->pm_active = false;
4419 ++ break;
4420 ++ }
4421 ++ return NOTIFY_DONE;
4422 ++}
4423 ++
4424 + static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
4425 + {
4426 + struct phy *gphy = dev_id;
4427 +@@ -91,6 +115,9 @@ static int brcm_usb_phy_init(struct phy *gphy)
4428 + struct brcm_usb_phy_data *priv =
4429 + container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
4430 +
4431 ++ if (priv->pm_active)
4432 ++ return 0;
4433 ++
4434 + /*
4435 + * Use a lock to make sure a second caller waits until
4436 + * the base phy is inited before using it.
4437 +@@ -120,6 +147,9 @@ static int brcm_usb_phy_exit(struct phy *gphy)
4438 + struct brcm_usb_phy_data *priv =
4439 + container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
4440 +
4441 ++ if (priv->pm_active)
4442 ++ return 0;
4443 ++
4444 + dev_dbg(&gphy->dev, "EXIT\n");
4445 + if (phy->id == BRCM_USB_PHY_2_0)
4446 + brcm_usb_uninit_eohci(&priv->ini);
4447 +@@ -488,6 +518,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
4448 + if (err)
4449 + return err;
4450 +
4451 ++ priv->pm_notifier.notifier_call = brcm_pm_notifier;
4452 ++ register_pm_notifier(&priv->pm_notifier);
4453 ++
4454 + mutex_init(&priv->mutex);
4455 +
4456 + /* make sure invert settings are correct */
4457 +@@ -528,7 +561,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
4458 +
4459 + static int brcm_usb_phy_remove(struct platform_device *pdev)
4460 + {
4461 ++ struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev);
4462 ++
4463 + sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group);
4464 ++ unregister_pm_notifier(&priv->pm_notifier);
4465 +
4466 + return 0;
4467 + }
4468 +@@ -539,6 +575,7 @@ static int brcm_usb_phy_suspend(struct device *dev)
4469 + struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
4470 +
4471 + if (priv->init_count) {
4472 ++ dev_dbg(dev, "SUSPEND\n");
4473 + priv->ini.wake_enabled = device_may_wakeup(dev);
4474 + if (priv->phys[BRCM_USB_PHY_3_0].inited)
4475 + brcm_usb_uninit_xhci(&priv->ini);
4476 +@@ -578,6 +615,7 @@ static int brcm_usb_phy_resume(struct device *dev)
4477 + * Uninitialize anything that wasn't previously initialized.
4478 + */
4479 + if (priv->init_count) {
4480 ++ dev_dbg(dev, "RESUME\n");
4481 + if (priv->wake_irq >= 0)
4482 + disable_irq_wake(priv->wake_irq);
4483 + brcm_usb_init_common(&priv->ini);
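
The phy-brcm-usb changes register a system-wide PM notifier that raises a pm_active flag for the whole suspend/hibernate window; phy init/exit callbacks that land inside that window become no-ops, leaving the driver's own suspend/resume hooks in sole control of the hardware. A compact sketch of the gate (illustrative names):

#include <stdbool.h>
#include <stdio.h>

struct phy_priv { bool pm_active; };

/* notifier body: set on *_PREPARE events, clear on POST_* events */
static void pm_event(struct phy_priv *p, bool entering)
{
    p->pm_active = entering;
}

static int phy_init(struct phy_priv *p)
{
    if (p->pm_active)   /* mid-transition: refuse quietly */
        return 0;
    puts("real init work");
    return 0;
}

int main(void)
{
    struct phy_priv p = { false };

    pm_event(&p, true);   /* PM_SUSPEND_PREPARE */
    phy_init(&p);         /* no-op */
    pm_event(&p, false);  /* PM_POST_SUSPEND */
    phy_init(&p);         /* runs */
    return 0;
}
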
4484 +diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
4485 +index 98a942c607a67..db39b0c4649a2 100644
4486 +--- a/drivers/phy/mediatek/phy-mtk-tphy.c
4487 ++++ b/drivers/phy/mediatek/phy-mtk-tphy.c
4488 +@@ -1125,7 +1125,7 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc
4489 + /* no efuse, ignore it */
4490 + if (!instance->efuse_intr &&
4491 + !instance->efuse_rx_imp &&
4492 +- !instance->efuse_rx_imp) {
4493 ++ !instance->efuse_tx_imp) {
4494 + dev_warn(dev, "no u3 intr efuse, but dts enable it\n");
4495 + instance->efuse_sw_en = 0;
4496 + break;
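
The phy-mtk-tphy hunk is a one-token copy-paste fix: the guard tested efuse_rx_imp twice and never looked at efuse_tx_imp, so a part with only the TX value fused would be misreported as having no efuse data. The bug class, reduced to a compilable example (illustrative variables):

#include <stdio.h>

int main(void)
{
    int intr = 0, rx_imp = 0, tx_imp = 7;

    /* buggy:  if (!intr && !rx_imp && !rx_imp)  -- tx never checked */
    if (!intr && !rx_imp && !tx_imp)
        puts("no efuse data, fall back to defaults");
    else
        puts("efuse data present");  /* correct for this input */
    return 0;
}
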
4497 +diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
4498 +index 8fc1feedd8617..5116b014e2a4f 100644
4499 +--- a/drivers/pinctrl/bcm/Kconfig
4500 ++++ b/drivers/pinctrl/bcm/Kconfig
4501 +@@ -35,6 +35,7 @@ config PINCTRL_BCM63XX
4502 + select PINCONF
4503 + select GENERIC_PINCONF
4504 + select GPIOLIB
4505 ++ select REGMAP
4506 + select GPIO_REGMAP
4507 +
4508 + config PINCTRL_BCM6318
4509 +diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
4510 +index 230593ae5d6de..8c74733530e3d 100644
4511 +--- a/drivers/platform/x86/amd-pmc.c
4512 ++++ b/drivers/platform/x86/amd-pmc.c
4513 +@@ -117,9 +117,10 @@ struct amd_pmc_dev {
4514 + u32 cpu_id;
4515 + u32 active_ips;
4516 + /* SMU version information */
4517 +- u16 major;
4518 +- u16 minor;
4519 +- u16 rev;
4520 ++ u8 smu_program;
4521 ++ u8 major;
4522 ++ u8 minor;
4523 ++ u8 rev;
4524 + struct device *dev;
4525 + struct mutex lock; /* generic mutex lock */
4526 + #if IS_ENABLED(CONFIG_DEBUG_FS)
4527 +@@ -166,11 +167,13 @@ static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
4528 + if (rc)
4529 + return rc;
4530 +
4531 +- dev->major = (val >> 16) & GENMASK(15, 0);
4532 ++ dev->smu_program = (val >> 24) & GENMASK(7, 0);
4533 ++ dev->major = (val >> 16) & GENMASK(7, 0);
4534 + dev->minor = (val >> 8) & GENMASK(7, 0);
4535 + dev->rev = (val >> 0) & GENMASK(7, 0);
4536 +
4537 +- dev_dbg(dev->dev, "SMU version is %u.%u.%u\n", dev->major, dev->minor, dev->rev);
4538 ++ dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
4539 ++ dev->smu_program, dev->major, dev->minor, dev->rev);
4540 +
4541 + return 0;
4542 + }
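
The amd-pmc hunk fixes the decoding of the packed SMU version word: bits [31:24] carry a program id, and the old u16 major field masked with GENMASK(15, 0) folded that id into the major number. A stand-alone decoding demo, assuming the byte layout shown in the hunk:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t val = 0x01020304;  /* program 1, version 2.3.4 */
    uint8_t program = (val >> 24) & 0xff;
    uint8_t major   = (val >> 16) & 0xff;  /* old: (val >> 16) & 0xffff -> 0x0102 */
    uint8_t minor   = (val >> 8) & 0xff;
    uint8_t rev     = val & 0xff;

    printf("SMU program %u version %u.%u.%u\n", program, major, minor, rev);
    return 0;
}
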
4543 +diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
4544 +index c9a85eb2e8600..e8424e70d81d2 100644
4545 +--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
4546 ++++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
4547 +@@ -596,7 +596,10 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
4548 + return ret;
4549 + }
4550 +
4551 +-static DEFINE_MUTEX(punit_misc_dev_lock);
4552 ++/* Lock to prevent module registration when already opened by user space */
4553 ++static DEFINE_MUTEX(punit_misc_dev_open_lock);
4554 ++/* Lock to allow one shared misc device for all ISST interfaces */
4555 ++static DEFINE_MUTEX(punit_misc_dev_reg_lock);
4556 + static int misc_usage_count;
4557 + static int misc_device_ret;
4558 + static int misc_device_open;
4559 +@@ -606,7 +609,7 @@ static int isst_if_open(struct inode *inode, struct file *file)
4560 + int i, ret = 0;
4561 +
4562 + /* Fail open, if a module is going away */
4563 +- mutex_lock(&punit_misc_dev_lock);
4564 ++ mutex_lock(&punit_misc_dev_open_lock);
4565 + for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
4566 + struct isst_if_cmd_cb *cb = &punit_callbacks[i];
4567 +
4568 +@@ -628,7 +631,7 @@ static int isst_if_open(struct inode *inode, struct file *file)
4569 + } else {
4570 + misc_device_open++;
4571 + }
4572 +- mutex_unlock(&punit_misc_dev_lock);
4573 ++ mutex_unlock(&punit_misc_dev_open_lock);
4574 +
4575 + return ret;
4576 + }
4577 +@@ -637,7 +640,7 @@ static int isst_if_relase(struct inode *inode, struct file *f)
4578 + {
4579 + int i;
4580 +
4581 +- mutex_lock(&punit_misc_dev_lock);
4582 ++ mutex_lock(&punit_misc_dev_open_lock);
4583 + misc_device_open--;
4584 + for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
4585 + struct isst_if_cmd_cb *cb = &punit_callbacks[i];
4586 +@@ -645,7 +648,7 @@ static int isst_if_relase(struct inode *inode, struct file *f)
4587 + if (cb->registered)
4588 + module_put(cb->owner);
4589 + }
4590 +- mutex_unlock(&punit_misc_dev_lock);
4591 ++ mutex_unlock(&punit_misc_dev_open_lock);
4592 +
4593 + return 0;
4594 + }
4595 +@@ -662,6 +665,43 @@ static struct miscdevice isst_if_char_driver = {
4596 + .fops = &isst_if_char_driver_ops,
4597 + };
4598 +
4599 ++static int isst_misc_reg(void)
4600 ++{
4601 ++ mutex_lock(&punit_misc_dev_reg_lock);
4602 ++ if (misc_device_ret)
4603 ++ goto unlock_exit;
4604 ++
4605 ++ if (!misc_usage_count) {
4606 ++ misc_device_ret = isst_if_cpu_info_init();
4607 ++ if (misc_device_ret)
4608 ++ goto unlock_exit;
4609 ++
4610 ++ misc_device_ret = misc_register(&isst_if_char_driver);
4611 ++ if (misc_device_ret) {
4612 ++ isst_if_cpu_info_exit();
4613 ++ goto unlock_exit;
4614 ++ }
4615 ++ }
4616 ++ misc_usage_count++;
4617 ++
4618 ++unlock_exit:
4619 ++ mutex_unlock(&punit_misc_dev_reg_lock);
4620 ++
4621 ++ return misc_device_ret;
4622 ++}
4623 ++
4624 ++static void isst_misc_unreg(void)
4625 ++{
4626 ++ mutex_lock(&punit_misc_dev_reg_lock);
4627 ++ if (misc_usage_count)
4628 ++ misc_usage_count--;
4629 ++ if (!misc_usage_count && !misc_device_ret) {
4630 ++ misc_deregister(&isst_if_char_driver);
4631 ++ isst_if_cpu_info_exit();
4632 ++ }
4633 ++ mutex_unlock(&punit_misc_dev_reg_lock);
4634 ++}
4635 ++
4636 + /**
4637 + * isst_if_cdev_register() - Register callback for IOCTL
4638 + * @device_type: The device type this callback handling.
4639 +@@ -679,38 +719,31 @@ static struct miscdevice isst_if_char_driver = {
4640 + */
4641 + int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
4642 + {
4643 +- if (misc_device_ret)
4644 +- return misc_device_ret;
4645 ++ int ret;
4646 +
4647 + if (device_type >= ISST_IF_DEV_MAX)
4648 + return -EINVAL;
4649 +
4650 +- mutex_lock(&punit_misc_dev_lock);
4651 ++ mutex_lock(&punit_misc_dev_open_lock);
4652 ++ /* Device is already open, we don't want to add new callbacks */
4653 + if (misc_device_open) {
4654 +- mutex_unlock(&punit_misc_dev_lock);
4655 ++ mutex_unlock(&punit_misc_dev_open_lock);
4656 + return -EAGAIN;
4657 + }
4658 +- if (!misc_usage_count) {
4659 +- int ret;
4660 +-
4661 +- misc_device_ret = misc_register(&isst_if_char_driver);
4662 +- if (misc_device_ret)
4663 +- goto unlock_exit;
4664 +-
4665 +- ret = isst_if_cpu_info_init();
4666 +- if (ret) {
4667 +- misc_deregister(&isst_if_char_driver);
4668 +- misc_device_ret = ret;
4669 +- goto unlock_exit;
4670 +- }
4671 +- }
4672 + memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
4673 + punit_callbacks[device_type].registered = 1;
4674 +- misc_usage_count++;
4675 +-unlock_exit:
4676 +- mutex_unlock(&punit_misc_dev_lock);
4677 ++ mutex_unlock(&punit_misc_dev_open_lock);
4678 +
4679 +- return misc_device_ret;
4680 ++ ret = isst_misc_reg();
4681 ++ if (ret) {
4682 ++ /*
4683 ++ * No need for the mutex here, as the misc device registration failed
4684 ++ * and no one can open the device yet. Hence no contention.
4685 ++ */
4686 ++ punit_callbacks[device_type].registered = 0;
4687 ++ return ret;
4688 ++ }
4689 ++ return 0;
4690 + }
4691 + EXPORT_SYMBOL_GPL(isst_if_cdev_register);
4692 +
4693 +@@ -725,16 +758,12 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register);
4694 + */
4695 + void isst_if_cdev_unregister(int device_type)
4696 + {
4697 +- mutex_lock(&punit_misc_dev_lock);
4698 +- misc_usage_count--;
4699 ++ isst_misc_unreg();
4700 ++ mutex_lock(&punit_misc_dev_open_lock);
4701 + punit_callbacks[device_type].registered = 0;
4702 + if (device_type == ISST_IF_DEV_MBOX)
4703 + isst_delete_hash();
4704 +- if (!misc_usage_count && !misc_device_ret) {
4705 +- misc_deregister(&isst_if_char_driver);
4706 +- isst_if_cpu_info_exit();
4707 +- }
4708 +- mutex_unlock(&punit_misc_dev_lock);
4709 ++ mutex_unlock(&punit_misc_dev_open_lock);
4710 + }
4711 + EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
4712 +
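
The speed-select refactor splits one mutex into two: an open lock that serializes file open/close against callback registration, and a registration lock that guards the shared misc device's refcount, so registering a second interface module no longer contends with user space holding the device open. A sketch of the split with POSIX threads (hypothetical names):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t open_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t reg_lock  = PTHREAD_MUTEX_INITIALIZER;
static int usage_count;
static bool device_open;

/* shared-device lifetime: guarded only by reg_lock */
static void shared_dev_get(void)
{
    pthread_mutex_lock(&reg_lock);
    if (!usage_count++) {
        /* first user: register the one misc device here */
    }
    pthread_mutex_unlock(&reg_lock);
}

/* per-open callback state: guarded only by open_lock */
static int try_register_callback(void)
{
    int ret = 0;

    pthread_mutex_lock(&open_lock);
    if (device_open)    /* don't swap callbacks under a user */
        ret = -1;
    pthread_mutex_unlock(&open_lock);
    return ret;
}

int main(void)
{
    shared_dev_get();
    return try_register_callback();
}
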
4713 +diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
4714 +index 17dd54d4b783c..e318b40949679 100644
4715 +--- a/drivers/platform/x86/touchscreen_dmi.c
4716 ++++ b/drivers/platform/x86/touchscreen_dmi.c
4717 +@@ -773,6 +773,21 @@ static const struct ts_dmi_data predia_basic_data = {
4718 + .properties = predia_basic_props,
4719 + };
4720 +
4721 ++static const struct property_entry rwc_nanote_p8_props[] = {
4722 ++ PROPERTY_ENTRY_U32("touchscreen-min-y", 46),
4723 ++ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
4724 ++ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
4725 ++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
4726 ++ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"),
4727 ++ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
4728 ++ { }
4729 ++};
4730 ++
4731 ++static const struct ts_dmi_data rwc_nanote_p8_data = {
4732 ++ .acpi_name = "MSSL1680:00",
4733 ++ .properties = rwc_nanote_p8_props,
4734 ++};
4735 ++
4736 + static const struct property_entry schneider_sct101ctm_props[] = {
4737 + PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
4738 + PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
4739 +@@ -1406,6 +1421,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
4740 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
4741 + },
4742 + },
4743 ++ {
4744 ++ /* RWC NANOTE P8 */
4745 ++ .driver_data = (void *)&rwc_nanote_p8_data,
4746 ++ .matches = {
4747 ++ DMI_MATCH(DMI_BOARD_VENDOR, "Default string"),
4748 ++ DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"),
4749 ++ DMI_MATCH(DMI_PRODUCT_SKU, "0001")
4750 ++ },
4751 ++ },
4752 + {
4753 + /* Schneider SCT101CTM */
4754 + .driver_data = (void *)&schneider_sct101ctm_data,
4755 +diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
4756 +index f74a1c09c3518..936a7c067eef9 100644
4757 +--- a/drivers/scsi/lpfc/lpfc.h
4758 ++++ b/drivers/scsi/lpfc/lpfc.h
4759 +@@ -594,6 +594,7 @@ struct lpfc_vport {
4760 + #define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
4761 + #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
4762 + #define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
4763 ++#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */
4764 + #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
4765 + #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
4766 + #define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
4767 +diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
4768 +index bac78fbce8d6e..fa8415259cb8a 100644
4769 +--- a/drivers/scsi/lpfc/lpfc_attr.c
4770 ++++ b/drivers/scsi/lpfc/lpfc_attr.c
4771 +@@ -1315,6 +1315,9 @@ lpfc_issue_lip(struct Scsi_Host *shost)
4772 + pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
4773 + pmboxq->u.mb.mbxOwner = OWN_HOST;
4774 +
4775 ++ if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
4776 ++ vport->fc_flag &= ~FC_PT2PT_NO_NVME;
4777 ++
4778 + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
4779 +
4780 + if ((mbxstatus == MBX_SUCCESS) &&
4781 +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
4782 +index 78024f11b794a..dcfa47165acdf 100644
4783 +--- a/drivers/scsi/lpfc/lpfc_els.c
4784 ++++ b/drivers/scsi/lpfc/lpfc_els.c
4785 +@@ -1072,7 +1072,8 @@ stop_rr_fcf_flogi:
4786 +
4787 + /* FLOGI failed, so there is no fabric */
4788 + spin_lock_irq(shost->host_lock);
4789 +- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4790 ++ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
4791 ++ FC_PT2PT_NO_NVME);
4792 + spin_unlock_irq(shost->host_lock);
4793 +
4794 + /* If private loop, then allow max outstanding els to be
4795 +@@ -4607,6 +4608,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4796 + /* Added for Vendor specific support
4797 + * Just keep retrying for these Rsn / Exp codes
4798 + */
4799 ++ if ((vport->fc_flag & FC_PT2PT) &&
4800 ++ cmd == ELS_CMD_NVMEPRLI) {
4801 ++ switch (stat.un.b.lsRjtRsnCode) {
4802 ++ case LSRJT_UNABLE_TPC:
4803 ++ case LSRJT_INVALID_CMD:
4804 ++ case LSRJT_LOGICAL_ERR:
4805 ++ case LSRJT_CMD_UNSUPPORTED:
4806 ++ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
4807 ++ "0168 NVME PRLI LS_RJT "
4808 ++ "reason %x port doesn't "
4809 ++ "support NVME, disabling NVME\n",
4810 ++ stat.un.b.lsRjtRsnCode);
4811 ++ retry = 0;
4812 ++ vport->fc_flag |= FC_PT2PT_NO_NVME;
4813 ++ goto out_retry;
4814 ++ }
4815 ++ }
4816 + switch (stat.un.b.lsRjtRsnCode) {
4817 + case LSRJT_UNABLE_TPC:
4818 + /* The driver has a VALID PLOGI but the rport has
4819 +diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
4820 +index 7d717a4ac14d1..fdf5e777bf113 100644
4821 +--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
4822 ++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
4823 +@@ -1961,8 +1961,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
4824 + * is configured try it.
4825 + */
4826 + ndlp->nlp_fc4_type |= NLP_FC4_FCP;
4827 +- if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4828 +- (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
4829 ++ if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
4830 ++ (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
4831 ++ vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
4832 + ndlp->nlp_fc4_type |= NLP_FC4_NVME;
4833 + /* We need to update the localport also */
4834 + lpfc_nvme_update_localport(vport);
4835 +diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
4836 +index 4390c8b9170cd..066290dd57565 100644
4837 +--- a/drivers/scsi/pm8001/pm8001_hwi.c
4838 ++++ b/drivers/scsi/pm8001/pm8001_hwi.c
4839 +@@ -2695,7 +2695,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
4840 + u32 tag = le32_to_cpu(psataPayload->tag);
4841 + u32 port_id = le32_to_cpu(psataPayload->port_id);
4842 + u32 dev_id = le32_to_cpu(psataPayload->device_id);
4843 +- unsigned long flags;
4844 +
4845 + ccb = &pm8001_ha->ccb_info[tag];
4846 +
4847 +@@ -2735,8 +2734,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
4848 + ts->resp = SAS_TASK_COMPLETE;
4849 + ts->stat = SAS_DATA_OVERRUN;
4850 + ts->residual = 0;
4851 +- if (pm8001_dev)
4852 +- atomic_dec(&pm8001_dev->running_req);
4853 + break;
4854 + case IO_XFER_ERROR_BREAK:
4855 + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
4856 +@@ -2778,7 +2775,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
4857 + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
4858 + ts->resp = SAS_TASK_COMPLETE;
4859 + ts->stat = SAS_QUEUE_FULL;
4860 +- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
4861 + return;
4862 + }
4863 + break;
4864 +@@ -2864,20 +2860,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
4865 + ts->stat = SAS_OPEN_TO;
4866 + break;
4867 + }
4868 +- spin_lock_irqsave(&t->task_state_lock, flags);
4869 +- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
4870 +- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
4871 +- t->task_state_flags |= SAS_TASK_STATE_DONE;
4872 +- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
4873 +- spin_unlock_irqrestore(&t->task_state_lock, flags);
4874 +- pm8001_dbg(pm8001_ha, FAIL,
4875 +- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
4876 +- t, event, ts->resp, ts->stat);
4877 +- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
4878 +- } else {
4879 +- spin_unlock_irqrestore(&t->task_state_lock, flags);
4880 +- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
4881 +- }
4882 + }
4883 +
4884 + /*See the comments for mpi_ssp_completion */
4885 +diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
4886 +index 83e73009db5cd..c0b45b8a513d7 100644
4887 +--- a/drivers/scsi/pm8001/pm8001_sas.c
4888 ++++ b/drivers/scsi/pm8001/pm8001_sas.c
4889 +@@ -753,8 +753,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
4890 + res = -TMF_RESP_FUNC_FAILED;
4891 + /* Even TMF timed out, return direct. */
4892 + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
4893 ++ struct pm8001_ccb_info *ccb = task->lldd_task;
4894 ++
4895 + pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
4896 + tmf->tmf);
4897 ++
4898 ++ if (ccb)
4899 ++ ccb->task = NULL;
4900 + goto ex_err;
4901 + }
4902 +
4903 +diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
4904 +index 4c5b945bf3187..ca4820d99dc70 100644
4905 +--- a/drivers/scsi/pm8001/pm80xx_hwi.c
4906 ++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
4907 +@@ -2184,9 +2184,9 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
4908 + pm8001_dbg(pm8001_ha, FAIL,
4909 + "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
4910 + t, status, ts->resp, ts->stat);
4911 ++ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
4912 + if (t->slow_task)
4913 + complete(&t->slow_task->completion);
4914 +- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
4915 + } else {
4916 + spin_unlock_irqrestore(&t->task_state_lock, flags);
4917 + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
4918 +@@ -2801,9 +2801,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
4919 + pm8001_dbg(pm8001_ha, FAIL,
4920 + "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
4921 + t, status, ts->resp, ts->stat);
4922 ++ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
4923 + if (t->slow_task)
4924 + complete(&t->slow_task->completion);
4925 +- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
4926 + } else {
4927 + spin_unlock_irqrestore(&t->task_state_lock, flags);
4928 + spin_unlock_irqrestore(&circularQ->oq_lock,
4929 +@@ -2828,7 +2828,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
4930 + u32 tag = le32_to_cpu(psataPayload->tag);
4931 + u32 port_id = le32_to_cpu(psataPayload->port_id);
4932 + u32 dev_id = le32_to_cpu(psataPayload->device_id);
4933 +- unsigned long flags;
4934 +
4935 + ccb = &pm8001_ha->ccb_info[tag];
4936 +
4937 +@@ -2866,8 +2865,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
4938 + ts->resp = SAS_TASK_COMPLETE;
4939 + ts->stat = SAS_DATA_OVERRUN;
4940 + ts->residual = 0;
4941 +- if (pm8001_dev)
4942 +- atomic_dec(&pm8001_dev->running_req);
4943 + break;
4944 + case IO_XFER_ERROR_BREAK:
4945 + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
4946 +@@ -2916,11 +2913,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
4947 + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
4948 + ts->resp = SAS_TASK_COMPLETE;
4949 + ts->stat = SAS_QUEUE_FULL;
4950 +- spin_unlock_irqrestore(&circularQ->oq_lock,
4951 +- circularQ->lock_flags);
4952 +- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
4953 +- spin_lock_irqsave(&circularQ->oq_lock,
4954 +- circularQ->lock_flags);
4955 + return;
4956 + }
4957 + break;
4958 +@@ -3020,24 +3012,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
4959 + ts->stat = SAS_OPEN_TO;
4960 + break;
4961 + }
4962 +- spin_lock_irqsave(&t->task_state_lock, flags);
4963 +- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
4964 +- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
4965 +- t->task_state_flags |= SAS_TASK_STATE_DONE;
4966 +- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
4967 +- spin_unlock_irqrestore(&t->task_state_lock, flags);
4968 +- pm8001_dbg(pm8001_ha, FAIL,
4969 +- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
4970 +- t, event, ts->resp, ts->stat);
4971 +- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
4972 +- } else {
4973 +- spin_unlock_irqrestore(&t->task_state_lock, flags);
4974 +- spin_unlock_irqrestore(&circularQ->oq_lock,
4975 +- circularQ->lock_flags);
4976 +- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
4977 +- spin_lock_irqsave(&circularQ->oq_lock,
4978 +- circularQ->lock_flags);
4979 +- }
4980 + }
4981 +
4982 + /*See the comments for mpi_ssp_completion */
4983 +diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
4984 +index 5916ed7662d56..4eb89aa4a39dc 100644
4985 +--- a/drivers/scsi/qedi/qedi_fw.c
4986 ++++ b/drivers/scsi/qedi/qedi_fw.c
4987 +@@ -771,11 +771,10 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
4988 + qedi_cmd->list_tmf_work = NULL;
4989 + }
4990 + }
4991 ++ spin_unlock_bh(&qedi_conn->tmf_work_lock);
4992 +
4993 +- if (!found) {
4994 +- spin_unlock_bh(&qedi_conn->tmf_work_lock);
4995 ++ if (!found)
4996 + goto check_cleanup_reqs;
4997 +- }
4998 +
4999 + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
5000 + "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
5001 +@@ -806,7 +805,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
5002 + qedi_cmd->state = CLEANUP_RECV;
5003 + unlock:
5004 + spin_unlock_bh(&conn->session->back_lock);
5005 +- spin_unlock_bh(&qedi_conn->tmf_work_lock);
5006 + wake_up_interruptible(&qedi_conn->wait_queue);
5007 + return;
5008 +
5009 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
5010 +index 23e1c0acdeaee..d0ce723299bf7 100644
5011 +--- a/drivers/scsi/scsi_scan.c
5012 ++++ b/drivers/scsi/scsi_scan.c
5013 +@@ -214,6 +214,48 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
5014 + SCSI_TIMEOUT, 3, NULL);
5015 + }
5016 +
5017 ++static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
5018 ++ unsigned int depth)
5019 ++{
5020 ++ int new_shift = sbitmap_calculate_shift(depth);
5021 ++ bool need_alloc = !sdev->budget_map.map;
5022 ++ bool need_free = false;
5023 ++ int ret;
5024 ++ struct sbitmap sb_backup;
5025 ++
5026 ++ /*
5027 ++ * Reallocate if a new shift is calculated, which happens when a new
5028 ++ * default queue depth is set up after calling ->slave_configure
5029 ++ */
5030 ++ if (!need_alloc && new_shift != sdev->budget_map.shift)
5031 ++ need_alloc = need_free = true;
5032 ++
5033 ++ if (!need_alloc)
5034 ++ return 0;
5035 ++
5036 ++ /*
5037 ++ * The request queue has to be frozen while reallocating the budget
5038 ++ * map; the disk isn't added yet here, so freezing is pretty fast
5039 ++ */
5040 ++ if (need_free) {
5041 ++ blk_mq_freeze_queue(sdev->request_queue);
5042 ++ sb_backup = sdev->budget_map;
5043 ++ }
5044 ++ ret = sbitmap_init_node(&sdev->budget_map,
5045 ++ scsi_device_max_queue_depth(sdev),
5046 ++ new_shift, GFP_KERNEL,
5047 ++ sdev->request_queue->node, false, true);
5048 ++ if (need_free) {
5049 ++ if (ret)
5050 ++ sdev->budget_map = sb_backup;
5051 ++ else
5052 ++ sbitmap_free(&sb_backup);
5053 ++ ret = 0;
5054 ++ blk_mq_unfreeze_queue(sdev->request_queue);
5055 ++ }
5056 ++ return ret;
5057 ++}
5058 ++
5059 + /**
5060 + * scsi_alloc_sdev - allocate and setup a scsi_Device
5061 + * @starget: which target to allocate a &scsi_device for
5062 +@@ -306,11 +348,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
5063 + * default device queue depth to figure out sbitmap shift
5064 + * since we use this queue depth most of times.
5065 + */
5066 +- if (sbitmap_init_node(&sdev->budget_map,
5067 +- scsi_device_max_queue_depth(sdev),
5068 +- sbitmap_calculate_shift(depth),
5069 +- GFP_KERNEL, sdev->request_queue->node,
5070 +- false, true)) {
5071 ++ if (scsi_realloc_sdev_budget_map(sdev, depth)) {
5072 + put_device(&starget->dev);
5073 + kfree(sdev);
5074 + goto out;
5075 +@@ -1017,6 +1055,13 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
5076 + }
5077 + return SCSI_SCAN_NO_RESPONSE;
5078 + }
5079 ++
5080 ++ /*
5081 ++ * The queue_depth is often changed in ->slave_configure.
5082 ++ * Set up budget map again since memory consumption of
5083 ++ * the map depends on actual queue depth.
5084 ++ */
5085 ++ scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
5086 + }
5087 +
5088 + if (sdev->scsi_level >= SCSI_3)
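
scsi_realloc_sdev_budget_map() re-sizes the per-device budget bitmap after ->slave_configure has chosen the real queue depth, freezing the queue around the swap and restoring a backup if the new allocation fails. The backup-and-rollback shape, reduced to plain C with illustrative types:

#include <stdlib.h>

struct map { unsigned long *bits; int shift; };

static int map_init(struct map *m, int shift)
{
    m->bits = calloc(1UL << shift, sizeof(*m->bits));
    m->shift = shift;
    return m->bits ? 0 : -1;
}

/* reallocate only when the shift changes; never leave *m invalid */
static int map_realloc(struct map *m, int new_shift)
{
    struct map backup;

    if (m->bits && m->shift == new_shift)
        return 0;                /* nothing to do */
    backup = *m;                 /* keep the old map alive */
    if (map_init(m, new_shift) != 0) {
        *m = backup;             /* roll back on failure */
        return m->bits ? 0 : -1;
    }
    free(backup.bits);           /* success: drop the backup */
    return 0;
}

int main(void)
{
    struct map m = { 0 };
    return map_realloc(&m, 5);
}
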
5089 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
5090 +index ec7d7e01231d7..9e7aa3a2fdf54 100644
5091 +--- a/drivers/scsi/ufs/ufshcd.c
5092 ++++ b/drivers/scsi/ufs/ufshcd.c
5093 +@@ -128,8 +128,9 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
5094 + enum {
5095 + UFSHCD_MAX_CHANNEL = 0,
5096 + UFSHCD_MAX_ID = 1,
5097 +- UFSHCD_CMD_PER_LUN = 32,
5098 +- UFSHCD_CAN_QUEUE = 32,
5099 ++ UFSHCD_NUM_RESERVED = 1,
5100 ++ UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
5101 ++ UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
5102 + };
5103 +
5104 + static const char *const ufshcd_state_name[] = {
5105 +@@ -2194,6 +2195,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
5106 + hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
5107 + hba->nutmrs =
5108 + ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
5109 ++ hba->reserved_slot = hba->nutrs - 1;
5110 +
5111 + /* Read crypto capabilities */
5112 + err = ufshcd_hba_init_crypto_capabilities(hba);
5113 +@@ -2941,30 +2943,15 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
5114 + static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
5115 + enum dev_cmd_type cmd_type, int timeout)
5116 + {
5117 +- struct request_queue *q = hba->cmd_queue;
5118 + DECLARE_COMPLETION_ONSTACK(wait);
5119 +- struct request *req;
5120 ++ const u32 tag = hba->reserved_slot;
5121 + struct ufshcd_lrb *lrbp;
5122 + int err;
5123 +- int tag;
5124 +
5125 +- down_read(&hba->clk_scaling_lock);
5126 ++ /* Protects use of hba->reserved_slot. */
5127 ++ lockdep_assert_held(&hba->dev_cmd.lock);
5128 +
5129 +- /*
5130 +- * Get free slot, sleep if slots are unavailable.
5131 +- * Even though we use wait_event() which sleeps indefinitely,
5132 +- * the maximum wait time is bounded by SCSI request timeout.
5133 +- */
5134 +- req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
5135 +- if (IS_ERR(req)) {
5136 +- err = PTR_ERR(req);
5137 +- goto out_unlock;
5138 +- }
5139 +- tag = req->tag;
5140 +- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
5141 +- /* Set the timeout such that the SCSI error handler is not activated. */
5142 +- req->timeout = msecs_to_jiffies(2 * timeout);
5143 +- blk_mq_start_request(req);
5144 ++ down_read(&hba->clk_scaling_lock);
5145 +
5146 + lrbp = &hba->lrb[tag];
5147 + WARN_ON(lrbp->cmd);
5148 +@@ -2982,8 +2969,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
5149 + (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
5150 +
5151 + out:
5152 +- blk_mq_free_request(req);
5153 +-out_unlock:
5154 + up_read(&hba->clk_scaling_lock);
5155 + return err;
5156 + }
5157 +@@ -6716,28 +6701,16 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
5158 + enum dev_cmd_type cmd_type,
5159 + enum query_opcode desc_op)
5160 + {
5161 +- struct request_queue *q = hba->cmd_queue;
5162 + DECLARE_COMPLETION_ONSTACK(wait);
5163 +- struct request *req;
5164 ++ const u32 tag = hba->reserved_slot;
5165 + struct ufshcd_lrb *lrbp;
5166 + int err = 0;
5167 +- int tag;
5168 + u8 upiu_flags;
5169 +
5170 +- down_read(&hba->clk_scaling_lock);
5171 +-
5172 +- req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
5173 +- if (IS_ERR(req)) {
5174 +- err = PTR_ERR(req);
5175 +- goto out_unlock;
5176 +- }
5177 +- tag = req->tag;
5178 +- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
5179 ++ /* Protects use of hba->reserved_slot. */
5180 ++ lockdep_assert_held(&hba->dev_cmd.lock);
5181 +
5182 +- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
5183 +- err = -EBUSY;
5184 +- goto out;
5185 +- }
5186 ++ down_read(&hba->clk_scaling_lock);
5187 +
5188 + lrbp = &hba->lrb[tag];
5189 + WARN_ON(lrbp->cmd);
5190 +@@ -6806,9 +6779,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
5191 + ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
5192 + (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
5193 +
5194 +-out:
5195 +- blk_mq_free_request(req);
5196 +-out_unlock:
5197 + up_read(&hba->clk_scaling_lock);
5198 + return err;
5199 + }
5200 +@@ -9543,8 +9513,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
5201 + /* Configure LRB */
5202 + ufshcd_host_memory_configure(hba);
5203 +
5204 +- host->can_queue = hba->nutrs;
5205 +- host->cmd_per_lun = hba->nutrs;
5206 ++ host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
5207 ++ host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
5208 + host->max_id = UFSHCD_MAX_ID;
5209 + host->max_lun = UFS_MAX_LUNS;
5210 + host->max_channel = UFSHCD_MAX_CHANNEL;
5211 +diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
5212 +index 54750d72c8fb0..26fbf1b9ab156 100644
5213 +--- a/drivers/scsi/ufs/ufshcd.h
5214 ++++ b/drivers/scsi/ufs/ufshcd.h
5215 +@@ -744,6 +744,7 @@ struct ufs_hba_monitor {
5216 + * @capabilities: UFS Controller Capabilities
5217 + * @nutrs: Transfer Request Queue depth supported by controller
5218 + * @nutmrs: Task Management Queue depth supported by controller
5219 ++ * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
5220 + * @ufs_version: UFS Version to which controller complies
5221 + * @vops: pointer to variant specific operations
5222 + * @priv: pointer to variant specific private data
5223 +@@ -836,6 +837,7 @@ struct ufs_hba {
5224 + u32 capabilities;
5225 + int nutrs;
5226 + int nutmrs;
5227 ++ u32 reserved_slot;
5228 + u32 ufs_version;
5229 + const struct ufs_hba_variant_ops *vops;
5230 + struct ufs_hba_variant_params *vps;
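
The ufshcd change retires the blk_mq_alloc_request() dance for device-management commands: the last hardware tag is held back as a reserved slot used only under dev_cmd.lock, and the SCSI midlayer is told it may queue one fewer command. A minimal sketch of the accounting (names assumed to mirror the hunk, not the full driver):

#include <assert.h>
#include <stdio.h>

#define NUM_RESERVED 1

struct hba { int nutrs; int reserved_slot; int can_queue; };

static void hba_setup(struct hba *h, int hw_slots)
{
    h->nutrs = hw_slots;
    h->reserved_slot = h->nutrs - 1;         /* internal-only tag */
    h->can_queue = h->nutrs - NUM_RESERVED;  /* advertised to SCSI */
}

int main(void)
{
    struct hba h;

    hba_setup(&h, 32);
    assert(h.reserved_slot >= h.can_queue);  /* never handed out */
    printf("can_queue %d, reserved tag %d\n", h.can_queue, h.reserved_slot);
    return 0;
}
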
5231 +diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
5232 +index 72771e018c42e..258894ed234b3 100644
5233 +--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
5234 ++++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
5235 +@@ -306,10 +306,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
5236 + }
5237 +
5238 + lpc_ctrl->clk = devm_clk_get(dev, NULL);
5239 +- if (IS_ERR(lpc_ctrl->clk)) {
5240 +- dev_err(dev, "couldn't get clock\n");
5241 +- return PTR_ERR(lpc_ctrl->clk);
5242 +- }
5243 ++ if (IS_ERR(lpc_ctrl->clk))
5244 ++ return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk),
5245 ++ "couldn't get clock\n");
5246 + rc = clk_prepare_enable(lpc_ctrl->clk);
5247 + if (rc) {
5248 + dev_err(dev, "couldn't enable clock\n");
5249 +diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
5250 +index c650a32bcedff..b9505bb51f45c 100644
5251 +--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
5252 ++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
5253 +@@ -1058,15 +1058,27 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
5254 +
5255 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
5256 +
5257 ++ rcu_read_lock();
5258 + service = handle_to_service(handle);
5259 +- if (WARN_ON(!service))
5260 ++ if (WARN_ON(!service)) {
5261 ++ rcu_read_unlock();
5262 + return VCHIQ_SUCCESS;
5263 ++ }
5264 +
5265 + user_service = (struct user_service *)service->base.userdata;
5266 + instance = user_service->instance;
5267 +
5268 +- if (!instance || instance->closing)
5269 ++ if (!instance || instance->closing) {
5270 ++ rcu_read_unlock();
5271 + return VCHIQ_SUCCESS;
5272 ++ }
5273 ++
5274 ++ /*
5275 ++ * Since we hop between different synchronization mechanisms,
5276 ++ * taking an extra reference keeps the implementation simpler.
5277 ++ */
5278 ++ vchiq_service_get(service);
5279 ++ rcu_read_unlock();
5280 +
5281 + vchiq_log_trace(vchiq_arm_log_level,
5282 + "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
5283 +@@ -1097,6 +1109,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
5284 + bulk_userdata);
5285 + if (status != VCHIQ_SUCCESS) {
5286 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
5287 ++ vchiq_service_put(service);
5288 + return status;
5289 + }
5290 + }
5291 +@@ -1105,10 +1118,12 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
5292 + if (wait_for_completion_interruptible(&user_service->remove_event)) {
5293 + vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
5294 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
5295 ++ vchiq_service_put(service);
5296 + return VCHIQ_RETRY;
5297 + } else if (instance->closing) {
5298 + vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
5299 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
5300 ++ vchiq_service_put(service);
5301 + return VCHIQ_ERROR;
5302 + }
5303 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
5304 +@@ -1137,6 +1152,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
5305 + header = NULL;
5306 + }
5307 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
5308 ++ vchiq_service_put(service);
5309 +
5310 + if (skip_completion)
5311 + return VCHIQ_SUCCESS;
5312 +diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
5313 +index 2a66a5203d2fa..7bf9e888b6214 100644
5314 +--- a/drivers/tee/optee/core.c
5315 ++++ b/drivers/tee/optee/core.c
5316 +@@ -157,6 +157,7 @@ void optee_remove_common(struct optee *optee)
5317 + /* Unregister OP-TEE specific client devices on TEE bus */
5318 + optee_unregister_devices();
5319 +
5320 ++ teedev_close_context(optee->ctx);
5321 + /*
5322 + * The two devices have to be unregistered before we can free the
5323 + * other resources.
5324 +diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
5325 +index 28d7c0eafc025..39546110fc9a2 100644
5326 +--- a/drivers/tee/optee/ffa_abi.c
5327 ++++ b/drivers/tee/optee/ffa_abi.c
5328 +@@ -424,6 +424,7 @@ static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void)
5329 + */
5330 +
5331 + static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
5332 ++ struct optee *optee,
5333 + struct optee_msg_arg *arg)
5334 + {
5335 + struct tee_shm *shm;
5336 +@@ -439,7 +440,7 @@ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
5337 + shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
5338 + break;
5339 + case OPTEE_RPC_SHM_TYPE_KERNEL:
5340 +- shm = tee_shm_alloc(ctx, arg->params[0].u.value.b,
5341 ++ shm = tee_shm_alloc(optee->ctx, arg->params[0].u.value.b,
5342 + TEE_SHM_MAPPED | TEE_SHM_PRIV);
5343 + break;
5344 + default:
5345 +@@ -493,14 +494,13 @@ err_bad_param:
5346 + }
5347 +
5348 + static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
5349 ++ struct optee *optee,
5350 + struct optee_msg_arg *arg)
5351 + {
5352 +- struct optee *optee = tee_get_drvdata(ctx->teedev);
5353 +-
5354 + arg->ret_origin = TEEC_ORIGIN_COMMS;
5355 + switch (arg->cmd) {
5356 + case OPTEE_RPC_CMD_SHM_ALLOC:
5357 +- handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg);
5358 ++ handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg);
5359 + break;
5360 + case OPTEE_RPC_CMD_SHM_FREE:
5361 + handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
5362 +@@ -510,12 +510,12 @@ static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
5363 + }
5364 + }
5365 +
5366 +-static void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd,
5367 +- struct optee_msg_arg *arg)
5368 ++static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
5369 ++ u32 cmd, struct optee_msg_arg *arg)
5370 + {
5371 + switch (cmd) {
5372 + case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
5373 +- handle_ffa_rpc_func_cmd(ctx, arg);
5374 ++ handle_ffa_rpc_func_cmd(ctx, optee, arg);
5375 + break;
5376 + case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
5377 + /* Interrupt delivered by now */
5378 +@@ -582,7 +582,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
5379 + * above.
5380 + */
5381 + cond_resched();
5382 +- optee_handle_ffa_rpc(ctx, data->data1, rpc_arg);
5383 ++ optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
5384 + cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
5385 + data->data0 = cmd;
5386 + data->data1 = 0;
5387 +@@ -802,7 +802,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
5388 + {
5389 + const struct ffa_dev_ops *ffa_ops;
5390 + unsigned int rpc_arg_count;
5391 ++ struct tee_shm_pool *pool;
5392 + struct tee_device *teedev;
5393 ++ struct tee_context *ctx;
5394 + struct optee *optee;
5395 + int rc;
5396 +
5397 +@@ -822,12 +824,12 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
5398 + if (!optee)
5399 + return -ENOMEM;
5400 +
5401 +- optee->pool = optee_ffa_config_dyn_shm();
5402 +- if (IS_ERR(optee->pool)) {
5403 +- rc = PTR_ERR(optee->pool);
5404 +- optee->pool = NULL;
5405 +- goto err;
5406 ++ pool = optee_ffa_config_dyn_shm();
5407 ++ if (IS_ERR(pool)) {
5408 ++ rc = PTR_ERR(pool);
5409 ++ goto err_free_optee;
5410 + }
5411 ++ optee->pool = pool;
5412 +
5413 + optee->ops = &optee_ffa_ops;
5414 + optee->ffa.ffa_dev = ffa_dev;
5415 +@@ -838,7 +840,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
5416 + optee);
5417 + if (IS_ERR(teedev)) {
5418 + rc = PTR_ERR(teedev);
5419 +- goto err;
5420 ++ goto err_free_pool;
5421 + }
5422 + optee->teedev = teedev;
5423 +
5424 +@@ -846,46 +848,54 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
5425 + optee);
5426 + if (IS_ERR(teedev)) {
5427 + rc = PTR_ERR(teedev);
5428 +- goto err;
5429 ++ goto err_unreg_teedev;
5430 + }
5431 + optee->supp_teedev = teedev;
5432 +
5433 + rc = tee_device_register(optee->teedev);
5434 + if (rc)
5435 +- goto err;
5436 ++ goto err_unreg_supp_teedev;
5437 +
5438 + rc = tee_device_register(optee->supp_teedev);
5439 + if (rc)
5440 +- goto err;
5441 ++ goto err_unreg_supp_teedev;
5442 +
5443 + rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
5444 + if (rc)
5445 +- goto err;
5446 ++ goto err_unreg_supp_teedev;
5447 + mutex_init(&optee->ffa.mutex);
5448 + mutex_init(&optee->call_queue.mutex);
5449 + INIT_LIST_HEAD(&optee->call_queue.waiters);
5450 + optee_wait_queue_init(&optee->wait_queue);
5451 + optee_supp_init(&optee->supp);
5452 + ffa_dev_set_drvdata(ffa_dev, optee);
5453 ++ ctx = teedev_open(optee->teedev);
5454 ++ if (IS_ERR(ctx))
5455 ++ goto err_rhashtable_free;
5456 ++ optee->ctx = ctx;
5457 ++
5458 +
5459 + rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
5460 +- if (rc) {
5461 +- optee_ffa_remove(ffa_dev);
5462 +- return rc;
5463 +- }
5464 ++ if (rc)
5465 ++ goto err_unregister_devices;
5466 +
5467 + pr_info("initialized driver\n");
5468 + return 0;
5469 +-err:
5470 +- /*
5471 +- * tee_device_unregister() is safe to call even if the
5472 +- * devices hasn't been registered with
5473 +- * tee_device_register() yet.
5474 +- */
5475 ++
5476 ++err_unregister_devices:
5477 ++ optee_unregister_devices();
5478 ++ teedev_close_context(ctx);
5479 ++err_rhashtable_free:
5480 ++ rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
5481 ++ optee_supp_uninit(&optee->supp);
5482 ++ mutex_destroy(&optee->call_queue.mutex);
5483 ++err_unreg_supp_teedev:
5484 + tee_device_unregister(optee->supp_teedev);
5485 ++err_unreg_teedev:
5486 + tee_device_unregister(optee->teedev);
5487 +- if (optee->pool)
5488 +- tee_shm_pool_free(optee->pool);
5489 ++err_free_pool:
5490 ++ tee_shm_pool_free(pool);
5491 ++err_free_optee:
5492 + kfree(optee);
5493 + return rc;
5494 + }
5495 +diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
5496 +index 6660e05298db8..912b046976564 100644
5497 +--- a/drivers/tee/optee/optee_private.h
5498 ++++ b/drivers/tee/optee/optee_private.h
5499 +@@ -123,9 +123,10 @@ struct optee_ops {
5500 + /**
5501 + * struct optee - main service struct
5502 + * @supp_teedev: supplicant device
5503 ++ * @teedev: client device
5504 + * @ops: internal callbacks for different ways to reach secure
5505 + * world
5506 +- * @teedev: client device
5507 ++ * @ctx: driver internal TEE context
5508 + * @smc: specific to SMC ABI
5509 + * @ffa: specific to FF-A ABI
5510 + * @call_queue: queue of threads waiting to call @invoke_fn
5511 +@@ -142,6 +143,7 @@ struct optee {
5512 + struct tee_device *supp_teedev;
5513 + struct tee_device *teedev;
5514 + const struct optee_ops *ops;
5515 ++ struct tee_context *ctx;
5516 + union {
5517 + struct optee_smc smc;
5518 + struct optee_ffa ffa;
5519 +diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
5520 +index 09e7ec673bb6b..33f55ea52bc89 100644
5521 +--- a/drivers/tee/optee/smc_abi.c
5522 ++++ b/drivers/tee/optee/smc_abi.c
5523 +@@ -608,6 +608,7 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
5524 + }
5525 +
5526 + static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
5527 ++ struct optee *optee,
5528 + struct optee_msg_arg *arg,
5529 + struct optee_call_ctx *call_ctx)
5530 + {
5531 +@@ -637,7 +638,8 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
5532 + shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
5533 + break;
5534 + case OPTEE_RPC_SHM_TYPE_KERNEL:
5535 +- shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
5536 ++ shm = tee_shm_alloc(optee->ctx, sz,
5537 ++ TEE_SHM_MAPPED | TEE_SHM_PRIV);
5538 + break;
5539 + default:
5540 + arg->ret = TEEC_ERROR_BAD_PARAMETERS;
5541 +@@ -733,7 +735,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
5542 + switch (arg->cmd) {
5543 + case OPTEE_RPC_CMD_SHM_ALLOC:
5544 + free_pages_list(call_ctx);
5545 +- handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
5546 ++ handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
5547 + break;
5548 + case OPTEE_RPC_CMD_SHM_FREE:
5549 + handle_rpc_func_cmd_shm_free(ctx, arg);
5550 +@@ -762,7 +764,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
5551 +
5552 + switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
5553 + case OPTEE_SMC_RPC_FUNC_ALLOC:
5554 +- shm = tee_shm_alloc(ctx, param->a1,
5555 ++ shm = tee_shm_alloc(optee->ctx, param->a1,
5556 + TEE_SHM_MAPPED | TEE_SHM_PRIV);
5557 + if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
5558 + reg_pair_from_64(&param->a1, &param->a2, pa);
5559 +@@ -1207,6 +1209,7 @@ static int optee_probe(struct platform_device *pdev)
5560 + struct optee *optee = NULL;
5561 + void *memremaped_shm = NULL;
5562 + struct tee_device *teedev;
5563 ++ struct tee_context *ctx;
5564 + u32 sec_caps;
5565 + int rc;
5566 +
5567 +@@ -1284,6 +1287,10 @@ static int optee_probe(struct platform_device *pdev)
5568 + optee_supp_init(&optee->supp);
5569 + optee->smc.memremaped_shm = memremaped_shm;
5570 + optee->pool = pool;
5571 ++ ctx = teedev_open(optee->teedev);
5572 ++ if (IS_ERR(ctx))
5573 ++ goto err;
5574 ++ optee->ctx = ctx;
5575 +
5576 + /*
5577 + * Ensure that there are no pre-existing shm objects before enabling
5578 +diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
5579 +index 85102d12d7169..3fc426dad2df3 100644
5580 +--- a/drivers/tee/tee_core.c
5581 ++++ b/drivers/tee/tee_core.c
5582 +@@ -43,7 +43,7 @@ static DEFINE_SPINLOCK(driver_lock);
5583 + static struct class *tee_class;
5584 + static dev_t tee_devt;
5585 +
5586 +-static struct tee_context *teedev_open(struct tee_device *teedev)
5587 ++struct tee_context *teedev_open(struct tee_device *teedev)
5588 + {
5589 + int rc;
5590 + struct tee_context *ctx;
5591 +@@ -70,6 +70,7 @@ err:
5592 + return ERR_PTR(rc);
5593 +
5594 + }
5595 ++EXPORT_SYMBOL_GPL(teedev_open);
5596 +
5597 + void teedev_ctx_get(struct tee_context *ctx)
5598 + {
5599 +@@ -96,13 +97,14 @@ void teedev_ctx_put(struct tee_context *ctx)
5600 + kref_put(&ctx->refcount, teedev_ctx_release);
5601 + }
5602 +
5603 +-static void teedev_close_context(struct tee_context *ctx)
5604 ++void teedev_close_context(struct tee_context *ctx)
5605 + {
5606 + struct tee_device *teedev = ctx->teedev;
5607 +
5608 + teedev_ctx_put(ctx);
5609 + tee_device_put(teedev);
5610 + }
5611 ++EXPORT_SYMBOL_GPL(teedev_close_context);
5612 +
5613 + static int tee_open(struct inode *inode, struct file *filp)
5614 + {
5615 +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
5616 +index b2b98fe689e9e..5c25bbe1a09ff 100644
5617 +--- a/drivers/tty/n_tty.c
5618 ++++ b/drivers/tty/n_tty.c
5619 +@@ -1963,7 +1963,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
5620 + return false;
5621 +
5622 + canon_head = smp_load_acquire(&ldata->canon_head);
5623 +- n = min(*nr + 1, canon_head - ldata->read_tail);
5624 ++ n = min(*nr, canon_head - ldata->read_tail);
5625 +
5626 + tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
5627 + size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
5628 +@@ -1985,10 +1985,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
5629 + n += N_TTY_BUF_SIZE;
5630 + c = n + found;
5631 +
5632 +- if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
5633 +- c = min(*nr, c);
5634 ++ if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
5635 + n = c;
5636 +- }
5637 +
5638 + n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
5639 + __func__, eol, found, n, c, tail, more);
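
The n_tty hunk drops an off-by-one: canonical reads sized the copy as min(*nr + 1, available) to make room for an EOL byte and relied on a later re-clamp that only some paths reached, so a copy could exceed the caller's buffer by one. Clamping to *nr up front is the whole fix; reduced form (illustrative, not the tty code):

#include <stdio.h>

static size_t clamp_copy(size_t requested, size_t available)
{
    /* old: n = min(requested + 1, available), re-clamped later on
     * only some paths -- could hand back requested + 1 bytes */
    return requested < available ? requested : available;
}

int main(void)
{
    printf("%zu\n", clamp_copy(8, 64));  /* 8, never 9 */
    return 0;
}
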
5640 +diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
5641 +index 673cda3d011d0..948d0a1c6ae8e 100644
5642 +--- a/drivers/tty/serial/8250/8250_gsc.c
5643 ++++ b/drivers/tty/serial/8250/8250_gsc.c
5644 +@@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
5645 + unsigned long address;
5646 + int err;
5647 +
5648 +-#ifdef CONFIG_64BIT
5649 ++#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC)
5650 + if (!dev->irq && (dev->id.sversion == 0xad))
5651 + dev->irq = iosapic_serial_irq(dev);
5652 + #endif
5653 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
5654 +index 48e03e176f319..cec7163bc8730 100644
5655 +--- a/fs/btrfs/ioctl.c
5656 ++++ b/fs/btrfs/ioctl.c
5657 +@@ -1184,6 +1184,10 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
5658 + if (em->generation < newer_than)
5659 + goto next;
5660 +
5661 ++ /* This em is under writeback, no need to defrag */
5662 ++ if (em->generation == (u64)-1)
5663 ++ goto next;
5664 ++
5665 + /*
5666 + * Our start offset might be in the middle of an existing extent
5667 + * map, so take that into account.
5668 +@@ -1603,6 +1607,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
5669 + ret = 0;
5670 + break;
5671 + }
5672 ++ cond_resched();
5673 + }
5674 +
5675 + if (ra_allocated)
5676 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
5677 +index 040324d711188..7e1159474a4e6 100644
5678 +--- a/fs/btrfs/send.c
5679 ++++ b/fs/btrfs/send.c
5680 +@@ -4983,6 +4983,10 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
5681 + lock_page(page);
5682 + if (!PageUptodate(page)) {
5683 + unlock_page(page);
5684 ++ btrfs_err(fs_info,
5685 ++ "send: IO error at offset %llu for inode %llu root %llu",
5686 ++ page_offset(page), sctx->cur_ino,
5687 ++ sctx->send_root->root_key.objectid);
5688 + put_page(page);
5689 + ret = -EIO;
5690 + break;
5691 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
5692 +index cefd0e9623ba9..fb69524a992bb 100644
5693 +--- a/fs/cifs/connect.c
5694 ++++ b/fs/cifs/connect.c
5695 +@@ -1796,13 +1796,9 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
5696 + int i;
5697 +
5698 + for (i = 1; i < chan_count; i++) {
5699 +- /*
5700 +- * note: for now, we're okay accessing ses->chans
5701 +- * without chan_lock. But when chans can go away, we'll
5702 +- * need to introduce ref counting to make sure that chan
5703 +- * is not freed from under us.
5704 +- */
5705 ++ spin_unlock(&ses->chan_lock);
5706 + cifs_put_tcp_session(ses->chans[i].server, 0);
5707 ++ spin_lock(&ses->chan_lock);
5708 + ses->chans[i].server = NULL;
5709 + }
5710 + }
5711 +diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
5712 +index e3ed25dc6f3f6..43c406b812fed 100644
5713 +--- a/fs/cifs/fs_context.c
5714 ++++ b/fs/cifs/fs_context.c
5715 +@@ -147,7 +147,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
5716 + fsparam_u32("echo_interval", Opt_echo_interval),
5717 + fsparam_u32("max_credits", Opt_max_credits),
5718 + fsparam_u32("handletimeout", Opt_handletimeout),
5719 +- fsparam_u32("snapshot", Opt_snapshot),
5720 ++ fsparam_u64("snapshot", Opt_snapshot),
5721 + fsparam_u32("max_channels", Opt_max_channels),
5722 +
5723 + /* Mount options which take string value */
5724 +@@ -1072,7 +1072,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
5725 + ctx->echo_interval = result.uint_32;
5726 + break;
5727 + case Opt_snapshot:
5728 +- ctx->snapshot_time = result.uint_32;
5729 ++ ctx->snapshot_time = result.uint_64;
5730 + break;
5731 + case Opt_max_credits:
5732 + if (result.uint_32 < 20 || result.uint_32 > 60000) {
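
The fs_context hunk widens the snapshot mount option from u32 to u64 parsing: the snapshot time lands in a 64-bit field, so parsing it as a u32 silently truncated larger timestamps. A quick stand-alone demonstration of the truncation (the token value is just an example):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *opt = "132514966283253251";  /* example 64-bit token */
    unsigned long long full = strtoull(opt, NULL, 10);
    unsigned int truncated = (unsigned int)full;  /* what u32 parsing kept */

    printf("u64=%llu u32=%u\n", full, truncated);
    return 0;
}
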
5733 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
5734 +index 38574fc70117e..1ecfa53e4b0a1 100644
5735 +--- a/fs/cifs/sess.c
5736 ++++ b/fs/cifs/sess.c
5737 +@@ -76,11 +76,6 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
5738 + struct cifs_server_iface *ifaces = NULL;
5739 + size_t iface_count;
5740 +
5741 +- if (ses->server->dialect < SMB30_PROT_ID) {
5742 +- cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
5743 +- return 0;
5744 +- }
5745 +-
5746 + spin_lock(&ses->chan_lock);
5747 +
5748 + new_chan_count = old_chan_count = ses->chan_count;
5749 +@@ -94,6 +89,12 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
5750 + return 0;
5751 + }
5752 +
5753 ++ if (ses->server->dialect < SMB30_PROT_ID) {
5754 ++ spin_unlock(&ses->chan_lock);
5755 ++ cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
5756 ++ return 0;
5757 ++ }
5758 ++
5759 + if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
5760 + ses->chan_max = 1;
5761 + spin_unlock(&ses->chan_lock);
5762 +diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
5763 +index 7d8b72d67c803..9d486fbbfbbde 100644
5764 +--- a/fs/cifs/xattr.c
5765 ++++ b/fs/cifs/xattr.c
5766 +@@ -175,11 +175,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
5767 + switch (handler->flags) {
5768 + case XATTR_CIFS_NTSD_FULL:
5769 + aclflags = (CIFS_ACL_OWNER |
5770 ++ CIFS_ACL_GROUP |
5771 + CIFS_ACL_DACL |
5772 + CIFS_ACL_SACL);
5773 + break;
5774 + case XATTR_CIFS_NTSD:
5775 + aclflags = (CIFS_ACL_OWNER |
5776 ++ CIFS_ACL_GROUP |
5777 + CIFS_ACL_DACL);
5778 + break;
5779 + case XATTR_CIFS_ACL:
5780 +diff --git a/fs/io_uring.c b/fs/io_uring.c
5781 +index 698db7fb62e06..a92f276f21d9c 100644
5782 +--- a/fs/io_uring.c
5783 ++++ b/fs/io_uring.c
5784 +@@ -8872,10 +8872,9 @@ static void io_mem_free(void *ptr)
5785 +
5786 + static void *io_mem_alloc(size_t size)
5787 + {
5788 +- gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
5789 +- __GFP_NORETRY | __GFP_ACCOUNT;
5790 ++ gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
5791 +
5792 +- return (void *) __get_free_pages(gfp_flags, get_order(size));
5793 ++ return (void *) __get_free_pages(gfp, get_order(size));
5794 + }
5795 +
5796 + static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
5797 +diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
5798 +index 1ff1e52f398fc..cbbbccdc5a0a5 100644
5799 +--- a/fs/ksmbd/smb2pdu.c
5800 ++++ b/fs/ksmbd/smb2pdu.c
5801 +@@ -3423,9 +3423,9 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
5802 + goto free_conv_name;
5803 + }
5804 +
5805 +- struct_sz = readdir_info_level_struct_sz(info_level);
5806 +- next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
5807 +- KSMBD_DIR_INFO_ALIGNMENT);
5808 ++ struct_sz = readdir_info_level_struct_sz(info_level) - 1 + conv_len;
5809 ++ next_entry_offset = ALIGN(struct_sz, KSMBD_DIR_INFO_ALIGNMENT);
5810 ++ d_info->last_entry_off_align = next_entry_offset - struct_sz;
5811 +
5812 + if (next_entry_offset > d_info->out_buf_len) {
5813 + d_info->out_buf_len = 0;
5814 +@@ -3977,6 +3977,7 @@ int smb2_query_dir(struct ksmbd_work *work)
5815 + ((struct file_directory_info *)
5816 + ((char *)rsp->Buffer + d_info.last_entry_offset))
5817 + ->NextEntryOffset = 0;
5818 ++ d_info.data_count -= d_info.last_entry_off_align;
5819 +
5820 + rsp->StructureSize = cpu_to_le16(9);
5821 + rsp->OutputBufferOffset = cpu_to_le16(72);
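/*
 * The smb2pdu.c hunks track how many padding bytes ALIGN() added after
 * the final directory entry (last_entry_off_align) so the response
 * length can drop them: only entries followed by another entry need
 * the alignment pad. Standalone arithmetic (entry sizes illustrative):
 */
#include <stdio.h>

#define DIR_INFO_ALIGNMENT 8
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned int)(a) - 1))

int main(void)
{
        unsigned int struct_sz = 104 + 13; /* fixed header + encoded name */
        unsigned int next_off = ALIGN(struct_sz, DIR_INFO_ALIGNMENT);
        unsigned int trailing_pad = next_off - struct_sz;

        printf("entry %u bytes, next offset %u, trailing pad %u\n",
               struct_sz, next_off, trailing_pad);
        return 0;
}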
5822 +diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
5823 +index ef7f42b0290a8..9a7e211dbf4f4 100644
5824 +--- a/fs/ksmbd/smb_common.c
5825 ++++ b/fs/ksmbd/smb_common.c
5826 +@@ -308,14 +308,17 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
5827 + for (i = 0; i < 2; i++) {
5828 + struct kstat kstat;
5829 + struct ksmbd_kstat ksmbd_kstat;
5830 ++ struct dentry *dentry;
5831 +
5832 + if (!dir->dot_dotdot[i]) { /* fill dot entry info */
5833 + if (i == 0) {
5834 + d_info->name = ".";
5835 + d_info->name_len = 1;
5836 ++ dentry = dir->filp->f_path.dentry;
5837 + } else {
5838 + d_info->name = "..";
5839 + d_info->name_len = 2;
5840 ++ dentry = dir->filp->f_path.dentry->d_parent;
5841 + }
5842 +
5843 + if (!match_pattern(d_info->name, d_info->name_len,
5844 +@@ -327,7 +330,7 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
5845 + ksmbd_kstat.kstat = &kstat;
5846 + ksmbd_vfs_fill_dentry_attrs(work,
5847 + user_ns,
5848 +- dir->filp->f_path.dentry->d_parent,
5849 ++ dentry,
5850 + &ksmbd_kstat);
5851 + rc = fn(conn, info_level, d_info, &ksmbd_kstat);
5852 + if (rc)
5853 +diff --git a/fs/ksmbd/vfs.h b/fs/ksmbd/vfs.h
5854 +index adf94a4f22fa6..8c37aaf936ab1 100644
5855 +--- a/fs/ksmbd/vfs.h
5856 ++++ b/fs/ksmbd/vfs.h
5857 +@@ -47,6 +47,7 @@ struct ksmbd_dir_info {
5858 + int last_entry_offset;
5859 + bool hide_dot_file;
5860 + int flags;
5861 ++ int last_entry_off_align;
5862 + };
5863 +
5864 + struct ksmbd_readdir_data {
5865 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
5866 +index b2460a0504411..877f72433f435 100644
5867 +--- a/fs/nfs/dir.c
5868 ++++ b/fs/nfs/dir.c
5869 +@@ -1982,14 +1982,14 @@ no_open:
5870 + if (!res) {
5871 + inode = d_inode(dentry);
5872 + if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
5873 +- !S_ISDIR(inode->i_mode))
5874 ++ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
5875 + res = ERR_PTR(-ENOTDIR);
5876 + else if (inode && S_ISREG(inode->i_mode))
5877 + res = ERR_PTR(-EOPENSTALE);
5878 + } else if (!IS_ERR(res)) {
5879 + inode = d_inode(res);
5880 + if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
5881 +- !S_ISDIR(inode->i_mode)) {
5882 ++ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
5883 + dput(res);
5884 + res = ERR_PTR(-ENOTDIR);
5885 + } else if (inode && S_ISREG(inode->i_mode)) {
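/*
 * Both dir.c hunks relax the same predicate: a LOOKUP_DIRECTORY lookup
 * must not fail a symlink with ENOTDIR, since the VFS still follows
 * the link before deciding whether the final target is a directory.
 * The shared condition, as a standalone helper (userspace mode bits
 * behave the same way):
 */
#include <stdbool.h>
#include <sys/stat.h>

static bool reject_as_not_dir(bool want_directory, const struct stat *st)
{
        return want_directory && st &&
               !(S_ISDIR(st->st_mode) || S_ISLNK(st->st_mode));
}

int main(void)
{
        struct stat st = { .st_mode = S_IFLNK };

        /* a symlink is no longer rejected outright */
        return reject_as_not_dir(true, &st) ? 1 : 0;
}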
5886 +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
5887 +index fda530d5e7640..a09d3ff627c20 100644
5888 +--- a/fs/nfs/inode.c
5889 ++++ b/fs/nfs/inode.c
5890 +@@ -853,12 +853,9 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
5891 + }
5892 +
5893 + /* Flush out writes to the server in order to update c/mtime. */
5894 +- if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
5895 +- S_ISREG(inode->i_mode)) {
5896 +- err = filemap_write_and_wait(inode->i_mapping);
5897 +- if (err)
5898 +- goto out;
5899 +- }
5900 ++ if ((request_mask & (STATX_CTIME | STATX_MTIME)) &&
5901 ++ S_ISREG(inode->i_mode))
5902 ++ filemap_write_and_wait(inode->i_mapping);
5903 +
5904 + /*
5905 + * We may force a getattr if the user cares about atime.
5906 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
5907 +index 9a94e758212c8..0abbbf5d2bdf1 100644
5908 +--- a/fs/nfs/nfs4proc.c
5909 ++++ b/fs/nfs/nfs4proc.c
5910 +@@ -1233,8 +1233,7 @@ nfs4_update_changeattr_locked(struct inode *inode,
5911 + NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
5912 + NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
5913 + NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
5914 +- NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR |
5915 +- NFS_INO_REVAL_PAGECACHE;
5916 ++ NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
5917 + nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
5918 + }
5919 + nfsi->attrtimeo_timestamp = jiffies;
5920 +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
5921 +index 22d904bde6ab9..a74aef99bd3d6 100644
5922 +--- a/fs/quota/dquot.c
5923 ++++ b/fs/quota/dquot.c
5924 +@@ -690,9 +690,14 @@ int dquot_quota_sync(struct super_block *sb, int type)
5925 + /* This is not very clever (and fast) but currently I don't know about
5926 + * any other simple way of getting quota data to disk and we must get
5927 + * them there for userspace to be visible... */
5928 +- if (sb->s_op->sync_fs)
5929 +- sb->s_op->sync_fs(sb, 1);
5930 +- sync_blockdev(sb->s_bdev);
5931 ++ if (sb->s_op->sync_fs) {
5932 ++ ret = sb->s_op->sync_fs(sb, 1);
5933 ++ if (ret)
5934 ++ return ret;
5935 ++ }
5936 ++ ret = sync_blockdev(sb->s_bdev);
5937 ++ if (ret)
5938 ++ return ret;
5939 +
5940 + /*
5941 + * Now when everything is written we can discard the pagecache so
5942 +diff --git a/fs/super.c b/fs/super.c
5943 +index a6405d44d4ca2..d978dd031a036 100644
5944 +--- a/fs/super.c
5945 ++++ b/fs/super.c
5946 +@@ -1619,11 +1619,9 @@ static void lockdep_sb_freeze_acquire(struct super_block *sb)
5947 + percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
5948 + }
5949 +
5950 +-static void sb_freeze_unlock(struct super_block *sb)
5951 ++static void sb_freeze_unlock(struct super_block *sb, int level)
5952 + {
5953 +- int level;
5954 +-
5955 +- for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
5956 ++ for (level--; level >= 0; level--)
5957 + percpu_up_write(sb->s_writers.rw_sem + level);
5958 + }
5959 +
5960 +@@ -1694,7 +1692,14 @@ int freeze_super(struct super_block *sb)
5961 + sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
5962 +
5963 + /* All writers are done so after syncing there won't be dirty data */
5964 +- sync_filesystem(sb);
5965 ++ ret = sync_filesystem(sb);
5966 ++ if (ret) {
5967 ++ sb->s_writers.frozen = SB_UNFROZEN;
5968 ++ sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
5969 ++ wake_up(&sb->s_writers.wait_unfrozen);
5970 ++ deactivate_locked_super(sb);
5971 ++ return ret;
5972 ++ }
5973 +
5974 + /* Now wait for internal filesystem counter */
5975 + sb->s_writers.frozen = SB_FREEZE_FS;
5976 +@@ -1706,7 +1711,7 @@ int freeze_super(struct super_block *sb)
5977 + printk(KERN_ERR
5978 + "VFS:Filesystem freeze failed\n");
5979 + sb->s_writers.frozen = SB_UNFROZEN;
5980 +- sb_freeze_unlock(sb);
5981 ++ sb_freeze_unlock(sb, SB_FREEZE_FS);
5982 + wake_up(&sb->s_writers.wait_unfrozen);
5983 + deactivate_locked_super(sb);
5984 + return ret;
5985 +@@ -1751,7 +1756,7 @@ static int thaw_super_locked(struct super_block *sb)
5986 + }
5987 +
5988 + sb->s_writers.frozen = SB_UNFROZEN;
5989 +- sb_freeze_unlock(sb);
5990 ++ sb_freeze_unlock(sb, SB_FREEZE_FS);
5991 + out:
5992 + wake_up(&sb->s_writers.wait_unfrozen);
5993 + deactivate_locked_super(sb);
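/*
 * The super.c rework lets sb_freeze_unlock() release only the freeze
 * levels actually taken: the new sync_filesystem() failure path has
 * acquired the write and page-fault levels but not SB_FREEZE_FS, while
 * the existing paths unwind everything. A standalone model of the
 * countdown loop (array slots stand in for the per-level percpu
 * rwsems; level numbering here is 0-based and illustrative):
 */
#include <stdio.h>

#define FREEZE_LEVELS 3

static void freeze_unlock_sketch(int held[], int levels_held)
{
        int level;

        /* mirrors "for (level--; level >= 0; level--)" in the patch */
        for (level = levels_held - 1; level >= 0; level--) {
                held[level] = 0;
                printf("released level %d\n", level);
        }
}

int main(void)
{
        int held[FREEZE_LEVELS] = { 1, 1, 0 }; /* sync failed before FS */

        freeze_unlock_sketch(held, 2);         /* unwind the two taken */
        return 0;
}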
5994 +diff --git a/fs/sync.c b/fs/sync.c
5995 +index 3ce8e2137f310..c7690016453e4 100644
5996 +--- a/fs/sync.c
5997 ++++ b/fs/sync.c
5998 +@@ -29,7 +29,7 @@
5999 + */
6000 + int sync_filesystem(struct super_block *sb)
6001 + {
6002 +- int ret;
6003 ++ int ret = 0;
6004 +
6005 + /*
6006 + * We need to be protected against the filesystem going from
6007 +@@ -52,15 +52,21 @@ int sync_filesystem(struct super_block *sb)
6008 + * at a time.
6009 + */
6010 + writeback_inodes_sb(sb, WB_REASON_SYNC);
6011 +- if (sb->s_op->sync_fs)
6012 +- sb->s_op->sync_fs(sb, 0);
6013 ++ if (sb->s_op->sync_fs) {
6014 ++ ret = sb->s_op->sync_fs(sb, 0);
6015 ++ if (ret)
6016 ++ return ret;
6017 ++ }
6018 + ret = sync_blockdev_nowait(sb->s_bdev);
6019 +- if (ret < 0)
6020 ++ if (ret)
6021 + return ret;
6022 +
6023 + sync_inodes_sb(sb);
6024 +- if (sb->s_op->sync_fs)
6025 +- sb->s_op->sync_fs(sb, 1);
6026 ++ if (sb->s_op->sync_fs) {
6027 ++ ret = sb->s_op->sync_fs(sb, 1);
6028 ++ if (ret)
6029 ++ return ret;
6030 ++ }
6031 + return sync_blockdev(sb->s_bdev);
6032 + }
6033 + EXPORT_SYMBOL(sync_filesystem);
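/*
 * sync_filesystem() now returns the first error from any stage rather
 * than discarding the ->sync_fs() results, and dquot_quota_sync()
 * earlier in this patch gets the same treatment. The flow reduces to a
 * first-error-wins pipeline; a standalone sketch with stub stages:
 */
#include <stdio.h>

static int stage(const char *name, int err)
{
        printf("%-22s -> %d\n", name, err);
        return err;
}

static int sync_filesystem_sketch(void)
{
        int ret;

        stage("writeback_inodes_sb", 0);        /* void in the kernel */
        ret = stage("sync_fs(wait=0)", 0);
        if (ret)
                return ret;
        ret = stage("sync_blockdev_nowait", 0);
        if (ret)
                return ret;
        stage("sync_inodes_sb", 0);             /* void in the kernel */
        ret = stage("sync_fs(wait=1)", 0);
        if (ret)
                return ret;
        return stage("sync_blockdev", 0);
}

int main(void)
{
        return sync_filesystem_sketch();
}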
6034 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
6035 +index d73887c805e05..d405ffe770342 100644
6036 +--- a/include/linux/blkdev.h
6037 ++++ b/include/linux/blkdev.h
6038 +@@ -740,7 +740,8 @@ extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
6039 +
6040 + bool __must_check blk_get_queue(struct request_queue *);
6041 + extern void blk_put_queue(struct request_queue *);
6042 +-extern void blk_set_queue_dying(struct request_queue *);
6043 ++
6044 ++void blk_mark_disk_dead(struct gendisk *disk);
6045 +
6046 + #ifdef CONFIG_BLOCK
6047 + /*
6048 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
6049 +index 9f20b0f539f78..29b9b199c56bb 100644
6050 +--- a/include/linux/bpf.h
6051 ++++ b/include/linux/bpf.h
6052 +@@ -297,6 +297,34 @@ bool bpf_map_meta_equal(const struct bpf_map *meta0,
6053 +
6054 + extern const struct bpf_map_ops bpf_map_offload_ops;
6055 +
6056 ++/* bpf_type_flag contains a set of flags that are applicable to the values of
6057 ++ * arg_type, ret_type and reg_type. For example, a pointer value may be null,
6058 ++ * or a memory is read-only. We classify types into two categories: base types
6059 ++ * and extended types. Extended types are base types combined with a type flag.
6060 ++ *
6061 ++ * Currently there are no more than 32 base types in arg_type, ret_type and
6062 ++ * reg_types.
6063 ++ */
6064 ++#define BPF_BASE_TYPE_BITS 8
6065 ++
6066 ++enum bpf_type_flag {
6067 ++ /* PTR may be NULL. */
6068 ++ PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS),
6069 ++
6070 ++ /* MEM is read-only. When applied on bpf_arg, it indicates the arg is
6071 ++ * compatible with both mutable and immutable memory.
6072 ++ */
6073 ++ MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS),
6074 ++
6075 ++ __BPF_TYPE_LAST_FLAG = MEM_RDONLY,
6076 ++};
6077 ++
6078 ++/* Max number of base types. */
6079 ++#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)
6080 ++
6081 ++/* Max number of all types. */
6082 ++#define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
6083 ++
6084 + /* function argument constraints */
6085 + enum bpf_arg_type {
6086 + ARG_DONTCARE = 0, /* unused argument in helper function */
6087 +@@ -308,13 +336,11 @@ enum bpf_arg_type {
6088 + ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
6089 + ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
6090 + ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
6091 +- ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */
6092 +
6093 + /* the following constraints used to prototype bpf_memcmp() and other
6094 + * functions that access data on eBPF program stack
6095 + */
6096 + ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
6097 +- ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
6098 + ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
6099 + * helper function must fill all bytes or clear
6100 + * them in error case.
6101 +@@ -324,42 +350,65 @@ enum bpf_arg_type {
6102 + ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
6103 +
6104 + ARG_PTR_TO_CTX, /* pointer to context */
6105 +- ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */
6106 + ARG_ANYTHING, /* any (initialized) argument is ok */
6107 + ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
6108 + ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
6109 + ARG_PTR_TO_INT, /* pointer to int */
6110 + ARG_PTR_TO_LONG, /* pointer to long */
6111 + ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
6112 +- ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */
6113 + ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
6114 + ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
6115 +- ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */
6116 + ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
6117 + ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
6118 + ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
6119 + ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
6120 +- ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */
6121 ++ ARG_PTR_TO_STACK, /* pointer to stack */
6122 + ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
6123 + ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
6124 + __BPF_ARG_TYPE_MAX,
6125 ++
6126 ++ /* Extended arg_types. */
6127 ++ ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
6128 ++ ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
6129 ++ ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
6130 ++ ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
6131 ++ ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
6132 ++ ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
6133 ++
6134 ++ /* This must be the last entry. Its purpose is to ensure the enum is
6135 ++ * wide enough to hold the higher bits reserved for bpf_type_flag.
6136 ++ */
6137 ++ __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT,
6138 + };
6139 ++static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
6140 +
6141 + /* type of values returned from helper functions */
6142 + enum bpf_return_type {
6143 + RET_INTEGER, /* function returns integer */
6144 + RET_VOID, /* function doesn't return anything */
6145 + RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
6146 +- RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
6147 +- RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
6148 +- RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */
6149 +- RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
6150 +- RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */
6151 +- RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */
6152 +- RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
6153 ++ RET_PTR_TO_SOCKET, /* returns a pointer to a socket */
6154 ++ RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */
6155 ++ RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */
6156 ++ RET_PTR_TO_ALLOC_MEM, /* returns a pointer to dynamically allocated memory */
6157 + RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
6158 + RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
6159 ++ __BPF_RET_TYPE_MAX,
6160 ++
6161 ++ /* Extended ret_types. */
6162 ++ RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
6163 ++ RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
6164 ++ RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
6165 ++ RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
6166 ++ RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_ALLOC_MEM,
6167 ++ RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
6168 ++
6169 ++ /* This must be the last entry. Its purpose is to ensure the enum is
6170 ++ * wide enough to hold the higher bits reserved for bpf_type_flag.
6171 ++ */
6172 ++ __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT,
6173 + };
6174 ++static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
6175 +
6176 + /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
6177 + * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
6178 +@@ -421,18 +470,15 @@ enum bpf_reg_type {
6179 + PTR_TO_CTX, /* reg points to bpf_context */
6180 + CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
6181 + PTR_TO_MAP_VALUE, /* reg points to map element value */
6182 +- PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
6183 ++ PTR_TO_MAP_KEY, /* reg points to a map element key */
6184 + PTR_TO_STACK, /* reg == frame_pointer + offset */
6185 + PTR_TO_PACKET_META, /* skb->data - meta_len */
6186 + PTR_TO_PACKET, /* reg points to skb->data */
6187 + PTR_TO_PACKET_END, /* skb->data + headlen */
6188 + PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
6189 + PTR_TO_SOCKET, /* reg points to struct bpf_sock */
6190 +- PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
6191 + PTR_TO_SOCK_COMMON, /* reg points to sock_common */
6192 +- PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
6193 + PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
6194 +- PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
6195 + PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
6196 + PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
6197 + /* PTR_TO_BTF_ID points to a kernel struct that does not need
6198 +@@ -450,18 +496,25 @@ enum bpf_reg_type {
6199 + * been checked for null. Used primarily to inform the verifier
6200 + * an explicit null check is required for this struct.
6201 + */
6202 +- PTR_TO_BTF_ID_OR_NULL,
6203 + PTR_TO_MEM, /* reg points to valid memory region */
6204 +- PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */
6205 +- PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */
6206 +- PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
6207 +- PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */
6208 +- PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
6209 ++ PTR_TO_BUF, /* reg points to a read/write buffer */
6210 + PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */
6211 + PTR_TO_FUNC, /* reg points to a bpf program function */
6212 +- PTR_TO_MAP_KEY, /* reg points to a map element key */
6213 + __BPF_REG_TYPE_MAX,
6214 ++
6215 ++ /* Extended reg_types. */
6216 ++ PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
6217 ++ PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
6218 ++ PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
6219 ++ PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
6220 ++ PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,
6221 ++
6222 ++ /* This must be the last entry. Its purpose is to ensure the enum is
6223 ++ * wide enough to hold the higher bits reserved for bpf_type_flag.
6224 ++ */
6225 ++ __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT,
6226 + };
6227 ++static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
6228 +
6229 + /* The information passed from prog-specific *_is_valid_access
6230 + * back to the verifier.
6231 +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
6232 +index 182b16a910849..540bc0b3bfae6 100644
6233 +--- a/include/linux/bpf_verifier.h
6234 ++++ b/include/linux/bpf_verifier.h
6235 +@@ -18,6 +18,8 @@
6236 + * that converting umax_value to int cannot overflow.
6237 + */
6238 + #define BPF_MAX_VAR_SIZ (1 << 29)
6239 ++/* size of type_str_buf in bpf_verifier. */
6240 ++#define TYPE_STR_BUF_LEN 64
6241 +
6242 + /* Liveness marks, used for registers and spilled-regs (in stack slots).
6243 + * Read marks propagate upwards until they find a write mark; they record that
6244 +@@ -474,6 +476,8 @@ struct bpf_verifier_env {
6245 + /* longest register parentage chain walked for liveness marking */
6246 + u32 longest_mark_read_walk;
6247 + bpfptr_t fd_array;
6248 ++ /* buffer used in reg_type_str() to generate reg_type string */
6249 ++ char type_str_buf[TYPE_STR_BUF_LEN];
6250 + };
6251 +
6252 + __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
6253 +@@ -536,5 +540,18 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
6254 + struct bpf_attach_target_info *tgt_info);
6255 + void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
6256 +
6257 ++#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
6258 ++
6259 ++/* extract base type from bpf_{arg, return, reg}_type. */
6260 ++static inline u32 base_type(u32 type)
6261 ++{
6262 ++ return type & BPF_BASE_TYPE_MASK;
6263 ++}
6264 ++
6265 ++/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
6266 ++static inline u32 type_flag(u32 type)
6267 ++{
6268 ++ return type & ~BPF_BASE_TYPE_MASK;
6269 ++}
6270 +
6271 + #endif /* _LINUX_BPF_VERIFIER_H */
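/*
 * Together, the bpf.h and bpf_verifier.h hunks replace the *_OR_NULL
 * enum explosion with an 8-bit base type plus flag bits: each
 * "..._OR_NULL" is now base | PTR_MAYBE_NULL, and helpers recover the
 * two halves with a mask. A standalone model of the encoding (flag
 * layout mirrors the patch; the base values are illustrative):
 */
#include <assert.h>
#include <stdio.h>

#define BPF_BASE_TYPE_BITS 8
#define BIT(n)             (1U << (n))
#define BPF_BASE_TYPE_MASK (BIT(BPF_BASE_TYPE_BITS) - 1)

enum { PTR_MAYBE_NULL = BIT(8), MEM_RDONLY = BIT(9) };
enum { PTR_TO_MEM = 1, PTR_TO_BUF = 2 };

static unsigned int base_type(unsigned int t)
{
        return t & BPF_BASE_TYPE_MASK;
}

static unsigned int type_flag(unsigned int t)
{
        return t & ~BPF_BASE_TYPE_MASK;
}

int main(void)
{
        unsigned int t = PTR_TO_BUF | MEM_RDONLY | PTR_MAYBE_NULL;

        assert(base_type(t) == PTR_TO_BUF);
        assert(type_flag(t) == (MEM_RDONLY | PTR_MAYBE_NULL));

        /* clearing a flag converts the type, cf. mark_ptr_not_null_reg() */
        t &= ~PTR_MAYBE_NULL;
        assert(base_type(t) == PTR_TO_BUF);

        printf("encoding checks passed\n");
        return 0;
}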
6272 +diff --git a/include/linux/compiler.h b/include/linux/compiler.h
6273 +index 429dcebe2b992..0f7fd205ab7ea 100644
6274 +--- a/include/linux/compiler.h
6275 ++++ b/include/linux/compiler.h
6276 +@@ -117,14 +117,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
6277 + */
6278 + #define __stringify_label(n) #n
6279 +
6280 +-#define __annotate_reachable(c) ({ \
6281 +- asm volatile(__stringify_label(c) ":\n\t" \
6282 +- ".pushsection .discard.reachable\n\t" \
6283 +- ".long " __stringify_label(c) "b - .\n\t" \
6284 +- ".popsection\n\t" : : "i" (c)); \
6285 +-})
6286 +-#define annotate_reachable() __annotate_reachable(__COUNTER__)
6287 +-
6288 + #define __annotate_unreachable(c) ({ \
6289 + asm volatile(__stringify_label(c) ":\n\t" \
6290 + ".pushsection .discard.unreachable\n\t" \
6291 +@@ -133,24 +125,21 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
6292 + })
6293 + #define annotate_unreachable() __annotate_unreachable(__COUNTER__)
6294 +
6295 +-#define ASM_UNREACHABLE \
6296 +- "999:\n\t" \
6297 +- ".pushsection .discard.unreachable\n\t" \
6298 +- ".long 999b - .\n\t" \
6299 ++#define ASM_REACHABLE \
6300 ++ "998:\n\t" \
6301 ++ ".pushsection .discard.reachable\n\t" \
6302 ++ ".long 998b - .\n\t" \
6303 + ".popsection\n\t"
6304 +
6305 + /* Annotate a C jump table to allow objtool to follow the code flow */
6306 + #define __annotate_jump_table __section(".rodata..c_jump_table")
6307 +
6308 + #else
6309 +-#define annotate_reachable()
6310 + #define annotate_unreachable()
6311 ++# define ASM_REACHABLE
6312 + #define __annotate_jump_table
6313 + #endif
6314 +
6315 +-#ifndef ASM_UNREACHABLE
6316 +-# define ASM_UNREACHABLE
6317 +-#endif
6318 + #ifndef unreachable
6319 + # define unreachable() do { \
6320 + annotate_unreachable(); \
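/*
 * ASM_REACHABLE above emits a local label plus a PC-relative .long in
 * a .discard.reachable section, which objtool reads at build time to
 * mark the location reachable and the linker then discards. A minimal
 * standalone use on an ELF target (gcc/clang), copying the macro from
 * the hunk:
 */
#define ASM_REACHABLE                             \
        "998:\n\t"                                \
        ".pushsection .discard.reachable\n\t"     \
        ".long 998b - .\n\t"                      \
        ".popsection\n\t"

int main(void)
{
        /* records "this spot is reachable" without emitting code */
        __asm__ volatile(ASM_REACHABLE);
        return 0;
}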
6321 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
6322 +index 6cbefb660fa3b..049858c671efa 100644
6323 +--- a/include/linux/netdevice.h
6324 ++++ b/include/linux/netdevice.h
6325 +@@ -2149,7 +2149,7 @@ struct net_device {
6326 + struct netdev_queue *_tx ____cacheline_aligned_in_smp;
6327 + unsigned int num_tx_queues;
6328 + unsigned int real_num_tx_queues;
6329 +- struct Qdisc *qdisc;
6330 ++ struct Qdisc __rcu *qdisc;
6331 + unsigned int tx_queue_len;
6332 + spinlock_t tx_global_lock;
6333 +
6334 +diff --git a/include/linux/sched.h b/include/linux/sched.h
6335 +index 78c351e35fec6..ee5ed88219631 100644
6336 +--- a/include/linux/sched.h
6337 ++++ b/include/linux/sched.h
6338 +@@ -1672,7 +1672,6 @@ extern struct pid *cad_pid;
6339 + #define PF_MEMALLOC 0x00000800 /* Allocating memory */
6340 + #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
6341 + #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
6342 +-#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
6343 + #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
6344 + #define PF_FROZEN 0x00010000 /* Frozen for system suspend */
6345 + #define PF_KSWAPD 0x00020000 /* I am kswapd */
6346 +diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
6347 +index cf5999626e28d..5e1533ee3785b 100644
6348 +--- a/include/linux/tee_drv.h
6349 ++++ b/include/linux/tee_drv.h
6350 +@@ -587,4 +587,18 @@ struct tee_client_driver {
6351 + #define to_tee_client_driver(d) \
6352 + container_of(d, struct tee_client_driver, driver)
6353 +
6354 ++/**
6355 ++ * teedev_open() - Open a struct tee_device
6356 ++ * @teedev: Device to open
6357 ++ *
6358 ++ * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
6359 ++ */
6360 ++struct tee_context *teedev_open(struct tee_device *teedev);
6361 ++
6362 ++/**
6363 ++ * teedev_close_context() - closes a struct tee_context
6364 ++ * @ctx: The struct tee_context to close
6365 ++ */
6366 ++void teedev_close_context(struct tee_context *ctx);
6367 ++
6368 + #endif /*__TEE_DRV_H*/
6369 +diff --git a/include/net/addrconf.h b/include/net/addrconf.h
6370 +index e7ce719838b5e..59940e230b782 100644
6371 +--- a/include/net/addrconf.h
6372 ++++ b/include/net/addrconf.h
6373 +@@ -109,8 +109,6 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
6374 + int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
6375 + const struct in6_addr *daddr, unsigned int srcprefs,
6376 + struct in6_addr *saddr);
6377 +-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
6378 +- u32 banned_flags);
6379 + int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
6380 + u32 banned_flags);
6381 + bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
6382 +diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
6383 +index 38785d48baff9..184105d682942 100644
6384 +--- a/include/net/bond_3ad.h
6385 ++++ b/include/net/bond_3ad.h
6386 +@@ -262,7 +262,7 @@ struct ad_system {
6387 + struct ad_bond_info {
6388 + struct ad_system system; /* 802.3ad system structure */
6389 + struct bond_3ad_stats stats;
6390 +- u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
6391 ++ atomic_t agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
6392 + u16 aggregator_identifier;
6393 + };
6394 +
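/*
 * bond_3ad.h converts agg_select_timer from u32 to atomic_t so the
 * countdown can be updated without a shared lock; a plain
 * "if (t) t--" read-modify-write can lose ticks under concurrency.
 * A standalone C11 sketch of a race-free countdown (the helper is a
 * hypothetical model, not the bonding driver's actual logic):
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint agg_select_timer;

/* returns true exactly once, on the tick that reaches zero */
static bool agg_timer_tick(void)
{
        unsigned int old = atomic_load(&agg_select_timer);

        while (old > 0) {
                if (atomic_compare_exchange_weak(&agg_select_timer,
                                                 &old, old - 1))
                        return old == 1;
        }
        return false;
}

int main(void)
{
        atomic_store(&agg_select_timer, 3);
        while (!agg_timer_tick())
                ;
        printf("aggregator selection timer expired\n");
        return 0;
}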
6395 +diff --git a/include/net/dsa.h b/include/net/dsa.h
6396 +index eff5c44ba3774..aede735bed64c 100644
6397 +--- a/include/net/dsa.h
6398 ++++ b/include/net/dsa.h
6399 +@@ -1094,6 +1094,7 @@ void dsa_unregister_switch(struct dsa_switch *ds);
6400 + int dsa_register_switch(struct dsa_switch *ds);
6401 + void dsa_switch_shutdown(struct dsa_switch *ds);
6402 + struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
6403 ++void dsa_flush_workqueue(void);
6404 + #ifdef CONFIG_PM_SLEEP
6405 + int dsa_switch_suspend(struct dsa_switch *ds);
6406 + int dsa_switch_resume(struct dsa_switch *ds);
6407 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
6408 +index c85b040728d7e..bbb27639f2933 100644
6409 +--- a/include/net/ip6_fib.h
6410 ++++ b/include/net/ip6_fib.h
6411 +@@ -189,14 +189,16 @@ struct fib6_info {
6412 + u32 fib6_metric;
6413 + u8 fib6_protocol;
6414 + u8 fib6_type;
6415 ++
6416 ++ u8 offload;
6417 ++ u8 trap;
6418 ++ u8 offload_failed;
6419 ++
6420 + u8 should_flush:1,
6421 + dst_nocount:1,
6422 + dst_nopolicy:1,
6423 + fib6_destroying:1,
6424 +- offload:1,
6425 +- trap:1,
6426 +- offload_failed:1,
6427 +- unused:1;
6428 ++ unused:4;
6429 +
6430 + struct rcu_head rcu;
6431 + struct nexthop *nh;
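/*
 * The fib6_info hunk moves offload/trap/offload_failed out of the
 * shared bitfield byte into full u8 members: adjacent bitfields live
 * in one storage unit, so concurrent writers to different bits race on
 * the whole unit, while separate u8 fields are independently
 * addressable. A standalone look at the two layouts:
 */
#include <stdio.h>

struct packed_flags {                  /* old layout: one shared byte */
        unsigned char should_flush:1, offload:1, trap:1,
                      offload_failed:1, unused:4;
};

struct split_flags {                   /* new layout: a byte apiece */
        unsigned char offload, trap, offload_failed;
        unsigned char should_flush:1, unused:7;
};

int main(void)
{
        printf("packed: %zu byte(s) -- all flags share one write target\n",
               sizeof(struct packed_flags));
        printf("split:  %zu bytes -- each flag writable on its own\n",
               sizeof(struct split_flags));
        return 0;
}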
6432 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
6433 +index c19bf51ded1d0..c6ee334ad846b 100644
6434 +--- a/include/net/ipv6.h
6435 ++++ b/include/net/ipv6.h
6436 +@@ -391,17 +391,20 @@ static inline void txopt_put(struct ipv6_txoptions *opt)
6437 + kfree_rcu(opt, rcu);
6438 + }
6439 +
6440 ++#if IS_ENABLED(CONFIG_IPV6)
6441 + struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);
6442 +
6443 + extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
6444 + static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
6445 + __be32 label)
6446 + {
6447 +- if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key))
6448 ++ if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) &&
6449 ++ READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl))
6450 + return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);
6451 +
6452 + return NULL;
6453 + }
6454 ++#endif
6455 +
6456 + struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
6457 + struct ip6_flowlabel *fl,
6458 +diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
6459 +index a4b5503803165..6bd7e5a85ce76 100644
6460 +--- a/include/net/netns/ipv6.h
6461 ++++ b/include/net/netns/ipv6.h
6462 +@@ -77,9 +77,10 @@ struct netns_ipv6 {
6463 + spinlock_t fib6_gc_lock;
6464 + unsigned int ip6_rt_gc_expire;
6465 + unsigned long ip6_rt_last_gc;
6466 ++ unsigned char flowlabel_has_excl;
6467 + #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6468 +- unsigned int fib6_rules_require_fldissect;
6469 + bool fib6_has_custom_rules;
6470 ++ unsigned int fib6_rules_require_fldissect;
6471 + #ifdef CONFIG_IPV6_SUBTREES
6472 + unsigned int fib6_routes_require_src;
6473 + #endif
6474 +diff --git a/kernel/async.c b/kernel/async.c
6475 +index b8d7a663497f9..b2c4ba5686ee4 100644
6476 +--- a/kernel/async.c
6477 ++++ b/kernel/async.c
6478 +@@ -205,9 +205,6 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
6479 + atomic_inc(&entry_count);
6480 + spin_unlock_irqrestore(&async_lock, flags);
6481 +
6482 +- /* mark that this task has queued an async job, used by module init */
6483 +- current->flags |= PF_USED_ASYNC;
6484 +-
6485 + /* schedule for execution */
6486 + queue_work_node(node, system_unbound_wq, &entry->work);
6487 +
6488 +diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
6489 +index 5e037070cb656..d2ff8ba7ae58f 100644
6490 +--- a/kernel/bpf/btf.c
6491 ++++ b/kernel/bpf/btf.c
6492 +@@ -4928,10 +4928,12 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
6493 + /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
6494 + for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6495 + const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6496 ++ u32 type, flag;
6497 +
6498 +- if (ctx_arg_info->offset == off &&
6499 +- (ctx_arg_info->reg_type == PTR_TO_RDONLY_BUF_OR_NULL ||
6500 +- ctx_arg_info->reg_type == PTR_TO_RDWR_BUF_OR_NULL)) {
6501 ++ type = base_type(ctx_arg_info->reg_type);
6502 ++ flag = type_flag(ctx_arg_info->reg_type);
6503 ++ if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
6504 ++ (flag & PTR_MAYBE_NULL)) {
6505 + info->reg_type = ctx_arg_info->reg_type;
6506 + return true;
6507 + }
6508 +@@ -5845,7 +5847,7 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
6509 + return -EINVAL;
6510 + }
6511 +
6512 +- reg->type = PTR_TO_MEM_OR_NULL;
6513 ++ reg->type = PTR_TO_MEM | PTR_MAYBE_NULL;
6514 + reg->id = ++env->id_gen;
6515 +
6516 + continue;
6517 +@@ -6335,7 +6337,7 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
6518 + .func = bpf_btf_find_by_name_kind,
6519 + .gpl_only = false,
6520 + .ret_type = RET_INTEGER,
6521 +- .arg1_type = ARG_PTR_TO_MEM,
6522 ++ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6523 + .arg2_type = ARG_CONST_SIZE,
6524 + .arg3_type = ARG_ANYTHING,
6525 + .arg4_type = ARG_ANYTHING,
6526 +diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
6527 +index 43eb3501721b7..514b4681a90ac 100644
6528 +--- a/kernel/bpf/cgroup.c
6529 ++++ b/kernel/bpf/cgroup.c
6530 +@@ -1789,7 +1789,7 @@ static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
6531 + .gpl_only = false,
6532 + .ret_type = RET_INTEGER,
6533 + .arg1_type = ARG_PTR_TO_CTX,
6534 +- .arg2_type = ARG_PTR_TO_MEM,
6535 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6536 + .arg3_type = ARG_CONST_SIZE,
6537 + };
6538 +
6539 +diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
6540 +index 649f07623df6c..acb2383b0f537 100644
6541 +--- a/kernel/bpf/helpers.c
6542 ++++ b/kernel/bpf/helpers.c
6543 +@@ -530,7 +530,7 @@ const struct bpf_func_proto bpf_strtol_proto = {
6544 + .func = bpf_strtol,
6545 + .gpl_only = false,
6546 + .ret_type = RET_INTEGER,
6547 +- .arg1_type = ARG_PTR_TO_MEM,
6548 ++ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6549 + .arg2_type = ARG_CONST_SIZE,
6550 + .arg3_type = ARG_ANYTHING,
6551 + .arg4_type = ARG_PTR_TO_LONG,
6552 +@@ -558,7 +558,7 @@ const struct bpf_func_proto bpf_strtoul_proto = {
6553 + .func = bpf_strtoul,
6554 + .gpl_only = false,
6555 + .ret_type = RET_INTEGER,
6556 +- .arg1_type = ARG_PTR_TO_MEM,
6557 ++ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6558 + .arg2_type = ARG_CONST_SIZE,
6559 + .arg3_type = ARG_ANYTHING,
6560 + .arg4_type = ARG_PTR_TO_LONG,
6561 +@@ -630,7 +630,7 @@ const struct bpf_func_proto bpf_event_output_data_proto = {
6562 + .arg1_type = ARG_PTR_TO_CTX,
6563 + .arg2_type = ARG_CONST_MAP_PTR,
6564 + .arg3_type = ARG_ANYTHING,
6565 +- .arg4_type = ARG_PTR_TO_MEM,
6566 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6567 + .arg5_type = ARG_CONST_SIZE_OR_ZERO,
6568 + };
6569 +
6570 +@@ -667,7 +667,7 @@ BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
6571 + const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
6572 + .func = bpf_per_cpu_ptr,
6573 + .gpl_only = false,
6574 +- .ret_type = RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
6575 ++ .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
6576 + .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
6577 + .arg2_type = ARG_ANYTHING,
6578 + };
6579 +@@ -680,7 +680,7 @@ BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
6580 + const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
6581 + .func = bpf_this_cpu_ptr,
6582 + .gpl_only = false,
6583 +- .ret_type = RET_PTR_TO_MEM_OR_BTF_ID,
6584 ++ .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
6585 + .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
6586 + };
6587 +
6588 +@@ -1011,7 +1011,7 @@ const struct bpf_func_proto bpf_snprintf_proto = {
6589 + .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
6590 + .arg2_type = ARG_CONST_SIZE_OR_ZERO,
6591 + .arg3_type = ARG_PTR_TO_CONST_STR,
6592 +- .arg4_type = ARG_PTR_TO_MEM_OR_NULL,
6593 ++ .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
6594 + .arg5_type = ARG_CONST_SIZE_OR_ZERO,
6595 + };
6596 +
6597 +diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c
6598 +index 6a9542af4212a..b0fa190b09790 100644
6599 +--- a/kernel/bpf/map_iter.c
6600 ++++ b/kernel/bpf/map_iter.c
6601 +@@ -174,9 +174,9 @@ static const struct bpf_iter_reg bpf_map_elem_reg_info = {
6602 + .ctx_arg_info_size = 2,
6603 + .ctx_arg_info = {
6604 + { offsetof(struct bpf_iter__bpf_map_elem, key),
6605 +- PTR_TO_RDONLY_BUF_OR_NULL },
6606 ++ PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
6607 + { offsetof(struct bpf_iter__bpf_map_elem, value),
6608 +- PTR_TO_RDWR_BUF_OR_NULL },
6609 ++ PTR_TO_BUF | PTR_MAYBE_NULL },
6610 + },
6611 + };
6612 +
6613 +diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
6614 +index f1c51c45667d3..710ba9de12ce4 100644
6615 +--- a/kernel/bpf/ringbuf.c
6616 ++++ b/kernel/bpf/ringbuf.c
6617 +@@ -444,7 +444,7 @@ const struct bpf_func_proto bpf_ringbuf_output_proto = {
6618 + .func = bpf_ringbuf_output,
6619 + .ret_type = RET_INTEGER,
6620 + .arg1_type = ARG_CONST_MAP_PTR,
6621 +- .arg2_type = ARG_PTR_TO_MEM,
6622 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6623 + .arg3_type = ARG_CONST_SIZE_OR_ZERO,
6624 + .arg4_type = ARG_ANYTHING,
6625 + };
6626 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
6627 +index 1033ee8c0caf0..4c6c2c2137458 100644
6628 +--- a/kernel/bpf/syscall.c
6629 ++++ b/kernel/bpf/syscall.c
6630 +@@ -4772,7 +4772,7 @@ static const struct bpf_func_proto bpf_sys_bpf_proto = {
6631 + .gpl_only = false,
6632 + .ret_type = RET_INTEGER,
6633 + .arg1_type = ARG_ANYTHING,
6634 +- .arg2_type = ARG_PTR_TO_MEM,
6635 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6636 + .arg3_type = ARG_CONST_SIZE,
6637 + };
6638 +
6639 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
6640 +index 6b987407752ab..40d92628e2f97 100644
6641 +--- a/kernel/bpf/verifier.c
6642 ++++ b/kernel/bpf/verifier.c
6643 +@@ -439,18 +439,6 @@ static bool reg_type_not_null(enum bpf_reg_type type)
6644 + type == PTR_TO_SOCK_COMMON;
6645 + }
6646 +
6647 +-static bool reg_type_may_be_null(enum bpf_reg_type type)
6648 +-{
6649 +- return type == PTR_TO_MAP_VALUE_OR_NULL ||
6650 +- type == PTR_TO_SOCKET_OR_NULL ||
6651 +- type == PTR_TO_SOCK_COMMON_OR_NULL ||
6652 +- type == PTR_TO_TCP_SOCK_OR_NULL ||
6653 +- type == PTR_TO_BTF_ID_OR_NULL ||
6654 +- type == PTR_TO_MEM_OR_NULL ||
6655 +- type == PTR_TO_RDONLY_BUF_OR_NULL ||
6656 +- type == PTR_TO_RDWR_BUF_OR_NULL;
6657 +-}
6658 +-
6659 + static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
6660 + {
6661 + return reg->type == PTR_TO_MAP_VALUE &&
6662 +@@ -459,12 +447,14 @@ static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
6663 +
6664 + static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
6665 + {
6666 +- return type == PTR_TO_SOCKET ||
6667 +- type == PTR_TO_SOCKET_OR_NULL ||
6668 +- type == PTR_TO_TCP_SOCK ||
6669 +- type == PTR_TO_TCP_SOCK_OR_NULL ||
6670 +- type == PTR_TO_MEM ||
6671 +- type == PTR_TO_MEM_OR_NULL;
6672 ++ return base_type(type) == PTR_TO_SOCKET ||
6673 ++ base_type(type) == PTR_TO_TCP_SOCK ||
6674 ++ base_type(type) == PTR_TO_MEM;
6675 ++}
6676 ++
6677 ++static bool type_is_rdonly_mem(u32 type)
6678 ++{
6679 ++ return type & MEM_RDONLY;
6680 + }
6681 +
6682 + static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
6683 +@@ -472,14 +462,9 @@ static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
6684 + return type == ARG_PTR_TO_SOCK_COMMON;
6685 + }
6686 +
6687 +-static bool arg_type_may_be_null(enum bpf_arg_type type)
6688 ++static bool type_may_be_null(u32 type)
6689 + {
6690 +- return type == ARG_PTR_TO_MAP_VALUE_OR_NULL ||
6691 +- type == ARG_PTR_TO_MEM_OR_NULL ||
6692 +- type == ARG_PTR_TO_CTX_OR_NULL ||
6693 +- type == ARG_PTR_TO_SOCKET_OR_NULL ||
6694 +- type == ARG_PTR_TO_ALLOC_MEM_OR_NULL ||
6695 +- type == ARG_PTR_TO_STACK_OR_NULL;
6696 ++ return type & PTR_MAYBE_NULL;
6697 + }
6698 +
6699 + /* Determine whether the function releases some resources allocated by another
6700 +@@ -539,39 +524,54 @@ static bool is_cmpxchg_insn(const struct bpf_insn *insn)
6701 + insn->imm == BPF_CMPXCHG;
6702 + }
6703 +
6704 +-/* string representation of 'enum bpf_reg_type' */
6705 +-static const char * const reg_type_str[] = {
6706 +- [NOT_INIT] = "?",
6707 +- [SCALAR_VALUE] = "inv",
6708 +- [PTR_TO_CTX] = "ctx",
6709 +- [CONST_PTR_TO_MAP] = "map_ptr",
6710 +- [PTR_TO_MAP_VALUE] = "map_value",
6711 +- [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
6712 +- [PTR_TO_STACK] = "fp",
6713 +- [PTR_TO_PACKET] = "pkt",
6714 +- [PTR_TO_PACKET_META] = "pkt_meta",
6715 +- [PTR_TO_PACKET_END] = "pkt_end",
6716 +- [PTR_TO_FLOW_KEYS] = "flow_keys",
6717 +- [PTR_TO_SOCKET] = "sock",
6718 +- [PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
6719 +- [PTR_TO_SOCK_COMMON] = "sock_common",
6720 +- [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
6721 +- [PTR_TO_TCP_SOCK] = "tcp_sock",
6722 +- [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
6723 +- [PTR_TO_TP_BUFFER] = "tp_buffer",
6724 +- [PTR_TO_XDP_SOCK] = "xdp_sock",
6725 +- [PTR_TO_BTF_ID] = "ptr_",
6726 +- [PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_",
6727 +- [PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_",
6728 +- [PTR_TO_MEM] = "mem",
6729 +- [PTR_TO_MEM_OR_NULL] = "mem_or_null",
6730 +- [PTR_TO_RDONLY_BUF] = "rdonly_buf",
6731 +- [PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null",
6732 +- [PTR_TO_RDWR_BUF] = "rdwr_buf",
6733 +- [PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null",
6734 +- [PTR_TO_FUNC] = "func",
6735 +- [PTR_TO_MAP_KEY] = "map_key",
6736 +-};
6737 ++/* string representation of 'enum bpf_reg_type'
6738 ++ *
6739 ++ * Note that reg_type_str() can not appear more than once in a single verbose()
6740 ++ * statement.
6741 ++ */
6742 ++static const char *reg_type_str(struct bpf_verifier_env *env,
6743 ++ enum bpf_reg_type type)
6744 ++{
6745 ++ char postfix[16] = {0}, prefix[16] = {0};
6746 ++ static const char * const str[] = {
6747 ++ [NOT_INIT] = "?",
6748 ++ [SCALAR_VALUE] = "inv",
6749 ++ [PTR_TO_CTX] = "ctx",
6750 ++ [CONST_PTR_TO_MAP] = "map_ptr",
6751 ++ [PTR_TO_MAP_VALUE] = "map_value",
6752 ++ [PTR_TO_STACK] = "fp",
6753 ++ [PTR_TO_PACKET] = "pkt",
6754 ++ [PTR_TO_PACKET_META] = "pkt_meta",
6755 ++ [PTR_TO_PACKET_END] = "pkt_end",
6756 ++ [PTR_TO_FLOW_KEYS] = "flow_keys",
6757 ++ [PTR_TO_SOCKET] = "sock",
6758 ++ [PTR_TO_SOCK_COMMON] = "sock_common",
6759 ++ [PTR_TO_TCP_SOCK] = "tcp_sock",
6760 ++ [PTR_TO_TP_BUFFER] = "tp_buffer",
6761 ++ [PTR_TO_XDP_SOCK] = "xdp_sock",
6762 ++ [PTR_TO_BTF_ID] = "ptr_",
6763 ++ [PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_",
6764 ++ [PTR_TO_MEM] = "mem",
6765 ++ [PTR_TO_BUF] = "buf",
6766 ++ [PTR_TO_FUNC] = "func",
6767 ++ [PTR_TO_MAP_KEY] = "map_key",
6768 ++ };
6769 ++
6770 ++ if (type & PTR_MAYBE_NULL) {
6771 ++ if (base_type(type) == PTR_TO_BTF_ID ||
6772 ++ base_type(type) == PTR_TO_PERCPU_BTF_ID)
6773 ++ strncpy(postfix, "or_null_", 16);
6774 ++ else
6775 ++ strncpy(postfix, "_or_null", 16);
6776 ++ }
6777 ++
6778 ++ if (type & MEM_RDONLY)
6779 ++ strncpy(prefix, "rdonly_", 16);
6780 ++
6781 ++ snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
6782 ++ prefix, str[base_type(type)], postfix);
6783 ++ return env->type_str_buf;
6784 ++}
6785 +
6786 + static char slot_type_char[] = {
6787 + [STACK_INVALID] = '?',
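/*
 * reg_type_str() above formats into the single type_str_buf added to
 * bpf_verifier_env, which is what its "can not appear more than once
 * in a single verbose() statement" warning means: two calls in one
 * statement both return the same buffer, so one result clobbers the
 * other (a later hunk splits a verbose() call in two for exactly this
 * reason). Standalone demonstration of the aliasing:
 */
#include <stdio.h>

static char type_str_buf[64];

static const char *type_str(const char *name)
{
        snprintf(type_str_buf, sizeof(type_str_buf), "%s", name);
        return type_str_buf;
}

int main(void)
{
        /* both arguments alias type_str_buf: the same string prints
         * twice, whichever call happened to run last */
        printf("type=%s expected=%s\n",
               type_str("map_value"), type_str("fp"));
        return 0;
}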
6788 +@@ -636,7 +636,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
6789 + continue;
6790 + verbose(env, " R%d", i);
6791 + print_liveness(env, reg->live);
6792 +- verbose(env, "=%s", reg_type_str[t]);
6793 ++ verbose(env, "=%s", reg_type_str(env, t));
6794 + if (t == SCALAR_VALUE && reg->precise)
6795 + verbose(env, "P");
6796 + if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
6797 +@@ -644,9 +644,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
6798 + /* reg->off should be 0 for SCALAR_VALUE */
6799 + verbose(env, "%lld", reg->var_off.value + reg->off);
6800 + } else {
6801 +- if (t == PTR_TO_BTF_ID ||
6802 +- t == PTR_TO_BTF_ID_OR_NULL ||
6803 +- t == PTR_TO_PERCPU_BTF_ID)
6804 ++ if (base_type(t) == PTR_TO_BTF_ID ||
6805 ++ base_type(t) == PTR_TO_PERCPU_BTF_ID)
6806 + verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
6807 + verbose(env, "(id=%d", reg->id);
6808 + if (reg_type_may_be_refcounted_or_null(t))
6809 +@@ -655,10 +654,9 @@ static void print_verifier_state(struct bpf_verifier_env *env,
6810 + verbose(env, ",off=%d", reg->off);
6811 + if (type_is_pkt_pointer(t))
6812 + verbose(env, ",r=%d", reg->range);
6813 +- else if (t == CONST_PTR_TO_MAP ||
6814 +- t == PTR_TO_MAP_KEY ||
6815 +- t == PTR_TO_MAP_VALUE ||
6816 +- t == PTR_TO_MAP_VALUE_OR_NULL)
6817 ++ else if (base_type(t) == CONST_PTR_TO_MAP ||
6818 ++ base_type(t) == PTR_TO_MAP_KEY ||
6819 ++ base_type(t) == PTR_TO_MAP_VALUE)
6820 + verbose(env, ",ks=%d,vs=%d",
6821 + reg->map_ptr->key_size,
6822 + reg->map_ptr->value_size);
6823 +@@ -728,7 +726,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
6824 + if (is_spilled_reg(&state->stack[i])) {
6825 + reg = &state->stack[i].spilled_ptr;
6826 + t = reg->type;
6827 +- verbose(env, "=%s", reg_type_str[t]);
6828 ++ verbose(env, "=%s", reg_type_str(env, t));
6829 + if (t == SCALAR_VALUE && reg->precise)
6830 + verbose(env, "P");
6831 + if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
6832 +@@ -1141,8 +1139,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env,
6833 +
6834 + static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
6835 + {
6836 +- switch (reg->type) {
6837 +- case PTR_TO_MAP_VALUE_OR_NULL: {
6838 ++ if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
6839 + const struct bpf_map *map = reg->map_ptr;
6840 +
6841 + if (map->inner_map_meta) {
6842 +@@ -1161,32 +1158,10 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
6843 + } else {
6844 + reg->type = PTR_TO_MAP_VALUE;
6845 + }
6846 +- break;
6847 +- }
6848 +- case PTR_TO_SOCKET_OR_NULL:
6849 +- reg->type = PTR_TO_SOCKET;
6850 +- break;
6851 +- case PTR_TO_SOCK_COMMON_OR_NULL:
6852 +- reg->type = PTR_TO_SOCK_COMMON;
6853 +- break;
6854 +- case PTR_TO_TCP_SOCK_OR_NULL:
6855 +- reg->type = PTR_TO_TCP_SOCK;
6856 +- break;
6857 +- case PTR_TO_BTF_ID_OR_NULL:
6858 +- reg->type = PTR_TO_BTF_ID;
6859 +- break;
6860 +- case PTR_TO_MEM_OR_NULL:
6861 +- reg->type = PTR_TO_MEM;
6862 +- break;
6863 +- case PTR_TO_RDONLY_BUF_OR_NULL:
6864 +- reg->type = PTR_TO_RDONLY_BUF;
6865 +- break;
6866 +- case PTR_TO_RDWR_BUF_OR_NULL:
6867 +- reg->type = PTR_TO_RDWR_BUF;
6868 +- break;
6869 +- default:
6870 +- WARN_ONCE(1, "unknown nullable register type");
6871 ++ return;
6872 + }
6873 ++
6874 ++ reg->type &= ~PTR_MAYBE_NULL;
6875 + }
6876 +
6877 + static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
6878 +@@ -2047,7 +2022,7 @@ static int mark_reg_read(struct bpf_verifier_env *env,
6879 + break;
6880 + if (parent->live & REG_LIVE_DONE) {
6881 + verbose(env, "verifier BUG type %s var_off %lld off %d\n",
6882 +- reg_type_str[parent->type],
6883 ++ reg_type_str(env, parent->type),
6884 + parent->var_off.value, parent->off);
6885 + return -EFAULT;
6886 + }
6887 +@@ -2706,9 +2681,8 @@ static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
6888 +
6889 + static bool is_spillable_regtype(enum bpf_reg_type type)
6890 + {
6891 +- switch (type) {
6892 ++ switch (base_type(type)) {
6893 + case PTR_TO_MAP_VALUE:
6894 +- case PTR_TO_MAP_VALUE_OR_NULL:
6895 + case PTR_TO_STACK:
6896 + case PTR_TO_CTX:
6897 + case PTR_TO_PACKET:
6898 +@@ -2717,21 +2691,13 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
6899 + case PTR_TO_FLOW_KEYS:
6900 + case CONST_PTR_TO_MAP:
6901 + case PTR_TO_SOCKET:
6902 +- case PTR_TO_SOCKET_OR_NULL:
6903 + case PTR_TO_SOCK_COMMON:
6904 +- case PTR_TO_SOCK_COMMON_OR_NULL:
6905 + case PTR_TO_TCP_SOCK:
6906 +- case PTR_TO_TCP_SOCK_OR_NULL:
6907 + case PTR_TO_XDP_SOCK:
6908 + case PTR_TO_BTF_ID:
6909 +- case PTR_TO_BTF_ID_OR_NULL:
6910 +- case PTR_TO_RDONLY_BUF:
6911 +- case PTR_TO_RDONLY_BUF_OR_NULL:
6912 +- case PTR_TO_RDWR_BUF:
6913 +- case PTR_TO_RDWR_BUF_OR_NULL:
6914 ++ case PTR_TO_BUF:
6915 + case PTR_TO_PERCPU_BTF_ID:
6916 + case PTR_TO_MEM:
6917 +- case PTR_TO_MEM_OR_NULL:
6918 + case PTR_TO_FUNC:
6919 + case PTR_TO_MAP_KEY:
6920 + return true;
6921 +@@ -3572,7 +3538,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
6922 + */
6923 + *reg_type = info.reg_type;
6924 +
6925 +- if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) {
6926 ++ if (base_type(*reg_type) == PTR_TO_BTF_ID) {
6927 + *btf = info.btf;
6928 + *btf_id = info.btf_id;
6929 + } else {
6930 +@@ -3640,7 +3606,7 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
6931 + }
6932 +
6933 + verbose(env, "R%d invalid %s access off=%d size=%d\n",
6934 +- regno, reg_type_str[reg->type], off, size);
6935 ++ regno, reg_type_str(env, reg->type), off, size);
6936 +
6937 + return -EACCES;
6938 + }
6939 +@@ -4367,15 +4333,30 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
6940 + mark_reg_unknown(env, regs, value_regno);
6941 + }
6942 + }
6943 +- } else if (reg->type == PTR_TO_MEM) {
6944 ++ } else if (base_type(reg->type) == PTR_TO_MEM) {
6945 ++ bool rdonly_mem = type_is_rdonly_mem(reg->type);
6946 ++
6947 ++ if (type_may_be_null(reg->type)) {
6948 ++ verbose(env, "R%d invalid mem access '%s'\n", regno,
6949 ++ reg_type_str(env, reg->type));
6950 ++ return -EACCES;
6951 ++ }
6952 ++
6953 ++ if (t == BPF_WRITE && rdonly_mem) {
6954 ++ verbose(env, "R%d cannot write into %s\n",
6955 ++ regno, reg_type_str(env, reg->type));
6956 ++ return -EACCES;
6957 ++ }
6958 ++
6959 + if (t == BPF_WRITE && value_regno >= 0 &&
6960 + is_pointer_value(env, value_regno)) {
6961 + verbose(env, "R%d leaks addr into mem\n", value_regno);
6962 + return -EACCES;
6963 + }
6964 ++
6965 + err = check_mem_region_access(env, regno, off, size,
6966 + reg->mem_size, false);
6967 +- if (!err && t == BPF_READ && value_regno >= 0)
6968 ++ if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
6969 + mark_reg_unknown(env, regs, value_regno);
6970 + } else if (reg->type == PTR_TO_CTX) {
6971 + enum bpf_reg_type reg_type = SCALAR_VALUE;
6972 +@@ -4405,7 +4386,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
6973 + } else {
6974 + mark_reg_known_zero(env, regs,
6975 + value_regno);
6976 +- if (reg_type_may_be_null(reg_type))
6977 ++ if (type_may_be_null(reg_type))
6978 + regs[value_regno].id = ++env->id_gen;
6979 + /* A load of ctx field could have different
6980 + * actual load size with the one encoded in the
6981 +@@ -4413,8 +4394,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
6982 + * a sub-register.
6983 + */
6984 + regs[value_regno].subreg_def = DEF_NOT_SUBREG;
6985 +- if (reg_type == PTR_TO_BTF_ID ||
6986 +- reg_type == PTR_TO_BTF_ID_OR_NULL) {
6987 ++ if (base_type(reg_type) == PTR_TO_BTF_ID) {
6988 + regs[value_regno].btf = btf;
6989 + regs[value_regno].btf_id = btf_id;
6990 + }
6991 +@@ -4467,7 +4447,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
6992 + } else if (type_is_sk_pointer(reg->type)) {
6993 + if (t == BPF_WRITE) {
6994 + verbose(env, "R%d cannot write into %s\n",
6995 +- regno, reg_type_str[reg->type]);
6996 ++ regno, reg_type_str(env, reg->type));
6997 + return -EACCES;
6998 + }
6999 + err = check_sock_access(env, insn_idx, regno, off, size, t);
7000 +@@ -4483,26 +4463,32 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
7001 + } else if (reg->type == CONST_PTR_TO_MAP) {
7002 + err = check_ptr_to_map_access(env, regs, regno, off, size, t,
7003 + value_regno);
7004 +- } else if (reg->type == PTR_TO_RDONLY_BUF) {
7005 +- if (t == BPF_WRITE) {
7006 +- verbose(env, "R%d cannot write into %s\n",
7007 +- regno, reg_type_str[reg->type]);
7008 +- return -EACCES;
7009 ++ } else if (base_type(reg->type) == PTR_TO_BUF) {
7010 ++ bool rdonly_mem = type_is_rdonly_mem(reg->type);
7011 ++ const char *buf_info;
7012 ++ u32 *max_access;
7013 ++
7014 ++ if (rdonly_mem) {
7015 ++ if (t == BPF_WRITE) {
7016 ++ verbose(env, "R%d cannot write into %s\n",
7017 ++ regno, reg_type_str(env, reg->type));
7018 ++ return -EACCES;
7019 ++ }
7020 ++ buf_info = "rdonly";
7021 ++ max_access = &env->prog->aux->max_rdonly_access;
7022 ++ } else {
7023 ++ buf_info = "rdwr";
7024 ++ max_access = &env->prog->aux->max_rdwr_access;
7025 + }
7026 ++
7027 + err = check_buffer_access(env, reg, regno, off, size, false,
7028 +- "rdonly",
7029 +- &env->prog->aux->max_rdonly_access);
7030 +- if (!err && value_regno >= 0)
7031 +- mark_reg_unknown(env, regs, value_regno);
7032 +- } else if (reg->type == PTR_TO_RDWR_BUF) {
7033 +- err = check_buffer_access(env, reg, regno, off, size, false,
7034 +- "rdwr",
7035 +- &env->prog->aux->max_rdwr_access);
7036 +- if (!err && t == BPF_READ && value_regno >= 0)
7037 ++ buf_info, max_access);
7038 ++
7039 ++ if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
7040 + mark_reg_unknown(env, regs, value_regno);
7041 + } else {
7042 + verbose(env, "R%d invalid mem access '%s'\n", regno,
7043 +- reg_type_str[reg->type]);
7044 ++ reg_type_str(env, reg->type));
7045 + return -EACCES;
7046 + }
7047 +
7048 +@@ -4576,7 +4562,7 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
7049 + is_sk_reg(env, insn->dst_reg)) {
7050 + verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
7051 + insn->dst_reg,
7052 +- reg_type_str[reg_state(env, insn->dst_reg)->type]);
7053 ++ reg_type_str(env, reg_state(env, insn->dst_reg)->type));
7054 + return -EACCES;
7055 + }
7056 +
7057 +@@ -4759,8 +4745,10 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
7058 + struct bpf_call_arg_meta *meta)
7059 + {
7060 + struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7061 ++ const char *buf_info;
7062 ++ u32 *max_access;
7063 +
7064 +- switch (reg->type) {
7065 ++ switch (base_type(reg->type)) {
7066 + case PTR_TO_PACKET:
7067 + case PTR_TO_PACKET_META:
7068 + return check_packet_access(env, regno, reg->off, access_size,
7069 +@@ -4779,18 +4767,20 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
7070 + return check_mem_region_access(env, regno, reg->off,
7071 + access_size, reg->mem_size,
7072 + zero_size_allowed);
7073 +- case PTR_TO_RDONLY_BUF:
7074 +- if (meta && meta->raw_mode)
7075 +- return -EACCES;
7076 +- return check_buffer_access(env, reg, regno, reg->off,
7077 +- access_size, zero_size_allowed,
7078 +- "rdonly",
7079 +- &env->prog->aux->max_rdonly_access);
7080 +- case PTR_TO_RDWR_BUF:
7081 ++ case PTR_TO_BUF:
7082 ++ if (type_is_rdonly_mem(reg->type)) {
7083 ++ if (meta && meta->raw_mode)
7084 ++ return -EACCES;
7085 ++
7086 ++ buf_info = "rdonly";
7087 ++ max_access = &env->prog->aux->max_rdonly_access;
7088 ++ } else {
7089 ++ buf_info = "rdwr";
7090 ++ max_access = &env->prog->aux->max_rdwr_access;
7091 ++ }
7092 + return check_buffer_access(env, reg, regno, reg->off,
7093 + access_size, zero_size_allowed,
7094 +- "rdwr",
7095 +- &env->prog->aux->max_rdwr_access);
7096 ++ buf_info, max_access);
7097 + case PTR_TO_STACK:
7098 + return check_stack_range_initialized(
7099 + env,
7100 +@@ -4802,9 +4792,9 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
7101 + register_is_null(reg))
7102 + return 0;
7103 +
7104 +- verbose(env, "R%d type=%s expected=%s\n", regno,
7105 +- reg_type_str[reg->type],
7106 +- reg_type_str[PTR_TO_STACK]);
7107 ++ verbose(env, "R%d type=%s ", regno,
7108 ++ reg_type_str(env, reg->type));
7109 ++ verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
7110 + return -EACCES;
7111 + }
7112 + }
7113 +@@ -4815,7 +4805,7 @@ int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
7114 + if (register_is_null(reg))
7115 + return 0;
7116 +
7117 +- if (reg_type_may_be_null(reg->type)) {
7118 ++ if (type_may_be_null(reg->type)) {
7119 + /* Assuming that the register contains a value check if the memory
7120 + * access is safe. Temporarily save and restore the register's state as
7121 + * the conversion shouldn't be visible to a caller.
7122 +@@ -4963,9 +4953,8 @@ static int process_timer_func(struct bpf_verifier_env *env, int regno,
7123 +
7124 + static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
7125 + {
7126 +- return type == ARG_PTR_TO_MEM ||
7127 +- type == ARG_PTR_TO_MEM_OR_NULL ||
7128 +- type == ARG_PTR_TO_UNINIT_MEM;
7129 ++ return base_type(type) == ARG_PTR_TO_MEM ||
7130 ++ base_type(type) == ARG_PTR_TO_UNINIT_MEM;
7131 + }
7132 +
7133 + static bool arg_type_is_mem_size(enum bpf_arg_type type)
7134 +@@ -5070,8 +5059,7 @@ static const struct bpf_reg_types mem_types = {
7135 + PTR_TO_MAP_KEY,
7136 + PTR_TO_MAP_VALUE,
7137 + PTR_TO_MEM,
7138 +- PTR_TO_RDONLY_BUF,
7139 +- PTR_TO_RDWR_BUF,
7140 ++ PTR_TO_BUF,
7141 + },
7142 + };
7143 +
7144 +@@ -5102,31 +5090,26 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
7145 + [ARG_PTR_TO_MAP_KEY] = &map_key_value_types,
7146 + [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types,
7147 + [ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types,
7148 +- [ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types,
7149 + [ARG_CONST_SIZE] = &scalar_types,
7150 + [ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
7151 + [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
7152 + [ARG_CONST_MAP_PTR] = &const_map_ptr_types,
7153 + [ARG_PTR_TO_CTX] = &context_types,
7154 +- [ARG_PTR_TO_CTX_OR_NULL] = &context_types,
7155 + [ARG_PTR_TO_SOCK_COMMON] = &sock_types,
7156 + #ifdef CONFIG_NET
7157 + [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types,
7158 + #endif
7159 + [ARG_PTR_TO_SOCKET] = &fullsock_types,
7160 +- [ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types,
7161 + [ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
7162 + [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
7163 + [ARG_PTR_TO_MEM] = &mem_types,
7164 +- [ARG_PTR_TO_MEM_OR_NULL] = &mem_types,
7165 + [ARG_PTR_TO_UNINIT_MEM] = &mem_types,
7166 + [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types,
7167 +- [ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types,
7168 + [ARG_PTR_TO_INT] = &int_ptr_types,
7169 + [ARG_PTR_TO_LONG] = &int_ptr_types,
7170 + [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types,
7171 + [ARG_PTR_TO_FUNC] = &func_ptr_types,
7172 +- [ARG_PTR_TO_STACK_OR_NULL] = &stack_ptr_types,
7173 ++ [ARG_PTR_TO_STACK] = &stack_ptr_types,
7174 + [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types,
7175 + [ARG_PTR_TO_TIMER] = &timer_types,
7176 + };
7177 +@@ -5140,12 +5123,27 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
7178 + const struct bpf_reg_types *compatible;
7179 + int i, j;
7180 +
7181 +- compatible = compatible_reg_types[arg_type];
7182 ++ compatible = compatible_reg_types[base_type(arg_type)];
7183 + if (!compatible) {
7184 + verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
7185 + return -EFAULT;
7186 + }
7187 +
7188 ++ /* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
7189 ++ * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
7190 ++ *
7191 ++ * Same for MAYBE_NULL:
7192 ++ *
7193 ++ * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
7194 ++ * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
7195 ++ *
7196 ++ * Therefore we fold these flags depending on the arg_type before comparison.
7197 ++ */
7198 ++ if (arg_type & MEM_RDONLY)
7199 ++ type &= ~MEM_RDONLY;
7200 ++ if (arg_type & PTR_MAYBE_NULL)
7201 ++ type &= ~PTR_MAYBE_NULL;
7202 ++
7203 + for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
7204 + expected = compatible->types[i];
7205 + if (expected == NOT_INIT)
7206 +@@ -5155,14 +5153,14 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
7207 + goto found;
7208 + }
7209 +
7210 +- verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]);
7211 ++ verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
7212 + for (j = 0; j + 1 < i; j++)
7213 +- verbose(env, "%s, ", reg_type_str[compatible->types[j]]);
7214 +- verbose(env, "%s\n", reg_type_str[compatible->types[j]]);
7215 ++ verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
7216 ++ verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
7217 + return -EACCES;
7218 +
7219 + found:
7220 +- if (type == PTR_TO_BTF_ID) {
7221 ++ if (reg->type == PTR_TO_BTF_ID) {
7222 + if (!arg_btf_id) {
7223 + if (!compatible->btf_id) {
7224 + verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
7225 +@@ -5221,15 +5219,14 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
7226 + return -EACCES;
7227 + }
7228 +
7229 +- if (arg_type == ARG_PTR_TO_MAP_VALUE ||
7230 +- arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
7231 +- arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
7232 ++ if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE ||
7233 ++ base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) {
7234 + err = resolve_map_arg_type(env, meta, &arg_type);
7235 + if (err)
7236 + return err;
7237 + }
7238 +
7239 +- if (register_is_null(reg) && arg_type_may_be_null(arg_type))
7240 ++ if (register_is_null(reg) && type_may_be_null(arg_type))
7241 + /* A NULL register has a SCALAR_VALUE type, so skip
7242 + * type checking.
7243 + */
7244 +@@ -5298,10 +5295,11 @@ skip_type_check:
7245 + err = check_helper_mem_access(env, regno,
7246 + meta->map_ptr->key_size, false,
7247 + NULL);
7248 +- } else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
7249 +- (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL &&
7250 +- !register_is_null(reg)) ||
7251 +- arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
7252 ++ } else if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE ||
7253 ++ base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) {
7254 ++ if (type_may_be_null(arg_type) && register_is_null(reg))
7255 ++ return 0;
7256 ++
7257 + /* bpf_map_xxx(..., map_ptr, ..., value) call:
7258 + * check [value, value + map->value_size) validity
7259 + */
7260 +@@ -6386,6 +6384,8 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
7261 + int *insn_idx_p)
7262 + {
7263 + const struct bpf_func_proto *fn = NULL;
7264 ++ enum bpf_return_type ret_type;
7265 ++ enum bpf_type_flag ret_flag;
7266 + struct bpf_reg_state *regs;
7267 + struct bpf_call_arg_meta meta;
7268 + int insn_idx = *insn_idx_p;
7269 +@@ -6519,13 +6519,14 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
7270 + regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
7271 +
7272 + /* update return register (already marked as written above) */
7273 +- if (fn->ret_type == RET_INTEGER) {
7274 ++ ret_type = fn->ret_type;
7275 ++ ret_flag = type_flag(fn->ret_type);
7276 ++ if (ret_type == RET_INTEGER) {
7277 + /* sets type to SCALAR_VALUE */
7278 + mark_reg_unknown(env, regs, BPF_REG_0);
7279 +- } else if (fn->ret_type == RET_VOID) {
7280 ++ } else if (ret_type == RET_VOID) {
7281 + regs[BPF_REG_0].type = NOT_INIT;
7282 +- } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
7283 +- fn->ret_type == RET_PTR_TO_MAP_VALUE) {
7284 ++ } else if (base_type(ret_type) == RET_PTR_TO_MAP_VALUE) {
7285 + /* There is no offset yet applied, variable or fixed */
7286 + mark_reg_known_zero(env, regs, BPF_REG_0);
7287 + /* remember map_ptr, so that check_map_access()
7288 +@@ -6539,28 +6540,25 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
7289 + }
7290 + regs[BPF_REG_0].map_ptr = meta.map_ptr;
7291 + regs[BPF_REG_0].map_uid = meta.map_uid;
7292 +- if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
7293 +- regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
7294 +- if (map_value_has_spin_lock(meta.map_ptr))
7295 +- regs[BPF_REG_0].id = ++env->id_gen;
7296 +- } else {
7297 +- regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
7298 ++ regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
7299 ++ if (!type_may_be_null(ret_type) &&
7300 ++ map_value_has_spin_lock(meta.map_ptr)) {
7301 ++ regs[BPF_REG_0].id = ++env->id_gen;
7302 + }
7303 +- } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
7304 ++ } else if (base_type(ret_type) == RET_PTR_TO_SOCKET) {
7305 + mark_reg_known_zero(env, regs, BPF_REG_0);
7306 +- regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
7307 +- } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
7308 ++ regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
7309 ++ } else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) {
7310 + mark_reg_known_zero(env, regs, BPF_REG_0);
7311 +- regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
7312 +- } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
7313 ++ regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
7314 ++ } else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) {
7315 + mark_reg_known_zero(env, regs, BPF_REG_0);
7316 +- regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
7317 +- } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) {
7318 ++ regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
7319 ++ } else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) {
7320 + mark_reg_known_zero(env, regs, BPF_REG_0);
7321 +- regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
7322 ++ regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
7323 + regs[BPF_REG_0].mem_size = meta.mem_size;
7324 +- } else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL ||
7325 +- fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) {
7326 ++ } else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) {
7327 + const struct btf_type *t;
7328 +
7329 + mark_reg_known_zero(env, regs, BPF_REG_0);
7330 +@@ -6578,29 +6576,30 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
7331 + tname, PTR_ERR(ret));
7332 + return -EINVAL;
7333 + }
7334 +- regs[BPF_REG_0].type =
7335 +- fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
7336 +- PTR_TO_MEM : PTR_TO_MEM_OR_NULL;
7337 ++ regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
7338 + regs[BPF_REG_0].mem_size = tsize;
7339 + } else {
7340 +- regs[BPF_REG_0].type =
7341 +- fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
7342 +- PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL;
7343 ++ /* MEM_RDONLY may be carried from ret_flag, but it
7344 ++ * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
7345 ++ * it will confuse the check of PTR_TO_BTF_ID in
7346 ++ * check_mem_access().
7347 ++ */
7348 ++ ret_flag &= ~MEM_RDONLY;
7349 ++
7350 ++ regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
7351 + regs[BPF_REG_0].btf = meta.ret_btf;
7352 + regs[BPF_REG_0].btf_id = meta.ret_btf_id;
7353 + }
7354 +- } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL ||
7355 +- fn->ret_type == RET_PTR_TO_BTF_ID) {
7356 ++ } else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) {
7357 + int ret_btf_id;
7358 +
7359 + mark_reg_known_zero(env, regs, BPF_REG_0);
7360 +- regs[BPF_REG_0].type = fn->ret_type == RET_PTR_TO_BTF_ID ?
7361 +- PTR_TO_BTF_ID :
7362 +- PTR_TO_BTF_ID_OR_NULL;
7363 ++ regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
7364 + ret_btf_id = *fn->ret_btf_id;
7365 + if (ret_btf_id == 0) {
7366 +- verbose(env, "invalid return type %d of func %s#%d\n",
7367 +- fn->ret_type, func_id_name(func_id), func_id);
7368 ++ verbose(env, "invalid return type %u of func %s#%d\n",
7369 ++ base_type(ret_type), func_id_name(func_id),
7370 ++ func_id);
7371 + return -EINVAL;
7372 + }
7373 + /* current BPF helper definitions are only coming from
7374 +@@ -6609,12 +6608,12 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
7375 + regs[BPF_REG_0].btf = btf_vmlinux;
7376 + regs[BPF_REG_0].btf_id = ret_btf_id;
7377 + } else {
7378 +- verbose(env, "unknown return type %d of func %s#%d\n",
7379 +- fn->ret_type, func_id_name(func_id), func_id);
7380 ++ verbose(env, "unknown return type %u of func %s#%d\n",
7381 ++ base_type(ret_type), func_id_name(func_id), func_id);
7382 + return -EINVAL;
7383 + }
7384 +
7385 +- if (reg_type_may_be_null(regs[BPF_REG_0].type))
7386 ++ if (type_may_be_null(regs[BPF_REG_0].type))
7387 + regs[BPF_REG_0].id = ++env->id_gen;
7388 +
7389 + if (is_ptr_cast_function(func_id)) {
7390 +@@ -6823,25 +6822,25 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
7391 +
7392 + if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
7393 + verbose(env, "math between %s pointer and %lld is not allowed\n",
7394 +- reg_type_str[type], val);
7395 ++ reg_type_str(env, type), val);
7396 + return false;
7397 + }
7398 +
7399 + if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
7400 + verbose(env, "%s pointer offset %d is not allowed\n",
7401 +- reg_type_str[type], reg->off);
7402 ++ reg_type_str(env, type), reg->off);
7403 + return false;
7404 + }
7405 +
7406 + if (smin == S64_MIN) {
7407 + verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
7408 +- reg_type_str[type]);
7409 ++ reg_type_str(env, type));
7410 + return false;
7411 + }
7412 +
7413 + if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
7414 + verbose(env, "value %lld makes %s pointer be out of bounds\n",
7415 +- smin, reg_type_str[type]);
7416 ++ smin, reg_type_str(env, type));
7417 + return false;
7418 + }
7419 +
7420 +@@ -7218,11 +7217,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
7421 + return -EACCES;
7422 + }
7423 +
7424 +- switch (ptr_reg->type) {
7425 +- case PTR_TO_MAP_VALUE_OR_NULL:
7426 ++ if (ptr_reg->type & PTR_MAYBE_NULL) {
7427 + verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
7428 +- dst, reg_type_str[ptr_reg->type]);
7429 ++ dst, reg_type_str(env, ptr_reg->type));
7430 + return -EACCES;
7431 ++ }
7432 ++
7433 ++ switch (base_type(ptr_reg->type)) {
7434 + case CONST_PTR_TO_MAP:
7435 + /* smin_val represents the known value */
7436 + if (known && smin_val == 0 && opcode == BPF_ADD)
7437 +@@ -7235,10 +7236,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
7438 + case PTR_TO_XDP_SOCK:
7439 + reject:
7440 + verbose(env, "R%d pointer arithmetic on %s prohibited\n",
7441 +- dst, reg_type_str[ptr_reg->type]);
7442 ++ dst, reg_type_str(env, ptr_reg->type));
7443 + return -EACCES;
7444 + default:
7445 +- if (reg_type_may_be_null(ptr_reg->type))
7446 ++ if (type_may_be_null(ptr_reg->type))
7447 + goto reject;
7448 + break;
7449 + }
7450 +@@ -8960,7 +8961,7 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
7451 + struct bpf_reg_state *reg, u32 id,
7452 + bool is_null)
7453 + {
7454 +- if (reg_type_may_be_null(reg->type) && reg->id == id &&
7455 ++ if (type_may_be_null(reg->type) && reg->id == id &&
7456 + !WARN_ON_ONCE(!reg->id)) {
7457 + if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
7458 + !tnum_equals_const(reg->var_off, 0) ||
7459 +@@ -9338,7 +9339,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
7460 + */
7461 + if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
7462 + insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
7463 +- reg_type_may_be_null(dst_reg->type)) {
7464 ++ type_may_be_null(dst_reg->type)) {
7465 + /* Mark all identical registers in each branch as either
7466 + * safe or unknown depending R == 0 or R != 0 conditional.
7467 + */
7468 +@@ -9397,7 +9398,7 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
7469 +
7470 + if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
7471 + dst_reg->type = aux->btf_var.reg_type;
7472 +- switch (dst_reg->type) {
7473 ++ switch (base_type(dst_reg->type)) {
7474 + case PTR_TO_MEM:
7475 + dst_reg->mem_size = aux->btf_var.mem_size;
7476 + break;
7477 +@@ -9595,7 +9596,7 @@ static int check_return_code(struct bpf_verifier_env *env)
7478 + /* enforce return zero from async callbacks like timer */
7479 + if (reg->type != SCALAR_VALUE) {
7480 + verbose(env, "In async callback the register R0 is not a known value (%s)\n",
7481 +- reg_type_str[reg->type]);
7482 ++ reg_type_str(env, reg->type));
7483 + return -EINVAL;
7484 + }
7485 +
7486 +@@ -9609,7 +9610,7 @@ static int check_return_code(struct bpf_verifier_env *env)
7487 + if (is_subprog) {
7488 + if (reg->type != SCALAR_VALUE) {
7489 + verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
7490 +- reg_type_str[reg->type]);
7491 ++ reg_type_str(env, reg->type));
7492 + return -EINVAL;
7493 + }
7494 + return 0;
7495 +@@ -9673,7 +9674,7 @@ static int check_return_code(struct bpf_verifier_env *env)
7496 +
7497 + if (reg->type != SCALAR_VALUE) {
7498 + verbose(env, "At program exit the register R0 is not a known value (%s)\n",
7499 +- reg_type_str[reg->type]);
7500 ++ reg_type_str(env, reg->type));
7501 + return -EINVAL;
7502 + }
7503 +
7504 +@@ -10454,7 +10455,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
7505 + return true;
7506 + if (rcur->type == NOT_INIT)
7507 + return false;
7508 +- switch (rold->type) {
7509 ++ switch (base_type(rold->type)) {
7510 + case SCALAR_VALUE:
7511 + if (env->explore_alu_limits)
7512 + return false;
7513 +@@ -10476,6 +10477,22 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
7514 + }
7515 + case PTR_TO_MAP_KEY:
7516 + case PTR_TO_MAP_VALUE:
7517 ++ /* a PTR_TO_MAP_VALUE could be safe to use as a
7518 ++ * PTR_TO_MAP_VALUE_OR_NULL into the same map.
7519 ++ * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
7520 ++ * checked, doing so could have affected others with the same
7521 ++ * id, and we can't check for that because we lost the id when
7522 ++ * we converted to a PTR_TO_MAP_VALUE.
7523 ++ */
7524 ++ if (type_may_be_null(rold->type)) {
7525 ++ if (!type_may_be_null(rcur->type))
7526 ++ return false;
7527 ++ if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
7528 ++ return false;
7529 ++ /* Check our ids match any regs they're supposed to */
7530 ++ return check_ids(rold->id, rcur->id, idmap);
7531 ++ }
7532 ++
7533 + /* If the new min/max/var_off satisfy the old ones and
7534 + * everything else matches, we are OK.
7535 + * 'id' is not compared, since it's only used for maps with
7536 +@@ -10487,20 +10504,6 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
7537 + return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
7538 + range_within(rold, rcur) &&
7539 + tnum_in(rold->var_off, rcur->var_off);
7540 +- case PTR_TO_MAP_VALUE_OR_NULL:
7541 +- /* a PTR_TO_MAP_VALUE could be safe to use as a
7542 +- * PTR_TO_MAP_VALUE_OR_NULL into the same map.
7543 +- * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
7544 +- * checked, doing so could have affected others with the same
7545 +- * id, and we can't check for that because we lost the id when
7546 +- * we converted to a PTR_TO_MAP_VALUE.
7547 +- */
7548 +- if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
7549 +- return false;
7550 +- if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
7551 +- return false;
7552 +- /* Check our ids match any regs they're supposed to */
7553 +- return check_ids(rold->id, rcur->id, idmap);
7554 + case PTR_TO_PACKET_META:
7555 + case PTR_TO_PACKET:
7556 + if (rcur->type != rold->type)
7557 +@@ -10529,11 +10532,8 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
7558 + case PTR_TO_PACKET_END:
7559 + case PTR_TO_FLOW_KEYS:
7560 + case PTR_TO_SOCKET:
7561 +- case PTR_TO_SOCKET_OR_NULL:
7562 + case PTR_TO_SOCK_COMMON:
7563 +- case PTR_TO_SOCK_COMMON_OR_NULL:
7564 + case PTR_TO_TCP_SOCK:
7565 +- case PTR_TO_TCP_SOCK_OR_NULL:
7566 + case PTR_TO_XDP_SOCK:
7567 + /* Only valid matches are exact, which memcmp() above
7568 + * would have accepted
7569 +@@ -11059,17 +11059,13 @@ next:
7570 + /* Return true if it's OK to have the same insn return a different type. */
7571 + static bool reg_type_mismatch_ok(enum bpf_reg_type type)
7572 + {
7573 +- switch (type) {
7574 ++ switch (base_type(type)) {
7575 + case PTR_TO_CTX:
7576 + case PTR_TO_SOCKET:
7577 +- case PTR_TO_SOCKET_OR_NULL:
7578 + case PTR_TO_SOCK_COMMON:
7579 +- case PTR_TO_SOCK_COMMON_OR_NULL:
7580 + case PTR_TO_TCP_SOCK:
7581 +- case PTR_TO_TCP_SOCK_OR_NULL:
7582 + case PTR_TO_XDP_SOCK:
7583 + case PTR_TO_BTF_ID:
7584 +- case PTR_TO_BTF_ID_OR_NULL:
7585 + return false;
7586 + default:
7587 + return true;
7588 +@@ -11293,7 +11289,7 @@ static int do_check(struct bpf_verifier_env *env)
7589 + if (is_ctx_reg(env, insn->dst_reg)) {
7590 + verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
7591 + insn->dst_reg,
7592 +- reg_type_str[reg_state(env, insn->dst_reg)->type]);
7593 ++ reg_type_str(env, reg_state(env, insn->dst_reg)->type));
7594 + return -EACCES;
7595 + }
7596 +
7597 +@@ -11545,7 +11541,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
7598 + err = -EINVAL;
7599 + goto err_put;
7600 + }
7601 +- aux->btf_var.reg_type = PTR_TO_MEM;
7602 ++ aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
7603 + aux->btf_var.mem_size = tsize;
7604 + } else {
7605 + aux->btf_var.reg_type = PTR_TO_BTF_ID;
7606 +@@ -13376,7 +13372,7 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
7607 + mark_reg_known_zero(env, regs, i);
7608 + else if (regs[i].type == SCALAR_VALUE)
7609 + mark_reg_unknown(env, regs, i);
7610 +- else if (regs[i].type == PTR_TO_MEM_OR_NULL) {
7611 ++ else if (base_type(regs[i].type) == PTR_TO_MEM) {
7612 + const u32 mem_size = regs[i].mem_size;
7613 +
7614 + mark_reg_known_zero(env, regs, i);
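
/*
 * Illustrative sketch of the scheme these verifier hunks adopt (a
 * standalone toy program, not kernel code): the "OR_NULL" and "rdonly"
 * register-type variants stop being separate enumerators and become
 * flag bits composed onto a base type, with base_type()/type_flag()
 * splitting them back apart. The values below are simplified stand-ins
 * for the real bpf_type_flag machinery.
 */
#include <stdio.h>

enum toy_reg_type {
	PTR_TO_MEM	= 1,
	PTR_TO_BUF	= 2,
	/* flag bits live above the base-type range */
	PTR_MAYBE_NULL	= 1 << 8,
	MEM_RDONLY	= 1 << 9,
	TYPE_FLAG_MASK	= PTR_MAYBE_NULL | MEM_RDONLY,
};

static int base_type(int type)	{ return type & ~TYPE_FLAG_MASK; }
static int type_flag(int type)	{ return type & TYPE_FLAG_MASK; }

/* Folding rule from check_reg_type() above: an argument that tolerates
 * RDONLY or NULL clears those bits on the register before comparing,
 * so a more restrictive register never sneaks past a stricter argument. */
static int arg_accepts_reg(int arg_type, int reg_type)
{
	if (arg_type & MEM_RDONLY)
		reg_type &= ~MEM_RDONLY;
	if (arg_type & PTR_MAYBE_NULL)
		reg_type &= ~PTR_MAYBE_NULL;
	return base_type(reg_type) == base_type(arg_type) &&
	       !type_flag(reg_type);
}

int main(void)
{
	/* rdonly register into an rdonly-tolerant argument: accepted */
	printf("%d\n", arg_accepts_reg(PTR_TO_MEM | MEM_RDONLY,
				       PTR_TO_MEM | MEM_RDONLY));	/* 1 */
	/* rdonly register into a writable argument: rejected */
	printf("%d\n", arg_accepts_reg(PTR_TO_MEM,
				       PTR_TO_MEM | MEM_RDONLY));	/* 0 */
	return 0;
}
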
7615 +diff --git a/kernel/cred.c b/kernel/cred.c
7616 +index 473d17c431f3a..933155c969227 100644
7617 +--- a/kernel/cred.c
7618 ++++ b/kernel/cred.c
7619 +@@ -665,21 +665,16 @@ EXPORT_SYMBOL(cred_fscmp);
7620 +
7621 + int set_cred_ucounts(struct cred *new)
7622 + {
7623 +- struct task_struct *task = current;
7624 +- const struct cred *old = task->real_cred;
7625 + struct ucounts *new_ucounts, *old_ucounts = new->ucounts;
7626 +
7627 +- if (new->user == old->user && new->user_ns == old->user_ns)
7628 +- return 0;
7629 +-
7630 + /*
7631 + * This optimization is needed because alloc_ucounts() uses locks
7632 + * for table lookups.
7633 + */
7634 +- if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
7635 ++ if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->uid))
7636 + return 0;
7637 +
7638 +- if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid)))
7639 ++ if (!(new_ucounts = alloc_ucounts(new->user_ns, new->uid)))
7640 + return -EAGAIN;
7641 +
7642 + new->ucounts = new_ucounts;
7643 +diff --git a/kernel/fork.c b/kernel/fork.c
7644 +index 3244cc56b697d..50d02e3103a57 100644
7645 +--- a/kernel/fork.c
7646 ++++ b/kernel/fork.c
7647 +@@ -2052,18 +2052,18 @@ static __latent_entropy struct task_struct *copy_process(
7648 + #ifdef CONFIG_PROVE_LOCKING
7649 + DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
7650 + #endif
7651 ++ retval = copy_creds(p, clone_flags);
7652 ++ if (retval < 0)
7653 ++ goto bad_fork_free;
7654 ++
7655 + retval = -EAGAIN;
7656 + if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
7657 + if (p->real_cred->user != INIT_USER &&
7658 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
7659 +- goto bad_fork_free;
7660 ++ goto bad_fork_cleanup_count;
7661 + }
7662 + current->flags &= ~PF_NPROC_EXCEEDED;
7663 +
7664 +- retval = copy_creds(p, clone_flags);
7665 +- if (retval < 0)
7666 +- goto bad_fork_free;
7667 +-
7668 + /*
7669 + * If multiple threads are within copy_process(), then this check
7670 + * triggers too late. This doesn't hurt, the check is only there
7671 +@@ -2350,10 +2350,6 @@ static __latent_entropy struct task_struct *copy_process(
7672 + goto bad_fork_cancel_cgroup;
7673 + }
7674 +
7675 +- /* past the last point of failure */
7676 +- if (pidfile)
7677 +- fd_install(pidfd, pidfile);
7678 +-
7679 + init_task_pid_links(p);
7680 + if (likely(p->pid)) {
7681 + ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
7682 +@@ -2402,6 +2398,9 @@ static __latent_entropy struct task_struct *copy_process(
7683 + syscall_tracepoint_update(p);
7684 + write_unlock_irq(&tasklist_lock);
7685 +
7686 ++ if (pidfile)
7687 ++ fd_install(pidfd, pidfile);
7688 ++
7689 + proc_fork_connector(p);
7690 + sched_post_fork(p, args);
7691 + cgroup_post_fork(p, args);
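
/*
 * Illustrative sketch of the pattern behind the fd_install() move in
 * the fork.c hunks above (make_object_file() and
 * finish_fallible_setup() are hypothetical helpers, not the actual
 * copy_process() code): installing an fd publishes the object to
 * userspace and cannot be rolled back by an error path, so it has to
 * happen after the last point of failure.
 */
static int create_and_publish(void)
{
	struct file *file;
	int fd, err;

	fd = get_unused_fd_flags(O_CLOEXEC);	/* reserved, not yet visible */
	if (fd < 0)
		return fd;

	file = make_object_file();		/* hypothetical constructor */
	if (IS_ERR(file)) {
		put_unused_fd(fd);		/* a reservation is revocable */
		return PTR_ERR(file);
	}

	err = finish_fallible_setup();		/* hypothetical; may fail */
	if (err) {
		put_unused_fd(fd);
		fput(file);
		return err;
	}

	fd_install(fd, file);			/* point of no return: do last */
	return fd;
}
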
7692 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
7693 +index 2270ec68f10a1..d48cd608376ae 100644
7694 +--- a/kernel/locking/lockdep.c
7695 ++++ b/kernel/locking/lockdep.c
7696 +@@ -3462,7 +3462,7 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
7697 + u16 chain_hlock = chain_hlocks[chain->base + i];
7698 + unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
7699 +
7700 +- return lock_classes + class_idx - 1;
7701 ++ return lock_classes + class_idx;
7702 + }
7703 +
7704 + /*
7705 +@@ -3530,7 +3530,7 @@ static void print_chain_keys_chain(struct lock_chain *chain)
7706 + hlock_id = chain_hlocks[chain->base + i];
7707 + chain_key = print_chain_key_iteration(hlock_id, chain_key);
7708 +
7709 +- print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id) - 1);
7710 ++ print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
7711 + printk("\n");
7712 + }
7713 + }
7714 +diff --git a/kernel/module.c b/kernel/module.c
7715 +index 84a9141a5e159..f25e7653aa150 100644
7716 +--- a/kernel/module.c
7717 ++++ b/kernel/module.c
7718 +@@ -3722,12 +3722,6 @@ static noinline int do_init_module(struct module *mod)
7719 + }
7720 + freeinit->module_init = mod->init_layout.base;
7721 +
7722 +- /*
7723 +- * We want to find out whether @mod uses async during init. Clear
7724 +- * PF_USED_ASYNC. async_schedule*() will set it.
7725 +- */
7726 +- current->flags &= ~PF_USED_ASYNC;
7727 +-
7728 + do_mod_ctors(mod);
7729 + /* Start the module */
7730 + if (mod->init != NULL)
7731 +@@ -3753,22 +3747,13 @@ static noinline int do_init_module(struct module *mod)
7732 +
7733 + /*
7734 + * We need to finish all async code before the module init sequence
7735 +- * is done. This has potential to deadlock. For example, a newly
7736 +- * detected block device can trigger request_module() of the
7737 +- * default iosched from async probing task. Once userland helper
7738 +- * reaches here, async_synchronize_full() will wait on the async
7739 +- * task waiting on request_module() and deadlock.
7740 +- *
7741 +- * This deadlock is avoided by perfomring async_synchronize_full()
7742 +- * iff module init queued any async jobs. This isn't a full
7743 +- * solution as it will deadlock the same if module loading from
7744 +- * async jobs nests more than once; however, due to the various
7745 +- * constraints, this hack seems to be the best option for now.
7746 +- * Please refer to the following thread for details.
7747 ++ * is done. This has potential to deadlock if synchronous module
7748 ++ * loading is requested from async (which is not allowed!).
7749 + *
7750 +- * http://thread.gmane.org/gmane.linux.kernel/1420814
7751 ++ * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
7752 ++ * request_module() from async workers") for more details.
7753 + */
7754 +- if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
7755 ++ if (!mod->async_probe_requested)
7756 + async_synchronize_full();
7757 +
7758 + ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
7759 +diff --git a/kernel/stackleak.c b/kernel/stackleak.c
7760 +index ce161a8e8d975..dd07239ddff9f 100644
7761 +--- a/kernel/stackleak.c
7762 ++++ b/kernel/stackleak.c
7763 +@@ -48,7 +48,7 @@ int stack_erasing_sysctl(struct ctl_table *table, int write,
7764 + #define skip_erasing() false
7765 + #endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
7766 +
7767 +-asmlinkage void notrace stackleak_erase(void)
7768 ++asmlinkage void noinstr stackleak_erase(void)
7769 + {
7770 + /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
7771 + unsigned long kstack_ptr = current->lowest_stack;
7772 +@@ -102,9 +102,8 @@ asmlinkage void notrace stackleak_erase(void)
7773 + /* Reset the 'lowest_stack' value for the next syscall */
7774 + current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
7775 + }
7776 +-NOKPROBE_SYMBOL(stackleak_erase);
7777 +
7778 +-void __used __no_caller_saved_registers notrace stackleak_track_stack(void)
7779 ++void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
7780 + {
7781 + unsigned long sp = current_stack_pointer;
7782 +
7783 +diff --git a/kernel/sys.c b/kernel/sys.c
7784 +index 8fdac0d90504a..3e4e8930fafc6 100644
7785 +--- a/kernel/sys.c
7786 ++++ b/kernel/sys.c
7787 +@@ -472,6 +472,16 @@ static int set_user(struct cred *new)
7788 + if (!new_user)
7789 + return -EAGAIN;
7790 +
7791 ++ free_uid(new->user);
7792 ++ new->user = new_user;
7793 ++ return 0;
7794 ++}
7795 ++
7796 ++static void flag_nproc_exceeded(struct cred *new)
7797 ++{
7798 ++ if (new->ucounts == current_ucounts())
7799 ++ return;
7800 ++
7801 + /*
7802 + * We don't fail in case of NPROC limit excess here because too many
7803 + * poorly written programs don't check set*uid() return code, assuming
7804 +@@ -480,15 +490,10 @@ static int set_user(struct cred *new)
7805 + * failure to the execve() stage.
7806 + */
7807 + if (is_ucounts_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
7808 +- new_user != INIT_USER &&
7809 +- !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
7810 ++ new->user != INIT_USER)
7811 + current->flags |= PF_NPROC_EXCEEDED;
7812 + else
7813 + current->flags &= ~PF_NPROC_EXCEEDED;
7814 +-
7815 +- free_uid(new->user);
7816 +- new->user = new_user;
7817 +- return 0;
7818 + }
7819 +
7820 + /*
7821 +@@ -563,6 +568,7 @@ long __sys_setreuid(uid_t ruid, uid_t euid)
7822 + if (retval < 0)
7823 + goto error;
7824 +
7825 ++ flag_nproc_exceeded(new);
7826 + return commit_creds(new);
7827 +
7828 + error:
7829 +@@ -625,6 +631,7 @@ long __sys_setuid(uid_t uid)
7830 + if (retval < 0)
7831 + goto error;
7832 +
7833 ++ flag_nproc_exceeded(new);
7834 + return commit_creds(new);
7835 +
7836 + error:
7837 +@@ -704,6 +711,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
7838 + if (retval < 0)
7839 + goto error;
7840 +
7841 ++ flag_nproc_exceeded(new);
7842 + return commit_creds(new);
7843 +
7844 + error:
7845 +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
7846 +index e36d184615fb7..4cc73a0d1215b 100644
7847 +--- a/kernel/trace/bpf_trace.c
7848 ++++ b/kernel/trace/bpf_trace.c
7849 +@@ -345,7 +345,7 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = {
7850 + .gpl_only = true,
7851 + .ret_type = RET_INTEGER,
7852 + .arg1_type = ARG_ANYTHING,
7853 +- .arg2_type = ARG_PTR_TO_MEM,
7854 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
7855 + .arg3_type = ARG_CONST_SIZE,
7856 + };
7857 +
7858 +@@ -394,7 +394,7 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
7859 + .func = bpf_trace_printk,
7860 + .gpl_only = true,
7861 + .ret_type = RET_INTEGER,
7862 +- .arg1_type = ARG_PTR_TO_MEM,
7863 ++ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
7864 + .arg2_type = ARG_CONST_SIZE,
7865 + };
7866 +
7867 +@@ -450,9 +450,9 @@ static const struct bpf_func_proto bpf_trace_vprintk_proto = {
7868 + .func = bpf_trace_vprintk,
7869 + .gpl_only = true,
7870 + .ret_type = RET_INTEGER,
7871 +- .arg1_type = ARG_PTR_TO_MEM,
7872 ++ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
7873 + .arg2_type = ARG_CONST_SIZE,
7874 +- .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
7875 ++ .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
7876 + .arg4_type = ARG_CONST_SIZE_OR_ZERO,
7877 + };
7878 +
7879 +@@ -492,9 +492,9 @@ static const struct bpf_func_proto bpf_seq_printf_proto = {
7880 + .ret_type = RET_INTEGER,
7881 + .arg1_type = ARG_PTR_TO_BTF_ID,
7882 + .arg1_btf_id = &btf_seq_file_ids[0],
7883 +- .arg2_type = ARG_PTR_TO_MEM,
7884 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
7885 + .arg3_type = ARG_CONST_SIZE,
7886 +- .arg4_type = ARG_PTR_TO_MEM_OR_NULL,
7887 ++ .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
7888 + .arg5_type = ARG_CONST_SIZE_OR_ZERO,
7889 + };
7890 +
7891 +@@ -509,7 +509,7 @@ static const struct bpf_func_proto bpf_seq_write_proto = {
7892 + .ret_type = RET_INTEGER,
7893 + .arg1_type = ARG_PTR_TO_BTF_ID,
7894 + .arg1_btf_id = &btf_seq_file_ids[0],
7895 +- .arg2_type = ARG_PTR_TO_MEM,
7896 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
7897 + .arg3_type = ARG_CONST_SIZE_OR_ZERO,
7898 + };
7899 +
7900 +@@ -533,7 +533,7 @@ static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
7901 + .ret_type = RET_INTEGER,
7902 + .arg1_type = ARG_PTR_TO_BTF_ID,
7903 + .arg1_btf_id = &btf_seq_file_ids[0],
7904 +- .arg2_type = ARG_PTR_TO_MEM,
7905 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
7906 + .arg3_type = ARG_CONST_SIZE_OR_ZERO,
7907 + .arg4_type = ARG_ANYTHING,
7908 + };
7909 +@@ -694,7 +694,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
7910 + .arg1_type = ARG_PTR_TO_CTX,
7911 + .arg2_type = ARG_CONST_MAP_PTR,
7912 + .arg3_type = ARG_ANYTHING,
7913 +- .arg4_type = ARG_PTR_TO_MEM,
7914 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
7915 + .arg5_type = ARG_CONST_SIZE_OR_ZERO,
7916 + };
7917 +
7918 +@@ -1004,7 +1004,7 @@ const struct bpf_func_proto bpf_snprintf_btf_proto = {
7919 + .ret_type = RET_INTEGER,
7920 + .arg1_type = ARG_PTR_TO_MEM,
7921 + .arg2_type = ARG_CONST_SIZE,
7922 +- .arg3_type = ARG_PTR_TO_MEM,
7923 ++ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
7924 + .arg4_type = ARG_CONST_SIZE,
7925 + .arg5_type = ARG_ANYTHING,
7926 + };
7927 +@@ -1285,7 +1285,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
7928 + .arg1_type = ARG_PTR_TO_CTX,
7929 + .arg2_type = ARG_CONST_MAP_PTR,
7930 + .arg3_type = ARG_ANYTHING,
7931 +- .arg4_type = ARG_PTR_TO_MEM,
7932 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
7933 + .arg5_type = ARG_CONST_SIZE_OR_ZERO,
7934 + };
7935 +
7936 +@@ -1507,7 +1507,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
7937 + .arg1_type = ARG_PTR_TO_CTX,
7938 + .arg2_type = ARG_CONST_MAP_PTR,
7939 + .arg3_type = ARG_ANYTHING,
7940 +- .arg4_type = ARG_PTR_TO_MEM,
7941 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
7942 + .arg5_type = ARG_CONST_SIZE_OR_ZERO,
7943 + };
7944 +
7945 +@@ -1561,7 +1561,7 @@ static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
7946 + .gpl_only = true,
7947 + .ret_type = RET_INTEGER,
7948 + .arg1_type = ARG_PTR_TO_CTX,
7949 +- .arg2_type = ARG_PTR_TO_MEM,
7950 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
7951 + .arg3_type = ARG_CONST_SIZE_OR_ZERO,
7952 + .arg4_type = ARG_ANYTHING,
7953 + };
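
/*
 * Illustrative sketch of what the proto annotations above buy
 * (simplified toy types, not the real struct bpf_func_proto): a helper
 * that only reads through a pointer argument marks it MEM_RDONLY, and
 * an optional argument adds PTR_MAYBE_NULL, so the verifier can accept
 * read-only or NULL buffers where dedicated ARG_*_OR_NULL enumerators
 * used to be needed.
 */
enum toy_arg_type { ARG_PTR_TO_MEM = 1, ARG_CONST_SIZE = 2 };
#define TOY_PTR_MAYBE_NULL	(1 << 8)
#define TOY_MEM_RDONLY		(1 << 9)

struct toy_func_proto {
	const char *name;
	int arg1_type, arg2_type, arg3_type, arg4_type;
};

/* bpf_trace_vprintk()-style: the format string is only ever read, and
 * the trailing data buffer may legitimately be NULL. */
static const struct toy_func_proto trace_vprintk_like = {
	.name	   = "trace_vprintk",
	.arg1_type = ARG_PTR_TO_MEM | TOY_MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_MEM | TOY_PTR_MAYBE_NULL | TOY_MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE,
};
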
7954 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
7955 +index ae9f9e4af9314..bb15059020445 100644
7956 +--- a/kernel/trace/trace.c
7957 ++++ b/kernel/trace/trace.c
7958 +@@ -252,6 +252,10 @@ __setup("trace_clock=", set_trace_boot_clock);
7959 +
7960 + static int __init set_tracepoint_printk(char *str)
7961 + {
7962 ++ /* Ignore the "tp_printk_stop_on_boot" param */
7963 ++ if (*str == '_')
7964 ++ return 0;
7965 ++
7966 + if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
7967 + tracepoint_printk = 1;
7968 + return 1;
7969 +diff --git a/kernel/ucount.c b/kernel/ucount.c
7970 +index 65b597431c861..06ea04d446852 100644
7971 +--- a/kernel/ucount.c
7972 ++++ b/kernel/ucount.c
7973 +@@ -350,7 +350,8 @@ bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsign
7974 + if (rlimit > LONG_MAX)
7975 + max = LONG_MAX;
7976 + for (iter = ucounts; iter; iter = iter->ns->ucounts) {
7977 +- if (get_ucounts_value(iter, type) > max)
7978 ++ long val = get_ucounts_value(iter, type);
7979 ++ if (val < 0 || val > max)
7980 + return true;
7981 + max = READ_ONCE(iter->ns->ucount_max[type]);
7982 + }
7983 +diff --git a/lib/iov_iter.c b/lib/iov_iter.c
7984 +index 66a740e6e153c..6d146f77601d7 100644
7985 +--- a/lib/iov_iter.c
7986 ++++ b/lib/iov_iter.c
7987 +@@ -416,6 +416,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
7988 + return 0;
7989 +
7990 + buf->ops = &page_cache_pipe_buf_ops;
7991 ++ buf->flags = 0;
7992 + get_page(page);
7993 + buf->page = page;
7994 + buf->offset = offset;
7995 +@@ -579,6 +580,7 @@ static size_t push_pipe(struct iov_iter *i, size_t size,
7996 + break;
7997 +
7998 + buf->ops = &default_pipe_buf_ops;
7999 ++ buf->flags = 0;
8000 + buf->page = page;
8001 + buf->offset = 0;
8002 + buf->len = min_t(ssize_t, left, PAGE_SIZE);
8003 +diff --git a/mm/mprotect.c b/mm/mprotect.c
8004 +index e552f5e0ccbde..02a11c49b5a87 100644
8005 +--- a/mm/mprotect.c
8006 ++++ b/mm/mprotect.c
8007 +@@ -94,7 +94,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
8008 +
8009 + /* Also skip shared copy-on-write pages */
8010 + if (is_cow_mapping(vma->vm_flags) &&
8011 +- page_mapcount(page) != 1)
8012 ++ page_count(page) != 1)
8013 + continue;
8014 +
8015 + /*
8016 +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
8017 +index 02f43f3e2c564..44a8730c26acc 100644
8018 +--- a/net/ax25/af_ax25.c
8019 ++++ b/net/ax25/af_ax25.c
8020 +@@ -77,6 +77,7 @@ static void ax25_kill_by_device(struct net_device *dev)
8021 + {
8022 + ax25_dev *ax25_dev;
8023 + ax25_cb *s;
8024 ++ struct sock *sk;
8025 +
8026 + if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
8027 + return;
8028 +@@ -85,13 +86,15 @@ static void ax25_kill_by_device(struct net_device *dev)
8029 + again:
8030 + ax25_for_each(s, &ax25_list) {
8031 + if (s->ax25_dev == ax25_dev) {
8032 ++ sk = s->sk;
8033 ++ sock_hold(sk);
8034 + spin_unlock_bh(&ax25_list_lock);
8035 +- lock_sock(s->sk);
8036 ++ lock_sock(sk);
8037 + s->ax25_dev = NULL;
8038 +- release_sock(s->sk);
8039 ++ release_sock(sk);
8040 + ax25_disconnect(s, ENETUNREACH);
8041 + spin_lock_bh(&ax25_list_lock);
8042 +-
8043 ++ sock_put(sk);
8044 + /* The entry could have been deleted from the
8045 + * list meanwhile and thus the next pointer is
8046 + * no longer valid. Play it safe and restart
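
/*
 * Illustrative sketch of the "pin before unlock" pattern the ax25 fix
 * above applies (toy_entry, entry_list and entry_list_lock are
 * hypothetical names): take a reference while the list lock is held,
 * drop the lock to do work that may sleep, and restart the walk
 * afterwards because the cursor may have been freed meanwhile.
 */
static void teardown_matching(struct net_device *dev)
{
	struct toy_entry *e;
	struct sock *sk;

again:
	spin_lock_bh(&entry_list_lock);
	list_for_each_entry(e, &entry_list, node) {
		if (e->dev != dev)
			continue;
		sk = e->sk;
		sock_hold(sk);			/* keep sk alive across unlock */
		spin_unlock_bh(&entry_list_lock);
		lock_sock(sk);			/* may sleep: list lock dropped */
		e->dev = NULL;
		release_sock(sk);
		disconnect_entry(e);		/* hypothetical teardown */
		sock_put(sk);			/* drop the temporary reference */
		goto again;			/* cursor is stale: restart */
	}
	spin_unlock_bh(&entry_list_lock);
}
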
8047 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
8048 +index de24098894897..db4f2641d1cd1 100644
8049 +--- a/net/bridge/br_multicast.c
8050 ++++ b/net/bridge/br_multicast.c
8051 +@@ -82,6 +82,9 @@ static void br_multicast_find_del_pg(struct net_bridge *br,
8052 + struct net_bridge_port_group *pg);
8053 + static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
8054 +
8055 ++static int br_mc_disabled_update(struct net_device *dev, bool value,
8056 ++ struct netlink_ext_ack *extack);
8057 ++
8058 + static struct net_bridge_port_group *
8059 + br_sg_port_find(struct net_bridge *br,
8060 + struct net_bridge_port_group_sg_key *sg_p)
8061 +@@ -1156,6 +1159,7 @@ struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
8062 + return mp;
8063 +
8064 + if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
8065 ++ br_mc_disabled_update(br->dev, false, NULL);
8066 + br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
8067 + return ERR_PTR(-E2BIG);
8068 + }
8069 +diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
8070 +index 68d2cbf8331ac..ea61dfe19c869 100644
8071 +--- a/net/core/bpf_sk_storage.c
8072 ++++ b/net/core/bpf_sk_storage.c
8073 +@@ -929,7 +929,7 @@ static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
8074 + { offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
8075 + PTR_TO_BTF_ID_OR_NULL },
8076 + { offsetof(struct bpf_iter__bpf_sk_storage_map, value),
8077 +- PTR_TO_RDWR_BUF_OR_NULL },
8078 ++ PTR_TO_BUF | PTR_MAYBE_NULL },
8079 + },
8080 + .seq_info = &iter_seq_info,
8081 + };
8082 +diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
8083 +index 49442cae6f69d..1d99b731e5b21 100644
8084 +--- a/net/core/drop_monitor.c
8085 ++++ b/net/core/drop_monitor.c
8086 +@@ -280,13 +280,17 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
8087 +
8088 + rcu_read_lock();
8089 + list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
8090 ++ struct net_device *dev;
8091 ++
8092 + /*
8093 + * only add a note to our monitor buffer if:
8094 + * 1) this is the dev we received on
8095 + * 2) its after the last_rx delta
8096 + * 3) our rx_dropped count has gone up
8097 + */
8098 +- if ((new_stat->dev == napi->dev) &&
8099 ++ /* Paired with WRITE_ONCE() in dropmon_net_event() */
8100 ++ dev = READ_ONCE(new_stat->dev);
8101 ++ if ((dev == napi->dev) &&
8102 + (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
8103 + (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
8104 + trace_drop_common(NULL, NULL);
8105 +@@ -1572,7 +1576,10 @@ static int dropmon_net_event(struct notifier_block *ev_block,
8106 + mutex_lock(&net_dm_mutex);
8107 + list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
8108 + if (new_stat->dev == dev) {
8109 +- new_stat->dev = NULL;
8110 ++
8111 ++ /* Paired with READ_ONCE() in trace_napi_poll_hit() */
8112 ++ WRITE_ONCE(new_stat->dev, NULL);
8113 ++
8114 + if (trace_state == TRACE_OFF) {
8115 + list_del_rcu(&new_stat->list);
8116 + kfree_rcu(new_stat, rcu);
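
/*
 * Illustrative sketch of the READ_ONCE()/WRITE_ONCE() pairing used in
 * the drop_monitor fix above (macro stand-ins and a simplified struct,
 * not the kernel definitions): the updater's marked store and the
 * lockless RCU reader's marked load keep the compiler from tearing,
 * fusing, or re-reading the pointer.
 */
#define READ_ONCE(x)	 (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct net_device;
struct stat_entry { struct net_device *dev; };

/* reader side: runs under rcu_read_lock(), no mutex held */
static int entry_is_for(struct stat_entry *e, struct net_device *rx_dev)
{
	struct net_device *dev = READ_ONCE(e->dev);	/* sampled exactly once */

	return dev == rx_dev;
}

/* updater side: holds the mutex that serializes writers */
static void entry_unlink_dev(struct stat_entry *e)
{
	WRITE_ONCE(e->dev, NULL);	/* untorn store, visible to readers */
}
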
8117 +diff --git a/net/core/filter.c b/net/core/filter.c
8118 +index 5b82a817f65a6..22bed067284fb 100644
8119 +--- a/net/core/filter.c
8120 ++++ b/net/core/filter.c
8121 +@@ -1713,7 +1713,7 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
8122 + .ret_type = RET_INTEGER,
8123 + .arg1_type = ARG_PTR_TO_CTX,
8124 + .arg2_type = ARG_ANYTHING,
8125 +- .arg3_type = ARG_PTR_TO_MEM,
8126 ++ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8127 + .arg4_type = ARG_CONST_SIZE,
8128 + .arg5_type = ARG_ANYTHING,
8129 + };
8130 +@@ -2018,9 +2018,9 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
8131 + .gpl_only = false,
8132 + .pkt_access = true,
8133 + .ret_type = RET_INTEGER,
8134 +- .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
8135 ++ .arg1_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
8136 + .arg2_type = ARG_CONST_SIZE_OR_ZERO,
8137 +- .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
8138 ++ .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
8139 + .arg4_type = ARG_CONST_SIZE_OR_ZERO,
8140 + .arg5_type = ARG_ANYTHING,
8141 + };
8142 +@@ -2541,7 +2541,7 @@ static const struct bpf_func_proto bpf_redirect_neigh_proto = {
8143 + .gpl_only = false,
8144 + .ret_type = RET_INTEGER,
8145 + .arg1_type = ARG_ANYTHING,
8146 +- .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
8147 ++ .arg2_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
8148 + .arg3_type = ARG_CONST_SIZE_OR_ZERO,
8149 + .arg4_type = ARG_ANYTHING,
8150 + };
8151 +@@ -4174,7 +4174,7 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
8152 + .arg1_type = ARG_PTR_TO_CTX,
8153 + .arg2_type = ARG_CONST_MAP_PTR,
8154 + .arg3_type = ARG_ANYTHING,
8155 +- .arg4_type = ARG_PTR_TO_MEM,
8156 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8157 + .arg5_type = ARG_CONST_SIZE_OR_ZERO,
8158 + };
8159 +
8160 +@@ -4188,7 +4188,7 @@ const struct bpf_func_proto bpf_skb_output_proto = {
8161 + .arg1_btf_id = &bpf_skb_output_btf_ids[0],
8162 + .arg2_type = ARG_CONST_MAP_PTR,
8163 + .arg3_type = ARG_ANYTHING,
8164 +- .arg4_type = ARG_PTR_TO_MEM,
8165 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8166 + .arg5_type = ARG_CONST_SIZE_OR_ZERO,
8167 + };
8168 +
8169 +@@ -4371,7 +4371,7 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
8170 + .gpl_only = false,
8171 + .ret_type = RET_INTEGER,
8172 + .arg1_type = ARG_PTR_TO_CTX,
8173 +- .arg2_type = ARG_PTR_TO_MEM,
8174 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8175 + .arg3_type = ARG_CONST_SIZE,
8176 + .arg4_type = ARG_ANYTHING,
8177 + };
8178 +@@ -4397,7 +4397,7 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
8179 + .gpl_only = false,
8180 + .ret_type = RET_INTEGER,
8181 + .arg1_type = ARG_PTR_TO_CTX,
8182 +- .arg2_type = ARG_PTR_TO_MEM,
8183 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8184 + .arg3_type = ARG_CONST_SIZE,
8185 + };
8186 +
8187 +@@ -4567,7 +4567,7 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
8188 + .arg1_type = ARG_PTR_TO_CTX,
8189 + .arg2_type = ARG_CONST_MAP_PTR,
8190 + .arg3_type = ARG_ANYTHING,
8191 +- .arg4_type = ARG_PTR_TO_MEM,
8192 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8193 + .arg5_type = ARG_CONST_SIZE_OR_ZERO,
8194 + };
8195 +
8196 +@@ -4581,7 +4581,7 @@ const struct bpf_func_proto bpf_xdp_output_proto = {
8197 + .arg1_btf_id = &bpf_xdp_output_btf_ids[0],
8198 + .arg2_type = ARG_CONST_MAP_PTR,
8199 + .arg3_type = ARG_ANYTHING,
8200 +- .arg4_type = ARG_PTR_TO_MEM,
8201 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8202 + .arg5_type = ARG_CONST_SIZE_OR_ZERO,
8203 + };
8204 +
8205 +@@ -5069,7 +5069,7 @@ const struct bpf_func_proto bpf_sk_setsockopt_proto = {
8206 + .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
8207 + .arg2_type = ARG_ANYTHING,
8208 + .arg3_type = ARG_ANYTHING,
8209 +- .arg4_type = ARG_PTR_TO_MEM,
8210 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8211 + .arg5_type = ARG_CONST_SIZE,
8212 + };
8213 +
8214 +@@ -5103,7 +5103,7 @@ static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = {
8215 + .arg1_type = ARG_PTR_TO_CTX,
8216 + .arg2_type = ARG_ANYTHING,
8217 + .arg3_type = ARG_ANYTHING,
8218 +- .arg4_type = ARG_PTR_TO_MEM,
8219 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8220 + .arg5_type = ARG_CONST_SIZE,
8221 + };
8222 +
8223 +@@ -5137,7 +5137,7 @@ static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
8224 + .arg1_type = ARG_PTR_TO_CTX,
8225 + .arg2_type = ARG_ANYTHING,
8226 + .arg3_type = ARG_ANYTHING,
8227 +- .arg4_type = ARG_PTR_TO_MEM,
8228 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8229 + .arg5_type = ARG_CONST_SIZE,
8230 + };
8231 +
8232 +@@ -5312,7 +5312,7 @@ static const struct bpf_func_proto bpf_bind_proto = {
8233 + .gpl_only = false,
8234 + .ret_type = RET_INTEGER,
8235 + .arg1_type = ARG_PTR_TO_CTX,
8236 +- .arg2_type = ARG_PTR_TO_MEM,
8237 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8238 + .arg3_type = ARG_CONST_SIZE,
8239 + };
8240 +
8241 +@@ -5900,7 +5900,7 @@ static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
8242 + .ret_type = RET_INTEGER,
8243 + .arg1_type = ARG_PTR_TO_CTX,
8244 + .arg2_type = ARG_ANYTHING,
8245 +- .arg3_type = ARG_PTR_TO_MEM,
8246 ++ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8247 + .arg4_type = ARG_CONST_SIZE
8248 + };
8249 +
8250 +@@ -5910,7 +5910,7 @@ static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
8251 + .ret_type = RET_INTEGER,
8252 + .arg1_type = ARG_PTR_TO_CTX,
8253 + .arg2_type = ARG_ANYTHING,
8254 +- .arg3_type = ARG_PTR_TO_MEM,
8255 ++ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8256 + .arg4_type = ARG_CONST_SIZE
8257 + };
8258 +
8259 +@@ -5953,7 +5953,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
8260 + .ret_type = RET_INTEGER,
8261 + .arg1_type = ARG_PTR_TO_CTX,
8262 + .arg2_type = ARG_ANYTHING,
8263 +- .arg3_type = ARG_PTR_TO_MEM,
8264 ++ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8265 + .arg4_type = ARG_CONST_SIZE
8266 + };
8267 +
8268 +@@ -6041,7 +6041,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
8269 + .ret_type = RET_INTEGER,
8270 + .arg1_type = ARG_PTR_TO_CTX,
8271 + .arg2_type = ARG_ANYTHING,
8272 +- .arg3_type = ARG_PTR_TO_MEM,
8273 ++ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8274 + .arg4_type = ARG_CONST_SIZE
8275 + };
8276 +
8277 +@@ -6266,7 +6266,7 @@ static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
8278 + .pkt_access = true,
8279 + .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
8280 + .arg1_type = ARG_PTR_TO_CTX,
8281 +- .arg2_type = ARG_PTR_TO_MEM,
8282 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8283 + .arg3_type = ARG_CONST_SIZE,
8284 + .arg4_type = ARG_ANYTHING,
8285 + .arg5_type = ARG_ANYTHING,
8286 +@@ -6285,7 +6285,7 @@ static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
8287 + .pkt_access = true,
8288 + .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
8289 + .arg1_type = ARG_PTR_TO_CTX,
8290 +- .arg2_type = ARG_PTR_TO_MEM,
8291 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8292 + .arg3_type = ARG_CONST_SIZE,
8293 + .arg4_type = ARG_ANYTHING,
8294 + .arg5_type = ARG_ANYTHING,
8295 +@@ -6304,7 +6304,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
8296 + .pkt_access = true,
8297 + .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
8298 + .arg1_type = ARG_PTR_TO_CTX,
8299 +- .arg2_type = ARG_PTR_TO_MEM,
8300 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8301 + .arg3_type = ARG_CONST_SIZE,
8302 + .arg4_type = ARG_ANYTHING,
8303 + .arg5_type = ARG_ANYTHING,
8304 +@@ -6341,7 +6341,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
8305 + .pkt_access = true,
8306 + .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
8307 + .arg1_type = ARG_PTR_TO_CTX,
8308 +- .arg2_type = ARG_PTR_TO_MEM,
8309 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8310 + .arg3_type = ARG_CONST_SIZE,
8311 + .arg4_type = ARG_ANYTHING,
8312 + .arg5_type = ARG_ANYTHING,
8313 +@@ -6364,7 +6364,7 @@ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
8314 + .pkt_access = true,
8315 + .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
8316 + .arg1_type = ARG_PTR_TO_CTX,
8317 +- .arg2_type = ARG_PTR_TO_MEM,
8318 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8319 + .arg3_type = ARG_CONST_SIZE,
8320 + .arg4_type = ARG_ANYTHING,
8321 + .arg5_type = ARG_ANYTHING,
8322 +@@ -6387,7 +6387,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
8323 + .pkt_access = true,
8324 + .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
8325 + .arg1_type = ARG_PTR_TO_CTX,
8326 +- .arg2_type = ARG_PTR_TO_MEM,
8327 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8328 + .arg3_type = ARG_CONST_SIZE,
8329 + .arg4_type = ARG_ANYTHING,
8330 + .arg5_type = ARG_ANYTHING,
8331 +@@ -6406,7 +6406,7 @@ static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
8332 + .gpl_only = false,
8333 + .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
8334 + .arg1_type = ARG_PTR_TO_CTX,
8335 +- .arg2_type = ARG_PTR_TO_MEM,
8336 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8337 + .arg3_type = ARG_CONST_SIZE,
8338 + .arg4_type = ARG_ANYTHING,
8339 + .arg5_type = ARG_ANYTHING,
8340 +@@ -6425,7 +6425,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
8341 + .gpl_only = false,
8342 + .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
8343 + .arg1_type = ARG_PTR_TO_CTX,
8344 +- .arg2_type = ARG_PTR_TO_MEM,
8345 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8346 + .arg3_type = ARG_CONST_SIZE,
8347 + .arg4_type = ARG_ANYTHING,
8348 + .arg5_type = ARG_ANYTHING,
8349 +@@ -6444,7 +6444,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
8350 + .gpl_only = false,
8351 + .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
8352 + .arg1_type = ARG_PTR_TO_CTX,
8353 +- .arg2_type = ARG_PTR_TO_MEM,
8354 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8355 + .arg3_type = ARG_CONST_SIZE,
8356 + .arg4_type = ARG_ANYTHING,
8357 + .arg5_type = ARG_ANYTHING,
8358 +@@ -6757,9 +6757,9 @@ static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
8359 + .pkt_access = true,
8360 + .ret_type = RET_INTEGER,
8361 + .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
8362 +- .arg2_type = ARG_PTR_TO_MEM,
8363 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8364 + .arg3_type = ARG_CONST_SIZE,
8365 +- .arg4_type = ARG_PTR_TO_MEM,
8366 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8367 + .arg5_type = ARG_CONST_SIZE,
8368 + };
8369 +
8370 +@@ -6826,9 +6826,9 @@ static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
8371 + .pkt_access = true,
8372 + .ret_type = RET_INTEGER,
8373 + .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
8374 +- .arg2_type = ARG_PTR_TO_MEM,
8375 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8376 + .arg3_type = ARG_CONST_SIZE,
8377 +- .arg4_type = ARG_PTR_TO_MEM,
8378 ++ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8379 + .arg5_type = ARG_CONST_SIZE,
8380 + };
8381 +
8382 +@@ -7057,7 +7057,7 @@ static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = {
8383 + .gpl_only = false,
8384 + .ret_type = RET_INTEGER,
8385 + .arg1_type = ARG_PTR_TO_CTX,
8386 +- .arg2_type = ARG_PTR_TO_MEM,
8387 ++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8388 + .arg3_type = ARG_CONST_SIZE,
8389 + .arg4_type = ARG_ANYTHING,
8390 + };
8391 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
8392 +index abab13633f845..8b5c5703d7582 100644
8393 +--- a/net/core/rtnetlink.c
8394 ++++ b/net/core/rtnetlink.c
8395 +@@ -1698,6 +1698,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
8396 + {
8397 + struct ifinfomsg *ifm;
8398 + struct nlmsghdr *nlh;
8399 ++ struct Qdisc *qdisc;
8400 +
8401 + ASSERT_RTNL();
8402 + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
8403 +@@ -1715,6 +1716,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
8404 + if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
8405 + goto nla_put_failure;
8406 +
8407 ++ qdisc = rtnl_dereference(dev->qdisc);
8408 + if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
8409 + nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
8410 + nla_put_u8(skb, IFLA_OPERSTATE,
8411 +@@ -1733,8 +1735,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
8412 + #endif
8413 + put_master_ifindex(skb, dev) ||
8414 + nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
8415 +- (dev->qdisc &&
8416 +- nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
8417 ++ (qdisc &&
8418 ++ nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
8419 + nla_put_ifalias(skb, dev) ||
8420 + nla_put_u32(skb, IFLA_CARRIER_CHANGES,
8421 + atomic_read(&dev->carrier_up_count) +
8422 +diff --git a/net/core/sock_map.c b/net/core/sock_map.c
8423 +index 687c81386518c..1827669eedd6f 100644
8424 +--- a/net/core/sock_map.c
8425 ++++ b/net/core/sock_map.c
8426 +@@ -1569,7 +1569,7 @@ static struct bpf_iter_reg sock_map_iter_reg = {
8427 + .ctx_arg_info_size = 2,
8428 + .ctx_arg_info = {
8429 + { offsetof(struct bpf_iter__sockmap, key),
8430 +- PTR_TO_RDONLY_BUF_OR_NULL },
8431 ++ PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
8432 + { offsetof(struct bpf_iter__sockmap, sk),
8433 + PTR_TO_BTF_ID_OR_NULL },
8434 + },
8435 +diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
8436 +index ea5169e671aea..817f884b43d6e 100644
8437 +--- a/net/dsa/dsa.c
8438 ++++ b/net/dsa/dsa.c
8439 +@@ -349,6 +349,7 @@ void dsa_flush_workqueue(void)
8440 + {
8441 + flush_workqueue(dsa_owq);
8442 + }
8443 ++EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
8444 +
8445 + int dsa_devlink_param_get(struct devlink *dl, u32 id,
8446 + struct devlink_param_gset_ctx *ctx)
8447 +diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
8448 +index a5c9bc7b66c6e..33ab7d7af9eb4 100644
8449 +--- a/net/dsa/dsa_priv.h
8450 ++++ b/net/dsa/dsa_priv.h
8451 +@@ -170,7 +170,6 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
8452 + const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
8453 +
8454 + bool dsa_schedule_work(struct work_struct *work);
8455 +-void dsa_flush_workqueue(void);
8456 + const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
8457 +
8458 + static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
8459 +diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
8460 +index cb548188f8134..98d7d7120bab2 100644
8461 +--- a/net/dsa/tag_lan9303.c
8462 ++++ b/net/dsa/tag_lan9303.c
8463 +@@ -77,7 +77,6 @@ static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
8464 +
8465 + static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
8466 + {
8467 +- __be16 *lan9303_tag;
8468 + u16 lan9303_tag1;
8469 + unsigned int source_port;
8470 +
8471 +@@ -87,14 +86,15 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
8472 + return NULL;
8473 + }
8474 +
8475 +- lan9303_tag = dsa_etype_header_pos_rx(skb);
8476 +-
8477 +- if (lan9303_tag[0] != htons(ETH_P_8021Q)) {
8478 +- dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid VLAN marker\n");
8479 +- return NULL;
8480 ++ if (skb_vlan_tag_present(skb)) {
8481 ++ lan9303_tag1 = skb_vlan_tag_get(skb);
8482 ++ __vlan_hwaccel_clear_tag(skb);
8483 ++ } else {
8484 ++ skb_push_rcsum(skb, ETH_HLEN);
8485 ++ __skb_vlan_pop(skb, &lan9303_tag1);
8486 ++ skb_pull_rcsum(skb, ETH_HLEN);
8487 + }
8488 +
8489 +- lan9303_tag1 = ntohs(lan9303_tag[1]);
8490 + source_port = lan9303_tag1 & 0x3;
8491 +
8492 + skb->dev = dsa_master_find_slave(dev, 0, source_port);
8493 +@@ -103,13 +103,6 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
8494 + return NULL;
8495 + }
8496 +
8497 +- /* remove the special VLAN tag between the MAC addresses
8498 +- * and the current ethertype field.
8499 +- */
8500 +- skb_pull_rcsum(skb, 2 + 2);
8501 +-
8502 +- dsa_strip_etype_header(skb, LAN9303_TAG_LEN);
8503 +-
8504 + if (!(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU))
8505 + dsa_default_offload_fwd_mark(skb);
8506 +
8507 +diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
8508 +index e184bcb199434..78e40ea42e58d 100644
8509 +--- a/net/ipv4/fib_lookup.h
8510 ++++ b/net/ipv4/fib_lookup.h
8511 +@@ -16,10 +16,9 @@ struct fib_alias {
8512 + u8 fa_slen;
8513 + u32 tb_id;
8514 + s16 fa_default;
8515 +- u8 offload:1,
8516 +- trap:1,
8517 +- offload_failed:1,
8518 +- unused:5;
8519 ++ u8 offload;
8520 ++ u8 trap;
8521 ++ u8 offload_failed;
8522 + struct rcu_head rcu;
8523 + };
8524 +
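
/*
 * Illustrative sketch of why the fib_alias flags above change from
 * bitfields to full bytes (toy structs, not the kernel ones):
 * READ_ONCE()/WRITE_ONCE() need an addressable scalar, and storing to
 * one bitfield is a read-modify-write of the whole storage unit, which
 * can race with a concurrent store to a sibling bit.
 */
struct toy_flags_racy {			/* before: three bits share a byte */
	unsigned char offload:1,
		      trap:1,
		      offload_failed:1,
		      unused:5;
};

struct toy_flags_safe {			/* after: independently addressable */
	unsigned char offload;
	unsigned char trap;
	unsigned char offload_failed;
};

/* With toy_flags_safe, WRITE_ONCE(f->trap, 1) is a plain byte store;
 * with the bitfield layout it would be a non-atomic RMW touching all
 * three flags (and READ_ONCE() cannot take a bitfield's address at all). */
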
8525 +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
8526 +index 5dfb94abe7b10..d244c57b73031 100644
8527 +--- a/net/ipv4/fib_semantics.c
8528 ++++ b/net/ipv4/fib_semantics.c
8529 +@@ -524,9 +524,9 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
8530 + fri.dst_len = dst_len;
8531 + fri.tos = fa->fa_tos;
8532 + fri.type = fa->fa_type;
8533 +- fri.offload = fa->offload;
8534 +- fri.trap = fa->trap;
8535 +- fri.offload_failed = fa->offload_failed;
8536 ++ fri.offload = READ_ONCE(fa->offload);
8537 ++ fri.trap = READ_ONCE(fa->trap);
8538 ++ fri.offload_failed = READ_ONCE(fa->offload_failed);
8539 + err = fib_dump_info(skb, info->portid, seq, event, &fri, nlm_flags);
8540 + if (err < 0) {
8541 + /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
8542 +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
8543 +index 8060524f42566..f7f74d5c14da6 100644
8544 +--- a/net/ipv4/fib_trie.c
8545 ++++ b/net/ipv4/fib_trie.c
8546 +@@ -1047,19 +1047,23 @@ void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
8547 + if (!fa_match)
8548 + goto out;
8549 +
8550 +- if (fa_match->offload == fri->offload && fa_match->trap == fri->trap &&
8551 +- fa_match->offload_failed == fri->offload_failed)
8552 ++ /* These are paired with the WRITE_ONCE() happening in this function.
8553 ++ * The reason is that we are only protected by RCU at this point.
8554 ++ */
8555 ++ if (READ_ONCE(fa_match->offload) == fri->offload &&
8556 ++ READ_ONCE(fa_match->trap) == fri->trap &&
8557 ++ READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
8558 + goto out;
8559 +
8560 +- fa_match->offload = fri->offload;
8561 +- fa_match->trap = fri->trap;
8562 ++ WRITE_ONCE(fa_match->offload, fri->offload);
8563 ++ WRITE_ONCE(fa_match->trap, fri->trap);
8564 +
8565 + /* 2 means send notifications only if offload_failed was changed. */
8566 + if (net->ipv4.sysctl_fib_notify_on_flag_change == 2 &&
8567 +- fa_match->offload_failed == fri->offload_failed)
8568 ++ READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
8569 + goto out;
8570 +
8571 +- fa_match->offload_failed = fri->offload_failed;
8572 ++ WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);
8573 +
8574 + if (!net->ipv4.sysctl_fib_notify_on_flag_change)
8575 + goto out;
8576 +@@ -2297,9 +2301,9 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
8577 + fri.dst_len = KEYLENGTH - fa->fa_slen;
8578 + fri.tos = fa->fa_tos;
8579 + fri.type = fa->fa_type;
8580 +- fri.offload = fa->offload;
8581 +- fri.trap = fa->trap;
8582 +- fri.offload_failed = fa->offload_failed;
8583 ++ fri.offload = READ_ONCE(fa->offload);
8584 ++ fri.trap = READ_ONCE(fa->trap);
8585 ++ fri.offload_failed = READ_ONCE(fa->offload_failed);
8586 + err = fib_dump_info(skb,
8587 + NETLINK_CB(cb->skb).portid,
8588 + cb->nlh->nlmsg_seq,
8589 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
8590 +index 086822cb1cc96..e3a159c8f231e 100644
8591 +--- a/net/ipv4/ping.c
8592 ++++ b/net/ipv4/ping.c
8593 +@@ -172,16 +172,23 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
8594 + struct sock *sk = NULL;
8595 + struct inet_sock *isk;
8596 + struct hlist_nulls_node *hnode;
8597 +- int dif = skb->dev->ifindex;
8598 ++ int dif, sdif;
8599 +
8600 + if (skb->protocol == htons(ETH_P_IP)) {
8601 ++ dif = inet_iif(skb);
8602 ++ sdif = inet_sdif(skb);
8603 + pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
8604 + (int)ident, &ip_hdr(skb)->daddr, dif);
8605 + #if IS_ENABLED(CONFIG_IPV6)
8606 + } else if (skb->protocol == htons(ETH_P_IPV6)) {
8607 ++ dif = inet6_iif(skb);
8608 ++ sdif = inet6_sdif(skb);
8609 + pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
8610 + (int)ident, &ipv6_hdr(skb)->daddr, dif);
8611 + #endif
8612 ++ } else {
8613 ++ pr_err("ping: protocol(%x) is not supported\n", ntohs(skb->protocol));
8614 ++ return NULL;
8615 + }
8616 +
8617 + read_lock_bh(&ping_table.lock);
8618 +@@ -221,7 +228,7 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
8619 + }
8620 +
8621 + if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
8622 +- sk->sk_bound_dev_if != inet_sdif(skb))
8623 ++ sk->sk_bound_dev_if != sdif)
8624 + continue;
8625 +
8626 + sock_hold(sk);
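The ping_lookup() fix computes the (dif, sdif) pair from the protocol-appropriate helpers up front and rejects unknown protocols, so the bind-device check later in the loop never reads an uninitialized or wrong-family index. A hedged sketch of the selection shape; the *_iif()/*_sdif() stubs below are placeholders standing in for the kernel helpers:

    /* Sketch: pick interface indices per protocol first, and fail
     * closed for anything else so later checks never see garbage.
     */
    #include <stdio.h>

    enum proto { PROTO_IP, PROTO_IPV6, PROTO_OTHER };

    static int inet_iif(void)   { return 2; }  /* placeholder values */
    static int inet_sdif(void)  { return 0; }
    static int inet6_iif(void)  { return 3; }
    static int inet6_sdif(void) { return 0; }

    static int pick_ifindices(enum proto p, int *dif, int *sdif)
    {
            switch (p) {
            case PROTO_IP:
                    *dif = inet_iif();
                    *sdif = inet_sdif();
                    return 0;
            case PROTO_IPV6:
                    *dif = inet6_iif();
                    *sdif = inet6_sdif();
                    return 0;
            default:
                    return -1;   /* unsupported: caller drops packet */
            }
    }

    int main(void)
    {
            int dif, sdif;

            if (pick_ifindices(PROTO_IP, &dif, &sdif) == 0)
                    printf("dif=%d sdif=%d\n", dif, sdif);
            return 0;
    }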
8627 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
8628 +index 0b4103b1e6220..2c30c599cc161 100644
8629 +--- a/net/ipv4/route.c
8630 ++++ b/net/ipv4/route.c
8631 +@@ -3393,8 +3393,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
8632 + fa->fa_tos == fri.tos &&
8633 + fa->fa_info == res.fi &&
8634 + fa->fa_type == fri.type) {
8635 +- fri.offload = fa->offload;
8636 +- fri.trap = fa->trap;
8637 ++ fri.offload = READ_ONCE(fa->offload);
8638 ++ fri.trap = READ_ONCE(fa->trap);
8639 + break;
8640 + }
8641 + }
8642 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
8643 +index 87961f1d9959b..6652d96329a0c 100644
8644 +--- a/net/ipv6/addrconf.c
8645 ++++ b/net/ipv6/addrconf.c
8646 +@@ -1839,8 +1839,8 @@ out:
8647 + }
8648 + EXPORT_SYMBOL(ipv6_dev_get_saddr);
8649 +
8650 +-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
8651 +- u32 banned_flags)
8652 ++static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
8653 ++ u32 banned_flags)
8654 + {
8655 + struct inet6_ifaddr *ifp;
8656 + int err = -EADDRNOTAVAIL;
8657 +diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
8658 +index aa673a6a7e432..ceb85c67ce395 100644
8659 +--- a/net/ipv6/ip6_flowlabel.c
8660 ++++ b/net/ipv6/ip6_flowlabel.c
8661 +@@ -450,8 +450,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
8662 + err = -EINVAL;
8663 + goto done;
8664 + }
8665 +- if (fl_shared_exclusive(fl) || fl->opt)
8666 ++ if (fl_shared_exclusive(fl) || fl->opt) {
8667 ++ WRITE_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl, 1);
8668 + static_branch_deferred_inc(&ipv6_flowlabel_exclusive);
8669 ++ }
8670 + return fl;
8671 +
8672 + done:
8673 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
8674 +index bed8155508c85..a8861db52c187 100644
8675 +--- a/net/ipv6/mcast.c
8676 ++++ b/net/ipv6/mcast.c
8677 +@@ -1759,7 +1759,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
8678 + skb_reserve(skb, hlen);
8679 + skb_tailroom_reserve(skb, mtu, tlen);
8680 +
8681 +- if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
8682 ++ if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
8683 + /* <draft-ietf-magma-mld-source-05.txt>:
8684 + * use unspecified address as the source address
8685 + * when a valid link-local address is not available.
8686 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
8687 +index 49fee1f1951c2..75f916b7460c7 100644
8688 +--- a/net/ipv6/route.c
8689 ++++ b/net/ipv6/route.c
8690 +@@ -5767,11 +5767,11 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
8691 + }
8692 +
8693 + if (!dst) {
8694 +- if (rt->offload)
8695 ++ if (READ_ONCE(rt->offload))
8696 + rtm->rtm_flags |= RTM_F_OFFLOAD;
8697 +- if (rt->trap)
8698 ++ if (READ_ONCE(rt->trap))
8699 + rtm->rtm_flags |= RTM_F_TRAP;
8700 +- if (rt->offload_failed)
8701 ++ if (READ_ONCE(rt->offload_failed))
8702 + rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
8703 + }
8704 +
8705 +@@ -6229,19 +6229,20 @@ void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
8706 + struct sk_buff *skb;
8707 + int err;
8708 +
8709 +- if (f6i->offload == offload && f6i->trap == trap &&
8710 +- f6i->offload_failed == offload_failed)
8711 ++ if (READ_ONCE(f6i->offload) == offload &&
8712 ++ READ_ONCE(f6i->trap) == trap &&
8713 ++ READ_ONCE(f6i->offload_failed) == offload_failed)
8714 + return;
8715 +
8716 +- f6i->offload = offload;
8717 +- f6i->trap = trap;
8718 ++ WRITE_ONCE(f6i->offload, offload);
8719 ++ WRITE_ONCE(f6i->trap, trap);
8720 +
8721 + /* 2 means send notifications only if offload_failed was changed. */
8722 + if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
8723 +- f6i->offload_failed == offload_failed)
8724 ++ READ_ONCE(f6i->offload_failed) == offload_failed)
8725 + return;
8726 +
8727 +- f6i->offload_failed = offload_failed;
8728 ++ WRITE_ONCE(f6i->offload_failed, offload_failed);
8729 +
8730 + if (!rcu_access_pointer(f6i->fib6_node))
8731 + /* The route was removed from the tree, do not send
8732 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
8733 +index 3147ca89f608e..311b4d9344959 100644
8734 +--- a/net/mac80211/mlme.c
8735 ++++ b/net/mac80211/mlme.c
8736 +@@ -664,7 +664,7 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
8737 + ieee80211_ie_build_he_6ghz_cap(sdata, skb);
8738 + }
8739 +
8740 +-static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
8741 ++static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
8742 + {
8743 + struct ieee80211_local *local = sdata->local;
8744 + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
8745 +@@ -684,6 +684,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
8746 + enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
8747 + const struct ieee80211_sband_iftype_data *iftd;
8748 + struct ieee80211_prep_tx_info info = {};
8749 ++ int ret;
8750 +
8751 + /* we know it's writable, cast away the const */
8752 + if (assoc_data->ie_len)
8753 +@@ -697,7 +698,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
8754 + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
8755 + if (WARN_ON(!chanctx_conf)) {
8756 + rcu_read_unlock();
8757 +- return;
8758 ++ return -EINVAL;
8759 + }
8760 + chan = chanctx_conf->def.chan;
8761 + rcu_read_unlock();
8762 +@@ -748,7 +749,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
8763 + (iftd ? iftd->vendor_elems.len : 0),
8764 + GFP_KERNEL);
8765 + if (!skb)
8766 +- return;
8767 ++ return -ENOMEM;
8768 +
8769 + skb_reserve(skb, local->hw.extra_tx_headroom);
8770 +
8771 +@@ -1029,15 +1030,22 @@ skip_rates:
8772 + skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
8773 + }
8774 +
8775 +- if (assoc_data->fils_kek_len &&
8776 +- fils_encrypt_assoc_req(skb, assoc_data) < 0) {
8777 +- dev_kfree_skb(skb);
8778 +- return;
8779 ++ if (assoc_data->fils_kek_len) {
8780 ++ ret = fils_encrypt_assoc_req(skb, assoc_data);
8781 ++ if (ret < 0) {
8782 ++ dev_kfree_skb(skb);
8783 ++ return ret;
8784 ++ }
8785 + }
8786 +
8787 + pos = skb_tail_pointer(skb);
8788 + kfree(ifmgd->assoc_req_ies);
8789 + ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC);
8790 ++ if (!ifmgd->assoc_req_ies) {
8791 ++ dev_kfree_skb(skb);
8792 ++ return -ENOMEM;
8793 ++ }
8794 ++
8795 + ifmgd->assoc_req_ies_len = pos - ie_start;
8796 +
8797 + drv_mgd_prepare_tx(local, sdata, &info);
8798 +@@ -1047,6 +1055,8 @@ skip_rates:
8799 + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
8800 + IEEE80211_TX_INTFL_MLME_CONN_TX;
8801 + ieee80211_tx_skb(sdata, skb);
8802 ++
8803 ++ return 0;
8804 + }
8805 +
8806 + void ieee80211_send_pspoll(struct ieee80211_local *local,
8807 +@@ -4491,6 +4501,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
8808 + {
8809 + struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
8810 + struct ieee80211_local *local = sdata->local;
8811 ++ int ret;
8812 +
8813 + sdata_assert_lock(sdata);
8814 +
8815 +@@ -4511,7 +4522,9 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
8816 + sdata_info(sdata, "associate with %pM (try %d/%d)\n",
8817 + assoc_data->bss->bssid, assoc_data->tries,
8818 + IEEE80211_ASSOC_MAX_TRIES);
8819 +- ieee80211_send_assoc(sdata);
8820 ++ ret = ieee80211_send_assoc(sdata);
8821 ++ if (ret)
8822 ++ return ret;
8823 +
8824 + if (!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
8825 + assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
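The mac80211 change converts ieee80211_send_assoc() from void to int so allocation and FILS-encryption failures propagate to ieee80211_do_assoc() instead of being silently swallowed and followed by an association timeout. A generic sketch of that refactor shape, with hypothetical names rather than the mac80211 API:

    /* Sketch: converting a fire-and-forget helper into one that
     * reports errors, and bailing out in the caller.
     */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int send_request(void)
    {
            void *buf = malloc(64);

            if (!buf)
                    return -ENOMEM;  /* was: a silent "return;" */
            /* ... build and queue the frame ... */
            free(buf);
            return 0;
    }

    static int do_assoc(void)
    {
            int ret = send_request();

            if (ret)                 /* propagate instead of arming */
                    return ret;      /* a doomed retry timer        */
            /* ... arm the association timeout ... */
            return 0;
    }

    int main(void)
    {
            printf("do_assoc: %d\n", do_assoc());
            return 0;
    }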
8826 +diff --git a/net/mctp/route.c b/net/mctp/route.c
8827 +index cdf09c2a7007a..f8c0cb2de98be 100644
8828 +--- a/net/mctp/route.c
8829 ++++ b/net/mctp/route.c
8830 +@@ -414,13 +414,14 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
8831 + * this function.
8832 + */
8833 + rc = mctp_key_add(key, msk);
8834 +- if (rc)
8835 ++ if (rc) {
8836 + kfree(key);
8837 ++ } else {
8838 ++ trace_mctp_key_acquire(key);
8839 +
8840 +- trace_mctp_key_acquire(key);
8841 +-
8842 +- /* we don't need to release key->lock on exit */
8843 +- mctp_key_unref(key);
8844 ++ /* we don't need to release key->lock on exit */
8845 ++ mctp_key_unref(key);
8846 ++ }
8847 + key = NULL;
8848 +
8849 + } else {
8850 +diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
8851 +index 2394238d01c91..5a936334b517a 100644
8852 +--- a/net/netfilter/nf_conntrack_proto_sctp.c
8853 ++++ b/net/netfilter/nf_conntrack_proto_sctp.c
8854 +@@ -489,6 +489,15 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
8855 + pr_debug("Setting vtag %x for dir %d\n",
8856 + ih->init_tag, !dir);
8857 + ct->proto.sctp.vtag[!dir] = ih->init_tag;
8858 ++
8859 ++ /* don't renew timeout on init retransmit so
8860 ++ * port reuse by client or NAT middlebox cannot
8861 ++ * keep entry alive indefinitely (incl. nat info).
8862 ++ */
8863 ++ if (new_state == SCTP_CONNTRACK_CLOSED &&
8864 ++ old_state == SCTP_CONNTRACK_CLOSED &&
8865 ++ nf_ct_is_confirmed(ct))
8866 ++ ignore = true;
8867 + }
8868 +
8869 + ct->proto.sctp.state = new_state;
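As the comment in the hunk says, the SCTP tracker now ignores an INIT that leaves a confirmed entry in CLOSED state, so retransmitted INITs (port reuse by a client or NAT middlebox) cannot renew the timeout and keep a stale entry and its NAT mapping alive indefinitely. The condition reduces to a small predicate; a sketch with stand-in state values for the SCTP_CONNTRACK_* enum:

    /* Sketch: decide whether to skip the timeout refresh for a
     * retransmitted connection-open.
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum { ST_CLOSED, ST_COOKIE_WAIT, ST_ESTABLISHED };

    static bool ignore_init(int old_state, int new_state, bool confirmed)
    {
            /* CLOSED -> CLOSED on an already-confirmed entry:
             * do not renew the timeout. */
            return new_state == ST_CLOSED &&
                   old_state == ST_CLOSED &&
                   confirmed;
    }

    int main(void)
    {
            printf("%d\n", ignore_init(ST_CLOSED, ST_CLOSED, true));  /* 1 */
            printf("%d\n", ignore_init(ST_CLOSED, ST_CLOSED, false)); /* 0 */
            return 0;
    }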
8870 +diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
8871 +index a0109fa1e92d0..1133e06f3c40e 100644
8872 +--- a/net/netfilter/nft_synproxy.c
8873 ++++ b/net/netfilter/nft_synproxy.c
8874 +@@ -191,8 +191,10 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
8875 + if (err)
8876 + goto nf_ct_failure;
8877 + err = nf_synproxy_ipv6_init(snet, ctx->net);
8878 +- if (err)
8879 ++ if (err) {
8880 ++ nf_synproxy_ipv4_fini(snet, ctx->net);
8881 + goto nf_ct_failure;
8882 ++ }
8883 + break;
8884 + }
8885 +
8886 +diff --git a/net/sched/act_api.c b/net/sched/act_api.c
8887 +index 3258da3d5bed5..2f46f9f9afb95 100644
8888 +--- a/net/sched/act_api.c
8889 ++++ b/net/sched/act_api.c
8890 +@@ -730,15 +730,24 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
8891 + restart_act_graph:
8892 + for (i = 0; i < nr_actions; i++) {
8893 + const struct tc_action *a = actions[i];
8894 ++ int repeat_ttl;
8895 +
8896 + if (jmp_prgcnt > 0) {
8897 + jmp_prgcnt -= 1;
8898 + continue;
8899 + }
8900 ++
8901 ++ repeat_ttl = 32;
8902 + repeat:
8903 + ret = a->ops->act(skb, a, res);
8904 +- if (ret == TC_ACT_REPEAT)
8905 +- goto repeat; /* we need a ttl - JHS */
8906 ++
8907 ++ if (unlikely(ret == TC_ACT_REPEAT)) {
8908 ++ if (--repeat_ttl != 0)
8909 ++ goto repeat;
8910 ++ /* suspicious opcode, stop pipeline */
8911 ++ net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
8912 ++ return TC_ACT_OK;
8913 ++ }
8914 +
8915 + if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
8916 + jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
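The act_api hunk finally adds the TTL the old "/* we need a ttl - JHS */" comment asked for: a TC_ACT_REPEAT opcode is honoured at most 32 times, after which the pipeline fails open with a rate-limited warning rather than spinning forever on a misbehaving action. The bounded-retry shape, as a standalone sketch with stand-in ACT_* codes:

    /* Sketch: cap an opcode-driven retry loop so a pathological
     * action cannot spin the pipeline forever.
     */
    #include <stdio.h>

    enum { ACT_OK, ACT_REPEAT };

    static int run_action(int *calls)
    {
            (*calls)++;
            return ACT_REPEAT;  /* pathological: always re-run me */
    }

    static int exec_actions(void)
    {
            int repeat_ttl = 32;
            int calls = 0;
            int ret;

    repeat:
            ret = run_action(&calls);
            if (ret == ACT_REPEAT) {
                    if (--repeat_ttl != 0)
                            goto repeat;
                    fprintf(stderr, "repeat abuse after %d calls\n", calls);
                    return ACT_OK;  /* stop the pipeline, fail open */
            }
            return ret;
    }

    int main(void)
    {
            return exec_actions();
    }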
8917 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
8918 +index 56dba8519d7c3..cd44cac7fbcf9 100644
8919 +--- a/net/sched/cls_api.c
8920 ++++ b/net/sched/cls_api.c
8921 +@@ -1044,7 +1044,7 @@ static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
8922 +
8923 + /* Find qdisc */
8924 + if (!*parent) {
8925 +- *q = dev->qdisc;
8926 ++ *q = rcu_dereference(dev->qdisc);
8927 + *parent = (*q)->handle;
8928 + } else {
8929 + *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
8930 +@@ -2587,7 +2587,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
8931 +
8932 + parent = tcm->tcm_parent;
8933 + if (!parent)
8934 +- q = dev->qdisc;
8935 ++ q = rtnl_dereference(dev->qdisc);
8936 + else
8937 + q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
8938 + if (!q)
8939 +@@ -2962,7 +2962,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
8940 + return skb->len;
8941 +
8942 + if (!tcm->tcm_parent)
8943 +- q = dev->qdisc;
8944 ++ q = rtnl_dereference(dev->qdisc);
8945 + else
8946 + q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
8947 +
8948 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
8949 +index e4a7ce5c79f4f..6d9411b44258e 100644
8950 +--- a/net/sched/sch_api.c
8951 ++++ b/net/sched/sch_api.c
8952 +@@ -301,7 +301,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
8953 +
8954 + if (!handle)
8955 + return NULL;
8956 +- q = qdisc_match_from_root(dev->qdisc, handle);
8957 ++ q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
8958 + if (q)
8959 + goto out;
8960 +
8961 +@@ -320,7 +320,7 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
8962 +
8963 + if (!handle)
8964 + return NULL;
8965 +- q = qdisc_match_from_root(dev->qdisc, handle);
8966 ++ q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
8967 + if (q)
8968 + goto out;
8969 +
8970 +@@ -1082,10 +1082,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
8971 + skip:
8972 + if (!ingress) {
8973 + notify_and_destroy(net, skb, n, classid,
8974 +- dev->qdisc, new);
8975 ++ rtnl_dereference(dev->qdisc), new);
8976 + if (new && !new->ops->attach)
8977 + qdisc_refcount_inc(new);
8978 +- dev->qdisc = new ? : &noop_qdisc;
8979 ++ rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
8980 +
8981 + if (new && new->ops->attach)
8982 + new->ops->attach(new);
8983 +@@ -1451,7 +1451,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
8984 + q = dev_ingress_queue(dev)->qdisc_sleeping;
8985 + }
8986 + } else {
8987 +- q = dev->qdisc;
8988 ++ q = rtnl_dereference(dev->qdisc);
8989 + }
8990 + if (!q) {
8991 + NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
8992 +@@ -1540,7 +1540,7 @@ replay:
8993 + q = dev_ingress_queue(dev)->qdisc_sleeping;
8994 + }
8995 + } else {
8996 +- q = dev->qdisc;
8997 ++ q = rtnl_dereference(dev->qdisc);
8998 + }
8999 +
9000 + /* It may be default qdisc, ignore it */
9001 +@@ -1762,7 +1762,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
9002 + s_q_idx = 0;
9003 + q_idx = 0;
9004 +
9005 +- if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
9006 ++ if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
9007 ++ skb, cb, &q_idx, s_q_idx,
9008 + true, tca[TCA_DUMP_INVISIBLE]) < 0)
9009 + goto done;
9010 +
9011 +@@ -2033,7 +2034,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
9012 + } else if (qid1) {
9013 + qid = qid1;
9014 + } else if (qid == 0)
9015 +- qid = dev->qdisc->handle;
9016 ++ qid = rtnl_dereference(dev->qdisc)->handle;
9017 +
9018 + /* Now qid is genuine qdisc handle consistent
9019 + * both with parent and child.
9020 +@@ -2044,7 +2045,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
9021 + portid = TC_H_MAKE(qid, portid);
9022 + } else {
9023 + if (qid == 0)
9024 +- qid = dev->qdisc->handle;
9025 ++ qid = rtnl_dereference(dev->qdisc)->handle;
9026 + }
9027 +
9028 + /* OK. Locate qdisc */
9029 +@@ -2205,7 +2206,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
9030 + s_t = cb->args[0];
9031 + t = 0;
9032 +
9033 +- if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
9034 ++ if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
9035 ++ skb, tcm, cb, &t, s_t, true) < 0)
9036 + goto done;
9037 +
9038 + dev_queue = dev_ingress_queue(dev);
9039 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
9040 +index 5d391fe3137dc..28052706c4e36 100644
9041 +--- a/net/sched/sch_generic.c
9042 ++++ b/net/sched/sch_generic.c
9043 +@@ -1109,30 +1109,33 @@ static void attach_default_qdiscs(struct net_device *dev)
9044 + if (!netif_is_multiqueue(dev) ||
9045 + dev->priv_flags & IFF_NO_QUEUE) {
9046 + netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
9047 +- dev->qdisc = txq->qdisc_sleeping;
9048 +- qdisc_refcount_inc(dev->qdisc);
9049 ++ qdisc = txq->qdisc_sleeping;
9050 ++ rcu_assign_pointer(dev->qdisc, qdisc);
9051 ++ qdisc_refcount_inc(qdisc);
9052 + } else {
9053 + qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
9054 + if (qdisc) {
9055 +- dev->qdisc = qdisc;
9056 ++ rcu_assign_pointer(dev->qdisc, qdisc);
9057 + qdisc->ops->attach(qdisc);
9058 + }
9059 + }
9060 ++ qdisc = rtnl_dereference(dev->qdisc);
9061 +
9062 + /* Detect default qdisc setup/init failed and fallback to "noqueue" */
9063 +- if (dev->qdisc == &noop_qdisc) {
9064 ++ if (qdisc == &noop_qdisc) {
9065 + netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
9066 + default_qdisc_ops->id, noqueue_qdisc_ops.id);
9067 + dev->priv_flags |= IFF_NO_QUEUE;
9068 + netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
9069 +- dev->qdisc = txq->qdisc_sleeping;
9070 +- qdisc_refcount_inc(dev->qdisc);
9071 ++ qdisc = txq->qdisc_sleeping;
9072 ++ rcu_assign_pointer(dev->qdisc, qdisc);
9073 ++ qdisc_refcount_inc(qdisc);
9074 + dev->priv_flags ^= IFF_NO_QUEUE;
9075 + }
9076 +
9077 + #ifdef CONFIG_NET_SCHED
9078 +- if (dev->qdisc != &noop_qdisc)
9079 +- qdisc_hash_add(dev->qdisc, false);
9080 ++ if (qdisc != &noop_qdisc)
9081 ++ qdisc_hash_add(qdisc, false);
9082 + #endif
9083 + }
9084 +
9085 +@@ -1162,7 +1165,7 @@ void dev_activate(struct net_device *dev)
9086 + * and noqueue_qdisc for virtual interfaces
9087 + */
9088 +
9089 +- if (dev->qdisc == &noop_qdisc)
9090 ++ if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
9091 + attach_default_qdiscs(dev);
9092 +
9093 + if (!netif_carrier_ok(dev))
9094 +@@ -1328,7 +1331,7 @@ static int qdisc_change_tx_queue_len(struct net_device *dev,
9095 + void dev_qdisc_change_real_num_tx(struct net_device *dev,
9096 + unsigned int new_real_tx)
9097 + {
9098 +- struct Qdisc *qdisc = dev->qdisc;
9099 ++ struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
9100 +
9101 + if (qdisc->ops->change_real_num_tx)
9102 + qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
9103 +@@ -1392,7 +1395,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
9104 +
9105 + void dev_init_scheduler(struct net_device *dev)
9106 + {
9107 +- dev->qdisc = &noop_qdisc;
9108 ++ rcu_assign_pointer(dev->qdisc, &noop_qdisc);
9109 + netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
9110 + if (dev_ingress_queue(dev))
9111 + dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
9112 +@@ -1420,8 +1423,8 @@ void dev_shutdown(struct net_device *dev)
9113 + netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
9114 + if (dev_ingress_queue(dev))
9115 + shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
9116 +- qdisc_put(dev->qdisc);
9117 +- dev->qdisc = &noop_qdisc;
9118 ++ qdisc_put(rtnl_dereference(dev->qdisc));
9119 ++ rcu_assign_pointer(dev->qdisc, &noop_qdisc);
9120 +
9121 + WARN_ON(timer_pending(&dev->watchdog_timer));
9122 + }
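Across sch_api.c and sch_generic.c, raw loads and stores of dev->qdisc are replaced by rcu_assign_pointer()/rtnl_dereference()/rcu_dereference() so that lockless readers only ever see a fully initialized qdisc. A user-space approximation of the publish/read contract using C11 atomics (a release store standing in for rcu_assign_pointer(), an acquire load for the dereference; this is an analogy, not the kernel primitives):

    /* Sketch: publish an initialized object with a release store and
     * read it with an acquire load - the ordering contract that
     * rcu_assign_pointer()/rcu_dereference() provide in the kernel.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    struct qdisc {
            int handle;
    };

    static struct qdisc noop_qdisc = { .handle = 0 };
    static _Atomic(struct qdisc *) dev_qdisc = &noop_qdisc;

    static void publish(struct qdisc *q)
    {
            q->handle = 42;                        /* init first ...   */
            atomic_store_explicit(&dev_qdisc, q,   /* ... then publish */
                                  memory_order_release);
    }

    static struct qdisc *read_qdisc(void)
    {
            return atomic_load_explicit(&dev_qdisc,
                                        memory_order_acquire);
    }

    int main(void)
    {
            struct qdisc q;

            publish(&q);
            printf("handle=%d\n", read_qdisc()->handle);
            return 0;
    }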
9123 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
9124 +index 0f5c305c55c5a..10d2d81f93376 100644
9125 +--- a/net/smc/af_smc.c
9126 ++++ b/net/smc/af_smc.c
9127 +@@ -667,14 +667,17 @@ static void smc_fback_error_report(struct sock *clcsk)
9128 + static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
9129 + {
9130 + struct sock *clcsk;
9131 ++ int rc = 0;
9132 +
9133 + mutex_lock(&smc->clcsock_release_lock);
9134 + if (!smc->clcsock) {
9135 +- mutex_unlock(&smc->clcsock_release_lock);
9136 +- return -EBADF;
9137 ++ rc = -EBADF;
9138 ++ goto out;
9139 + }
9140 + clcsk = smc->clcsock->sk;
9141 +
9142 ++ if (smc->use_fallback)
9143 ++ goto out;
9144 + smc->use_fallback = true;
9145 + smc->fallback_rsn = reason_code;
9146 + smc_stat_fallback(smc);
9147 +@@ -702,8 +705,9 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
9148 + smc->clcsock->sk->sk_user_data =
9149 + (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
9150 + }
9151 ++out:
9152 + mutex_unlock(&smc->clcsock_release_lock);
9153 +- return 0;
9154 ++ return rc;
9155 + }
9156 +
9157 + /* fall back during connect */
9158 +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
9159 +index 3d3673ba9e1e5..2a2e1514ac79a 100644
9160 +--- a/net/sunrpc/xprtrdma/verbs.c
9161 ++++ b/net/sunrpc/xprtrdma/verbs.c
9162 +@@ -436,6 +436,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
9163 + IB_POLL_WORKQUEUE);
9164 + if (IS_ERR(ep->re_attr.send_cq)) {
9165 + rc = PTR_ERR(ep->re_attr.send_cq);
9166 ++ ep->re_attr.send_cq = NULL;
9167 + goto out_destroy;
9168 + }
9169 +
9170 +@@ -444,6 +445,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
9171 + IB_POLL_WORKQUEUE);
9172 + if (IS_ERR(ep->re_attr.recv_cq)) {
9173 + rc = PTR_ERR(ep->re_attr.recv_cq);
9174 ++ ep->re_attr.recv_cq = NULL;
9175 + goto out_destroy;
9176 + }
9177 + ep->re_receive_count = 0;
9178 +@@ -482,6 +484,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
9179 + ep->re_pd = ib_alloc_pd(device, 0);
9180 + if (IS_ERR(ep->re_pd)) {
9181 + rc = PTR_ERR(ep->re_pd);
9182 ++ ep->re_pd = NULL;
9183 + goto out_destroy;
9184 + }
9185 +
9186 +diff --git a/net/tipc/node.c b/net/tipc/node.c
9187 +index 9947b7dfe1d2d..6ef95ce565bd3 100644
9188 +--- a/net/tipc/node.c
9189 ++++ b/net/tipc/node.c
9190 +@@ -403,7 +403,7 @@ static void tipc_node_write_unlock(struct tipc_node *n)
9191 + u32 flags = n->action_flags;
9192 + struct list_head *publ_list;
9193 + struct tipc_uaddr ua;
9194 +- u32 bearer_id;
9195 ++ u32 bearer_id, node;
9196 +
9197 + if (likely(!flags)) {
9198 + write_unlock_bh(&n->lock);
9199 +@@ -413,7 +413,8 @@ static void tipc_node_write_unlock(struct tipc_node *n)
9200 + tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
9201 + TIPC_LINK_STATE, n->addr, n->addr);
9202 + sk.ref = n->link_id;
9203 +- sk.node = n->addr;
9204 ++ sk.node = tipc_own_addr(net);
9205 ++ node = n->addr;
9206 + bearer_id = n->link_id & 0xffff;
9207 + publ_list = &n->publ_list;
9208 +
9209 +@@ -423,17 +424,17 @@ static void tipc_node_write_unlock(struct tipc_node *n)
9210 + write_unlock_bh(&n->lock);
9211 +
9212 + if (flags & TIPC_NOTIFY_NODE_DOWN)
9213 +- tipc_publ_notify(net, publ_list, sk.node, n->capabilities);
9214 ++ tipc_publ_notify(net, publ_list, node, n->capabilities);
9215 +
9216 + if (flags & TIPC_NOTIFY_NODE_UP)
9217 +- tipc_named_node_up(net, sk.node, n->capabilities);
9218 ++ tipc_named_node_up(net, node, n->capabilities);
9219 +
9220 + if (flags & TIPC_NOTIFY_LINK_UP) {
9221 +- tipc_mon_peer_up(net, sk.node, bearer_id);
9222 ++ tipc_mon_peer_up(net, node, bearer_id);
9223 + tipc_nametbl_publish(net, &ua, &sk, sk.ref);
9224 + }
9225 + if (flags & TIPC_NOTIFY_LINK_DOWN) {
9226 +- tipc_mon_peer_down(net, sk.node, bearer_id);
9227 ++ tipc_mon_peer_down(net, node, bearer_id);
9228 + tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
9229 + }
9230 + }
9231 +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
9232 +index ed0df839c38ce..8e4f2a1346be6 100644
9233 +--- a/net/vmw_vsock/af_vsock.c
9234 ++++ b/net/vmw_vsock/af_vsock.c
9235 +@@ -1400,6 +1400,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
9236 + sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
9237 + sock->state = SS_UNCONNECTED;
9238 + vsock_transport_cancel_pkt(vsk);
9239 ++ vsock_remove_connected(vsk);
9240 + goto out_wait;
9241 + } else if (timeout == 0) {
9242 + err = -ETIMEDOUT;
9243 +diff --git a/net/wireless/core.c b/net/wireless/core.c
9244 +index eb297e1015e05..441136646f89a 100644
9245 +--- a/net/wireless/core.c
9246 ++++ b/net/wireless/core.c
9247 +@@ -5,7 +5,7 @@
9248 + * Copyright 2006-2010 Johannes Berg <johannes@××××××××××××.net>
9249 + * Copyright 2013-2014 Intel Mobile Communications GmbH
9250 + * Copyright 2015-2017 Intel Deutschland GmbH
9251 +- * Copyright (C) 2018-2021 Intel Corporation
9252 ++ * Copyright (C) 2018-2022 Intel Corporation
9253 + */
9254 +
9255 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9256 +@@ -332,29 +332,20 @@ static void cfg80211_event_work(struct work_struct *work)
9257 + void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
9258 + {
9259 + struct wireless_dev *wdev, *tmp;
9260 +- bool found = false;
9261 +
9262 + ASSERT_RTNL();
9263 +
9264 +- list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
9265 ++ list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
9266 + if (wdev->nl_owner_dead) {
9267 + if (wdev->netdev)
9268 + dev_close(wdev->netdev);
9269 +- found = true;
9270 +- }
9271 +- }
9272 +-
9273 +- if (!found)
9274 +- return;
9275 +
9276 +- wiphy_lock(&rdev->wiphy);
9277 +- list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
9278 +- if (wdev->nl_owner_dead) {
9279 ++ wiphy_lock(&rdev->wiphy);
9280 + cfg80211_leave(rdev, wdev);
9281 + rdev_del_virtual_intf(rdev, wdev);
9282 ++ wiphy_unlock(&rdev->wiphy);
9283 + }
9284 + }
9285 +- wiphy_unlock(&rdev->wiphy);
9286 + }
9287 +
9288 + static void cfg80211_destroy_iface_wk(struct work_struct *work)
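The cfg80211 fix folds the old two-pass walk into a single list_for_each_entry_safe() traversal, taking the wiphy lock around each deletion, because rdev_del_virtual_intf() unlinks the entry being visited. A small user-space sketch of why the "safe" variant matters when unlinking during traversal:

    /* Sketch: deleting the current node while walking a linked list
     * requires caching the next pointer first - the analogue of
     * list_for_each_entry_safe().
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            int dead;
            struct node *next;
    };

    static void reap(struct node **head)
    {
            struct node **prev = head;
            struct node *n = *head, *tmp;

            while (n) {
                    tmp = n->next;        /* save before freeing n */
                    if (n->dead) {
                            *prev = tmp;  /* unlink ... */
                            free(n);      /* ... then release */
                    } else {
                            prev = &n->next;
                    }
                    n = tmp;
            }
    }

    int main(void)
    {
            struct node *head = NULL;

            for (int i = 0; i < 4; i++) {
                    struct node *n = malloc(sizeof(*n));

                    n->dead = i & 1;
                    n->next = head;
                    head = n;
            }
            reap(&head);
            for (struct node *n = head; n; n = n->next)
                    printf("kept node (dead=%d)\n", n->dead);
            return 0;
    }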
9289 +diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
9290 +index 00284c03da4d3..027f4c28dc320 100644
9291 +--- a/scripts/kconfig/confdata.c
9292 ++++ b/scripts/kconfig/confdata.c
9293 +@@ -992,14 +992,19 @@ static int conf_write_autoconf_cmd(const char *autoconf_name)
9294 +
9295 + static int conf_touch_deps(void)
9296 + {
9297 +- const char *name;
9298 ++ const char *name, *tmp;
9299 + struct symbol *sym;
9300 + int res, i;
9301 +
9302 +- strcpy(depfile_path, "include/config/");
9303 +- depfile_prefix_len = strlen(depfile_path);
9304 +-
9305 + name = conf_get_autoconfig_name();
9306 ++ tmp = strrchr(name, '/');
9307 ++ depfile_prefix_len = tmp ? tmp - name + 1 : 0;
9308 ++ if (depfile_prefix_len + 1 > sizeof(depfile_path))
9309 ++ return -1;
9310 ++
9311 ++ strncpy(depfile_path, name, depfile_prefix_len);
9312 ++ depfile_path[depfile_prefix_len] = 0;
9313 ++
9314 + conf_read_simple(name, S_DEF_AUTO);
9315 + sym_calc_value(modules_sym);
9316 +
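conf_touch_deps() now derives the dependency-file prefix from the configured autoconfig path instead of hardcoding "include/config/", and bounds the copy against the destination buffer. The prefix extraction, as a standalone sketch (the 128-byte buffer size is illustrative):

    /* Sketch: copy the directory prefix of a path, including the
     * trailing '/', with an explicit bounds check.
     */
    #include <stdio.h>
    #include <string.h>

    static char depfile_path[128];

    static int set_depfile_prefix(const char *name)
    {
            const char *tmp = strrchr(name, '/');
            size_t len = tmp ? (size_t)(tmp - name + 1) : 0;

            if (len + 1 > sizeof(depfile_path))
                    return -1;           /* would not fit */

            memcpy(depfile_path, name, len);
            depfile_path[len] = '\0';
            return 0;
    }

    int main(void)
    {
            if (set_depfile_prefix("include/config/auto.conf") == 0)
                    printf("prefix: '%s'\n", depfile_path);
            return 0;
    }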
9317 +diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
9318 +index 0590f86df6e40..748da578b418c 100644
9319 +--- a/scripts/kconfig/preprocess.c
9320 ++++ b/scripts/kconfig/preprocess.c
9321 +@@ -141,7 +141,7 @@ static char *do_lineno(int argc, char *argv[])
9322 + static char *do_shell(int argc, char *argv[])
9323 + {
9324 + FILE *p;
9325 +- char buf[256];
9326 ++ char buf[4096];
9327 + char *cmd;
9328 + size_t nread;
9329 + int i;
9330 +diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
9331 +index 9fc971a704a9e..b03c185f6b21c 100644
9332 +--- a/sound/core/memalloc.c
9333 ++++ b/sound/core/memalloc.c
9334 +@@ -511,7 +511,8 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
9335 + DEFAULT_GFP, 0);
9336 + if (!sgt)
9337 + return NULL;
9338 +- dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
9339 ++ dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
9340 ++ sg_dma_address(sgt->sgl));
9341 + p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
9342 + if (p)
9343 + dmab->private_data = sgt;
9344 +@@ -540,9 +541,9 @@ static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
9345 + if (mode == SNDRV_DMA_SYNC_CPU) {
9346 + if (dmab->dev.dir == DMA_TO_DEVICE)
9347 + return;
9348 ++ invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
9349 + dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
9350 + dmab->dev.dir);
9351 +- invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
9352 + } else {
9353 + if (dmab->dev.dir == DMA_FROM_DEVICE)
9354 + return;
9355 +@@ -625,9 +626,13 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
9356 + */
9357 + static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
9358 + {
9359 +- dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
9360 +- return dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
9361 +- dmab->dev.dir, DEFAULT_GFP);
9362 ++ void *p;
9363 ++
9364 ++ p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
9365 ++ dmab->dev.dir, DEFAULT_GFP);
9366 ++ if (p)
9367 ++ dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
9368 ++ return p;
9369 + }
9370 +
9371 + static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
9372 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
9373 +index 1b46b599a5cff..3b6f2aacda459 100644
9374 +--- a/sound/pci/hda/hda_intel.c
9375 ++++ b/sound/pci/hda/hda_intel.c
9376 +@@ -1611,6 +1611,7 @@ static const struct snd_pci_quirk probe_mask_list[] = {
9377 + /* forced codec slots */
9378 + SND_PCI_QUIRK(0x1043, 0x1262, "ASUS W5Fm", 0x103),
9379 + SND_PCI_QUIRK(0x1046, 0x1262, "ASUS W5F", 0x103),
9380 ++ SND_PCI_QUIRK(0x1558, 0x0351, "Schenker Dock 15", 0x105),
9381 + /* WinFast VP200 H (Teradici) user reported broken communication */
9382 + SND_PCI_QUIRK(0x3a21, 0x040d, "WinFast VP200 H", 0x101),
9383 + {}
9384 +@@ -1794,8 +1795,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
9385 +
9386 + assign_position_fix(chip, check_position_fix(chip, position_fix[dev]));
9387 +
9388 +- check_probe_mask(chip, dev);
9389 +-
9390 + if (single_cmd < 0) /* allow fallback to single_cmd at errors */
9391 + chip->fallback_to_single_cmd = 1;
9392 + else /* explicitly set to single_cmd or not */
9393 +@@ -1821,6 +1820,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
9394 + chip->bus.core.needs_damn_long_delay = 1;
9395 + }
9396 +
9397 ++ check_probe_mask(chip, dev);
9398 ++
9399 + err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
9400 + if (err < 0) {
9401 + dev_err(card->dev, "Error creating device [card]!\n");
9402 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
9403 +index 18f04137f61cf..83b56c1ba3996 100644
9404 +--- a/sound/pci/hda/patch_realtek.c
9405 ++++ b/sound/pci/hda/patch_realtek.c
9406 +@@ -133,6 +133,22 @@ struct alc_spec {
9407 + * COEF access helper functions
9408 + */
9409 +
9410 ++static void coef_mutex_lock(struct hda_codec *codec)
9411 ++{
9412 ++ struct alc_spec *spec = codec->spec;
9413 ++
9414 ++ snd_hda_power_up_pm(codec);
9415 ++ mutex_lock(&spec->coef_mutex);
9416 ++}
9417 ++
9418 ++static void coef_mutex_unlock(struct hda_codec *codec)
9419 ++{
9420 ++ struct alc_spec *spec = codec->spec;
9421 ++
9422 ++ mutex_unlock(&spec->coef_mutex);
9423 ++ snd_hda_power_down_pm(codec);
9424 ++}
9425 ++
9426 + static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
9427 + unsigned int coef_idx)
9428 + {
9429 +@@ -146,12 +162,11 @@ static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
9430 + static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
9431 + unsigned int coef_idx)
9432 + {
9433 +- struct alc_spec *spec = codec->spec;
9434 + unsigned int val;
9435 +
9436 +- mutex_lock(&spec->coef_mutex);
9437 ++ coef_mutex_lock(codec);
9438 + val = __alc_read_coefex_idx(codec, nid, coef_idx);
9439 +- mutex_unlock(&spec->coef_mutex);
9440 ++ coef_mutex_unlock(codec);
9441 + return val;
9442 + }
9443 +
9444 +@@ -168,11 +183,9 @@ static void __alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
9445 + static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
9446 + unsigned int coef_idx, unsigned int coef_val)
9447 + {
9448 +- struct alc_spec *spec = codec->spec;
9449 +-
9450 +- mutex_lock(&spec->coef_mutex);
9451 ++ coef_mutex_lock(codec);
9452 + __alc_write_coefex_idx(codec, nid, coef_idx, coef_val);
9453 +- mutex_unlock(&spec->coef_mutex);
9454 ++ coef_mutex_unlock(codec);
9455 + }
9456 +
9457 + #define alc_write_coef_idx(codec, coef_idx, coef_val) \
9458 +@@ -193,11 +206,9 @@ static void alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
9459 + unsigned int coef_idx, unsigned int mask,
9460 + unsigned int bits_set)
9461 + {
9462 +- struct alc_spec *spec = codec->spec;
9463 +-
9464 +- mutex_lock(&spec->coef_mutex);
9465 ++ coef_mutex_lock(codec);
9466 + __alc_update_coefex_idx(codec, nid, coef_idx, mask, bits_set);
9467 +- mutex_unlock(&spec->coef_mutex);
9468 ++ coef_mutex_unlock(codec);
9469 + }
9470 +
9471 + #define alc_update_coef_idx(codec, coef_idx, mask, bits_set) \
9472 +@@ -230,9 +241,7 @@ struct coef_fw {
9473 + static void alc_process_coef_fw(struct hda_codec *codec,
9474 + const struct coef_fw *fw)
9475 + {
9476 +- struct alc_spec *spec = codec->spec;
9477 +-
9478 +- mutex_lock(&spec->coef_mutex);
9479 ++ coef_mutex_lock(codec);
9480 + for (; fw->nid; fw++) {
9481 + if (fw->mask == (unsigned short)-1)
9482 + __alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val);
9483 +@@ -240,7 +249,7 @@ static void alc_process_coef_fw(struct hda_codec *codec,
9484 + __alc_update_coefex_idx(codec, fw->nid, fw->idx,
9485 + fw->mask, fw->val);
9486 + }
9487 +- mutex_unlock(&spec->coef_mutex);
9488 ++ coef_mutex_unlock(codec);
9489 + }
9490 +
9491 + /*
9492 +@@ -9013,6 +9022,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9493 + SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
9494 + SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
9495 + SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
9496 ++ SND_PCI_QUIRK(0x17aa, 0x383d, "Legion Y9000X 2019", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
9497 + SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
9498 + SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
9499 + SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
9500 +diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
9501 +index 6549e7fef3e32..c5ea3b115966b 100644
9502 +--- a/sound/soc/codecs/tas2770.c
9503 ++++ b/sound/soc/codecs/tas2770.c
9504 +@@ -38,10 +38,12 @@ static void tas2770_reset(struct tas2770_priv *tas2770)
9505 + gpiod_set_value_cansleep(tas2770->reset_gpio, 0);
9506 + msleep(20);
9507 + gpiod_set_value_cansleep(tas2770->reset_gpio, 1);
9508 ++ usleep_range(1000, 2000);
9509 + }
9510 +
9511 + snd_soc_component_write(tas2770->component, TAS2770_SW_RST,
9512 + TAS2770_RST);
9513 ++ usleep_range(1000, 2000);
9514 + }
9515 +
9516 + static int tas2770_set_bias_level(struct snd_soc_component *component,
9517 +@@ -110,6 +112,7 @@ static int tas2770_codec_resume(struct snd_soc_component *component)
9518 +
9519 + if (tas2770->sdz_gpio) {
9520 + gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
9521 ++ usleep_range(1000, 2000);
9522 + } else {
9523 + ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
9524 + TAS2770_PWR_CTRL_MASK,
9525 +@@ -510,8 +513,10 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
9526 +
9527 + tas2770->component = component;
9528 +
9529 +- if (tas2770->sdz_gpio)
9530 ++ if (tas2770->sdz_gpio) {
9531 + gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
9532 ++ usleep_range(1000, 2000);
9533 ++ }
9534 +
9535 + tas2770_reset(tas2770);
9536 +
9537 +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
9538 +index 6cb01a8e08fb6..738038f141964 100644
9539 +--- a/sound/soc/codecs/wm_adsp.c
9540 ++++ b/sound/soc/codecs/wm_adsp.c
9541 +@@ -1448,7 +1448,8 @@ static int wm_adsp_buffer_parse_coeff(struct cs_dsp_coeff_ctl *cs_ctl)
9542 + int ret, i;
9543 +
9544 + for (i = 0; i < 5; ++i) {
9545 +- ret = cs_dsp_coeff_read_ctrl(cs_ctl, &coeff_v1, sizeof(coeff_v1));
9546 ++ ret = cs_dsp_coeff_read_ctrl(cs_ctl, &coeff_v1,
9547 ++ min(cs_ctl->len, sizeof(coeff_v1)));
9548 + if (ret < 0)
9549 + return ret;
9550 +
9551 +diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
9552 +index 3b1ddea26a9ef..76f191ec7bf84 100644
9553 +--- a/sound/soc/mediatek/Kconfig
9554 ++++ b/sound/soc/mediatek/Kconfig
9555 +@@ -215,7 +215,7 @@ config SND_SOC_MT8195_MT6359_RT1019_RT5682
9556 +
9557 + config SND_SOC_MT8195_MT6359_RT1011_RT5682
9558 + tristate "ASoC Audio driver for MT8195 with MT6359 RT1011 RT5682 codec"
9559 +- depends on I2C
9560 ++ depends on I2C && GPIOLIB
9561 + depends on SND_SOC_MT8195 && MTK_PMIC_WRAP
9562 + select SND_SOC_MT6359
9563 + select SND_SOC_RT1011
9564 +diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
9565 +index a59e9d20cb46b..4b1773c1fb95f 100644
9566 +--- a/sound/soc/qcom/lpass-platform.c
9567 ++++ b/sound/soc/qcom/lpass-platform.c
9568 +@@ -524,7 +524,7 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
9569 + return -EINVAL;
9570 + }
9571 +
9572 +- ret = regmap_update_bits(map, reg_irqclr, val_irqclr, val_irqclr);
9573 ++ ret = regmap_write_bits(map, reg_irqclr, val_irqclr, val_irqclr);
9574 + if (ret) {
9575 + dev_err(soc_runtime->dev, "error writing to irqclear reg: %d\n", ret);
9576 + return ret;
9577 +@@ -665,7 +665,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
9578 + return -EINVAL;
9579 + }
9580 + if (interrupts & LPAIF_IRQ_PER(chan)) {
9581 +- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
9582 ++ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
9583 + if (rv) {
9584 + dev_err(soc_runtime->dev,
9585 + "error writing to irqclear reg: %d\n", rv);
9586 +@@ -676,7 +676,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
9587 + }
9588 +
9589 + if (interrupts & LPAIF_IRQ_XRUN(chan)) {
9590 +- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
9591 ++ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
9592 + if (rv) {
9593 + dev_err(soc_runtime->dev,
9594 + "error writing to irqclear reg: %d\n", rv);
9595 +@@ -688,7 +688,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
9596 + }
9597 +
9598 + if (interrupts & LPAIF_IRQ_ERR(chan)) {
9599 +- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
9600 ++ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
9601 + if (rv) {
9602 + dev_err(soc_runtime->dev,
9603 + "error writing to irqclear reg: %d\n", rv);
9604 +diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
9605 +index dc0e7c8d31f37..53457a0d466d3 100644
9606 +--- a/sound/soc/soc-ops.c
9607 ++++ b/sound/soc/soc-ops.c
9608 +@@ -308,7 +308,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
9609 + unsigned int sign_bit = mc->sign_bit;
9610 + unsigned int mask = (1 << fls(max)) - 1;
9611 + unsigned int invert = mc->invert;
9612 +- int err;
9613 ++ int err, ret;
9614 + bool type_2r = false;
9615 + unsigned int val2 = 0;
9616 + unsigned int val, val_mask;
9617 +@@ -350,12 +350,18 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
9618 + err = snd_soc_component_update_bits(component, reg, val_mask, val);
9619 + if (err < 0)
9620 + return err;
9621 ++ ret = err;
9622 +
9623 +- if (type_2r)
9624 ++ if (type_2r) {
9625 + err = snd_soc_component_update_bits(component, reg2, val_mask,
9626 +- val2);
9627 ++ val2);
9628 ++ /* Don't discard any error code or drop change flag */
9629 ++ if (ret == 0 || err < 0) {
9630 ++ ret = err;
9631 ++ }
9632 ++ }
9633 +
9634 +- return err;
9635 ++ return ret;
9636 + }
9637 + EXPORT_SYMBOL_GPL(snd_soc_put_volsw);
9638 +
9639 +@@ -421,6 +427,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
9640 + int min = mc->min;
9641 + unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
9642 + int err = 0;
9643 ++ int ret;
9644 + unsigned int val, val_mask;
9645 +
9646 + val = ucontrol->value.integer.value[0];
9647 +@@ -437,6 +444,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
9648 + err = snd_soc_component_update_bits(component, reg, val_mask, val);
9649 + if (err < 0)
9650 + return err;
9651 ++ ret = err;
9652 +
9653 + if (snd_soc_volsw_is_stereo(mc)) {
9654 + unsigned int val2;
9655 +@@ -447,6 +455,11 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
9656 +
9657 + err = snd_soc_component_update_bits(component, reg2, val_mask,
9658 + val2);
9659 ++
9660 ++ /* Don't discard any error code or drop change flag */
9661 ++ if (ret == 0 || err < 0) {
9662 ++ ret = err;
9663 ++ }
9664 + }
9665 + return err;
9666 + }
9667 +@@ -506,7 +519,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
9668 + unsigned int mask = (1 << fls(max)) - 1;
9669 + unsigned int invert = mc->invert;
9670 + unsigned int val, val_mask;
9671 +- int ret;
9672 ++ int err, ret;
9673 +
9674 + if (invert)
9675 + val = (max - ucontrol->value.integer.value[0]) & mask;
9676 +@@ -515,9 +528,10 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
9677 + val_mask = mask << shift;
9678 + val = val << shift;
9679 +
9680 +- ret = snd_soc_component_update_bits(component, reg, val_mask, val);
9681 +- if (ret < 0)
9682 +- return ret;
9683 ++ err = snd_soc_component_update_bits(component, reg, val_mask, val);
9684 ++ if (err < 0)
9685 ++ return err;
9686 ++ ret = err;
9687 +
9688 + if (snd_soc_volsw_is_stereo(mc)) {
9689 + if (invert)
9690 +@@ -527,8 +541,12 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
9691 + val_mask = mask << shift;
9692 + val = val << shift;
9693 +
9694 +- ret = snd_soc_component_update_bits(component, rreg, val_mask,
9695 ++ err = snd_soc_component_update_bits(component, rreg, val_mask,
9696 + val);
9697 ++ /* Don't discard any error code or drop change flag */
9698 ++ if (ret == 0 || err < 0) {
9699 ++ ret = err;
9700 ++ }
9701 + }
9702 +
9703 + return ret;
9704 +@@ -877,6 +895,7 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
9705 + unsigned long mask = (1UL<<mc->nbits)-1;
9706 + long max = mc->max;
9707 + long val = ucontrol->value.integer.value[0];
9708 ++ int ret = 0;
9709 + unsigned int i;
9710 +
9711 + if (val < mc->min || val > mc->max)
9712 +@@ -891,9 +910,11 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
9713 + regmask, regval);
9714 + if (err < 0)
9715 + return err;
9716 ++ if (err > 0)
9717 ++ ret = err;
9718 + }
9719 +
9720 +- return 0;
9721 ++ return ret;
9722 + }
9723 + EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx);
9724 +
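In ASoC, snd_soc_component_update_bits() returns 1 when the register changed, 0 when it was unchanged, and a negative errno on failure; the put() handlers above must report "changed" if either channel changed while still surfacing any error. The combining rule used in these hunks, extracted into a sketch:

    /* Sketch: fold a second update result into an accumulated one,
     * where >0 means "changed", 0 "no change", <0 an error. An error
     * always wins; a change flag is never dropped by a later 0.
     */
    #include <stdio.h>

    static int fold(int ret, int err)
    {
            if (ret == 0 || err < 0)
                    ret = err;
            return ret;
    }

    int main(void)
    {
            printf("%d\n", fold(1, 0));   /* 1: keep change flag  */
            printf("%d\n", fold(0, 1));   /* 1: second op changed */
            printf("%d\n", fold(1, -5));  /* -5: error wins       */
            printf("%d\n", fold(0, 0));   /* 0: nothing changed   */
            return 0;
    }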
9725 +diff --git a/sound/usb/implicit.c b/sound/usb/implicit.c
9726 +index 70319c822c10b..2d444ec742029 100644
9727 +--- a/sound/usb/implicit.c
9728 ++++ b/sound/usb/implicit.c
9729 +@@ -47,13 +47,13 @@ struct snd_usb_implicit_fb_match {
9730 + static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
9731 + /* Generic matching */
9732 + IMPLICIT_FB_GENERIC_DEV(0x0499, 0x1509), /* Steinberg UR22 */
9733 +- IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2080), /* M-Audio FastTrack Ultra */
9734 +- IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2081), /* M-Audio FastTrack Ultra */
9735 + IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2030), /* M-Audio Fast Track C400 */
9736 + IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2031), /* M-Audio Fast Track C600 */
9737 +
9738 + /* Fixed EP */
9739 + /* FIXME: check the availability of generic matching */
9740 ++ IMPLICIT_FB_FIXED_DEV(0x0763, 0x2080, 0x81, 2), /* M-Audio FastTrack Ultra */
9741 ++ IMPLICIT_FB_FIXED_DEV(0x0763, 0x2081, 0x81, 2), /* M-Audio FastTrack Ultra */
9742 + IMPLICIT_FB_FIXED_DEV(0x2466, 0x8010, 0x81, 2), /* Fractal Audio Axe-Fx III */
9743 + IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0001, 0x81, 2), /* Solid State Logic SSL2 */
9744 + IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0002, 0x81, 2), /* Solid State Logic SSL2+ */
9745 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
9746 +index 2bd28474e8fae..258165e7c8074 100644
9747 +--- a/sound/usb/mixer.c
9748 ++++ b/sound/usb/mixer.c
9749 +@@ -3678,17 +3678,14 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
9750 + err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
9751 + cval->cache_val[idx]);
9752 + if (err < 0)
9753 +- return err;
9754 ++ break;
9755 + }
9756 + idx++;
9757 + }
9758 + } else {
9759 + /* master */
9760 +- if (cval->cached) {
9761 +- err = snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val);
9762 +- if (err < 0)
9763 +- return err;
9764 +- }
9765 ++ if (cval->cached)
9766 ++ snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val);
9767 + }
9768 +
9769 + return 0;
9770 +diff --git a/tools/lib/subcmd/subcmd-util.h b/tools/lib/subcmd/subcmd-util.h
9771 +index 794a375dad360..b2aec04fce8f6 100644
9772 +--- a/tools/lib/subcmd/subcmd-util.h
9773 ++++ b/tools/lib/subcmd/subcmd-util.h
9774 +@@ -50,15 +50,8 @@ static NORETURN inline void die(const char *err, ...)
9775 + static inline void *xrealloc(void *ptr, size_t size)
9776 + {
9777 + void *ret = realloc(ptr, size);
9778 +- if (!ret && !size)
9779 +- ret = realloc(ptr, 1);
9780 +- if (!ret) {
9781 +- ret = realloc(ptr, size);
9782 +- if (!ret && !size)
9783 +- ret = realloc(ptr, 1);
9784 +- if (!ret)
9785 +- die("Out of memory, realloc failed");
9786 +- }
9787 ++ if (!ret)
9788 ++ die("Out of memory, realloc failed");
9789 + return ret;
9790 + }
9791 +
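The subcmd xrealloc() loses its redundant retry-with-size-1 contortions: a failed realloc() is simply fatal. Note that realloc(ptr, 0) may legitimately return NULL on some libcs, and the simplified wrapper treats that as fatal too, so callers are expected not to request zero bytes. The resulting wrapper, as a standalone sketch:

    /* Sketch: the simplified wrapper after the patch - one attempt,
     * die on NULL. realloc(p, 0) returning NULL would also be fatal
     * here; callers must not pass size 0.
     */
    #include <stdio.h>
    #include <stdlib.h>

    static void *xrealloc(void *ptr, size_t size)
    {
            void *ret = realloc(ptr, size);

            if (!ret) {
                    fprintf(stderr, "Out of memory, realloc failed\n");
                    exit(1);
            }
            return ret;
    }

    int main(void)
    {
            char *p = xrealloc(NULL, 16); /* acts like malloc(16) */

            snprintf(p, 16, "ok");
            puts(p);
            free(p);
            return 0;
    }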
9792 +diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
9793 +index fbb3c4057c302..71710a1da4472 100644
9794 +--- a/tools/perf/util/bpf-loader.c
9795 ++++ b/tools/perf/util/bpf-loader.c
9796 +@@ -1214,9 +1214,10 @@ bpf__obj_config_map(struct bpf_object *obj,
9797 + pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
9798 + err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
9799 + out:
9800 +- free(map_name);
9801 + if (!err)
9802 + *key_scan_pos += strlen(map_opt);
9803 ++
9804 ++ free(map_name);
9805 + return err;
9806 + }
9807 +
9808 +diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
9809 +index 66095568bf327..fae843bf2f0eb 100644
9810 +--- a/tools/testing/kunit/kunit_kernel.py
9811 ++++ b/tools/testing/kunit/kunit_kernel.py
9812 +@@ -6,6 +6,7 @@
9813 + # Author: Felix Guo <felixguoxiuping@×××××.com>
9814 + # Author: Brendan Higgins <brendanhiggins@××××××.com>
9815 +
9816 ++import importlib.abc
9817 + import importlib.util
9818 + import logging
9819 + import subprocess
9820 +diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
9821 +index 79f6bd1e50d60..f6933b06daf88 100644
9822 +--- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
9823 ++++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
9824 +@@ -8,6 +8,7 @@
9825 + #include "test_ksyms_btf_null_check.skel.h"
9826 + #include "test_ksyms_weak.skel.h"
9827 + #include "test_ksyms_weak.lskel.h"
9828 ++#include "test_ksyms_btf_write_check.skel.h"
9829 +
9830 + static int duration;
9831 +
9832 +@@ -137,6 +138,16 @@ cleanup:
9833 + test_ksyms_weak_lskel__destroy(skel);
9834 + }
9835 +
9836 ++static void test_write_check(void)
9837 ++{
9838 ++ struct test_ksyms_btf_write_check *skel;
9839 ++
9840 ++ skel = test_ksyms_btf_write_check__open_and_load();
9841 ++ ASSERT_ERR_PTR(skel, "unexpected load of a prog writing to ksym memory\n");
9842 ++
9843 ++ test_ksyms_btf_write_check__destroy(skel);
9844 ++}
9845 ++
9846 + void test_ksyms_btf(void)
9847 + {
9848 + int percpu_datasec;
9849 +@@ -167,4 +178,7 @@ void test_ksyms_btf(void)
9850 +
9851 + if (test__start_subtest("weak_ksyms_lskel"))
9852 + test_weak_syms_lskel();
9853 ++
9854 ++ if (test__start_subtest("write_check"))
9855 ++ test_write_check();
9856 + }
9857 +diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
9858 +new file mode 100644
9859 +index 0000000000000..2180c41cd890f
9860 +--- /dev/null
9861 ++++ b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
9862 +@@ -0,0 +1,29 @@
9863 ++// SPDX-License-Identifier: GPL-2.0
9864 ++/* Copyright (c) 2021 Google */
9865 ++
9866 ++#include "vmlinux.h"
9867 ++
9868 ++#include <bpf/bpf_helpers.h>
9869 ++
9870 ++extern const int bpf_prog_active __ksym; /* int type global var. */
9871 ++
9872 ++SEC("raw_tp/sys_enter")
9873 ++int handler(const void *ctx)
9874 ++{
9875 ++ int *active;
9876 ++ __u32 cpu;
9877 ++
9878 ++ cpu = bpf_get_smp_processor_id();
9879 ++ active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
9880 ++ if (active) {
9881 ++ /* Kernel memory obtained from bpf_{per,this}_cpu_ptr
9882 ++ * is read-only, should _not_ pass verification.
9883 ++ */
9884 ++ /* WRITE_ONCE */
9885 ++ *(volatile int *)active = -1;
9886 ++ }
9887 ++
9888 ++ return 0;
9889 ++}
9890 ++
9891 ++char _license[] SEC("license") = "GPL";
9892 +diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c
9893 +index 076cf4325f783..cd4582129c7d6 100644
9894 +--- a/tools/testing/selftests/clone3/clone3.c
9895 ++++ b/tools/testing/selftests/clone3/clone3.c
9896 +@@ -126,8 +126,6 @@ static void test_clone3(uint64_t flags, size_t size, int expected,
9897 +
9898 + int main(int argc, char *argv[])
9899 + {
9900 +- pid_t pid;
9901 +-
9902 + uid_t uid = getuid();
9903 +
9904 + ksft_print_header();
9905 +diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
9906 +index 12c5e27d32c16..2d7fca446c7f7 100644
9907 +--- a/tools/testing/selftests/exec/Makefile
9908 ++++ b/tools/testing/selftests/exec/Makefile
9909 +@@ -3,8 +3,8 @@ CFLAGS = -Wall
9910 + CFLAGS += -Wno-nonnull
9911 + CFLAGS += -D_GNU_SOURCE
9912 +
9913 +-TEST_PROGS := binfmt_script non-regular
9914 +-TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216
9915 ++TEST_PROGS := binfmt_script
9916 ++TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular
9917 + TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir
9918 + # Makefile is a run-time dependency, since it's accessed by the execveat test
9919 + TEST_FILES := Makefile
9920 +diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
9921 +index 79a182cfa43ad..78e59620d28de 100644
9922 +--- a/tools/testing/selftests/kselftest_harness.h
9923 ++++ b/tools/testing/selftests/kselftest_harness.h
9924 +@@ -875,7 +875,8 @@ static void __timeout_handler(int sig, siginfo_t *info, void *ucontext)
9925 + }
9926 +
9927 + t->timed_out = true;
9928 +- kill(t->pid, SIGKILL);
9929 ++ // signal process group
9930 ++ kill(-(t->pid), SIGKILL);
9931 + }
9932 +
9933 + void __wait_for_test(struct __test_metadata *t)
9934 +@@ -985,6 +986,7 @@ void __run_test(struct __fixture_metadata *f,
9935 + ksft_print_msg("ERROR SPAWNING TEST CHILD\n");
9936 + t->passed = 0;
9937 + } else if (t->pid == 0) {
9938 ++ setpgrp();
9939 + t->fn(t, variant);
9940 + if (t->skip)
9941 + _exit(255);
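The harness timeout handler now signals the whole process group (a negative pid argument to kill()) and each test child first puts itself in its own group, so helper processes forked by a test are killed along with it instead of lingering. A sketch of the pattern (setpgid(0, 0) is what the harness's setpgrp() call does):

    /* Sketch: put the child in its own process group, then pass a
     * negative pid to kill() to signal the child and everything it
     * forked.
     */
    #include <signal.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            pid_t pid = fork();

            if (pid == 0) {
                    setpgid(0, 0);       /* new group, pgid == getpid() */
                    if (fork() == 0)     /* grandchild in same group    */
                            pause();
                    pause();
                    _exit(0);
            }

            sleep(1);                    /* crude stand-in for timeout  */
            kill(-pid, SIGKILL);         /* negative pid: whole group   */
            waitpid(pid, NULL, 0);
            printf("group %d killed\n", (int)pid);
            return 0;
    }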
9942 +diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
9943 +index 4fdfb42aeddba..d2e0b9091fdca 100644
9944 +--- a/tools/testing/selftests/kvm/Makefile
9945 ++++ b/tools/testing/selftests/kvm/Makefile
9946 +@@ -75,7 +75,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
9947 + TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
9948 + TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
9949 + TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
9950 +-TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test
9951 + TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
9952 + TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
9953 + TEST_GEN_PROGS_x86_64 += demand_paging_test
9954 +diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c
9955 +index e54106643337b..4c88238fc8f05 100644
9956 +--- a/tools/testing/selftests/mincore/mincore_selftest.c
9957 ++++ b/tools/testing/selftests/mincore/mincore_selftest.c
9958 +@@ -207,15 +207,21 @@ TEST(check_file_mmap)
9959 +
9960 + errno = 0;
9961 + fd = open(".", O_TMPFILE | O_RDWR, 0600);
9962 +- ASSERT_NE(-1, fd) {
9963 +- TH_LOG("Can't create temporary file: %s",
9964 +- strerror(errno));
9965 ++ if (fd < 0) {
9966 ++ ASSERT_EQ(errno, EOPNOTSUPP) {
9967 ++ TH_LOG("Can't create temporary file: %s",
9968 ++ strerror(errno));
9969 ++ }
9970 ++ SKIP(goto out_free, "O_TMPFILE not supported by filesystem.");
9971 + }
9972 + errno = 0;
9973 + retval = fallocate(fd, 0, 0, FILE_SIZE);
9974 +- ASSERT_EQ(0, retval) {
9975 +- TH_LOG("Error allocating space for the temporary file: %s",
9976 +- strerror(errno));
9977 ++ if (retval) {
9978 ++ ASSERT_EQ(errno, EOPNOTSUPP) {
9979 ++ TH_LOG("Error allocating space for the temporary file: %s",
9980 ++ strerror(errno));
9981 ++ }
9982 ++ SKIP(goto out_close, "fallocate not supported by filesystem.");
9983 + }
9984 +
9985 + /*
9986 +@@ -271,7 +277,9 @@ TEST(check_file_mmap)
9987 + }
9988 +
9989 + munmap(addr, FILE_SIZE);
9990 ++out_close:
9991 + close(fd);
9992 ++out_free:
9993 + free(vec);
9994 + }
9995 +
9996 +diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
9997 +index f31205f04ee05..8c5fea68ae677 100644
9998 +--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
9999 ++++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
10000 +@@ -1236,7 +1236,7 @@ static int get_userns_fd(unsigned long nsid, unsigned long hostid, unsigned long
10001 + }
10002 +
10003 + /**
10004 +- * Validate that an attached mount in our mount namespace can be idmapped.
10005 ++ * Validate that an attached mount in our mount namespace cannot be idmapped.
10006 + * (The kernel enforces that the mount's mount namespace and the caller's mount
10007 + * namespace match.)
10008 + */
10009 +@@ -1259,7 +1259,7 @@ TEST_F(mount_setattr_idmapped, attached_mount_inside_current_mount_namespace)
10010 +
10011 + attr.userns_fd = get_userns_fd(0, 10000, 10000);
10012 + ASSERT_GE(attr.userns_fd, 0);
10013 +- ASSERT_EQ(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
10014 ++ ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
10015 + ASSERT_EQ(close(attr.userns_fd), 0);
10016 + ASSERT_EQ(close(open_tree_fd), 0);
10017 + }
10018 +diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
10019 +index df322e47a54fb..b35010cc7f6ae 100755
10020 +--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
10021 ++++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
10022 +@@ -1601,4 +1601,4 @@ for name in ${TESTS}; do
10023 + done
10024 + done
10025 +
10026 +-[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP}
10027 ++[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP} || exit 0
10028 +diff --git a/tools/testing/selftests/netfilter/nft_fib.sh b/tools/testing/selftests/netfilter/nft_fib.sh
10029 +index 6caf6ac8c285f..695a1958723f5 100755
10030 +--- a/tools/testing/selftests/netfilter/nft_fib.sh
10031 ++++ b/tools/testing/selftests/netfilter/nft_fib.sh
10032 +@@ -174,6 +174,7 @@ test_ping() {
10033 + ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
10034 + ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
10035 + ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
10036 ++ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.rp_filter=0 > /dev/null
10037 +
10038 + sleep 3
10039 +
10040 +diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh
10041 +index 04633119b29a0..5a8db0b48928f 100755
10042 +--- a/tools/testing/selftests/netfilter/nft_zones_many.sh
10043 ++++ b/tools/testing/selftests/netfilter/nft_zones_many.sh
10044 +@@ -9,7 +9,7 @@ ns="ns-$sfx"
10045 + # Kselftest framework requirement - SKIP code is 4.
10046 + ksft_skip=4
10047 +
10048 +-zones=20000
10049 ++zones=2000
10050 + have_ct_tool=0
10051 + ret=0
10052 +
10053 +@@ -75,10 +75,10 @@ EOF
10054 +
10055 + while [ $i -lt $max_zones ]; do
10056 + local start=$(date +%s%3N)
10057 +- i=$((i + 10000))
10058 ++ i=$((i + 1000))
10059 + j=$((j + 1))
10060 + # nft rule in output places each packet in a different zone.
10061 +- dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345
10062 ++ dd if=/dev/zero of=/dev/stdout bs=8k count=1000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345
10063 + if [ $? -ne 0 ] ;then
10064 + ret=1
10065 + break
10066 +@@ -86,7 +86,7 @@ EOF
10067 +
10068 + stop=$(date +%s%3N)
10069 + local duration=$((stop-start))
10070 +- echo "PASS: added 10000 entries in $duration ms (now $i total, loop $j)"
10071 ++ echo "PASS: added 1000 entries in $duration ms (now $i total, loop $j)"
10072 + done
10073 +
10074 + if [ $have_ct_tool -eq 1 ]; then
10075 +@@ -128,11 +128,11 @@ test_conntrack_tool() {
10076 + break
10077 + fi
10078 +
10079 +- if [ $((i%10000)) -eq 0 ];then
10080 ++ if [ $((i%1000)) -eq 0 ];then
10081 + stop=$(date +%s%3N)
10082 +
10083 + local duration=$((stop-start))
10084 +- echo "PASS: added 10000 entries in $duration ms (now $i total)"
10085 ++ echo "PASS: added 1000 entries in $duration ms (now $i total)"
10086 + start=$stop
10087 + fi
10088 + done
10089 +diff --git a/tools/testing/selftests/openat2/Makefile b/tools/testing/selftests/openat2/Makefile
10090 +index 4b93b1417b862..843ba56d8e49e 100644
10091 +--- a/tools/testing/selftests/openat2/Makefile
10092 ++++ b/tools/testing/selftests/openat2/Makefile
10093 +@@ -5,4 +5,4 @@ TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test
10094 +
10095 + include ../lib.mk
10096 +
10097 +-$(TEST_GEN_PROGS): helpers.c
10098 ++$(TEST_GEN_PROGS): helpers.c helpers.h
10099 +diff --git a/tools/testing/selftests/openat2/helpers.h b/tools/testing/selftests/openat2/helpers.h
10100 +index a6ea27344db2d..7056340b9339e 100644
10101 +--- a/tools/testing/selftests/openat2/helpers.h
10102 ++++ b/tools/testing/selftests/openat2/helpers.h
10103 +@@ -9,6 +9,7 @@
10104 +
10105 + #define _GNU_SOURCE
10106 + #include <stdint.h>
10107 ++#include <stdbool.h>
10108 + #include <errno.h>
10109 + #include <linux/types.h>
10110 + #include "../kselftest.h"
10111 +@@ -62,11 +63,12 @@ bool needs_openat2(const struct open_how *how);
10112 + (similar to chroot(2)). */
10113 + #endif /* RESOLVE_IN_ROOT */
10114 +
10115 +-#define E_func(func, ...) \
10116 +- do { \
10117 +- if (func(__VA_ARGS__) < 0) \
10118 +- ksft_exit_fail_msg("%s:%d %s failed\n", \
10119 +- __FILE__, __LINE__, #func);\
10120 ++#define E_func(func, ...) \
10121 ++ do { \
10122 ++ errno = 0; \
10123 ++ if (func(__VA_ARGS__) < 0) \
10124 ++ ksft_exit_fail_msg("%s:%d %s failed - errno:%d\n", \
10125 ++ __FILE__, __LINE__, #func, errno); \
10126 + } while (0)
10127 +
10128 + #define E_asprintf(...) E_func(asprintf, __VA_ARGS__)
10129 +diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
10130 +index 1bddbe934204c..7fb902099de45 100644
10131 +--- a/tools/testing/selftests/openat2/openat2_test.c
10132 ++++ b/tools/testing/selftests/openat2/openat2_test.c
10133 +@@ -259,6 +259,16 @@ void test_openat2_flags(void)
10134 + unlink(path);
10135 +
10136 + fd = sys_openat2(AT_FDCWD, path, &test->how);
10137 ++ if (fd < 0 && fd == -EOPNOTSUPP) {
10138 ++ /*
10139 ++		 * Skip the test case if it failed because the filesystem
10140 ++		 * does not support it (e.g. a valid O_TMPFILE combination on NFS).
10141 ++ */
10142 ++ ksft_test_result_skip("openat2 with %s fails with %d (%s)\n",
10143 ++ test->name, fd, strerror(-fd));
10144 ++ goto next;
10145 ++ }
10146 ++
10147 + if (test->err >= 0)
10148 + failed = (fd < 0);
10149 + else
10150 +@@ -303,7 +313,7 @@ skip:
10151 + else
10152 + resultfn("openat2 with %s fails with %d (%s)\n",
10153 + test->name, test->err, strerror(-test->err));
10154 +-
10155 ++next:
10156 + free(fdpath);
10157 + fflush(stdout);
10158 + }
10159 +diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h
10160 +index 01f8d3c0cf2cb..6922d6417e1cf 100644
10161 +--- a/tools/testing/selftests/pidfd/pidfd.h
10162 ++++ b/tools/testing/selftests/pidfd/pidfd.h
10163 +@@ -68,7 +68,7 @@
10164 + #define PIDFD_SKIP 3
10165 + #define PIDFD_XFAIL 4
10166 +
10167 +-int wait_for_pid(pid_t pid)
10168 ++static inline int wait_for_pid(pid_t pid)
10169 + {
10170 + int status, ret;
10171 +
10172 +@@ -78,13 +78,20 @@ again:
10173 + if (errno == EINTR)
10174 + goto again;
10175 +
10176 ++ ksft_print_msg("waitpid returned -1, errno=%d\n", errno);
10177 + return -1;
10178 + }
10179 +
10180 +- if (!WIFEXITED(status))
10181 ++ if (!WIFEXITED(status)) {
10182 ++ ksft_print_msg(
10183 ++ "waitpid !WIFEXITED, WIFSIGNALED=%d, WTERMSIG=%d\n",
10184 ++ WIFSIGNALED(status), WTERMSIG(status));
10185 + return -1;
10186 ++ }
10187 +
10188 +- return WEXITSTATUS(status);
10189 ++ ret = WEXITSTATUS(status);
10190 ++ ksft_print_msg("waitpid WEXITSTATUS=%d\n", ret);
10191 ++ return ret;
10192 + }
10193 +
10194 + static inline int sys_pidfd_open(pid_t pid, unsigned int flags)
10195 +diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
10196 +index 22558524f71c3..3fd8e903118f5 100644
10197 +--- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
10198 ++++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
10199 +@@ -12,6 +12,7 @@
10200 + #include <string.h>
10201 + #include <syscall.h>
10202 + #include <sys/wait.h>
10203 ++#include <sys/mman.h>
10204 +
10205 + #include "pidfd.h"
10206 + #include "../kselftest.h"
10207 +@@ -80,7 +81,10 @@ static inline int error_check(struct error *err, const char *test_name)
10208 + return err->code;
10209 + }
10210 +
10211 ++#define CHILD_STACK_SIZE 8192
10212 ++
10213 + struct child {
10214 ++ char *stack;
10215 + pid_t pid;
10216 + int fd;
10217 + };
10218 +@@ -89,17 +93,22 @@ static struct child clone_newns(int (*fn)(void *), void *args,
10219 + struct error *err)
10220 + {
10221 + static int flags = CLONE_PIDFD | CLONE_NEWPID | CLONE_NEWNS | SIGCHLD;
10222 +- size_t stack_size = 1024;
10223 +- char *stack[1024] = { 0 };
10224 + struct child ret;
10225 +
10226 + if (!(flags & CLONE_NEWUSER) && geteuid() != 0)
10227 + flags |= CLONE_NEWUSER;
10228 +
10229 ++ ret.stack = mmap(NULL, CHILD_STACK_SIZE, PROT_READ | PROT_WRITE,
10230 ++ MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
10231 ++ if (ret.stack == MAP_FAILED) {
10232 ++ error_set(err, -1, "mmap of stack failed (errno %d)", errno);
10233 ++ return ret;
10234 ++ }
10235 ++
10236 + #ifdef __ia64__
10237 +- ret.pid = __clone2(fn, stack, stack_size, flags, args, &ret.fd);
10238 ++ ret.pid = __clone2(fn, ret.stack, CHILD_STACK_SIZE, flags, args, &ret.fd);
10239 + #else
10240 +- ret.pid = clone(fn, stack + stack_size, flags, args, &ret.fd);
10241 ++ ret.pid = clone(fn, ret.stack + CHILD_STACK_SIZE, flags, args, &ret.fd);
10242 + #endif
10243 +
10244 + if (ret.pid < 0) {
10245 +@@ -129,6 +138,11 @@ static inline int child_join(struct child *child, struct error *err)
10246 + else if (r > 0)
10247 + error_set(err, r, "child %d reported: %d", child->pid, r);
10248 +
10249 ++ if (munmap(child->stack, CHILD_STACK_SIZE)) {
10250 ++ error_set(err, -1, "munmap of child stack failed (errno %d)", errno);
10251 ++ r = -1;
10252 ++ }
10253 ++
10254 + return r;
10255 + }
10256 +
10257 +diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
10258 +index 529eb700ac26a..9a2d64901d591 100644
10259 +--- a/tools/testing/selftests/pidfd/pidfd_test.c
10260 ++++ b/tools/testing/selftests/pidfd/pidfd_test.c
10261 +@@ -441,7 +441,6 @@ static void test_pidfd_poll_exec(int use_waitpid)
10262 + {
10263 + int pid, pidfd = 0;
10264 + int status, ret;
10265 +- pthread_t t1;
10266 + time_t prog_start = time(NULL);
10267 + const char *test_name = "pidfd_poll check for premature notification on child thread exec";
10268 +
10269 +@@ -500,13 +499,14 @@ static int child_poll_leader_exit_test(void *args)
10270 + */
10271 + *child_exit_secs = time(NULL);
10272 + syscall(SYS_exit, 0);
10273 ++	/* Never reached, but appeases the compiler, which thinks we should return. */
10274 ++ exit(0);
10275 + }
10276 +
10277 + static void test_pidfd_poll_leader_exit(int use_waitpid)
10278 + {
10279 + int pid, pidfd = 0;
10280 +- int status, ret;
10281 +- time_t prog_start = time(NULL);
10282 ++ int status, ret = 0;
10283 + const char *test_name = "pidfd_poll check for premature notification on non-empty"
10284 + "group leader exit";
10285 +
10286 +diff --git a/tools/testing/selftests/pidfd/pidfd_wait.c b/tools/testing/selftests/pidfd/pidfd_wait.c
10287 +index be2943f072f60..17999e082aa71 100644
10288 +--- a/tools/testing/selftests/pidfd/pidfd_wait.c
10289 ++++ b/tools/testing/selftests/pidfd/pidfd_wait.c
10290 +@@ -39,7 +39,7 @@ static int sys_waitid(int which, pid_t pid, siginfo_t *info, int options,
10291 +
10292 + TEST(wait_simple)
10293 + {
10294 +- int pidfd = -1, status = 0;
10295 ++ int pidfd = -1;
10296 + pid_t parent_tid = -1;
10297 + struct clone_args args = {
10298 + .parent_tid = ptr_to_u64(&parent_tid),
10299 +@@ -47,7 +47,6 @@ TEST(wait_simple)
10300 + .flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
10301 + .exit_signal = SIGCHLD,
10302 + };
10303 +- int ret;
10304 + pid_t pid;
10305 + siginfo_t info = {
10306 + .si_signo = 0,
10307 +@@ -88,7 +87,7 @@ TEST(wait_simple)
10308 +
10309 + TEST(wait_states)
10310 + {
10311 +- int pidfd = -1, status = 0;
10312 ++ int pidfd = -1;
10313 + pid_t parent_tid = -1;
10314 + struct clone_args args = {
10315 + .parent_tid = ptr_to_u64(&parent_tid),
10316 +diff --git a/tools/testing/selftests/rtc/settings b/tools/testing/selftests/rtc/settings
10317 +index ba4d85f74cd6b..a953c96aa16e1 100644
10318 +--- a/tools/testing/selftests/rtc/settings
10319 ++++ b/tools/testing/selftests/rtc/settings
10320 +@@ -1 +1 @@
10321 +-timeout=90
10322 ++timeout=180
10323 +diff --git a/tools/testing/selftests/vDSO/vdso_test_abi.c b/tools/testing/selftests/vDSO/vdso_test_abi.c
10324 +index 3d603f1394af4..883ca85424bc5 100644
10325 +--- a/tools/testing/selftests/vDSO/vdso_test_abi.c
10326 ++++ b/tools/testing/selftests/vDSO/vdso_test_abi.c
10327 +@@ -33,110 +33,114 @@ typedef long (*vdso_clock_gettime_t)(clockid_t clk_id, struct timespec *ts);
10328 + typedef long (*vdso_clock_getres_t)(clockid_t clk_id, struct timespec *ts);
10329 + typedef time_t (*vdso_time_t)(time_t *t);
10330 +
10331 +-static int vdso_test_gettimeofday(void)
10332 ++#define VDSO_TEST_PASS_MSG() "\n%s(): PASS\n", __func__
10333 ++#define VDSO_TEST_FAIL_MSG(x) "\n%s(): %s FAIL\n", __func__, x
10334 ++#define VDSO_TEST_SKIP_MSG(x) "\n%s(): SKIP: Could not find %s\n", __func__, x
10335 ++
10336 ++static void vdso_test_gettimeofday(void)
10337 + {
10338 + /* Find gettimeofday. */
10339 + vdso_gettimeofday_t vdso_gettimeofday =
10340 + (vdso_gettimeofday_t)vdso_sym(version, name[0]);
10341 +
10342 + if (!vdso_gettimeofday) {
10343 +- printf("Could not find %s\n", name[0]);
10344 +- return KSFT_SKIP;
10345 ++ ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[0]));
10346 ++ return;
10347 + }
10348 +
10349 + struct timeval tv;
10350 + long ret = vdso_gettimeofday(&tv, 0);
10351 +
10352 + if (ret == 0) {
10353 +- printf("The time is %lld.%06lld\n",
10354 +- (long long)tv.tv_sec, (long long)tv.tv_usec);
10355 ++ ksft_print_msg("The time is %lld.%06lld\n",
10356 ++ (long long)tv.tv_sec, (long long)tv.tv_usec);
10357 ++ ksft_test_result_pass(VDSO_TEST_PASS_MSG());
10358 + } else {
10359 +- printf("%s failed\n", name[0]);
10360 +- return KSFT_FAIL;
10361 ++ ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[0]));
10362 + }
10363 +-
10364 +- return KSFT_PASS;
10365 + }
10366 +
10367 +-static int vdso_test_clock_gettime(clockid_t clk_id)
10368 ++static void vdso_test_clock_gettime(clockid_t clk_id)
10369 + {
10370 + /* Find clock_gettime. */
10371 + vdso_clock_gettime_t vdso_clock_gettime =
10372 + (vdso_clock_gettime_t)vdso_sym(version, name[1]);
10373 +
10374 + if (!vdso_clock_gettime) {
10375 +- printf("Could not find %s\n", name[1]);
10376 +- return KSFT_SKIP;
10377 ++ ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[1]));
10378 ++ return;
10379 + }
10380 +
10381 + struct timespec ts;
10382 + long ret = vdso_clock_gettime(clk_id, &ts);
10383 +
10384 + if (ret == 0) {
10385 +- printf("The time is %lld.%06lld\n",
10386 +- (long long)ts.tv_sec, (long long)ts.tv_nsec);
10387 ++ ksft_print_msg("The time is %lld.%06lld\n",
10388 ++ (long long)ts.tv_sec, (long long)ts.tv_nsec);
10389 ++ ksft_test_result_pass(VDSO_TEST_PASS_MSG());
10390 + } else {
10391 +- printf("%s failed\n", name[1]);
10392 +- return KSFT_FAIL;
10393 ++ ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[1]));
10394 + }
10395 +-
10396 +- return KSFT_PASS;
10397 + }
10398 +
10399 +-static int vdso_test_time(void)
10400 ++static void vdso_test_time(void)
10401 + {
10402 + /* Find time. */
10403 + vdso_time_t vdso_time =
10404 + (vdso_time_t)vdso_sym(version, name[2]);
10405 +
10406 + if (!vdso_time) {
10407 +- printf("Could not find %s\n", name[2]);
10408 +- return KSFT_SKIP;
10409 ++ ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[2]));
10410 ++ return;
10411 + }
10412 +
10413 + long ret = vdso_time(NULL);
10414 +
10415 + if (ret > 0) {
10416 +- printf("The time in hours since January 1, 1970 is %lld\n",
10417 ++ ksft_print_msg("The time in hours since January 1, 1970 is %lld\n",
10418 + (long long)(ret / 3600));
10419 ++ ksft_test_result_pass(VDSO_TEST_PASS_MSG());
10420 + } else {
10421 +- printf("%s failed\n", name[2]);
10422 +- return KSFT_FAIL;
10423 ++ ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[2]));
10424 + }
10425 +-
10426 +- return KSFT_PASS;
10427 + }
10428 +
10429 +-static int vdso_test_clock_getres(clockid_t clk_id)
10430 ++static void vdso_test_clock_getres(clockid_t clk_id)
10431 + {
10432 ++ int clock_getres_fail = 0;
10433 ++
10434 + /* Find clock_getres. */
10435 + vdso_clock_getres_t vdso_clock_getres =
10436 + (vdso_clock_getres_t)vdso_sym(version, name[3]);
10437 +
10438 + if (!vdso_clock_getres) {
10439 +- printf("Could not find %s\n", name[3]);
10440 +- return KSFT_SKIP;
10441 ++ ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[3]));
10442 ++ return;
10443 + }
10444 +
10445 + struct timespec ts, sys_ts;
10446 + long ret = vdso_clock_getres(clk_id, &ts);
10447 +
10448 + if (ret == 0) {
10449 +- printf("The resolution is %lld %lld\n",
10450 +- (long long)ts.tv_sec, (long long)ts.tv_nsec);
10451 ++ ksft_print_msg("The vdso resolution is %lld %lld\n",
10452 ++ (long long)ts.tv_sec, (long long)ts.tv_nsec);
10453 + } else {
10454 +- printf("%s failed\n", name[3]);
10455 +- return KSFT_FAIL;
10456 ++ clock_getres_fail++;
10457 + }
10458 +
10459 + ret = syscall(SYS_clock_getres, clk_id, &sys_ts);
10460 +
10461 +- if ((sys_ts.tv_sec != ts.tv_sec) || (sys_ts.tv_nsec != ts.tv_nsec)) {
10462 +- printf("%s failed\n", name[3]);
10463 +- return KSFT_FAIL;
10464 +- }
10465 ++ ksft_print_msg("The syscall resolution is %lld %lld\n",
10466 ++ (long long)sys_ts.tv_sec, (long long)sys_ts.tv_nsec);
10467 +
10468 +- return KSFT_PASS;
10469 ++ if ((sys_ts.tv_sec != ts.tv_sec) || (sys_ts.tv_nsec != ts.tv_nsec))
10470 ++ clock_getres_fail++;
10471 ++
10472 ++ if (clock_getres_fail > 0) {
10473 ++ ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[3]));
10474 ++ } else {
10475 ++ ksft_test_result_pass(VDSO_TEST_PASS_MSG());
10476 ++ }
10477 + }
10478 +
10479 + const char *vdso_clock_name[12] = {
10480 +@@ -158,36 +162,23 @@ const char *vdso_clock_name[12] = {
10481 + * This function calls vdso_test_clock_gettime and vdso_test_clock_getres
10482 + * with different values for clock_id.
10483 + */
10484 +-static inline int vdso_test_clock(clockid_t clock_id)
10485 ++static inline void vdso_test_clock(clockid_t clock_id)
10486 + {
10487 +- int ret0, ret1;
10488 +-
10489 +- ret0 = vdso_test_clock_gettime(clock_id);
10490 +- /* A skipped test is considered passed */
10491 +- if (ret0 == KSFT_SKIP)
10492 +- ret0 = KSFT_PASS;
10493 +-
10494 +- ret1 = vdso_test_clock_getres(clock_id);
10495 +- /* A skipped test is considered passed */
10496 +- if (ret1 == KSFT_SKIP)
10497 +- ret1 = KSFT_PASS;
10498 ++ ksft_print_msg("\nclock_id: %s\n", vdso_clock_name[clock_id]);
10499 +
10500 +- ret0 += ret1;
10501 ++ vdso_test_clock_gettime(clock_id);
10502 +
10503 +- printf("clock_id: %s", vdso_clock_name[clock_id]);
10504 +-
10505 +- if (ret0 > 0)
10506 +- printf(" [FAIL]\n");
10507 +- else
10508 +- printf(" [PASS]\n");
10509 +-
10510 +- return ret0;
10511 ++ vdso_test_clock_getres(clock_id);
10512 + }
10513 +
10514 ++#define VDSO_TEST_PLAN 16
10515 ++
10516 + int main(int argc, char **argv)
10517 + {
10518 + unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
10519 +- int ret;
10520 ++
10521 ++ ksft_print_header();
10522 ++ ksft_set_plan(VDSO_TEST_PLAN);
10523 +
10524 + if (!sysinfo_ehdr) {
10525 + printf("AT_SYSINFO_EHDR is not present!\n");
10526 +@@ -201,44 +192,42 @@ int main(int argc, char **argv)
10527 +
10528 + vdso_init_from_sysinfo_ehdr(getauxval(AT_SYSINFO_EHDR));
10529 +
10530 +- ret = vdso_test_gettimeofday();
10531 ++ vdso_test_gettimeofday();
10532 +
10533 + #if _POSIX_TIMERS > 0
10534 +
10535 + #ifdef CLOCK_REALTIME
10536 +- ret += vdso_test_clock(CLOCK_REALTIME);
10537 ++ vdso_test_clock(CLOCK_REALTIME);
10538 + #endif
10539 +
10540 + #ifdef CLOCK_BOOTTIME
10541 +- ret += vdso_test_clock(CLOCK_BOOTTIME);
10542 ++ vdso_test_clock(CLOCK_BOOTTIME);
10543 + #endif
10544 +
10545 + #ifdef CLOCK_TAI
10546 +- ret += vdso_test_clock(CLOCK_TAI);
10547 ++ vdso_test_clock(CLOCK_TAI);
10548 + #endif
10549 +
10550 + #ifdef CLOCK_REALTIME_COARSE
10551 +- ret += vdso_test_clock(CLOCK_REALTIME_COARSE);
10552 ++ vdso_test_clock(CLOCK_REALTIME_COARSE);
10553 + #endif
10554 +
10555 + #ifdef CLOCK_MONOTONIC
10556 +- ret += vdso_test_clock(CLOCK_MONOTONIC);
10557 ++ vdso_test_clock(CLOCK_MONOTONIC);
10558 + #endif
10559 +
10560 + #ifdef CLOCK_MONOTONIC_RAW
10561 +- ret += vdso_test_clock(CLOCK_MONOTONIC_RAW);
10562 ++ vdso_test_clock(CLOCK_MONOTONIC_RAW);
10563 + #endif
10564 +
10565 + #ifdef CLOCK_MONOTONIC_COARSE
10566 +- ret += vdso_test_clock(CLOCK_MONOTONIC_COARSE);
10567 ++ vdso_test_clock(CLOCK_MONOTONIC_COARSE);
10568 + #endif
10569 +
10570 + #endif
10571 +
10572 +- ret += vdso_test_time();
10573 +-
10574 +- if (ret > 0)
10575 +- return KSFT_FAIL;
10576 ++ vdso_test_time();
10577 +
10578 +- return KSFT_PASS;
10579 ++ ksft_print_cnts();
10580 ++ return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
10581 + }
10582 +diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
10583 +index 232e958ec4547..b0b91d9b0dc21 100755
10584 +--- a/tools/testing/selftests/zram/zram.sh
10585 ++++ b/tools/testing/selftests/zram/zram.sh
10586 +@@ -2,9 +2,6 @@
10587 + # SPDX-License-Identifier: GPL-2.0
10588 + TCID="zram.sh"
10589 +
10590 +-# Kselftest framework requirement - SKIP code is 4.
10591 +-ksft_skip=4
10592 +-
10593 + . ./zram_lib.sh
10594 +
10595 + run_zram () {
10596 +@@ -18,14 +15,4 @@ echo ""
10597 +
10598 + check_prereqs
10599 +
10600 +-# check zram module exists
10601 +-MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko
10602 +-if [ -f $MODULE_PATH ]; then
10603 +- run_zram
10604 +-elif [ -b /dev/zram0 ]; then
10605 +- run_zram
10606 +-else
10607 +- echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
10608 +- echo "$TCID : CONFIG_ZRAM is not set"
10609 +- exit $ksft_skip
10610 +-fi
10611 ++run_zram
10612 +diff --git a/tools/testing/selftests/zram/zram01.sh b/tools/testing/selftests/zram/zram01.sh
10613 +index 114863d9fb876..8f4affe34f3e4 100755
10614 +--- a/tools/testing/selftests/zram/zram01.sh
10615 ++++ b/tools/testing/selftests/zram/zram01.sh
10616 +@@ -33,9 +33,7 @@ zram_algs="lzo"
10617 +
10618 + zram_fill_fs()
10619 + {
10620 +- local mem_free0=$(free -m | awk 'NR==2 {print $4}')
10621 +-
10622 +- for i in $(seq 0 $(($dev_num - 1))); do
10623 ++ for i in $(seq $dev_start $dev_end); do
10624 + echo "fill zram$i..."
10625 + local b=0
10626 + while [ true ]; do
10627 +@@ -45,29 +43,17 @@ zram_fill_fs()
10628 + b=$(($b + 1))
10629 + done
10630 + echo "zram$i can be filled with '$b' KB"
10631 +- done
10632 +
10633 +- local mem_free1=$(free -m | awk 'NR==2 {print $4}')
10634 +- local used_mem=$(($mem_free0 - $mem_free1))
10635 ++ local mem_used_total=`awk '{print $3}' "/sys/block/zram$i/mm_stat"`
10636 ++ local v=$((100 * 1024 * $b / $mem_used_total))
10637 ++ if [ "$v" -lt 100 ]; then
10638 ++ echo "FAIL compression ratio: 0.$v:1"
10639 ++ ERR_CODE=-1
10640 ++ return
10641 ++ fi
10642 +
10643 +- local total_size=0
10644 +- for sm in $zram_sizes; do
10645 +- local s=$(echo $sm | sed 's/M//')
10646 +- total_size=$(($total_size + $s))
10647 ++ echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK"
10648 + done
10649 +-
10650 +- echo "zram used ${used_mem}M, zram disk sizes ${total_size}M"
10651 +-
10652 +- local v=$((100 * $total_size / $used_mem))
10653 +-
10654 +- if [ "$v" -lt 100 ]; then
10655 +- echo "FAIL compression ratio: 0.$v:1"
10656 +- ERR_CODE=-1
10657 +- zram_cleanup
10658 +- return
10659 +- fi
10660 +-
10661 +- echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK"
10662 + }
10663 +
10664 + check_prereqs
10665 +@@ -81,7 +67,6 @@ zram_mount
10666 +
10667 + zram_fill_fs
10668 + zram_cleanup
10669 +-zram_unload
10670 +
10671 + if [ $ERR_CODE -ne 0 ]; then
10672 + echo "$TCID : [FAIL]"
10673 +diff --git a/tools/testing/selftests/zram/zram02.sh b/tools/testing/selftests/zram/zram02.sh
10674 +index e83b404807c09..2418b0c4ed136 100755
10675 +--- a/tools/testing/selftests/zram/zram02.sh
10676 ++++ b/tools/testing/selftests/zram/zram02.sh
10677 +@@ -36,7 +36,6 @@ zram_set_memlimit
10678 + zram_makeswap
10679 + zram_swapoff
10680 + zram_cleanup
10681 +-zram_unload
10682 +
10683 + if [ $ERR_CODE -ne 0 ]; then
10684 + echo "$TCID : [FAIL]"
10685 +diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
10686 +index 6f872f266fd11..21ec1966de76c 100755
10687 +--- a/tools/testing/selftests/zram/zram_lib.sh
10688 ++++ b/tools/testing/selftests/zram/zram_lib.sh
10689 +@@ -5,12 +5,17 @@
10690 + # Author: Alexey Kodanev <alexey.kodanev@××××××.com>
10691 + # Modified: Naresh Kamboju <naresh.kamboju@××××××.org>
10692 +
10693 +-MODULE=0
10694 + dev_makeswap=-1
10695 + dev_mounted=-1
10696 +-
10697 ++dev_start=0
10698 ++dev_end=-1
10699 ++module_load=-1
10700 ++sys_control=-1
10701 + # Kselftest framework requirement - SKIP code is 4.
10702 + ksft_skip=4
10703 ++kernel_version=`uname -r | cut -d'.' -f1,2`
10704 ++kernel_major=${kernel_version%.*}
10705 ++kernel_minor=${kernel_version#*.}
10706 +
10707 + trap INT
10708 +
10709 +@@ -25,68 +30,104 @@ check_prereqs()
10710 + fi
10711 + }
10712 +
10713 ++kernel_gte()
10714 ++{
10715 ++ major=${1%.*}
10716 ++ minor=${1#*.}
10717 ++
10718 ++ if [ $kernel_major -gt $major ]; then
10719 ++ return 0
10720 ++ elif [[ $kernel_major -eq $major && $kernel_minor -ge $minor ]]; then
10721 ++ return 0
10722 ++ fi
10723 ++
10724 ++ return 1
10725 ++}
10726 ++
10727 + zram_cleanup()
10728 + {
10729 + echo "zram cleanup"
10730 + local i=
10731 +- for i in $(seq 0 $dev_makeswap); do
10732 ++ for i in $(seq $dev_start $dev_makeswap); do
10733 + swapoff /dev/zram$i
10734 + done
10735 +
10736 +- for i in $(seq 0 $dev_mounted); do
10737 ++ for i in $(seq $dev_start $dev_mounted); do
10738 + umount /dev/zram$i
10739 + done
10740 +
10741 +- for i in $(seq 0 $(($dev_num - 1))); do
10742 ++ for i in $(seq $dev_start $dev_end); do
10743 + echo 1 > /sys/block/zram${i}/reset
10744 + rm -rf zram$i
10745 + done
10746 +
10747 +-}
10748 ++ if [ $sys_control -eq 1 ]; then
10749 ++ for i in $(seq $dev_start $dev_end); do
10750 ++ echo $i > /sys/class/zram-control/hot_remove
10751 ++ done
10752 ++ fi
10753 +
10754 +-zram_unload()
10755 +-{
10756 +- if [ $MODULE -ne 0 ] ; then
10757 +- echo "zram rmmod zram"
10758 ++ if [ $module_load -eq 1 ]; then
10759 + rmmod zram > /dev/null 2>&1
10760 + fi
10761 + }
10762 +
10763 + zram_load()
10764 + {
10765 +- # check zram module exists
10766 +- MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko
10767 +- if [ -f $MODULE_PATH ]; then
10768 +- MODULE=1
10769 +- echo "create '$dev_num' zram device(s)"
10770 +- modprobe zram num_devices=$dev_num
10771 +- if [ $? -ne 0 ]; then
10772 +- echo "failed to insert zram module"
10773 +- exit 1
10774 +- fi
10775 +-
10776 +- dev_num_created=$(ls /dev/zram* | wc -w)
10777 ++ echo "create '$dev_num' zram device(s)"
10778 ++
10779 ++ # zram module loaded, new kernel
10780 ++ if [ -d "/sys/class/zram-control" ]; then
10781 ++		echo "zram module already loaded, kernel supports" \
10782 ++ "zram-control interface"
10783 ++ dev_start=$(ls /dev/zram* | wc -w)
10784 ++ dev_end=$(($dev_start + $dev_num - 1))
10785 ++ sys_control=1
10786 ++
10787 ++ for i in $(seq $dev_start $dev_end); do
10788 ++ cat /sys/class/zram-control/hot_add > /dev/null
10789 ++ done
10790 ++
10791 ++		echo "all zram devices (/dev/zram$dev_start~$dev_end)" \
10792 ++ "successfully created"
10793 ++ return 0
10794 ++ fi
10795 +
10796 +- if [ "$dev_num_created" -ne "$dev_num" ]; then
10797 +- echo "unexpected num of devices: $dev_num_created"
10798 +- ERR_CODE=-1
10799 ++ # detect old kernel or built-in
10800 ++ modprobe zram num_devices=$dev_num
10801 ++ if [ ! -d "/sys/class/zram-control" ]; then
10802 ++ if grep -q '^zram' /proc/modules; then
10803 ++ rmmod zram > /dev/null 2>&1
10804 ++ if [ $? -ne 0 ]; then
10805 ++ echo "zram module is being used on old kernel" \
10806 ++ "without zram-control interface"
10807 ++ exit $ksft_skip
10808 ++ fi
10809 + else
10810 +- echo "zram load module successful"
10811 ++ echo "test needs CONFIG_ZRAM=m on old kernel without" \
10812 ++ "zram-control interface"
10813 ++ exit $ksft_skip
10814 + fi
10815 +- elif [ -b /dev/zram0 ]; then
10816 +- echo "/dev/zram0 device file found: OK"
10817 +- else
10818 +- echo "ERROR: No zram.ko module or no /dev/zram0 device found"
10819 +- echo "$TCID : CONFIG_ZRAM is not set"
10820 +- exit 1
10821 ++ modprobe zram num_devices=$dev_num
10822 + fi
10823 ++
10824 ++ module_load=1
10825 ++ dev_end=$(($dev_num - 1))
10826 ++ echo "all zram devices (/dev/zram0~$dev_end) successfully created"
10827 + }
10828 +
10829 + zram_max_streams()
10830 + {
10831 + echo "set max_comp_streams to zram device(s)"
10832 +
10833 +- local i=0
10834 ++ kernel_gte 4.7
10835 ++ if [ $? -eq 0 ]; then
10836 ++ echo "The device attribute max_comp_streams was"\
10837 ++ "deprecated in 4.7"
10838 ++ return 0
10839 ++ fi
10840 ++
10841 ++ local i=$dev_start
10842 + for max_s in $zram_max_streams; do
10843 + local sys_path="/sys/block/zram${i}/max_comp_streams"
10844 + echo $max_s > $sys_path || \
10845 +@@ -98,7 +139,7 @@ zram_max_streams()
10846 + echo "FAIL can't set max_streams '$max_s', get $max_stream"
10847 +
10848 + i=$(($i + 1))
10849 +- echo "$sys_path = '$max_streams' ($i/$dev_num)"
10850 ++ echo "$sys_path = '$max_streams'"
10851 + done
10852 +
10853 + echo "zram max streams: OK"
10854 +@@ -108,15 +149,16 @@ zram_compress_alg()
10855 + {
10856 + echo "test that we can set compression algorithm"
10857 +
10858 +- local algs=$(cat /sys/block/zram0/comp_algorithm)
10859 ++ local i=$dev_start
10860 ++ local algs=$(cat /sys/block/zram${i}/comp_algorithm)
10861 + echo "supported algs: $algs"
10862 +- local i=0
10863 ++
10864 + for alg in $zram_algs; do
10865 + local sys_path="/sys/block/zram${i}/comp_algorithm"
10866 + echo "$alg" > $sys_path || \
10867 + echo "FAIL can't set '$alg' to $sys_path"
10868 + i=$(($i + 1))
10869 +- echo "$sys_path = '$alg' ($i/$dev_num)"
10870 ++ echo "$sys_path = '$alg'"
10871 + done
10872 +
10873 + echo "zram set compression algorithm: OK"
10874 +@@ -125,14 +167,14 @@ zram_compress_alg()
10875 + zram_set_disksizes()
10876 + {
10877 + echo "set disk size to zram device(s)"
10878 +- local i=0
10879 ++ local i=$dev_start
10880 + for ds in $zram_sizes; do
10881 + local sys_path="/sys/block/zram${i}/disksize"
10882 + echo "$ds" > $sys_path || \
10883 + echo "FAIL can't set '$ds' to $sys_path"
10884 +
10885 + i=$(($i + 1))
10886 +- echo "$sys_path = '$ds' ($i/$dev_num)"
10887 ++ echo "$sys_path = '$ds'"
10888 + done
10889 +
10890 + echo "zram set disksizes: OK"
10891 +@@ -142,14 +184,14 @@ zram_set_memlimit()
10892 + {
10893 + echo "set memory limit to zram device(s)"
10894 +
10895 +- local i=0
10896 ++ local i=$dev_start
10897 + for ds in $zram_mem_limits; do
10898 + local sys_path="/sys/block/zram${i}/mem_limit"
10899 + echo "$ds" > $sys_path || \
10900 + echo "FAIL can't set '$ds' to $sys_path"
10901 +
10902 + i=$(($i + 1))
10903 +- echo "$sys_path = '$ds' ($i/$dev_num)"
10904 ++ echo "$sys_path = '$ds'"
10905 + done
10906 +
10907 + echo "zram set memory limit: OK"
10908 +@@ -158,8 +200,8 @@ zram_set_memlimit()
10909 + zram_makeswap()
10910 + {
10911 + echo "make swap with zram device(s)"
10912 +- local i=0
10913 +- for i in $(seq 0 $(($dev_num - 1))); do
10914 ++ local i=$dev_start
10915 ++ for i in $(seq $dev_start $dev_end); do
10916 + mkswap /dev/zram$i > err.log 2>&1
10917 + if [ $? -ne 0 ]; then
10918 + cat err.log
10919 +@@ -182,7 +224,7 @@ zram_makeswap()
10920 + zram_swapoff()
10921 + {
10922 + local i=
10923 +- for i in $(seq 0 $dev_makeswap); do
10924 ++ for i in $(seq $dev_start $dev_end); do
10925 + swapoff /dev/zram$i > err.log 2>&1
10926 + if [ $? -ne 0 ]; then
10927 + cat err.log
10928 +@@ -196,7 +238,7 @@ zram_swapoff()
10929 +
10930 + zram_makefs()
10931 + {
10932 +- local i=0
10933 ++ local i=$dev_start
10934 + for fs in $zram_filesystems; do
10935 + # if requested fs not supported default it to ext2
10936 + which mkfs.$fs > /dev/null 2>&1 || fs=ext2
10937 +@@ -215,7 +257,7 @@ zram_makefs()
10938 + zram_mount()
10939 + {
10940 + local i=0
10941 +- for i in $(seq 0 $(($dev_num - 1))); do
10942 ++ for i in $(seq $dev_start $dev_end); do
10943 + echo "mount /dev/zram$i"
10944 + mkdir zram$i
10945 + mount /dev/zram$i zram$i > /dev/null || \