From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Wed, 01 Dec 2021 12:48:16
Message-Id: 1638362877.fc48aa026f80ac12fafd7aadc002890b03765588.mpagano@gentoo
commit: fc48aa026f80ac12fafd7aadc002890b03765588
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 1 12:47:57 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec 1 12:47:57 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fc48aa02

Linux patch 5.15.6

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1005_linux-5.15.6.patch | 6959 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6963 insertions(+)

diff --git a/0000_README b/0000_README
index 68d0c0db..fcc761b6 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch: 1004_linux-5.15.5.patch
From: http://www.kernel.org
Desc: Linux 5.15.5

+Patch: 1005_linux-5.15.6.patch
+From: http://www.kernel.org
+Desc: Linux 5.15.6
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-5.15.6.patch b/1005_linux-5.15.6.patch
new file mode 100644
index 00000000..169a4694
--- /dev/null
+++ b/1005_linux-5.15.6.patch
@@ -0,0 +1,6959 @@
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 426162009ce99..0e486f41185ef 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1099,7 +1099,7 @@ task_delayacct
+ ===============
+
+ Enables/disables task delay accounting (see
+-:doc:`accounting/delay-accounting.rst`). Enabling this feature incurs
++Documentation/accounting/delay-accounting.rst. Enabling this feature incurs
+ a small amount of overhead in the scheduler but is useful for debugging
+ and performance tuning. It is required by some tools such as iotop.
+
+diff --git a/Documentation/networking/ipvs-sysctl.rst b/Documentation/networking/ipvs-sysctl.rst
+index 2afccc63856ee..1cfbf1add2fc9 100644
+--- a/Documentation/networking/ipvs-sysctl.rst
++++ b/Documentation/networking/ipvs-sysctl.rst
+@@ -37,8 +37,7 @@ conn_reuse_mode - INTEGER
+
+ 0: disable any special handling on port reuse. The new
+ connection will be delivered to the same real server that was
+- servicing the previous connection. This will effectively
+- disable expire_nodest_conn.
++ servicing the previous connection.
+
+ bit 1: enable rescheduling of new connections when it is safe.
+ That is, whenever expire_nodest_conn and for TCP sockets, when
+diff --git a/Makefile b/Makefile
+index 820ccbe7586fe..0faa647332816 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
+index 3b60297af7f60..9e01dbca4a011 100644
+--- a/arch/arm/boot/dts/bcm2711.dtsi
++++ b/arch/arm/boot/dts/bcm2711.dtsi
+@@ -506,11 +506,17 @@
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+- interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
++ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pcie", "msi";
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143
++ IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 2 &gicv2 GIC_SPI 144
++ IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 3 &gicv2 GIC_SPI 145
++ IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 4 &gicv2 GIC_SPI 146
+ IRQ_TYPE_LEVEL_HIGH>;
+ msi-controller;
+ msi-parent = <&pcie0>;
+diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
+index d4f355015e3ca..f69d2af3c1fa4 100644
+--- a/arch/arm/boot/dts/bcm5301x.dtsi
++++ b/arch/arm/boot/dts/bcm5301x.dtsi
+@@ -242,6 +242,8 @@
+
+ gpio-controller;
+ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
+ };
+
+ pcie0: pcie@12000 {
+@@ -408,7 +410,7 @@
+ i2c0: i2c@18009000 {
+ compatible = "brcm,iproc-i2c";
+ reg = <0x18009000 0x50>;
+- interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
+index fc2608b18a0d0..18f01190dcfd4 100644
+--- a/arch/arm/mach-socfpga/core.h
++++ b/arch/arm/mach-socfpga/core.h
+@@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
+ u32 socfpga_sdram_self_refresh(u32 sdr_base);
+ extern unsigned int socfpga_sdram_self_refresh_sz;
+
+-extern char secondary_trampoline, secondary_trampoline_end;
++extern char secondary_trampoline[], secondary_trampoline_end[];
+
+ extern unsigned long socfpga_cpu1start_addr;
+
+diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
+index fbb80b883e5dd..201191cf68f32 100644
+--- a/arch/arm/mach-socfpga/platsmp.c
++++ b/arch/arm/mach-socfpga/platsmp.c
+@@ -20,14 +20,14 @@
+
+ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ {
+- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
++ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
+
+ if (socfpga_cpu1start_addr) {
+ /* This will put CPU #1 into reset. */
+ writel(RSTMGR_MPUMODRST_CPU1,
+ rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
+
+- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
++ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
+
+ writel(__pa_symbol(secondary_startup),
+ sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
+@@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
+
+ static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ {
+- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
++ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
+
+ if (socfpga_cpu1start_addr) {
+ writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
+ SOCFPGA_A10_RSTMGR_MODMPURST);
+- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
++ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
+
+ writel(__pa_symbol(secondary_startup),
+ sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
+diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
+index 8433a2058eb15..237224484d0f6 100644
+--- a/arch/arm64/include/asm/pgalloc.h
++++ b/arch/arm64/include/asm/pgalloc.h
+@@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
+ static inline void
+ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+ {
+- VM_BUG_ON(mm != &init_mm);
++ VM_BUG_ON(mm && mm != &init_mm);
+ __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
+ }
+
+diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
+index 190b494e22ab9..0fd6056ba412b 100644
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -292,12 +292,22 @@ do { \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ } while (0)
+
++/*
++ * We must not call into the scheduler between uaccess_ttbr0_enable() and
++ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
++ * we must evaluate these outside of the critical section.
++ */
+ #define __raw_get_user(x, ptr, err) \
+ do { \
++ __typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \
++ __typeof__(x) __rgu_val; \
+ __chk_user_ptr(ptr); \
++ \
+ uaccess_ttbr0_enable(); \
+- __raw_get_mem("ldtr", x, ptr, err); \
++ __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \
+ uaccess_ttbr0_disable(); \
++ \
++ (x) = __rgu_val; \
+ } while (0)
+
+ #define __get_user_error(x, ptr, err) \
+@@ -321,14 +331,22 @@ do { \
+
+ #define get_user __get_user
+
++/*
++ * We must not call into the scheduler between __uaccess_enable_tco_async() and
++ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
++ * functions, we must evaluate these outside of the critical section.
++ */
+ #define __get_kernel_nofault(dst, src, type, err_label) \
+ do { \
++ __typeof__(dst) __gkn_dst = (dst); \
++ __typeof__(src) __gkn_src = (src); \
+ int __gkn_err = 0; \
+ \
+ __uaccess_enable_tco_async(); \
+- __raw_get_mem("ldr", *((type *)(dst)), \
+- (__force type *)(src), __gkn_err); \
++ __raw_get_mem("ldr", *((type *)(__gkn_dst)), \
++ (__force type *)(__gkn_src), __gkn_err); \
+ __uaccess_disable_tco_async(); \
++ \
+ if (unlikely(__gkn_err)) \
+ goto err_label; \
+ } while (0)
+@@ -367,11 +385,19 @@ do { \
+ } \
+ } while (0)
+
++/*
++ * We must not call into the scheduler between uaccess_ttbr0_enable() and
++ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
++ * we must evaluate these outside of the critical section.
++ */
+ #define __raw_put_user(x, ptr, err) \
+ do { \
+- __chk_user_ptr(ptr); \
++ __typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \
++ __typeof__(*(ptr)) __rpu_val = (x); \
++ __chk_user_ptr(__rpu_ptr); \
++ \
+ uaccess_ttbr0_enable(); \
+- __raw_put_mem("sttr", x, ptr, err); \
++ __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \
+ uaccess_ttbr0_disable(); \
+ } while (0)
+
+@@ -396,14 +422,22 @@ do { \
+
+ #define put_user __put_user
+
++/*
++ * We must not call into the scheduler between __uaccess_enable_tco_async() and
++ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
++ * functions, we must evaluate these outside of the critical section.
++ */
+ #define __put_kernel_nofault(dst, src, type, err_label) \
+ do { \
++ __typeof__(dst) __pkn_dst = (dst); \
++ __typeof__(src) __pkn_src = (src); \
+ int __pkn_err = 0; \
+ \
+ __uaccess_enable_tco_async(); \
+- __raw_put_mem("str", *((type *)(src)), \
+- (__force type *)(dst), __pkn_err); \
++ __raw_put_mem("str", *((type *)(__pkn_src)), \
++ (__force type *)(__pkn_dst), __pkn_err); \
+ __uaccess_disable_tco_async(); \
++ \
+ if (unlikely(__pkn_err)) \
+ goto err_label; \
+ } while(0)
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index a917d408d27d8..23654ccdbfb12 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -3189,7 +3189,7 @@ config STACKTRACE_SUPPORT
+ config PGTABLE_LEVELS
+ int
+ default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
+- default 3 if 64BIT && !PAGE_SIZE_64KB
++ default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
+ default 2
+
+ config MIPS_AUTO_PFN_OFFSET
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 630fcb4cb30e7..7c861e6a89529 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1734,8 +1734,6 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
+
+ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ {
+- decode_configs(c);
+-
+ /* All Loongson processors covered here define ExcCode 16 as GSExc. */
+ c->options |= MIPS_CPU_GSEXCEX;
+
+@@ -1796,6 +1794,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ panic("Unknown Loongson Processor ID!");
+ break;
+ }
++
++ decode_configs(c);
+ }
+ #else
+ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
+diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
+index 3d208afd15bc6..2769eb991f58d 100644
+--- a/arch/parisc/kernel/vmlinux.lds.S
++++ b/arch/parisc/kernel/vmlinux.lds.S
+@@ -57,8 +57,6 @@ SECTIONS
+ {
+ . = KERNEL_BINARY_TEXT_START;
+
+- _stext = .; /* start of kernel text, includes init code & data */
+-
+ __init_begin = .;
+ HEAD_TEXT_SECTION
+ MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
+@@ -82,6 +80,7 @@ SECTIONS
+ /* freed after init ends here */
+
+ _text = .; /* Text and read-only data */
++ _stext = .;
+ MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
+ .text ALIGN(PAGE_SIZE) : {
+ TEXT_TEXT
+diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
+index 6b1ec9e3541b9..349c4a820231b 100644
+--- a/arch/powerpc/kernel/head_32.h
++++ b/arch/powerpc/kernel/head_32.h
+@@ -202,11 +202,11 @@ vmap_stack_overflow:
+ mfspr r1, SPRN_SPRG_THREAD
+ lwz r1, TASK_CPU - THREAD(r1)
+ slwi r1, r1, 3
+- addis r1, r1, emergency_ctx@ha
++ addis r1, r1, emergency_ctx-PAGE_OFFSET@ha
+ #else
+- lis r1, emergency_ctx@ha
++ lis r1, emergency_ctx-PAGE_OFFSET@ha
+ #endif
+- lwz r1, emergency_ctx@l(r1)
++ lwz r1, emergency_ctx-PAGE_OFFSET@l(r1)
+ addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ EXCEPTION_PROLOG_2 0 vmap_stack_overflow
+ prepare_transfer_to_handler
+diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
+index fcf4760a3a0ea..70b7a8f971538 100644
+--- a/arch/powerpc/kvm/book3s_hv_builtin.c
++++ b/arch/powerpc/kvm/book3s_hv_builtin.c
+@@ -695,6 +695,7 @@ static void flush_guest_tlb(struct kvm *kvm)
+ "r" (0) : "memory");
+ }
+ asm volatile("ptesync": : :"memory");
++ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
+ asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
+ } else {
+ for (set = 0; set < kvm->arch.tlb_sets; ++set) {
+@@ -705,7 +706,9 @@ static void flush_guest_tlb(struct kvm *kvm)
+ rb += PPC_BIT(51); /* increment set number */
+ }
+ asm volatile("ptesync": : :"memory");
+- asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
++ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
++ if (cpu_has_feature(CPU_FTR_ARCH_300))
++ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
+ }
+ }
+
+diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
+index b254c60589a1c..cce5eca31f257 100644
+--- a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
++++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
+@@ -12,7 +12,7 @@
+ #address-cells = <2>;
+ #size-cells = <2>;
+ model = "Microchip PolarFire-SoC Icicle Kit";
+- compatible = "microchip,mpfs-icicle-kit";
++ compatible = "microchip,mpfs-icicle-kit", "microchip,mpfs";
+
+ aliases {
+ ethernet0 = &emac1;
+@@ -56,8 +56,17 @@
+ status = "okay";
+ };
+
+-&sdcard {
++&mmc {
+ status = "okay";
++
++ bus-width = <4>;
++ disable-wp;
++ cap-sd-highspeed;
++ card-detect-delay = <200>;
++ sd-uhs-sdr12;
++ sd-uhs-sdr25;
++ sd-uhs-sdr50;
++ sd-uhs-sdr104;
+ };
+
+ &emac0 {
+diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
+index 9d2fbbc1f7778..b12fd594e7172 100644
+--- a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
++++ b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
+@@ -6,8 +6,8 @@
+ / {
+ #address-cells = <2>;
+ #size-cells = <2>;
+- model = "Microchip MPFS Icicle Kit";
+- compatible = "microchip,mpfs-icicle-kit";
++ model = "Microchip PolarFire SoC";
++ compatible = "microchip,mpfs";
+
+ chosen {
+ };
+@@ -262,39 +262,14 @@
+ status = "disabled";
+ };
+
+- emmc: mmc@20008000 {
++ /* Common node entry for emmc/sd */
++ mmc: mmc@20008000 {
+ compatible = "cdns,sd4hc";
+ reg = <0x0 0x20008000 0x0 0x1000>;
+ interrupt-parent = <&plic>;
+ interrupts = <88 89>;
+ pinctrl-names = "default";
+ clocks = <&clkcfg 6>;
+- bus-width = <4>;
+- cap-mmc-highspeed;
+- mmc-ddr-3_3v;
+- max-frequency = <200000000>;
+- non-removable;
+- no-sd;
+- no-sdio;
+- voltage-ranges = <3300 3300>;
+- status = "disabled";
+- };
+-
+- sdcard: sdhc@20008000 {
+- compatible = "cdns,sd4hc";
+- reg = <0x0 0x20008000 0x0 0x1000>;
+- interrupt-parent = <&plic>;
+- interrupts = <88>;
+- pinctrl-names = "default";
+- clocks = <&clkcfg 6>;
+- bus-width = <4>;
+- disable-wp;
+- cap-sd-highspeed;
+- card-detect-delay = <200>;
+- sd-uhs-sdr12;
+- sd-uhs-sdr25;
+- sd-uhs-sdr50;
+- sd-uhs-sdr104;
+ max-frequency = <200000000>;
+ status = "disabled";
+ };
+diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
+index ff4b52e37e60d..5adab895127e1 100644
+--- a/arch/x86/include/asm/xen/hypervisor.h
++++ b/arch/x86/include/asm/xen/hypervisor.h
+@@ -62,4 +62,9 @@ void xen_arch_register_cpu(int num);
+ void xen_arch_unregister_cpu(int num);
+ #endif
+
++#ifdef CONFIG_PVH
++void __init xen_pvh_init(struct boot_params *boot_params);
++void __init mem_map_via_hcall(struct boot_params *boot_params_p);
++#endif
++
+ #endif /* _ASM_X86_XEN_HYPERVISOR_H */
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 12aa8c1da6003..c2d912d0c976c 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -389,8 +389,10 @@ void blk_cleanup_queue(struct request_queue *q)
+ blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
+
+ blk_sync_queue(q);
+- if (queue_is_mq(q))
++ if (queue_is_mq(q)) {
++ blk_mq_cancel_work_sync(q);
+ blk_mq_exit_queue(q);
++ }
+
+ /*
+ * In theory, request pool of sched_tags belongs to request queue.
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index c8a9d10f7c18b..82de39926a9f6 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -4018,6 +4018,19 @@ unsigned int blk_mq_rq_cpu(struct request *rq)
+ }
+ EXPORT_SYMBOL(blk_mq_rq_cpu);
+
++void blk_mq_cancel_work_sync(struct request_queue *q)
++{
++ if (queue_is_mq(q)) {
++ struct blk_mq_hw_ctx *hctx;
++ int i;
++
++ cancel_delayed_work_sync(&q->requeue_work);
++
++ queue_for_each_hw_ctx(q, hctx, i)
++ cancel_delayed_work_sync(&hctx->run_work);
++ }
++}
++
+ static int __init blk_mq_init(void)
+ {
+ int i;
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index d08779f77a265..7cdca23b6263d 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -129,6 +129,8 @@ extern int blk_mq_sysfs_register(struct request_queue *q);
+ extern void blk_mq_sysfs_unregister(struct request_queue *q);
+ extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
+
++void blk_mq_cancel_work_sync(struct request_queue *q);
++
+ void blk_mq_release(struct request_queue *q);
+
+ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 614d9d47de36b..4737ec024ee9b 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -805,16 +805,6 @@ static void blk_release_queue(struct kobject *kobj)
+
+ blk_free_queue_stats(q->stats);
+
+- if (queue_is_mq(q)) {
+- struct blk_mq_hw_ctx *hctx;
+- int i;
+-
+- cancel_delayed_work_sync(&q->requeue_work);
+-
+- queue_for_each_hw_ctx(q, hctx, i)
+- cancel_delayed_work_sync(&hctx->run_work);
+- }
+-
+ blk_exit_queue(q);
+
+ blk_queue_free_zone_bitmaps(q);
+diff --git a/block/elevator.c b/block/elevator.c
+index ff45d8388f487..cd02ae332c4eb 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -694,12 +694,18 @@ void elevator_init_mq(struct request_queue *q)
+ if (!e)
+ return;
+
++ /*
++ * We are called before adding disk, when there isn't any FS I/O,
++ * so freezing queue plus canceling dispatch work is enough to
++ * drain any dispatch activities originated from passthrough
++ * requests, then no need to quiesce queue which may add long boot
++ * latency, especially when lots of disks are involved.
++ */
+ blk_mq_freeze_queue(q);
+- blk_mq_quiesce_queue(q);
++ blk_mq_cancel_work_sync(q);
+
+ err = blk_mq_init_sched(q, e);
+
+- blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
+
+ if (err) {
+diff --git a/block/genhd.c b/block/genhd.c
+index 6accd0b185e9e..f091a60dcf1ea 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -1086,6 +1086,8 @@ static void disk_release(struct device *dev)
+ might_sleep();
+ WARN_ON_ONCE(disk_live(disk));
+
++ blk_mq_cancel_work_sync(disk->queue);
++
+ disk_release_events(disk);
+ kfree(disk->random);
+ xa_destroy(&disk->part_tbl);
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index bd482108310cf..3fbb17ecce2d5 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -1011,7 +1011,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
+ {
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+- struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx];
++ struct cpc_register_resource *reg;
++
++ if (!cpc_desc) {
++ pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
++ return -ENODEV;
++ }
++
++ reg = &cpc_desc->cpc_regs[reg_idx];
+
+ if (CPC_IN_PCC(reg)) {
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index e312ebaed8db4..781e312f45342 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -1090,15 +1090,10 @@ struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
+ /* All data nodes have parent pointer so just return that */
+ return to_acpi_data_node(fwnode)->parent;
+ } else if (is_acpi_device_node(fwnode)) {
+- acpi_handle handle, parent_handle;
++ struct device *dev = to_acpi_device_node(fwnode)->dev.parent;
+
+- handle = to_acpi_device_node(fwnode)->handle;
+- if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) {
+- struct acpi_device *adev;
+-
+- if (!acpi_bus_get_device(parent_handle, &adev))
+- return acpi_fwnode_handle(adev);
+- }
++ if (dev)
++ return acpi_fwnode_handle(to_acpi_device(dev));
+ }
+
+ return NULL;
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 49fb74196d02f..cffbe57a8e086 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2710,7 +2710,7 @@ static void binder_transaction(struct binder_proc *proc,
+ t->from = thread;
+ else
+ t->from = NULL;
+- t->sender_euid = proc->cred->euid;
++ t->sender_euid = task_euid(proc->tsk);
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
+ t->code = tr->code;
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index dafa631582bac..e15c3bc17a55c 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -999,6 +999,12 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
+ */
+ value &= ~GENMASK_ULL(31, 24);
+ value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
++ /*
++ * However, make sure that EPP will be set to "performance" when
++ * the CPU is brought back online again and the "performance"
++ * scaling algorithm is still in effect.
++ */
++ cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+ }
+
+ /*
+@@ -2249,6 +2255,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
+ X86_MATCH(BROADWELL_D, core_funcs),
+ X86_MATCH(BROADWELL_X, core_funcs),
+ X86_MATCH(SKYLAKE_X, core_funcs),
++ X86_MATCH(ICELAKE_X, core_funcs),
+ {}
+ };
+
+diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
+index de416f9e79213..f5219334fd3a5 100644
+--- a/drivers/firmware/arm_scmi/base.c
++++ b/drivers/firmware/arm_scmi/base.c
+@@ -34,6 +34,12 @@ struct scmi_msg_resp_base_attributes {
+ __le16 reserved;
+ };
+
++struct scmi_msg_resp_base_discover_agent {
++ __le32 agent_id;
++ u8 name[SCMI_MAX_STR_SIZE];
++};
++
++
+ struct scmi_msg_base_error_notify {
+ __le32 event_control;
+ #define BASE_TP_NOTIFY_ALL BIT(0)
+@@ -225,18 +231,21 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
+ int id, char *name)
+ {
+ int ret;
++ struct scmi_msg_resp_base_discover_agent *agent_info;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT,
+- sizeof(__le32), SCMI_MAX_STR_SIZE, &t);
++ sizeof(__le32), sizeof(*agent_info), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(id, t->tx.buf);
+
+ ret = ph->xops->do_xfer(ph, t);
+- if (!ret)
+- strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
++ if (!ret) {
++ agent_info = t->rx.buf;
++ strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE);
++ }
+
+ ph->xops->xfer_put(ph, t);
+
+diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
+index 4371fdcd5a73f..581d34c957695 100644
+--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
++++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
+@@ -138,9 +138,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
+ scmi_pd_data->domains = domains;
+ scmi_pd_data->num_domains = num_domains;
+
+- of_genpd_add_provider_onecell(np, scmi_pd_data);
+-
+- return 0;
++ return of_genpd_add_provider_onecell(np, scmi_pd_data);
+ }
+
+ static const struct scmi_device_id scmi_id_table[] = {
+diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
+index 308471586381f..cdbb287bd8bcd 100644
+--- a/drivers/firmware/arm_scmi/sensors.c
++++ b/drivers/firmware/arm_scmi/sensors.c
+@@ -637,7 +637,7 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
+ if (ret)
+ return ret;
+
+- put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf);
++ put_unaligned_le32(sensor_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ struct sensors_info *si = ph->get_priv(ph);
+diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
+index 11e8efb713751..87039c5c03fdb 100644
+--- a/drivers/firmware/arm_scmi/virtio.c
++++ b/drivers/firmware/arm_scmi/virtio.c
+@@ -82,7 +82,8 @@ static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
+ }
+
+ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
+- struct scmi_vio_msg *msg)
++ struct scmi_vio_msg *msg,
++ struct device *dev)
+ {
+ struct scatterlist sg_in;
+ int rc;
+@@ -94,8 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
+
+ rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
+ if (rc)
+- dev_err_once(vioch->cinfo->dev,
+- "failed to add to virtqueue (%d)\n", rc);
++ dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc);
+ else
+ virtqueue_kick(vioch->vqueue);
+
+@@ -108,7 +108,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,
+ struct scmi_vio_msg *msg)
+ {
+ if (vioch->is_rx) {
+- scmi_vio_feed_vq_rx(vioch, msg);
++ scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev);
+ } else {
+ /* Here IRQs are assumed to be already disabled by the caller */
+ spin_lock(&vioch->lock);
+@@ -269,7 +269,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ list_add_tail(&msg->list, &vioch->free_list);
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ } else {
+- scmi_vio_feed_vq_rx(vioch, msg);
++ scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev);
+ }
+ }
+
+diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
+index a5048956a0be9..ac08e819088bb 100644
+--- a/drivers/firmware/arm_scmi/voltage.c
++++ b/drivers/firmware/arm_scmi/voltage.c
+@@ -156,7 +156,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
+ int cnt;
+
+ cmd->domain_id = cpu_to_le32(v->id);
+- cmd->level_index = desc_index;
++ cmd->level_index = cpu_to_le32(desc_index);
+ ret = ph->xops->do_xfer(ph, tl);
+ if (ret)
+ break;
+diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
+index 581aa5e9b0778..dd7c3d5e8b0bb 100644
+--- a/drivers/firmware/smccc/soc_id.c
++++ b/drivers/firmware/smccc/soc_id.c
+@@ -50,7 +50,7 @@ static int __init smccc_soc_init(void)
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_SOC_ID, &res);
+
+- if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
++ if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+ pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+index f3d62e196901a..0c7963dfacad1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+@@ -223,7 +223,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
+ */
+ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+ {
+- unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
++ unsigned int count;
+ u32 wptr;
+
+ if (!ih->enabled || adev->shutdown)
+@@ -232,6 +232,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+ wptr = amdgpu_ih_get_wptr(adev, ih);
+
+ restart_ih:
++ count = AMDGPU_IH_MAX_NUM_IVS;
+ DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
+
+ /* Order reading of wptr vs. reading of IH ring data */
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 16dbe593cba2e..970d59a21005a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -7729,8 +7729,19 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+ switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ case CHIP_YELLOW_CARP:
+- clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
+- ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
++ preempt_disable();
++ clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
++ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
++ hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
++ /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
++ * roughly every 42 seconds.
++ */
++ if (hi_check != clock_hi) {
++ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
++ clock_hi = hi_check;
++ }
++ preempt_enable();
++ clock = clock_lo | (clock_hi << 32ULL);
+ break;
+ default:
+ preempt_disable();
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 025184a556ee6..55f8dd6e56b48 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -140,6 +140,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
+ #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
+ #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
+
++#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025
++#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1
++#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026
++#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1
++
+ enum ta_ras_gfx_subblock {
+ /*CPC*/
+ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
+@@ -4228,19 +4233,38 @@ failed_kiq_read:
+
+ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+ {
+- uint64_t clock;
++ uint64_t clock, clock_lo, clock_hi, hi_check;
+
+- amdgpu_gfx_off_ctrl(adev, false);
+- mutex_lock(&adev->gfx.gpu_clock_mutex);
+- if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
+- clock = gfx_v9_0_kiq_read_clock(adev);
+- } else {
+- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
++ switch (adev->asic_type) {
++ case CHIP_RENOIR:
++ preempt_disable();
++ clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
++ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
++ hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
++ /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
++ * roughly every 42 seconds.
++ */
++ if (hi_check != clock_hi) {
++ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
++ clock_hi = hi_check;
++ }
++ preempt_enable();
++ clock = clock_lo | (clock_hi << 32ULL);
++ break;
++ default:
++ amdgpu_gfx_off_ctrl(adev, false);
++ mutex_lock(&adev->gfx.gpu_clock_mutex);
++ if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
++ clock = gfx_v9_0_kiq_read_clock(adev);
++ } else {
++ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
++ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
++ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
++ }
++ mutex_unlock(&adev->gfx.gpu_clock_mutex);
++ amdgpu_gfx_off_ctrl(adev, true);
++ break;
+ }
+- mutex_unlock(&adev->gfx.gpu_clock_mutex);
+- amdgpu_gfx_off_ctrl(adev, true);
+ return clock;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 084491afe5405..dc995ce52eff2 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2213,6 +2213,8 @@ static int dm_resume(void *handle)
+ if (amdgpu_in_reset(adev)) {
+ dc_state = dm->cached_dc_state;
+
++ amdgpu_dm_outbox_init(adev);
++
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+@@ -2224,8 +2226,8 @@ static int dm_resume(void *handle)
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ dc_state->streams[i]->mode_changed = true;
+- for (j = 0; j < dc_state->stream_status->plane_count; j++) {
+- dc_state->stream_status->plane_states[j]->update_flags.raw
++ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
++ dc_state->stream_status[i].plane_states[j]->update_flags.raw
+ = 0xffffffff;
+ }
+ }
+@@ -3846,6 +3848,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ register_backlight_device(dm, link);
++
++ if (dm->num_of_edps)
++ update_connector_ext_caps(aconnector);
+ if (amdgpu_dc_feature_mask & DC_PSR_MASK)
+ amdgpu_dm_set_psr_caps(link);
+ }
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+index 258c573acc979..1f406f21b452f 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1024,8 +1024,6 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ uint32_t min_freq, max_freq = 0;
+ uint32_t ret = 0;
+
+- phm_get_sysfs_buf(&buf, &size);
+-
+ switch (type) {
+ case PP_SCLK:
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
+@@ -1038,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ else
+ i = 1;
+
+- size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
++ size += sprintf(buf + size, "0: %uMhz %s\n",
+ data->gfx_min_freq_limit/100,
+ i == 0 ? "*" : "");
+- size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
++ size += sprintf(buf + size, "1: %uMhz %s\n",
+ i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
+ i == 1 ? "*" : "");
+- size += sysfs_emit_at(buf, size, "2: %uMhz %s\n",
++ size += sprintf(buf + size, "2: %uMhz %s\n",
+ data->gfx_max_freq_limit/100,
+ i == 2 ? "*" : "");
+ break;
+@@ -1052,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
+
+ for (i = 0; i < mclk_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i,
+ mclk_table->entries[i].clk / 100,
+ ((mclk_table->entries[i].clk / 100)
+@@ -1067,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ if (ret)
+ return ret;
+
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
++ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
++ size += sprintf(buf + size, "0: %10uMhz\n",
+ (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
+- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
++ size += sprintf(buf + size, "1: %10uMhz\n",
+ (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
+ }
+ break;
+@@ -1083,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ if (ret)
+ return ret;
+
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
++ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
++ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ min_freq, max_freq);
+ }
+ break;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index aceebf5842253..611969bf45207 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -4914,8 +4914,6 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
+ int size = 0;
+ uint32_t i, now, clock, pcie_speed;
+
+- phm_get_sysfs_buf(&buf, &size);
+-
+ switch (type) {
+ case PP_SCLK:
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
+@@ -4928,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
+ now = i;
+
+ for (i = 0; i < sclk_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+@@ -4943,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
+ now = i;
+
+ for (i = 0; i < mclk_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+@@ -4957,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
+ now = i;
+
+ for (i = 0; i < pcie_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %s %s\n", i,
++ size += sprintf(buf + size, "%d: %s %s\n", i,
+ (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
+ (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
+ (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
+@@ -4965,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
+ break;
+ case OD_SCLK:
+ if (hwmgr->od_enabled) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
++ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
+ for (i = 0; i < odn_sclk_table->num_of_pl; i++)
+- size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
++ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+ i, odn_sclk_table->entries[i].clock/100,
+ odn_sclk_table->entries[i].vddc);
+ }
+ break;
+ case OD_MCLK:
+ if (hwmgr->od_enabled) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
++ size += sprintf(buf + size, "%s:\n", "OD_MCLK");
+ for (i = 0; i < odn_mclk_table->num_of_pl; i++)
+- size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
++ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+ i, odn_mclk_table->entries[i].clock/100,
+ odn_mclk_table->entries[i].vddc);
+ }
+ break;
+ case OD_RANGE:
+ if (hwmgr->od_enabled) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
++ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
++ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+- size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
++ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+- size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
++ size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+ data->odn_dpm_table.min_vddc,
+ data->odn_dpm_table.max_vddc);
+ }
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+index 8e28a8eecefc6..03bf8f0692228 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+@@ -1550,8 +1550,6 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
+ uint32_t i, now;
+ int size = 0;
+
+- phm_get_sysfs_buf(&buf, &size);
+-
+ switch (type) {
+ case PP_SCLK:
+ now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
+@@ -1561,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
+ CURR_SCLK_INDEX);
+
+ for (i = 0; i < sclk_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->entries[i].clk / 100,
+ (i == now) ? "*" : "");
+ break;
+@@ -1573,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
+ CURR_MCLK_INDEX);
+
+ for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
+ (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
+ break;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+index c981fc2882f01..e6336654c5655 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -4639,8 +4639,6 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+
+ int i, now, size = 0, count = 0;
+
+- phm_get_sysfs_buf(&buf, &size);
+-
+ switch (type) {
+ case PP_SCLK:
+ if (data->registry_data.sclk_dpm_key_disabled)
+@@ -4654,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ else
+ count = sclk_table->count;
+ for (i = 0; i < count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+@@ -4665,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
+
+ for (i = 0; i < mclk_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+@@ -4676,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
+
+ for (i = 0; i < soc_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, soc_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+@@ -4688,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
+
+ for (i = 0; i < dcef_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, dcef_table->dpm_levels[i].value / 100,
+ (dcef_table->dpm_levels[i].value / 100 == now) ?
+ "*" : "");
+@@ -4702,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ gen_speed = pptable->PcieGenSpeed[i];
+ lane_width = pptable->PcieLaneCount[i];
+
+- size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i,
++ size += sprintf(buf + size, "%d: %s %s %s\n", i,
+ (gen_speed == 0) ? "2.5GT/s," :
+ (gen_speed == 1) ? "5.0GT/s," :
+ (gen_speed == 2) ? "8.0GT/s," :
+@@ -4721,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+
+ case OD_SCLK:
+ if (hwmgr->od_enabled) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
++ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
+ podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
+ for (i = 0; i < podn_vdd_dep->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
++ size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
+ i, podn_vdd_dep->entries[i].clk / 100,
+ podn_vdd_dep->entries[i].vddc);
+ }
+ break;
+ case OD_MCLK:
+ if (hwmgr->od_enabled) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
++ size += sprintf(buf + size, "%s:\n", "OD_MCLK");
+ podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
+ for (i = 0; i < podn_vdd_dep->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
++ size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
+ i, podn_vdd_dep->entries[i].clk/100,
+ podn_vdd_dep->entries[i].vddc);
+ }
+ break;
+ case OD_RANGE:
+ if (hwmgr->od_enabled) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
++ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
++ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+- size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
++ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+- size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
++ size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+ data->odn_dpm_table.min_vddc,
+ data->odn_dpm_table.max_vddc);
+ }
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+index f7e783e1c888f..a2f4d6773d458 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+@@ -2246,8 +2246,6 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ int i, now, size = 0;
+ struct pp_clock_levels_with_latency clocks;
+
+- phm_get_sysfs_buf(&buf, &size);
+-
+ switch (type) {
+ case PP_SCLK:
+ PP_ASSERT_WITH_CODE(
+@@ -2260,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ "Attempt to get gfx clk levels Failed!",
+ return -1);
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
+ break;
+@@ -2276,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ "Attempt to get memory clk levels Failed!",
+ return -1);
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
+ break;
+@@ -2294,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ "Attempt to get soc clk levels Failed!",
+ return -1);
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
+ break;
+@@ -2312,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ "Attempt to get dcef clk levels Failed!",
+ return -1);
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
+ break;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+index 03e63be4ee275..85d55ab4e369f 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+@@ -3366,8 +3366,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ int ret = 0;
+ uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
+
+- phm_get_sysfs_buf(&buf, &size);
+-
+ switch (type) {
+ case PP_SCLK:
+ ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
+@@ -3376,13 +3374,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ return ret);
+
+ if (vega20_get_sclks(hwmgr, &clocks)) {
+- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
++ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
+ break;
+@@ -3394,13 +3392,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ return ret);
+
+ if (vega20_get_memclocks(hwmgr, &clocks)) {
+- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
++ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
+ break;
+@@ -3412,13 +3410,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ return ret);
+
+ if (vega20_get_socclocks(hwmgr, &clocks)) {
+- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
++ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
+ break;
+@@ -3430,7 +3428,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ return ret);
+
+ for (i = 0; i < fclk_dpm_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, fclk_dpm_table->dpm_levels[i].value,
+ fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
+ break;
+@@ -3442,13 +3440,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ return ret);
+
+ if (vega20_get_dcefclocks(hwmgr, &clocks)) {
+- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
++ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
+ break;
+@@ -3462,7 +3460,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ gen_speed = pptable->PcieGenSpeed[i];
+ lane_width = pptable->PcieLaneCount[i];
+
+- size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
++ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ (gen_speed == 0) ? "2.5GT/s," :
+ (gen_speed == 1) ? "5.0GT/s," :
+ (gen_speed == 2) ? "8.0GT/s," :
+@@ -3483,18 +3481,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ case OD_SCLK:
+ if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
++ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
++ size += sprintf(buf + size, "0: %10uMhz\n",
+ od_table->GfxclkFmin);
+- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
++ size += sprintf(buf + size, "1: %10uMhz\n",
+ od_table->GfxclkFmax);
+ }
+ break;
+
+ case OD_MCLK:
+ if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
++ size += sprintf(buf + size, "%s:\n", "OD_MCLK");
++ size += sprintf(buf + size, "1: %10uMhz\n",
+ od_table->UclkFmax);
+ }
+
+@@ -3507,14 +3505,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE");
+- size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
++ size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE");
++ size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq1,
+ od_table->GfxclkVolt1 / VOLTAGE_SCALE);
+- size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n",
++ size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq2,
+ od_table->GfxclkVolt2 / VOLTAGE_SCALE);
+- size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n",
++ size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq3,
+ od_table->GfxclkVolt3 / VOLTAGE_SCALE);
+ }
+@@ -3522,17 +3520,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ break;
+
+ case OD_RANGE:
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
++ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+
+ if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
+- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
++ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
+ }
+
+ if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
+- size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
++ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
+ od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
+ }
+@@ -3543,22 +3541,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
+- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
++ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
+- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
++ size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
+- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
++ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
+- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
++ size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
+- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n
1470 ++ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
1471 + od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
1472 + od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
1473 +- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
1474 ++ size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
1475 + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
1476 + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
1477 + }
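[Editor's note] The hunks above revert sysfs_emit_at() back to bounded sprintf() accumulation. sysfs_emit_at() WARNs and emits nothing unless buf is the page-aligned start of the sysfs page and the offset stays inside it, and these hwmgr print handlers can be called with the buffer at an offset. A minimal sketch of the accumulation pattern, assuming kernel context; the attribute name and values are illustrative, not part of the patched driver:

    static ssize_t demo_levels_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
    {
            int i, size = 0;

            /* sysfs_emit_at(buf, size, ...) is preferred when "buf" is the
             * page-aligned start of the sysfs page; here we model the
             * sprintf() fallback this patch switches to, which tolerates
             * an offset buffer.
             */
            for (i = 0; i < 3; i++)
                    size += sprintf(buf + size, "%d: %uMhz\n",
                                    i, 100u * (i + 1));

            return size;
    }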
1478 +diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
1479 +index b53fee6f1c170..65f172807a0d5 100644
1480 +--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
1481 ++++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
1482 +@@ -291,7 +291,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
1483 + if (rc)
1484 + return rc;
1485 +
1486 +- return sprintf(buf, "%u\n", reg & 1);
1487 ++ return sprintf(buf, "%u\n", reg);
1488 + }
1489 + static DEVICE_ATTR_RO(vga_pw);
1490 +
1491 +diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
1492 +index cd818a6291835..00e53de4812bb 100644
1493 +--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
1494 ++++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
1495 +@@ -225,12 +225,29 @@ static int hyperv_vmbus_remove(struct hv_device *hdev)
1496 + {
1497 + struct drm_device *dev = hv_get_drvdata(hdev);
1498 + struct hyperv_drm_device *hv = to_hv(dev);
1499 ++ struct pci_dev *pdev;
1500 +
1501 + drm_dev_unplug(dev);
1502 + drm_atomic_helper_shutdown(dev);
1503 + vmbus_close(hdev->channel);
1504 + hv_set_drvdata(hdev, NULL);
1505 +- vmbus_free_mmio(hv->mem->start, hv->fb_size);
1506 ++
1507 ++ /*
1508 ++ * Free allocated MMIO memory only on Gen2 VMs.
1509 ++ * On Gen1 VMs, release the PCI device
1510 ++ */
1511 ++ if (efi_enabled(EFI_BOOT)) {
1512 ++ vmbus_free_mmio(hv->mem->start, hv->fb_size);
1513 ++ } else {
1514 ++ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
1515 ++ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
1516 ++ if (!pdev) {
1517 ++ drm_err(dev, "Unable to find PCI Hyper-V video\n");
1518 ++ return -ENODEV;
1519 ++ }
1520 ++ pci_release_region(pdev, 0);
1521 ++ pci_dev_put(pdev);
1522 ++ }
1523 +
1524 + return 0;
1525 + }
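[Editor's note] The remove path above now frees the VMBus MMIO range only on Gen2 (EFI-booted) VMs and instead releases the emulated PCI BAR on Gen1. A sketch of the Gen1 branch, assuming kernel context (<linux/pci.h>); pci_get_device() returns a referenced struct pci_dev, so every successful lookup must be balanced with pci_dev_put():

    static int demo_release_gen1_fb(void)
    {
            struct pci_dev *pdev;

            pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
                                  PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
            if (!pdev)
                    return -ENODEV;

            pci_release_region(pdev, 0);  /* undo pci_request_region(pdev, 0) */
            pci_dev_put(pdev);            /* drop the lookup reference */
            return 0;
    }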
1526 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
1527 +index b51d690f375ff..88d262ba648cf 100644
1528 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
1529 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
1530 +@@ -2626,6 +2626,27 @@ nv174_chipset = {
1531 + .fifo = { 0x00000001, ga102_fifo_new },
1532 + };
1533 +
1534 ++static const struct nvkm_device_chip
1535 ++nv176_chipset = {
1536 ++ .name = "GA106",
1537 ++ .bar = { 0x00000001, tu102_bar_new },
1538 ++ .bios = { 0x00000001, nvkm_bios_new },
1539 ++ .devinit = { 0x00000001, ga100_devinit_new },
1540 ++ .fb = { 0x00000001, ga102_fb_new },
1541 ++ .gpio = { 0x00000001, ga102_gpio_new },
1542 ++ .i2c = { 0x00000001, gm200_i2c_new },
1543 ++ .imem = { 0x00000001, nv50_instmem_new },
1544 ++ .mc = { 0x00000001, ga100_mc_new },
1545 ++ .mmu = { 0x00000001, tu102_mmu_new },
1546 ++ .pci = { 0x00000001, gp100_pci_new },
1547 ++ .privring = { 0x00000001, gm200_privring_new },
1548 ++ .timer = { 0x00000001, gk20a_timer_new },
1549 ++ .top = { 0x00000001, ga100_top_new },
1550 ++ .disp = { 0x00000001, ga102_disp_new },
1551 ++ .dma = { 0x00000001, gv100_dma_new },
1552 ++ .fifo = { 0x00000001, ga102_fifo_new },
1553 ++};
1554 ++
1555 + static const struct nvkm_device_chip
1556 + nv177_chipset = {
1557 + .name = "GA107",
1558 +@@ -3072,6 +3093,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
1559 + case 0x168: device->chip = &nv168_chipset; break;
1560 + case 0x172: device->chip = &nv172_chipset; break;
1561 + case 0x174: device->chip = &nv174_chipset; break;
1562 ++ case 0x176: device->chip = &nv176_chipset; break;
1563 + case 0x177: device->chip = &nv177_chipset; break;
1564 + default:
1565 + if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
1566 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
1567 +index cdb1ead26d84f..82b4c8e1457c2 100644
1568 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
1569 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
1570 +@@ -207,11 +207,13 @@ int
1571 + gm200_acr_wpr_parse(struct nvkm_acr *acr)
1572 + {
1573 + const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
1574 ++ struct nvkm_acr_lsfw *lsfw;
1575 +
1576 + while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
1577 + wpr_header_dump(&acr->subdev, hdr);
1578 +- if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
1579 +- return -ENOMEM;
1580 ++ lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
1581 ++ if (IS_ERR(lsfw))
1582 ++ return PTR_ERR(lsfw);
1583 + }
1584 +
1585 + return 0;
1586 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
1587 +index fb9132a39bb1a..fd97a935a380e 100644
1588 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
1589 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
1590 +@@ -161,11 +161,13 @@ int
1591 + gp102_acr_wpr_parse(struct nvkm_acr *acr)
1592 + {
1593 + const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;
1594 ++ struct nvkm_acr_lsfw *lsfw;
1595 +
1596 + while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
1597 + wpr_header_v1_dump(&acr->subdev, hdr);
1598 +- if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
1599 +- return -ENOMEM;
1600 ++ lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
1601 ++ if (IS_ERR(lsfw))
1602 ++ return PTR_ERR(lsfw);
1603 + }
1604 +
1605 + return 0;
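[Editor's note] Both ACR hunks replace a NULL test on nvkm_acr_lsfw_add() with IS_ERR()/PTR_ERR(), since the function reports failure through an ERR_PTR()-encoded pointer rather than NULL. A generic sketch of that convention, assuming kernel context; demo_lookup() is a hypothetical helper, not an nvkm function:

    #include <linux/err.h>

    struct demo_obj { int id; };
    static struct demo_obj demo_table[16];

    /* Models nvkm_acr_lsfw_add()'s contract: a valid pointer on success,
     * an ERR_PTR()-encoded errno on failure, never NULL -- so a "!ptr"
     * check misses every real error.
     */
    static struct demo_obj *demo_lookup(int id)
    {
            if (id < 0 || id >= 16)
                    return ERR_PTR(-EINVAL);
            return &demo_table[id];
    }

    static int demo_use(int id)
    {
            struct demo_obj *obj = demo_lookup(id);

            if (IS_ERR(obj))
                    return PTR_ERR(obj);    /* propagate the encoded errno */
            return obj->id;
    }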
1606 +diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
1607 +index fddaeb0b09c11..f642bd6e71ff4 100644
1608 +--- a/drivers/gpu/drm/vc4/vc4_bo.c
1609 ++++ b/drivers/gpu/drm/vc4/vc4_bo.c
1610 +@@ -391,7 +391,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
1611 +
1612 + bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1613 + if (!bo)
1614 +- return ERR_PTR(-ENOMEM);
1615 ++ return NULL;
1616 +
1617 + bo->madv = VC4_MADV_WILLNEED;
1618 + refcount_set(&bo->usecnt, 0);
1619 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
1620 +index 4b5ebeacd2836..6561770f1af55 100644
1621 +--- a/drivers/hid/hid-input.c
1622 ++++ b/drivers/hid/hid-input.c
1623 +@@ -160,6 +160,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
1624 + if (usage) {
1625 + *old_keycode = usage->type == EV_KEY ?
1626 + usage->code : KEY_RESERVED;
1627 ++ usage->type = EV_KEY;
1628 + usage->code = ke->keycode;
1629 +
1630 + clear_bit(*old_keycode, dev->keybit);
1631 +@@ -650,10 +651,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
1632 + code += KEY_MACRO1;
1633 + else
1634 + code += BTN_TRIGGER_HAPPY - 0x1e;
1635 +- } else {
1636 +- goto ignore;
1637 ++ break;
1638 + }
1639 +- break;
1640 ++ fallthrough;
1641 + default:
1642 + switch (field->physical) {
1643 + case HID_GD_MOUSE:
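[Editor's note] The hid-input hunk converts a "goto ignore" into break-or-fallthrough so that unhandled usages drop into the default: handler instead of being discarded. The fallthrough pseudo-keyword (from <linux/compiler_attributes.h>) documents the deliberate fall-through for both compilers and readers. A minimal shape of the pattern, with illustrative names:

    switch (usage_page) {
    case DEMO_UP_KEYBOARD:
            if (known_code(code)) {
                    map_key(code);
                    break;          /* handled: leave the switch */
            }
            fallthrough;            /* unknown: try the generic path */
    default:
            map_by_physical(field);
            break;
    }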
1644 +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
1645 +index 686788ebf3e1e..d7687ce706144 100644
1646 +--- a/drivers/hid/hid-magicmouse.c
1647 ++++ b/drivers/hid/hid-magicmouse.c
1648 +@@ -256,8 +256,11 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
1649 + unsigned long now = jiffies;
1650 + int step_x = msc->touches[id].scroll_x - x;
1651 + int step_y = msc->touches[id].scroll_y - y;
1652 +- int step_hr = ((64 - (int)scroll_speed) * msc->scroll_accel) /
1653 +- SCROLL_HR_STEPS;
1654 ++ int step_hr =
1655 ++ max_t(int,
1656 ++ ((64 - (int)scroll_speed) * msc->scroll_accel) /
1657 ++ SCROLL_HR_STEPS,
1658 ++ 1);
1659 + int step_x_hr = msc->touches[id].scroll_x_hr - x;
1660 + int step_y_hr = msc->touches[id].scroll_y_hr - y;
1661 +
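[Editor's note] In the magicmouse hunk, high scroll_speed values could round the old expression down to 0 and stall high-resolution scrolling entirely; max_t(int, ..., 1) (the type-checked kernel clamp from <linux/minmax.h>) enforces a step of at least one unit. A compilable userspace model of the same arithmetic; the speed/accel values are illustrative:

    #include <stdio.h>

    #define SCROLL_HR_STEPS 36

    static int step_hr(int scroll_speed, int scroll_accel)
    {
            int step = ((64 - scroll_speed) * scroll_accel) / SCROLL_HR_STEPS;

            return step > 1 ? step : 1;  /* what max_t(int, ..., 1) enforces */
    }

    int main(void)
    {
            /* at speed 63, accel 1 the raw value is 0 -> clamped to 1 */
            printf("%d\n", step_hr(63, 1));
            return 0;
    }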
1662 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1663 +index 33a6908995b1b..2a4cc39962e76 100644
1664 +--- a/drivers/hid/wacom_wac.c
1665 ++++ b/drivers/hid/wacom_wac.c
1666 +@@ -2603,6 +2603,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
1667 + return;
1668 +
1669 + switch (equivalent_usage) {
1670 ++ case HID_DG_CONFIDENCE:
1671 ++ wacom_wac->hid_data.confidence = value;
1672 ++ break;
1673 + case HID_GD_X:
1674 + wacom_wac->hid_data.x = value;
1675 + break;
1676 +@@ -2635,7 +2638,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
1677 + }
1678 +
1679 + if (usage->usage_index + 1 == field->report_count) {
1680 +- if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
1681 ++ if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
1682 ++ wacom_wac->hid_data.confidence)
1683 + wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
1684 + }
1685 + }
1686 +@@ -2653,6 +2657,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
1687 +
1688 + wacom_wac->is_invalid_bt_frame = false;
1689 +
1690 ++ hid_data->confidence = true;
1691 ++
1692 + for (i = 0; i < report->maxfield; i++) {
1693 + struct hid_field *field = report->field[i];
1694 + int j;
1695 +diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
1696 +index 8b2d4e5b2303c..466b62cc16dc1 100644
1697 +--- a/drivers/hid/wacom_wac.h
1698 ++++ b/drivers/hid/wacom_wac.h
1699 +@@ -301,6 +301,7 @@ struct hid_data {
1700 + bool barrelswitch;
1701 + bool barrelswitch2;
1702 + bool serialhi;
1703 ++ bool confidence;
1704 + int x;
1705 + int y;
1706 + int pressure;
1707 +diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
1708 +index f10a603b13fb0..7b2474e6876f4 100644
1709 +--- a/drivers/i2c/busses/i2c-virtio.c
1710 ++++ b/drivers/i2c/busses/i2c-virtio.c
1711 +@@ -106,11 +106,10 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
1712 +
1713 + static int virtio_i2c_complete_reqs(struct virtqueue *vq,
1714 + struct virtio_i2c_req *reqs,
1715 +- struct i2c_msg *msgs, int num,
1716 +- bool timedout)
1717 ++ struct i2c_msg *msgs, int num)
1718 + {
1719 + struct virtio_i2c_req *req;
1720 +- bool failed = timedout;
1721 ++ bool failed = false;
1722 + unsigned int len;
1723 + int i, j = 0;
1724 +
1725 +@@ -132,7 +131,7 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq,
1726 + j++;
1727 + }
1728 +
1729 +- return timedout ? -ETIMEDOUT : j;
1730 ++ return j;
1731 + }
1732 +
1733 + static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
1734 +@@ -141,7 +140,6 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
1735 + struct virtio_i2c *vi = i2c_get_adapdata(adap);
1736 + struct virtqueue *vq = vi->vq;
1737 + struct virtio_i2c_req *reqs;
1738 +- unsigned long time_left;
1739 + int count;
1740 +
1741 + reqs = kcalloc(num, sizeof(*reqs), GFP_KERNEL);
1742 +@@ -164,11 +162,9 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
1743 + reinit_completion(&vi->completion);
1744 + virtqueue_kick(vq);
1745 +
1746 +- time_left = wait_for_completion_timeout(&vi->completion, adap->timeout);
1747 +- if (!time_left)
1748 +- dev_err(&adap->dev, "virtio i2c backend timeout.\n");
1749 ++ wait_for_completion(&vi->completion);
1750 +
1751 +- count = virtio_i2c_complete_reqs(vq, reqs, msgs, count, !time_left);
1752 ++ count = virtio_i2c_complete_reqs(vq, reqs, msgs, count);
1753 +
1754 + err_free:
1755 + kfree(reqs);
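[Editor's note] The virtio-i2c change stops converting a slow host response into -ETIMEDOUT: the in-flight buffers still belong to the device, so bailing out early would let the device write into memory the driver has given back. The fix blocks in wait_for_completion() until the backend answers. A bare sketch of the completion handshake, assuming kernel context (<linux/completion.h>); the demo_* names are illustrative:

    static DECLARE_COMPLETION(demo_done);

    static void demo_vq_callback(void)        /* e.g. virtqueue callback */
    {
            complete(&demo_done);
    }

    static void demo_submit_and_wait(void)
    {
            reinit_completion(&demo_done);    /* arm before kicking the device */
            /* ... post buffers, kick the queue ... */
            wait_for_completion(&demo_done);  /* no timeout: the buffers stay
                                               * device-owned until it answers */
    }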
1756 +diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
1757 +index a9e568276c99f..a45c5536d2506 100644
1758 +--- a/drivers/iommu/amd/iommu_v2.c
1759 ++++ b/drivers/iommu/amd/iommu_v2.c
1760 +@@ -928,10 +928,8 @@ static int __init amd_iommu_v2_init(void)
1761 + {
1762 + int ret;
1763 +
1764 +- pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@××××.de>\n");
1765 +-
1766 + if (!amd_iommu_v2_supported()) {
1767 +- pr_info("AMD IOMMUv2 functionality not available on this system\n");
1768 ++ pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n");
1769 + /*
1770 + * Load anyway to provide the symbols to other modules
1771 + * which may use AMD IOMMUv2 optionally.
1772 +@@ -946,6 +944,8 @@ static int __init amd_iommu_v2_init(void)
1773 +
1774 + amd_iommu_register_ppr_notifier(&ppr_nb);
1775 +
1776 ++ pr_info("AMD IOMMUv2 loaded and initialized\n");
1777 ++
1778 + return 0;
1779 +
1780 + out:
1781 +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
1782 +index 9a356075d3450..78f8c8e6803e9 100644
1783 +--- a/drivers/iommu/intel/iommu.c
1784 ++++ b/drivers/iommu/intel/iommu.c
1785 +@@ -1226,13 +1226,11 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1786 + pte = &pte[pfn_level_offset(pfn, level)];
1787 +
1788 + do {
1789 +- unsigned long level_pfn;
1790 ++ unsigned long level_pfn = pfn & level_mask(level);
1791 +
1792 + if (!dma_pte_present(pte))
1793 + goto next;
1794 +
1795 +- level_pfn = pfn & level_mask(level);
1796 +-
1797 + /* If range covers entire pagetable, free it */
1798 + if (start_pfn <= level_pfn &&
1799 + last_pfn >= level_pfn + level_size(level) - 1) {
1800 +@@ -1253,7 +1251,7 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1801 + freelist);
1802 + }
1803 + next:
1804 +- pfn += level_size(level);
1805 ++ pfn = level_pfn + level_size(level);
1806 + } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1807 +
1808 + if (first_pte)
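[Editor's note] The Intel IOMMU hunk computes level_pfn up front and advances with pfn = level_pfn + level_size(level); stepping by level_size() from an unaligned starting pfn would leave every subsequent iteration misaligned with the page-table entry boundaries. A compilable userspace model of the arithmetic, using a 9-bit-per-level stride as in typical x86 page tables (values illustrative):

    #include <stdio.h>

    #define LEVEL_STRIDE 9UL

    static unsigned long level_size(int level)
    {
            return 1UL << (LEVEL_STRIDE * (level - 1));
    }

    static unsigned long level_mask(int level)
    {
            return ~(level_size(level) - 1);
    }

    int main(void)
    {
            unsigned long pfn = 0x203;                    /* unaligned start */
            unsigned long level_pfn = pfn & level_mask(2);

            /* old: 0x203 + 0x200 = 0x403 (still unaligned)
             * new: 0x200 + 0x200 = 0x400 (next entry boundary)
             */
            printf("old=%#lx new=%#lx\n",
                   pfn + level_size(2), level_pfn + level_size(2));
            return 0;
    }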
1809 +diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
1810 +index 5cb260820eda6..7f23ad61c094f 100644
1811 +--- a/drivers/iommu/rockchip-iommu.c
1812 ++++ b/drivers/iommu/rockchip-iommu.c
1813 +@@ -200,8 +200,8 @@ static inline phys_addr_t rk_dte_pt_address(u32 dte)
1814 + #define DTE_HI_MASK2 GENMASK(7, 4)
1815 + #define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */
1816 + #define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */
1817 +-#define PAGE_DESC_HI_MASK1 GENMASK_ULL(39, 36)
1818 +-#define PAGE_DESC_HI_MASK2 GENMASK_ULL(35, 32)
1819 ++#define PAGE_DESC_HI_MASK1 GENMASK_ULL(35, 32)
1820 ++#define PAGE_DESC_HI_MASK2 GENMASK_ULL(39, 36)
1821 +
1822 + static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
1823 + {
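[Editor's note] The Rockchip fix swaps the two mask definitions so the descriptor's high bits map to the right physical-address bits: GENMASK_ULL(h, l) builds a 64-bit mask with bits h..l set, and the names must match the address ranges they extract. A compilable userspace model; the macro expansion mirrors the kernel's, and the specific bit ranges are taken from the hunk above:

    #include <stdio.h>
    #include <stdint.h>

    #define GENMASK_ULL(h, l) \
            (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
            /* bits 35:32 and 39:36 of a 40-bit physical address */
            uint64_t mask1 = GENMASK_ULL(35, 32);   /* 0x0000000f00000000 */
            uint64_t mask2 = GENMASK_ULL(39, 36);   /* 0x000000f000000000 */

            printf("%#llx %#llx\n",
                   (unsigned long long)mask1, (unsigned long long)mask2);
            return 0;
    }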
1824 +diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
1825 +index 79fa36de8a04a..cd9cb354dc2c7 100644
1826 +--- a/drivers/media/cec/core/cec-adap.c
1827 ++++ b/drivers/media/cec/core/cec-adap.c
1828 +@@ -1199,6 +1199,7 @@ void cec_received_msg_ts(struct cec_adapter *adap,
1829 + if (abort)
1830 + dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
1831 + msg->flags = dst->flags;
1832 ++ msg->sequence = dst->sequence;
1833 + /* Remove it from the wait_queue */
1834 + list_del_init(&data->list);
1835 +
1836 +diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
1837 +index 47aff3b197426..80aaf07b16f28 100644
1838 +--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
1839 ++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
1840 +@@ -744,10 +744,6 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *p64,
1841 + /*
1842 + * x86 is the only compat architecture with different struct alignment
1843 + * between 32-bit and 64-bit tasks.
1844 +- *
1845 +- * On all other architectures, v4l2_event32 and v4l2_event32_time32 are
1846 +- * the same as v4l2_event and v4l2_event_time32, so we can use the native
1847 +- * handlers, converting v4l2_event to v4l2_event_time32 if necessary.
1848 + */
1849 + struct v4l2_event32 {
1850 + __u32 type;
1851 +@@ -765,21 +761,6 @@ struct v4l2_event32 {
1852 + __u32 reserved[8];
1853 + };
1854 +
1855 +-#ifdef CONFIG_COMPAT_32BIT_TIME
1856 +-struct v4l2_event32_time32 {
1857 +- __u32 type;
1858 +- union {
1859 +- compat_s64 value64;
1860 +- __u8 data[64];
1861 +- } u;
1862 +- __u32 pending;
1863 +- __u32 sequence;
1864 +- struct old_timespec32 timestamp;
1865 +- __u32 id;
1866 +- __u32 reserved[8];
1867 +-};
1868 +-#endif
1869 +-
1870 + static int put_v4l2_event32(struct v4l2_event *p64,
1871 + struct v4l2_event32 __user *p32)
1872 + {
1873 +@@ -795,7 +776,22 @@ static int put_v4l2_event32(struct v4l2_event *p64,
1874 + return 0;
1875 + }
1876 +
1877 ++#endif
1878 ++
1879 + #ifdef CONFIG_COMPAT_32BIT_TIME
1880 ++struct v4l2_event32_time32 {
1881 ++ __u32 type;
1882 ++ union {
1883 ++ compat_s64 value64;
1884 ++ __u8 data[64];
1885 ++ } u;
1886 ++ __u32 pending;
1887 ++ __u32 sequence;
1888 ++ struct old_timespec32 timestamp;
1889 ++ __u32 id;
1890 ++ __u32 reserved[8];
1891 ++};
1892 ++
1893 + static int put_v4l2_event32_time32(struct v4l2_event *p64,
1894 + struct v4l2_event32_time32 __user *p32)
1895 + {
1896 +@@ -811,7 +807,6 @@ static int put_v4l2_event32_time32(struct v4l2_event *p64,
1897 + return 0;
1898 + }
1899 + #endif
1900 +-#endif
1901 +
1902 + struct v4l2_edid32 {
1903 + __u32 pad;
1904 +@@ -873,9 +868,7 @@ static int put_v4l2_edid32(struct v4l2_edid *p64,
1905 + #define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32)
1906 + #define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32)
1907 + #define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32)
1908 +-#ifdef CONFIG_X86_64
1909 + #define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32)
1910 +-#endif
1911 + #define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32)
1912 + #endif
1913 +
1914 +@@ -929,10 +922,10 @@ unsigned int v4l2_compat_translate_cmd(unsigned int cmd)
1915 + #ifdef CONFIG_X86_64
1916 + case VIDIOC_DQEVENT32:
1917 + return VIDIOC_DQEVENT;
1918 ++#endif
1919 + #ifdef CONFIG_COMPAT_32BIT_TIME
1920 + case VIDIOC_DQEVENT32_TIME32:
1921 + return VIDIOC_DQEVENT;
1922 +-#endif
1923 + #endif
1924 + }
1925 + return cmd;
1926 +@@ -1025,10 +1018,10 @@ int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd)
1927 + #ifdef CONFIG_X86_64
1928 + case VIDIOC_DQEVENT32:
1929 + return put_v4l2_event32(parg, arg);
1930 ++#endif
1931 + #ifdef CONFIG_COMPAT_32BIT_TIME
1932 + case VIDIOC_DQEVENT32_TIME32:
1933 + return put_v4l2_event32_time32(parg, arg);
1934 +-#endif
1935 + #endif
1936 + }
1937 + return 0;
1938 +diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
1939 +index e658f01742420..60f19369de845 100644
1940 +--- a/drivers/mmc/host/sdhci-esdhc-imx.c
1941 ++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
1942 +@@ -300,7 +300,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
1943 + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
1944 + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
1945 + | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
1946 +- | ESDHC_FLAG_CQHCI
1947 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE
1948 + | ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
1949 + };
1950 +@@ -309,7 +308,6 @@ static struct esdhc_soc_data usdhc_imx8mm_data = {
1951 + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
1952 + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
1953 + | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
1954 +- | ESDHC_FLAG_CQHCI
1955 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE,
1956 + };
1957 +
1958 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1959 +index 2d80a04e11d87..7728f26adb19f 100644
1960 +--- a/drivers/mmc/host/sdhci.c
1961 ++++ b/drivers/mmc/host/sdhci.c
1962 +@@ -771,7 +771,19 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
1963 + len -= offset;
1964 + }
1965 +
1966 +- BUG_ON(len > 65536);
1967 ++ /*
1968 ++ * The block layer forces a minimum segment size of PAGE_SIZE,
1969 ++ * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
1970 ++ * multiple descriptors, noting that the ADMA table is sized
1971 ++ * for 4KiB chunks anyway, so it will be big enough.
1972 ++ */
1973 ++ while (len > host->max_adma) {
1974 ++ int n = 32 * 1024; /* 32 KiB */
1975 ++
1976 ++ __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
1977 ++ addr += n;
1978 ++ len -= n;
1979 ++ }
1980 +
1981 + /* tran, valid */
1982 + if (len)
1983 +@@ -3952,6 +3964,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
1984 + * descriptor for each segment, plus 1 for a nop end descriptor.
1985 + */
1986 + host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
1987 ++ host->max_adma = 65536;
1988 +
1989 + host->max_timeout_count = 0xE;
1990 +
1991 +@@ -4617,10 +4630,12 @@ int sdhci_setup_host(struct sdhci_host *host)
1992 + * be larger than 64 KiB though.
1993 + */
1994 + if (host->flags & SDHCI_USE_ADMA) {
1995 +- if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
1996 ++ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
1997 ++ host->max_adma = 65532; /* 32-bit alignment */
1998 + mmc->max_seg_size = 65535;
1999 +- else
2000 ++ } else {
2001 + mmc->max_seg_size = 65536;
2002 ++ }
2003 + } else {
2004 + mmc->max_seg_size = mmc->max_req_size;
2005 + }
2006 +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
2007 +index e8d04e42a5afd..6c689be3e48f6 100644
2008 +--- a/drivers/mmc/host/sdhci.h
2009 ++++ b/drivers/mmc/host/sdhci.h
2010 +@@ -340,7 +340,8 @@ struct sdhci_adma2_64_desc {
2011 +
2012 + /*
2013 + * Maximum segments assuming a 512KiB maximum requisition size and a minimum
2014 +- * 4KiB page size.
2015 ++ * 4KiB page size. Note this also allows enough for multiple descriptors in
2016 ++ * case of PAGE_SIZE >= 64KiB.
2017 + */
2018 + #define SDHCI_MAX_SEGS 128
2019 +
2020 +@@ -543,6 +544,7 @@ struct sdhci_host {
2021 + unsigned int blocks; /* remaining PIO blocks */
2022 +
2023 + int sg_count; /* Mapped sg entries */
2024 ++ int max_adma; /* Max. length in ADMA descriptor */
2025 +
2026 + void *adma_table; /* ADMA descriptor table */
2027 + void *align_buffer; /* Bounce buffer */
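[Editor's note] The sdhci hunks replace a BUG_ON with a splitting loop: on 64 KiB-page systems the block layer can hand over a segment longer than one ADMA2 descriptor can express (65536 bytes, or 65532 with the zero-length-descriptor quirk), so oversized segments are emitted as 32 KiB descriptors until the remainder fits. A compilable userspace model of the loop; emit() stands in for __sdhci_adma_write_desc() and just records (addr, len) pairs:

    #include <stdio.h>

    #define MAX_ADMA 65536
    #define CHUNK    (32 * 1024)

    static void emit(unsigned long long addr, int len)
    {
            printf("desc: addr=%#llx len=%d\n", addr, len);
    }

    static void write_descs(unsigned long long addr, int len)
    {
            while (len > MAX_ADMA) {        /* too big for one descriptor */
                    emit(addr, CHUNK);
                    addr += CHUNK;
                    len -= CHUNK;
            }
            if (len)
                    emit(addr, len);        /* final (possibly short) chunk */
    }

    int main(void)
    {
            write_descs(0x1000, 128 * 1024);  /* a 128 KiB segment */
            return 0;
    }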
2028 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
2029 +index 5ebd96f6833d6..526fb56c84f24 100644
2030 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
2031 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
2032 +@@ -985,6 +985,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
2033 + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
2034 + const struct hnae3_ae_ops *ops = h->ae_algo->ops;
2035 + const struct hns3_reset_type_map *rst_type_map;
2036 ++ enum ethtool_reset_flags rst_flags;
2037 + u32 i, size;
2038 +
2039 + if (ops->ae_dev_resetting && ops->ae_dev_resetting(h))
2040 +@@ -1004,6 +1005,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
2041 + for (i = 0; i < size; i++) {
2042 + if (rst_type_map[i].rst_flags == *flags) {
2043 + rst_type = rst_type_map[i].rst_type;
2044 ++ rst_flags = rst_type_map[i].rst_flags;
2045 + break;
2046 + }
2047 + }
2048 +@@ -1019,6 +1021,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
2049 +
2050 + ops->reset_event(h->pdev, h);
2051 +
2052 ++ *flags &= ~rst_flags;
2053 ++
2054 + return 0;
2055 + }
2056 +
2057 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2058 +index 3b8bde58613a8..fee7d9e79f8c3 100644
2059 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2060 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2061 +@@ -703,9 +703,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
2062 + roundup_size = ilog2(roundup_size);
2063 +
2064 + for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
2065 +- tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
2066 ++ tc_valid[i] = 1;
2067 + tc_size[i] = roundup_size;
2068 +- tc_offset[i] = rss_size * i;
2069 ++ tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
2070 + }
2071 +
2072 + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
2073 +diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
2074 +index 46312a4415baf..0ae6da2992d01 100644
2075 +--- a/drivers/net/ethernet/intel/iavf/iavf.h
2076 ++++ b/drivers/net/ethernet/intel/iavf/iavf.h
2077 +@@ -305,6 +305,7 @@ struct iavf_adapter {
2078 + #define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26)
2079 + #define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27)
2080 + #define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28)
2081 ++#define IAVF_FLAG_AQ_REQUEST_STATS BIT(29)
2082 +
2083 + /* OS defined structs */
2084 + struct net_device *netdev;
2085 +@@ -398,6 +399,7 @@ int iavf_up(struct iavf_adapter *adapter);
2086 + void iavf_down(struct iavf_adapter *adapter);
2087 + int iavf_process_config(struct iavf_adapter *adapter);
2088 + void iavf_schedule_reset(struct iavf_adapter *adapter);
2089 ++void iavf_schedule_request_stats(struct iavf_adapter *adapter);
2090 + void iavf_reset(struct iavf_adapter *adapter);
2091 + void iavf_set_ethtool_ops(struct net_device *netdev);
2092 + void iavf_update_stats(struct iavf_adapter *adapter);
2093 +@@ -455,4 +457,5 @@ void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
2094 + void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
2095 + struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
2096 + const u8 *macaddr);
2097 ++int iavf_lock_timeout(struct mutex *lock, unsigned int msecs);
2098 + #endif /* _IAVF_H_ */
2099 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
2100 +index 144a776793597..0cecaff38d042 100644
2101 +--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
2102 ++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
2103 +@@ -354,6 +354,9 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
2104 + struct iavf_adapter *adapter = netdev_priv(netdev);
2105 + unsigned int i;
2106 +
2107 ++ /* Explicitly request stats refresh */
2108 ++ iavf_schedule_request_stats(adapter);
2109 ++
2110 + iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
2111 +
2112 + rcu_read_lock();
2113 +@@ -723,12 +726,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
2114 + *
2115 + * Change the ITR settings for a specific queue.
2116 + **/
2117 +-static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
2118 +- struct ethtool_coalesce *ec, int queue)
2119 ++static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
2120 ++ struct ethtool_coalesce *ec, int queue)
2121 + {
2122 + struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
2123 + struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
2124 + struct iavf_q_vector *q_vector;
2125 ++ u16 itr_setting;
2126 ++
2127 ++ itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
2128 ++
2129 ++ if (ec->rx_coalesce_usecs != itr_setting &&
2130 ++ ec->use_adaptive_rx_coalesce) {
2131 ++ netif_info(adapter, drv, adapter->netdev,
2132 ++ "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
2133 ++ return -EINVAL;
2134 ++ }
2135 ++
2136 ++ itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
2137 ++
2138 ++ if (ec->tx_coalesce_usecs != itr_setting &&
2139 ++ ec->use_adaptive_tx_coalesce) {
2140 ++ netif_info(adapter, drv, adapter->netdev,
2141 ++ "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
2142 ++ return -EINVAL;
2143 ++ }
2144 +
2145 + rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
2146 + tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
2147 +@@ -751,6 +773,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
2148 + * the Tx and Rx ITR values based on the values we have entered
2149 + * into the q_vector, no need to write the values now.
2150 + */
2151 ++ return 0;
2152 + }
2153 +
2154 + /**
2155 +@@ -792,9 +815,11 @@ static int __iavf_set_coalesce(struct net_device *netdev,
2156 + */
2157 + if (queue < 0) {
2158 + for (i = 0; i < adapter->num_active_queues; i++)
2159 +- iavf_set_itr_per_queue(adapter, ec, i);
2160 ++ if (iavf_set_itr_per_queue(adapter, ec, i))
2161 ++ return -EINVAL;
2162 + } else if (queue < adapter->num_active_queues) {
2163 +- iavf_set_itr_per_queue(adapter, ec, queue);
2164 ++ if (iavf_set_itr_per_queue(adapter, ec, queue))
2165 ++ return -EINVAL;
2166 + } else {
2167 + netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
2168 + adapter->num_active_queues - 1);
2169 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
2170 +index aaf8a2f396e46..fd3717ae70ab1 100644
2171 +--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
2172 ++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
2173 +@@ -138,7 +138,7 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
2174 + *
2175 + * Returns 0 on success, negative on failure
2176 + **/
2177 +-static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
2178 ++int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
2179 + {
2180 + unsigned int wait, delay = 10;
2181 +
2182 +@@ -165,6 +165,19 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
2183 + }
2184 + }
2185 +
2186 ++/**
2187 ++ * iavf_schedule_request_stats - Set the flags and schedule statistics request
2188 ++ * @adapter: board private structure
2189 ++ *
2190 ++ * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
2191 ++ * request and refresh ethtool stats
2192 ++ **/
2193 ++void iavf_schedule_request_stats(struct iavf_adapter *adapter)
2194 ++{
2195 ++ adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
2196 ++ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
2197 ++}
2198 ++
2199 + /**
2200 + * iavf_tx_timeout - Respond to a Tx Hang
2201 + * @netdev: network interface device structure
2202 +@@ -695,13 +708,11 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
2203 + **/
2204 + static void iavf_restore_filters(struct iavf_adapter *adapter)
2205 + {
2206 +- /* re-add all VLAN filters */
2207 +- if (VLAN_ALLOWED(adapter)) {
2208 +- u16 vid;
2209 ++ u16 vid;
2210 +
2211 +- for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
2212 +- iavf_add_vlan(adapter, vid);
2213 +- }
2214 ++ /* re-add all VLAN filters */
2215 ++ for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
2216 ++ iavf_add_vlan(adapter, vid);
2217 + }
2218 +
2219 + /**
2220 +@@ -736,9 +747,6 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
2221 + {
2222 + struct iavf_adapter *adapter = netdev_priv(netdev);
2223 +
2224 +- if (!VLAN_ALLOWED(adapter))
2225 +- return -EIO;
2226 +-
2227 + iavf_del_vlan(adapter, vid);
2228 + clear_bit(vid, adapter->vsi.active_vlans);
2229 +
2230 +@@ -1700,6 +1708,11 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
2231 + iavf_del_adv_rss_cfg(adapter);
2232 + return 0;
2233 + }
2234 ++ if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
2235 ++ iavf_request_stats(adapter);
2236 ++ return 0;
2237 ++ }
2238 ++
2239 + return -EAGAIN;
2240 + }
2241 +
2242 +@@ -2124,7 +2137,6 @@ static void iavf_reset_task(struct work_struct *work)
2243 + struct net_device *netdev = adapter->netdev;
2244 + struct iavf_hw *hw = &adapter->hw;
2245 + struct iavf_mac_filter *f, *ftmp;
2246 +- struct iavf_vlan_filter *vlf;
2247 + struct iavf_cloud_filter *cf;
2248 + u32 reg_val;
2249 + int i = 0, err;
2250 +@@ -2264,11 +2276,6 @@ continue_reset:
2251 + list_for_each_entry(f, &adapter->mac_filter_list, list) {
2252 + f->add = true;
2253 + }
2254 +- /* re-add all VLAN filters */
2255 +- list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
2256 +- vlf->add = true;
2257 +- }
2258 +-
2259 + spin_unlock_bh(&adapter->mac_vlan_list_lock);
2260 +
2261 + /* check if TCs are running and re-add all cloud filters */
2262 +@@ -2282,7 +2289,6 @@ continue_reset:
2263 + spin_unlock_bh(&adapter->cloud_filter_list_lock);
2264 +
2265 + adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
2266 +- adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2267 + adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
2268 + iavf_misc_irq_enable(adapter);
2269 +
2270 +@@ -3380,11 +3386,16 @@ static int iavf_set_features(struct net_device *netdev,
2271 + {
2272 + struct iavf_adapter *adapter = netdev_priv(netdev);
2273 +
2274 +- /* Don't allow changing VLAN_RX flag when adapter is not capable
2275 +- * of VLAN offload
2276 ++ /* Don't allow enabling VLAN features when adapter is not capable
2277 ++ * of VLAN offload/filtering
2278 + */
2279 + if (!VLAN_ALLOWED(adapter)) {
2280 +- if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
2281 ++ netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
2282 ++ NETIF_F_HW_VLAN_CTAG_TX |
2283 ++ NETIF_F_HW_VLAN_CTAG_FILTER);
2284 ++ if (features & (NETIF_F_HW_VLAN_CTAG_RX |
2285 ++ NETIF_F_HW_VLAN_CTAG_TX |
2286 ++ NETIF_F_HW_VLAN_CTAG_FILTER))
2287 + return -EINVAL;
2288 + } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
2289 + if (features & NETIF_F_HW_VLAN_CTAG_RX)
2290 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
2291 +index 3c735968e1b85..08302ab35d687 100644
2292 +--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
2293 ++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
2294 +@@ -607,7 +607,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
2295 + if (f->add)
2296 + count++;
2297 + }
2298 +- if (!count) {
2299 ++ if (!count || !VLAN_ALLOWED(adapter)) {
2300 + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2301 + spin_unlock_bh(&adapter->mac_vlan_list_lock);
2302 + return;
2303 +@@ -673,9 +673,19 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
2304 +
2305 + spin_lock_bh(&adapter->mac_vlan_list_lock);
2306 +
2307 +- list_for_each_entry(f, &adapter->vlan_filter_list, list) {
2308 +- if (f->remove)
2309 ++ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
2310 ++ /* since VLAN capabilities are not allowed, we don't want to send
2311 ++ * a VLAN delete request because it will most likely fail and
2312 ++ * create unnecessary errors/noise, so just free the VLAN
2313 ++ * filters marked for removal to enable bailing out before
2314 ++ * sending a virtchnl message
2315 ++ */
2316 ++ if (f->remove && !VLAN_ALLOWED(adapter)) {
2317 ++ list_del(&f->list);
2318 ++ kfree(f);
2319 ++ } else if (f->remove) {
2320 + count++;
2321 ++ }
2322 + }
2323 + if (!count) {
2324 + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
2325 +@@ -784,6 +794,8 @@ void iavf_request_stats(struct iavf_adapter *adapter)
2326 + /* no error message, this isn't crucial */
2327 + return;
2328 + }
2329 ++
2330 ++ adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
2331 + adapter->current_op = VIRTCHNL_OP_GET_STATS;
2332 + vqs.vsi_id = adapter->vsi_res->vsi_id;
2333 + /* queue maps are ignored for this message - only the vsi is used */
2334 +@@ -1722,8 +1734,37 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
2335 + }
2336 + spin_lock_bh(&adapter->mac_vlan_list_lock);
2337 + iavf_add_filter(adapter, adapter->hw.mac.addr);
2338 ++
2339 ++ if (VLAN_ALLOWED(adapter)) {
2340 ++ if (!list_empty(&adapter->vlan_filter_list)) {
2341 ++ struct iavf_vlan_filter *vlf;
2342 ++
2343 ++ /* re-add all VLAN filters over virtchnl */
2344 ++ list_for_each_entry(vlf,
2345 ++ &adapter->vlan_filter_list,
2346 ++ list)
2347 ++ vlf->add = true;
2348 ++
2349 ++ adapter->aq_required |=
2350 ++ IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2351 ++ }
2352 ++ }
2353 ++
2354 + spin_unlock_bh(&adapter->mac_vlan_list_lock);
2355 + iavf_process_config(adapter);
2356 ++
2357 ++ /* unlock crit_lock before acquiring rtnl_lock as other
2358 ++ * processes holding rtnl_lock could be waiting for the same
2359 ++ * crit_lock
2360 ++ */
2361 ++ mutex_unlock(&adapter->crit_lock);
2362 ++ rtnl_lock();
2363 ++ netdev_update_features(adapter->netdev);
2364 ++ rtnl_unlock();
2365 ++ if (iavf_lock_timeout(&adapter->crit_lock, 10000))
2366 ++ dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
2367 ++ __FUNCTION__);
2368 ++
2369 + }
2370 + break;
2371 + case VIRTCHNL_OP_ENABLE_QUEUES:
2372 +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
2373 +index e47920fe73b88..62bf879dc6232 100644
2374 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c
2375 ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
2376 +@@ -83,8 +83,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
2377 + if (!vsi->rx_rings)
2378 + goto err_rings;
2379 +
2380 +- /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
2381 +- vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
2382 ++ /* txq_map needs to have enough space to track both Tx (stack) rings
2383 ++ * and XDP rings; at this point vsi->num_xdp_txq might not be set,
2384 ++ * so use num_possible_cpus() as we want to always provide XDP ring
2385 ++ * per CPU, regardless of queue count settings from user that might
2386 ++ * have come from ethtool's set_channels() callback;
2387 ++ */
2388 ++ vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
2389 + sizeof(*vsi->txq_map), GFP_KERNEL);
2390 +
2391 + if (!vsi->txq_map)
2392 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
2393 +index a39136b0bd16a..f622ee20ac40d 100644
2394 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
2395 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
2396 +@@ -2497,7 +2497,18 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2397 + ice_stat_str(status));
2398 + goto clear_xdp_rings;
2399 + }
2400 +- ice_vsi_assign_bpf_prog(vsi, prog);
2401 ++
2402 ++ /* assign the prog only when it's not already present on VSI;
2403 ++ * this flow is a subject of both ethtool -L and ndo_bpf flows;
2404 ++ * VSI rebuild that happens under ethtool -L can expose us to
2405 ++ * the bpf_prog refcount issues as we would be swapping same
2406 ++ * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2407 ++ * on it as it would be treated as an 'old_prog'; for ndo_bpf
2408 ++ * this is not harmful as dev_xdp_install bumps the refcount
2409 ++ * before calling the op exposed by the driver;
2410 ++ */
2411 ++ if (!ice_is_xdp_ena_vsi(vsi))
2412 ++ ice_vsi_assign_bpf_prog(vsi, prog);
2413 +
2414 + return 0;
2415 + clear_xdp_rings:
2416 +@@ -2643,6 +2654,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2417 + if (xdp_ring_err)
2418 + NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2419 + } else {
2420 ++ /* safe to call even when prog == vsi->xdp_prog as
2421 ++ * dev_xdp_install in net/core/dev.c incremented prog's
2422 ++ * refcount so corresponding bpf_prog_put won't cause
2423 ++ * underflow
2424 ++ */
2425 + ice_vsi_assign_bpf_prog(vsi, prog);
2426 + }
2427 +
2428 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
2429 +index 751de06019a0e..8f30577386b6f 100644
2430 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
2431 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
2432 +@@ -8019,7 +8019,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
2433 + if (likely(napi_complete_done(napi, work_done)))
2434 + igb_ring_irq_enable(q_vector);
2435 +
2436 +- return min(work_done, budget - 1);
2437 ++ return work_done;
2438 + }
2439 +
2440 + /**
2441 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2442 +index d74d4966b13fc..ed6d0c019573b 100644
2443 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2444 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2445 +@@ -5017,11 +5017,13 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
2446 + mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
2447 + }
2448 +
2449 ++ if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
2450 ++ netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
2451 ++ mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
2452 ++ return -EINVAL;
2453 ++ }
2454 ++
2455 + if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
2456 +- if (port->xdp_prog) {
2457 +- netdev_err(dev, "Jumbo frames are not supported with XDP\n");
2458 +- return -EINVAL;
2459 +- }
2460 + if (priv->percpu_pools) {
2461 + netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
2462 + mvpp2_bm_switch_buffers(priv, false);
2463 +@@ -5307,8 +5309,8 @@ static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
2464 + bool running = netif_running(port->dev);
2465 + bool reset = !prog != !port->xdp_prog;
2466 +
2467 +- if (port->dev->mtu > ETH_DATA_LEN) {
2468 +- NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled");
2469 ++ if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) {
2470 ++ NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
2471 + return -EOPNOTSUPP;
2472 + }
2473 +
2474 +diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
2475 +index 3ce6ccd0f5394..b4599fe4ca8da 100644
2476 +--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
2477 ++++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
2478 +@@ -497,8 +497,8 @@ int prestera_bridge_port_join(struct net_device *br_dev,
2479 +
2480 + br_port = prestera_bridge_port_add(bridge, port->dev);
2481 + if (IS_ERR(br_port)) {
2482 +- err = PTR_ERR(br_port);
2483 +- goto err_brport_create;
2484 ++ prestera_bridge_put(bridge);
2485 ++ return PTR_ERR(br_port);
2486 + }
2487 +
2488 + err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL,
2489 +@@ -519,8 +519,6 @@ err_port_join:
2490 + switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
2491 + err_switchdev_offload:
2492 + prestera_bridge_port_put(br_port);
2493 +-err_brport_create:
2494 +- prestera_bridge_put(bridge);
2495 + return err;
2496 + }
2497 +
2498 +@@ -1124,7 +1122,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused,
2499 + prestera_port_obj_attr_set);
2500 + break;
2501 + default:
2502 +- err = -EOPNOTSUPP;
2503 ++ return NOTIFY_DONE;
2504 + }
2505 +
2506 + return notifier_from_errno(err);
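[Editor's note] The prestera hunk fixes the notifier return convention: pushing -EOPNOTSUPP through notifier_from_errno() makes the switchdev core treat an event the driver simply doesn't handle as a failure, whereas NOTIFY_DONE means "not mine, let the chain continue". A sketch of the convention, assuming kernel context (<linux/notifier.h>); demo_attr_set() and the event constant are illustrative:

    static int demo_blk_event(struct notifier_block *nb,
                              unsigned long event, void *ptr)
    {
            int err;

            switch (event) {
            case DEMO_EVENT_ATTR_SET:
                    err = demo_attr_set(ptr);
                    return notifier_from_errno(err);  /* 0 -> NOTIFY_OK */
            default:
                    return NOTIFY_DONE;  /* not ours: don't report an error */
            }
    }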
2507 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2508 +index 250c5a24264dc..edfdd44de579c 100644
2509 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2510 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2511 +@@ -2131,7 +2131,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2512 + max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2513 + local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2514 +
2515 +- if (WARN_ON_ONCE(local_port >= max_ports))
2516 ++ if (WARN_ON_ONCE(!local_port || local_port >= max_ports))
2517 + return;
2518 + mlxsw_sp_port = mlxsw_sp->ports[local_port];
2519 + if (!mlxsw_sp_port)
2520 +diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
2521 +index 4d5a5d6595b3b..d64ce65a3c174 100644
2522 +--- a/drivers/net/ethernet/microchip/lan743x_main.c
2523 ++++ b/drivers/net/ethernet/microchip/lan743x_main.c
2524 +@@ -914,8 +914,7 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
2525 + }
2526 +
2527 + static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
2528 +- u8 duplex, u16 local_adv,
2529 +- u16 remote_adv)
2530 ++ u16 local_adv, u16 remote_adv)
2531 + {
2532 + struct lan743x_phy *phy = &adapter->phy;
2533 + u8 cap;
2534 +@@ -943,7 +942,6 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
2535 +
2536 + phy_print_status(phydev);
2537 + if (phydev->state == PHY_RUNNING) {
2538 +- struct ethtool_link_ksettings ksettings;
2539 + int remote_advertisement = 0;
2540 + int local_advertisement = 0;
2541 +
2542 +@@ -980,18 +978,14 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
2543 + }
2544 + lan743x_csr_write(adapter, MAC_CR, data);
2545 +
2546 +- memset(&ksettings, 0, sizeof(ksettings));
2547 +- phy_ethtool_get_link_ksettings(netdev, &ksettings);
2548 + local_advertisement =
2549 + linkmode_adv_to_mii_adv_t(phydev->advertising);
2550 + remote_advertisement =
2551 + linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
2552 +
2553 +- lan743x_phy_update_flowcontrol(adapter,
2554 +- ksettings.base.duplex,
2555 +- local_advertisement,
2556 ++ lan743x_phy_update_flowcontrol(adapter, local_advertisement,
2557 + remote_advertisement);
2558 +- lan743x_ptp_update_latency(adapter, ksettings.base.speed);
2559 ++ lan743x_ptp_update_latency(adapter, phydev->speed);
2560 + }
2561 + }
2562 +
2563 +diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
2564 +index a08e4f530c1c1..00b5e6860bf69 100644
2565 +--- a/drivers/net/ethernet/mscc/ocelot.c
2566 ++++ b/drivers/net/ethernet/mscc/ocelot.c
2567 +@@ -1175,12 +1175,6 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
2568 + switch (cfg.rx_filter) {
2569 + case HWTSTAMP_FILTER_NONE:
2570 + break;
2571 +- case HWTSTAMP_FILTER_ALL:
2572 +- case HWTSTAMP_FILTER_SOME:
2573 +- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2574 +- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2575 +- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2576 +- case HWTSTAMP_FILTER_NTP_ALL:
2577 + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2578 + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2579 + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2580 +@@ -1299,7 +1293,10 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
2581 + SOF_TIMESTAMPING_RAW_HARDWARE;
2582 + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
2583 + BIT(HWTSTAMP_TX_ONESTEP_SYNC);
2584 +- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
2585 ++ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
2586 ++ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
2587 ++ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2588 ++ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
2589 +
2590 + return 0;
2591 + }
2592 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
2593 +index df203738511bf..0b1865e9f0b59 100644
2594 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
2595 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
2596 +@@ -565,7 +565,6 @@ struct nfp_net_dp {
2597 + * @exn_name: Name for Exception interrupt
2598 + * @shared_handler: Handler for shared interrupts
2599 + * @shared_name: Name for shared interrupt
2600 +- * @me_freq_mhz: ME clock_freq (MHz)
2601 + * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active,
2602 + * @reconfig_sync_present and HW reconfiguration request
2603 + * regs/machinery from async requests (sync must take
2604 +@@ -650,8 +649,6 @@ struct nfp_net {
2605 + irq_handler_t shared_handler;
2606 + char shared_name[IFNAMSIZ + 8];
2607 +
2608 +- u32 me_freq_mhz;
2609 +-
2610 + bool link_up;
2611 + spinlock_t link_status_lock;
2612 +
2613 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
2614 +index 0685ece1f155d..be1a358baadb9 100644
2615 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
2616 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
2617 +@@ -1343,7 +1343,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
2618 + * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
2619 + * count.
2620 + */
2621 +- factor = nn->me_freq_mhz / 16;
2622 ++ factor = nn->tlv_caps.me_freq_mhz / 16;
2623 +
2624 + /* Each pair of (usecs, max_frames) fields specifies that interrupts
2625 + * should be coalesced until
2626 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
2627 +index 43eead726886a..5f129733aabd2 100644
2628 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
2629 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
2630 +@@ -314,6 +314,7 @@ int stmmac_mdio_reset(struct mii_bus *mii);
2631 + int stmmac_xpcs_setup(struct mii_bus *mii);
2632 + void stmmac_set_ethtool_ops(struct net_device *netdev);
2633 +
2634 ++int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
2635 + void stmmac_ptp_register(struct stmmac_priv *priv);
2636 + void stmmac_ptp_unregister(struct stmmac_priv *priv);
2637 + int stmmac_open(struct net_device *dev);
2638 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2639 +index 0ab20e2f984b9..1cf94248c2217 100644
2640 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2641 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2642 +@@ -50,6 +50,13 @@
2643 + #include "dwxgmac2.h"
2644 + #include "hwif.h"
2645 +
2646 ++/* As long as the interface is active, we keep the timestamping counter enabled
2647 ++ * with fine resolution and binary rollover. This avoid non-monotonic behavior
2648 ++ * (clock jumps) when changing timestamping settings at runtime.
2649 ++ */
2650 ++#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
2651 ++ PTP_TCR_TSCTRLSSR)
2652 ++
2653 + #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
2654 + #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
2655 +
2656 +@@ -613,8 +620,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2657 + {
2658 + struct stmmac_priv *priv = netdev_priv(dev);
2659 + struct hwtstamp_config config;
2660 +- struct timespec64 now;
2661 +- u64 temp = 0;
2662 + u32 ptp_v2 = 0;
2663 + u32 tstamp_all = 0;
2664 + u32 ptp_over_ipv4_udp = 0;
2665 +@@ -623,11 +628,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2666 + u32 snap_type_sel = 0;
2667 + u32 ts_master_en = 0;
2668 + u32 ts_event_en = 0;
2669 +- u32 sec_inc = 0;
2670 +- u32 value = 0;
2671 +- bool xmac;
2672 +-
2673 +- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
2674 +
2675 + if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
2676 + netdev_alert(priv->dev, "No support for HW time stamping\n");
2677 +@@ -789,42 +789,17 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2678 + priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
2679 + priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
2680 +
2681 +- if (!priv->hwts_tx_en && !priv->hwts_rx_en)
2682 +- stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
2683 +- else {
2684 +- value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
2685 +- tstamp_all | ptp_v2 | ptp_over_ethernet |
2686 +- ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
2687 +- ts_master_en | snap_type_sel);
2688 +- stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
2689 +-
2690 +- /* program Sub Second Increment reg */
2691 +- stmmac_config_sub_second_increment(priv,
2692 +- priv->ptpaddr, priv->plat->clk_ptp_rate,
2693 +- xmac, &sec_inc);
2694 +- temp = div_u64(1000000000ULL, sec_inc);
2695 +-
2696 +- /* Store sub second increment and flags for later use */
2697 +- priv->sub_second_inc = sec_inc;
2698 +- priv->systime_flags = value;
2699 +-
2700 +- /* calculate default added value:
2701 +- * formula is :
2702 +- * addend = (2^32)/freq_div_ratio;
2703 +- * where, freq_div_ratio = 1e9ns/sec_inc
2704 +- */
2705 +- temp = (u64)(temp << 32);
2706 +- priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
2707 +- stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
2708 +-
2709 +- /* initialize system time */
2710 +- ktime_get_real_ts64(&now);
2711 ++ priv->systime_flags = STMMAC_HWTS_ACTIVE;
2712 +
2713 +- /* lower 32 bits of tv_sec are safe until y2106 */
2714 +- stmmac_init_systime(priv, priv->ptpaddr,
2715 +- (u32)now.tv_sec, now.tv_nsec);
2716 ++ if (priv->hwts_tx_en || priv->hwts_rx_en) {
2717 ++ priv->systime_flags |= tstamp_all | ptp_v2 |
2718 ++ ptp_over_ethernet | ptp_over_ipv6_udp |
2719 ++ ptp_over_ipv4_udp | ts_event_en |
2720 ++ ts_master_en | snap_type_sel;
2721 + }
2722 +
2723 ++ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
2724 ++
2725 + memcpy(&priv->tstamp_config, &config, sizeof(config));
2726 +
2727 + return copy_to_user(ifr->ifr_data, &config,
2728 +@@ -852,6 +827,66 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2729 + sizeof(*config)) ? -EFAULT : 0;
2730 + }
2731 +
2732 ++/**
2733 ++ * stmmac_init_tstamp_counter - init hardware timestamping counter
2734 ++ * @priv: driver private structure
2735 ++ * @systime_flags: timestamping flags
2736 ++ * Description:
2737 ++ * Initialize hardware counter for packet timestamping.
2738 ++ * This is valid as long as the interface is open and not suspended.
2739 ++ * Will be rerun after resuming from suspend, case in which the timestamping
2740 ++ * flags updated by stmmac_hwtstamp_set() also need to be restored.
2741 ++ */
2742 ++int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
2743 ++{
2744 ++ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
2745 ++ struct timespec64 now;
2746 ++ u32 sec_inc = 0;
2747 ++ u64 temp = 0;
2748 ++ int ret;
2749 ++
2750 ++ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
2751 ++ return -EOPNOTSUPP;
2752 ++
2753 ++ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2754 ++ if (ret < 0) {
2755 ++ netdev_warn(priv->dev,
2756 ++ "failed to enable PTP reference clock: %pe\n",
2757 ++ ERR_PTR(ret));
2758 ++ return ret;
2759 ++ }
2760 ++
2761 ++ stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
2762 ++ priv->systime_flags = systime_flags;
2763 ++
2764 ++ /* program Sub Second Increment reg */
2765 ++ stmmac_config_sub_second_increment(priv, priv->ptpaddr,
2766 ++ priv->plat->clk_ptp_rate,
2767 ++ xmac, &sec_inc);
2768 ++ temp = div_u64(1000000000ULL, sec_inc);
2769 ++
2770 ++ /* Store sub second increment for later use */
2771 ++ priv->sub_second_inc = sec_inc;
2772 ++
2773 ++ /* calculate default added value:
2774 ++ * formula is :
2775 ++ * addend = (2^32)/freq_div_ratio;
2776 ++ * where, freq_div_ratio = 1e9ns/sec_inc
2777 ++ */
2778 ++ temp = (u64)(temp << 32);
2779 ++ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
2780 ++ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
2781 ++
2782 ++ /* initialize system time */
2783 ++ ktime_get_real_ts64(&now);
2784 ++
2785 ++ /* lower 32 bits of tv_sec are safe until y2106 */
2786 ++ stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
2787 ++
2788 ++ return 0;
2789 ++}
2790 ++EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
2791 ++
2792 + /**
2793 + * stmmac_init_ptp - init PTP
2794 + * @priv: driver private structure
2795 +@@ -862,9 +897,11 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2796 + static int stmmac_init_ptp(struct stmmac_priv *priv)
2797 + {
2798 + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
2799 ++ int ret;
2800 +
2801 +- if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
2802 +- return -EOPNOTSUPP;
2803 ++ ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
2804 ++ if (ret)
2805 ++ return ret;
2806 +
2807 + priv->adv_ts = 0;
2808 + /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
2809 +@@ -3268,10 +3305,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2810 + stmmac_mmc_setup(priv);
2811 +
2812 + if (init_ptp) {
2813 +- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2814 +- if (ret < 0)
2815 +- netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2816 +-
2817 + ret = stmmac_init_ptp(priv);
2818 + if (ret == -EOPNOTSUPP)
2819 + netdev_warn(priv->dev, "PTP not supported by HW\n");
2820 +@@ -3761,6 +3794,8 @@ int stmmac_release(struct net_device *dev)
2821 + struct stmmac_priv *priv = netdev_priv(dev);
2822 + u32 chan;
2823 +
2824 ++ netif_tx_disable(dev);
2825 ++
2826 + if (device_may_wakeup(priv->device))
2827 + phylink_speed_down(priv->phylink, false);
2828 + /* Stop and disconnect the PHY */
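
For reference, the addend arithmetic consolidated into stmmac_init_tstamp_counter() above can be checked in isolation: the sub-second increment defines the nominal tick, and the 32-bit addend scales the PTP reference clock to that rate. A minimal userspace sketch with assumed example values (20 ns increment, 62.5 MHz reference clock; not taken from any particular board):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sec_inc = 20;            /* ns per counter tick (assumed) */
	uint64_t clk_ptp_rate = 62500000; /* Hz (assumed) */

	/* freq_div_ratio = 1e9 ns / sec_inc: the target tick rate in Hz */
	uint64_t freq = 1000000000ULL / sec_inc;

	/* addend = 2^32 * f_target / f_clk; the hardware accumulates it
	 * every reference-clock cycle so the counter runs at f_target. */
	uint64_t addend = (freq << 32) / clk_ptp_rate;

	printf("target %llu Hz, addend 0x%08llx\n",
	       (unsigned long long)freq, (unsigned long long)addend);
	return 0;
}
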
2829 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2830 +index 232ac98943cd0..5d29f336315b7 100644
2831 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2832 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2833 +@@ -816,7 +816,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
2834 + if (ret)
2835 + return ret;
2836 +
2837 +- clk_prepare_enable(priv->plat->clk_ptp_ref);
2838 ++ stmmac_init_tstamp_counter(priv, priv->systime_flags);
2839 + }
2840 +
2841 + return 0;
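
The resume path above now replays the cached priv->systime_flags through stmmac_init_tstamp_counter() instead of merely re-enabling the PTP clock, so the timestamping registers lost across suspend are reprogrammed. The save/replay shape in miniature (the structure and helpers here are hypothetical):

#include <stdint.h>
#include <stdio.h>

struct dev_state {
	uint32_t systime_flags; /* cached when userspace configures timestamping */
};

static void hw_program_tstamp(uint32_t flags)
{
	printf("timestamp control <- 0x%08x\n", flags);
}

static void configure(struct dev_state *d, uint32_t flags)
{
	d->systime_flags = flags; /* remember the requested configuration */
	hw_program_tstamp(flags);
}

static void resume(struct dev_state *d)
{
	hw_program_tstamp(d->systime_flags); /* registers were lost in suspend */
}

int main(void)
{
	struct dev_state d;

	configure(&d, 0x1); /* assumed "active" flag */
	resume(&d);
	return 0;
}
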
2842 +diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
2843 +index cff51731195aa..d57472ea077f2 100644
2844 +--- a/drivers/net/ipa/ipa_cmd.c
2845 ++++ b/drivers/net/ipa/ipa_cmd.c
2846 +@@ -661,22 +661,6 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
2847 + wait_for_completion(&ipa->completion);
2848 + }
2849 +
2850 +-void ipa_cmd_pipeline_clear(struct ipa *ipa)
2851 +-{
2852 +- u32 count = ipa_cmd_pipeline_clear_count();
2853 +- struct gsi_trans *trans;
2854 +-
2855 +- trans = ipa_cmd_trans_alloc(ipa, count);
2856 +- if (trans) {
2857 +- ipa_cmd_pipeline_clear_add(trans);
2858 +- gsi_trans_commit_wait(trans);
2859 +- ipa_cmd_pipeline_clear_wait(ipa);
2860 +- } else {
2861 +- dev_err(&ipa->pdev->dev,
2862 +- "error allocating %u entry tag transaction\n", count);
2863 +- }
2864 +-}
2865 +-
2866 + static struct ipa_cmd_info *
2867 + ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
2868 + {
2869 +diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
2870 +index 69cd085d427db..05ed7e42e1842 100644
2871 +--- a/drivers/net/ipa/ipa_cmd.h
2872 ++++ b/drivers/net/ipa/ipa_cmd.h
2873 +@@ -163,12 +163,6 @@ u32 ipa_cmd_pipeline_clear_count(void);
2874 + */
2875 + void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
2876 +
2877 +-/**
2878 +- * ipa_cmd_pipeline_clear() - Clear the hardware pipeline
2879 +- * @ipa: - IPA pointer
2880 +- */
2881 +-void ipa_cmd_pipeline_clear(struct ipa *ipa);
2882 +-
2883 + /**
2884 + * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
2885 + * @ipa: IPA pointer
2886 +diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
2887 +index ef790fd0ab56a..03a1709934208 100644
2888 +--- a/drivers/net/ipa/ipa_endpoint.c
2889 ++++ b/drivers/net/ipa/ipa_endpoint.c
2890 +@@ -1636,8 +1636,6 @@ void ipa_endpoint_suspend(struct ipa *ipa)
2891 + if (ipa->modem_netdev)
2892 + ipa_modem_suspend(ipa->modem_netdev);
2893 +
2894 +- ipa_cmd_pipeline_clear(ipa);
2895 +-
2896 + ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
2897 + ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
2898 + }
2899 +diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
2900 +index cdfa98a76e1f4..a448ec198bee1 100644
2901 +--- a/drivers/net/ipa/ipa_main.c
2902 ++++ b/drivers/net/ipa/ipa_main.c
2903 +@@ -28,6 +28,7 @@
2904 + #include "ipa_reg.h"
2905 + #include "ipa_mem.h"
2906 + #include "ipa_table.h"
2907 ++#include "ipa_smp2p.h"
2908 + #include "ipa_modem.h"
2909 + #include "ipa_uc.h"
2910 + #include "ipa_interrupt.h"
2911 +@@ -801,6 +802,11 @@ static int ipa_remove(struct platform_device *pdev)
2912 + struct device *dev = &pdev->dev;
2913 + int ret;
2914 +
2915 ++ /* Prevent the modem from triggering a call to ipa_setup(). This
2916 ++ * also ensures a modem-initiated setup that's underway completes.
2917 ++ */
2918 ++ ipa_smp2p_irq_disable_setup(ipa);
2919 ++
2920 + ret = pm_runtime_get_sync(dev);
2921 + if (WARN_ON(ret < 0))
2922 + goto out_power_put;
2923 +diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
2924 +index ad116bcc0580e..d0ab4d70c303b 100644
2925 +--- a/drivers/net/ipa/ipa_modem.c
2926 ++++ b/drivers/net/ipa/ipa_modem.c
2927 +@@ -339,9 +339,6 @@ int ipa_modem_stop(struct ipa *ipa)
2928 + if (state != IPA_MODEM_STATE_RUNNING)
2929 + return -EBUSY;
2930 +
2931 +- /* Prevent the modem from triggering a call to ipa_setup() */
2932 +- ipa_smp2p_disable(ipa);
2933 +-
2934 + /* Clean up the netdev and endpoints if it was started */
2935 + if (netdev) {
2936 + struct ipa_priv *priv = netdev_priv(netdev);
2937 +@@ -369,6 +366,9 @@ static void ipa_modem_crashed(struct ipa *ipa)
2938 + struct device *dev = &ipa->pdev->dev;
2939 + int ret;
2940 +
2941 ++ /* Prevent the modem from triggering a call to ipa_setup() */
2942 ++ ipa_smp2p_irq_disable_setup(ipa);
2943 ++
2944 + ret = pm_runtime_get_sync(dev);
2945 + if (ret < 0) {
2946 + dev_err(dev, "error %d getting power to handle crash\n", ret);
2947 +diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
2948 +index df7639c39d716..2112336120391 100644
2949 +--- a/drivers/net/ipa/ipa_smp2p.c
2950 ++++ b/drivers/net/ipa/ipa_smp2p.c
2951 +@@ -53,7 +53,7 @@
2952 + * @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready
2953 + * @power_on: Whether IPA power is on
2954 + * @notified: Whether modem has been notified of power state
2955 +- * @disabled: Whether setup ready interrupt handling is disabled
2956 ++ * @setup_disabled: Whether setup ready interrupt handler is disabled
2957 + * @mutex: Mutex protecting ready-interrupt/shutdown interlock
2958 + * @panic_notifier: Panic notifier structure
2959 + */
2960 +@@ -67,7 +67,7 @@ struct ipa_smp2p {
2961 + u32 setup_ready_irq;
2962 + bool power_on;
2963 + bool notified;
2964 +- bool disabled;
2965 ++ bool setup_disabled;
2966 + struct mutex mutex;
2967 + struct notifier_block panic_notifier;
2968 + };
2969 +@@ -155,11 +155,9 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
2970 + struct device *dev;
2971 + int ret;
2972 +
2973 +- mutex_lock(&smp2p->mutex);
2974 +-
2975 +- if (smp2p->disabled)
2976 +- goto out_mutex_unlock;
2977 +- smp2p->disabled = true; /* If any others arrive, ignore them */
2978 ++ /* Ignore any (spurious) interrupts received after the first */
2979 ++ if (smp2p->ipa->setup_complete)
2980 ++ return IRQ_HANDLED;
2981 +
2982 + /* Power needs to be active for setup */
2983 + dev = &smp2p->ipa->pdev->dev;
2984 +@@ -176,8 +174,6 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
2985 + out_power_put:
2986 + pm_runtime_mark_last_busy(dev);
2987 + (void)pm_runtime_put_autosuspend(dev);
2988 +-out_mutex_unlock:
2989 +- mutex_unlock(&smp2p->mutex);
2990 +
2991 + return IRQ_HANDLED;
2992 + }
2993 +@@ -313,7 +309,7 @@ void ipa_smp2p_exit(struct ipa *ipa)
2994 + kfree(smp2p);
2995 + }
2996 +
2997 +-void ipa_smp2p_disable(struct ipa *ipa)
2998 ++void ipa_smp2p_irq_disable_setup(struct ipa *ipa)
2999 + {
3000 + struct ipa_smp2p *smp2p = ipa->smp2p;
3001 +
3002 +@@ -322,7 +318,10 @@ void ipa_smp2p_disable(struct ipa *ipa)
3003 +
3004 + mutex_lock(&smp2p->mutex);
3005 +
3006 +- smp2p->disabled = true;
3007 ++ if (!smp2p->setup_disabled) {
3008 ++ disable_irq(smp2p->setup_ready_irq);
3009 ++ smp2p->setup_disabled = true;
3010 ++ }
3011 +
3012 + mutex_unlock(&smp2p->mutex);
3013 + }
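
The smp2p rework above replaces the flag the ISR used to check with disabling the interrupt itself, exactly once, under the mutex; the ISR now only filters duplicates via setup_complete. A pthread-based sketch of the idempotent-disable pattern (disable_irq_stub() is a stand-in, not the kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool setup_disabled;

static void disable_irq_stub(int irq)
{
	/* disable_irq() nests in the kernel; repeating it without a
	 * matching enable would leave the line disabled one level deep. */
	printf("irq %d disabled\n", irq);
}

void irq_disable_setup(int irq)
{
	pthread_mutex_lock(&lock);
	if (!setup_disabled) { /* guard: disable exactly once */
		disable_irq_stub(irq);
		setup_disabled = true;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	irq_disable_setup(42);
	irq_disable_setup(42); /* second call is a no-op */
	return 0;
}
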
3014 +diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
3015 +index 99a9567896388..59cee31a73836 100644
3016 +--- a/drivers/net/ipa/ipa_smp2p.h
3017 ++++ b/drivers/net/ipa/ipa_smp2p.h
3018 +@@ -27,13 +27,12 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
3019 + void ipa_smp2p_exit(struct ipa *ipa);
3020 +
3021 + /**
3022 +- * ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling
3023 ++ * ipa_smp2p_irq_disable_setup() - Disable the "setup ready" interrupt
3024 + * @ipa: IPA pointer
3025 + *
3026 +- * Prevent handling of the "setup ready" interrupt from the modem.
3027 +- * This is used before initiating shutdown of the driver.
3028 ++ * Disable the "ipa-setup-ready" interrupt from the modem.
3029 + */
3030 +-void ipa_smp2p_disable(struct ipa *ipa);
3031 ++void ipa_smp2p_irq_disable_setup(struct ipa *ipa);
3032 +
3033 + /**
3034 + * ipa_smp2p_notify_reset() - Reset modem notification state
3035 +diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
3036 +index cad820568f751..966c3b4ad59d1 100644
3037 +--- a/drivers/net/mdio/mdio-aspeed.c
3038 ++++ b/drivers/net/mdio/mdio-aspeed.c
3039 +@@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
3040 +
3041 + iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
3042 +
3043 ++ rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
3044 ++ !(ctrl & ASPEED_MDIO_CTRL_FIRE),
3045 ++ ASPEED_MDIO_INTERVAL_US,
3046 ++ ASPEED_MDIO_TIMEOUT_US);
3047 ++ if (rc < 0)
3048 ++ return rc;
3049 ++
3050 + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
3051 + data & ASPEED_MDIO_DATA_IDLE,
3052 + ASPEED_MDIO_INTERVAL_US,
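
The aspeed MDIO fix adds a second polling stage: the read path now waits for the FIRE bit to clear before polling the data register for the idle/valid flag. A rough userspace analogue of a two-stage poll with simulated registers (the helper names are illustrative, not the readl_poll_timeout() API):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Simulated register: reads as busy until the countdown expires. */
static uint32_t fake_read(int *countdown)
{
	return (*countdown)-- > 0 ? 0x1 : 0x0;
}

static int is_idle(uint32_t v)
{
	return (v & 0x1) == 0;
}

static int poll_timeout(int *reg, int (*cond)(uint32_t),
			int interval_us, int timeout_us)
{
	for (int waited = 0; waited <= timeout_us; waited += interval_us) {
		if (cond(fake_read(reg)))
			return 0;
		usleep(interval_us);
	}
	return -1; /* timed out */
}

int main(void)
{
	int ctrl = 3, data = 5;

	if (poll_timeout(&ctrl, is_idle, 100, 10000)) /* stage 1: FIRE clear */
		return 1;
	if (poll_timeout(&data, is_idle, 100, 10000)) /* stage 2: DATA idle */
		return 1;
	puts("read complete");
	return 0;
}
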
3053 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
3054 +index 7ec3105010ac1..fef1416dcee4c 100644
3055 +--- a/drivers/net/phy/phylink.c
3056 ++++ b/drivers/net/phy/phylink.c
3057 +@@ -657,6 +657,7 @@ static void phylink_resolve(struct work_struct *w)
3058 + struct phylink_link_state link_state;
3059 + struct net_device *ndev = pl->netdev;
3060 + bool mac_config = false;
3061 ++ bool retrigger = false;
3062 + bool cur_link_state;
3063 +
3064 + mutex_lock(&pl->state_mutex);
3065 +@@ -670,6 +671,7 @@ static void phylink_resolve(struct work_struct *w)
3066 + link_state.link = false;
3067 + } else if (pl->mac_link_dropped) {
3068 + link_state.link = false;
3069 ++ retrigger = true;
3070 + } else {
3071 + switch (pl->cur_link_an_mode) {
3072 + case MLO_AN_PHY:
3073 +@@ -686,6 +688,19 @@ static void phylink_resolve(struct work_struct *w)
3074 + case MLO_AN_INBAND:
3075 + phylink_mac_pcs_get_state(pl, &link_state);
3076 +
3077 ++ /* The PCS may have a latching link-fail indicator.
3078 ++ * If the link was up, bring the link down and
3079 ++ * re-trigger the resolve. Otherwise, re-read the
3080 ++ * PCS state to get the current status of the link.
3081 ++ */
3082 ++ if (!link_state.link) {
3083 ++ if (cur_link_state)
3084 ++ retrigger = true;
3085 ++ else
3086 ++ phylink_mac_pcs_get_state(pl,
3087 ++ &link_state);
3088 ++ }
3089 ++
3090 + /* If we have a phy, the "up" state is the union of
3091 + * both the PHY and the MAC
3092 + */
3093 +@@ -694,6 +709,15 @@ static void phylink_resolve(struct work_struct *w)
3094 +
3095 + /* Only update if the PHY link is up */
3096 + if (pl->phydev && pl->phy_state.link) {
3097 ++ /* If the interface has changed, force a
3098 ++ * link down event if the link isn't already
3099 ++ * down, and re-resolve.
3100 ++ */
3101 ++ if (link_state.interface !=
3102 ++ pl->phy_state.interface) {
3103 ++ retrigger = true;
3104 ++ link_state.link = false;
3105 ++ }
3106 + link_state.interface = pl->phy_state.interface;
3107 +
3108 + /* If we have a PHY, we need to update with
3109 +@@ -736,7 +760,7 @@ static void phylink_resolve(struct work_struct *w)
3110 + else
3111 + phylink_link_up(pl, link_state);
3112 + }
3113 +- if (!link_state.link && pl->mac_link_dropped) {
3114 ++ if (!link_state.link && retrigger) {
3115 + pl->mac_link_dropped = false;
3116 + queue_work(system_power_efficient_wq, &pl->resolve);
3117 + }
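
A PCS can expose a latching-low link bit: after any momentary link loss it reads as down once, and only the next read reflects the live state. The resolve logic above therefore reads once, reports a drop (and retriggers) if the link was believed up, and otherwise reads a second time to get the current status. A toy model of such a register, purely illustrative:

#include <stdbool.h>
#include <stdio.h>

struct pcs {
	bool live_link;    /* the actual link state */
	bool latched_fail; /* sticky: set on any link loss, cleared by read */
};

static bool pcs_read_link(struct pcs *p)
{
	if (p->latched_fail) {
		p->latched_fail = false;
		return false;
	}
	return p->live_link;
}

static bool resolve(struct pcs *p, bool cur_link_up)
{
	bool link = pcs_read_link(p);

	if (!link) {
		if (cur_link_up)
			return false;    /* report the drop; caller retriggers */
		link = pcs_read_link(p); /* latch now cleared: live state */
	}
	return link;
}

int main(void)
{
	struct pcs p = { .live_link = true, .latched_fail = true };

	/* Link believed down and only the latch is set: the second
	 * read observes the live "up" state. */
	printf("resolved link: %s\n", resolve(&p, false) ? "up" : "down");
	return 0;
}
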
3118 +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
3119 +index 26b1bd8e845b4..f91dabd65ecd8 100644
3120 +--- a/drivers/net/usb/smsc95xx.c
3121 ++++ b/drivers/net/usb/smsc95xx.c
3122 +@@ -1049,6 +1049,14 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
3123 + .ndo_set_features = smsc95xx_set_features,
3124 + };
3125 +
3126 ++static void smsc95xx_handle_link_change(struct net_device *net)
3127 ++{
3128 ++ struct usbnet *dev = netdev_priv(net);
3129 ++
3130 ++ phy_print_status(net->phydev);
3131 ++ usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
3132 ++}
3133 ++
3134 + static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
3135 + {
3136 + struct smsc95xx_priv *pdata;
3137 +@@ -1153,6 +1161,17 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
3138 + dev->net->min_mtu = ETH_MIN_MTU;
3139 + dev->net->max_mtu = ETH_DATA_LEN;
3140 + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3141 ++
3142 ++ ret = phy_connect_direct(dev->net, pdata->phydev,
3143 ++ &smsc95xx_handle_link_change,
3144 ++ PHY_INTERFACE_MODE_MII);
3145 ++ if (ret) {
3146 ++ netdev_err(dev->net, "can't attach PHY to %s\n", pdata->mdiobus->id);
3147 ++ goto unregister_mdio;
3148 ++ }
3149 ++
3150 ++ phy_attached_info(dev->net->phydev);
3151 ++
3152 + return 0;
3153 +
3154 + unregister_mdio:
3155 +@@ -1170,47 +1189,25 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
3156 + {
3157 + struct smsc95xx_priv *pdata = dev->driver_priv;
3158 +
3159 ++ phy_disconnect(dev->net->phydev);
3160 + mdiobus_unregister(pdata->mdiobus);
3161 + mdiobus_free(pdata->mdiobus);
3162 + netif_dbg(dev, ifdown, dev->net, "free pdata\n");
3163 + kfree(pdata);
3164 + }
3165 +
3166 +-static void smsc95xx_handle_link_change(struct net_device *net)
3167 +-{
3168 +- struct usbnet *dev = netdev_priv(net);
3169 +-
3170 +- phy_print_status(net->phydev);
3171 +- usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
3172 +-}
3173 +-
3174 + static int smsc95xx_start_phy(struct usbnet *dev)
3175 + {
3176 +- struct smsc95xx_priv *pdata = dev->driver_priv;
3177 +- struct net_device *net = dev->net;
3178 +- int ret;
3179 ++ phy_start(dev->net->phydev);
3180 +
3181 +- ret = smsc95xx_reset(dev);
3182 +- if (ret < 0)
3183 +- return ret;
3184 +-
3185 +- ret = phy_connect_direct(net, pdata->phydev,
3186 +- &smsc95xx_handle_link_change,
3187 +- PHY_INTERFACE_MODE_MII);
3188 +- if (ret) {
3189 +- netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id);
3190 +- return ret;
3191 +- }
3192 +-
3193 +- phy_attached_info(net->phydev);
3194 +- phy_start(net->phydev);
3195 + return 0;
3196 + }
3197 +
3198 +-static int smsc95xx_disconnect_phy(struct usbnet *dev)
3199 ++static int smsc95xx_stop(struct usbnet *dev)
3200 + {
3201 +- phy_stop(dev->net->phydev);
3202 +- phy_disconnect(dev->net->phydev);
3203 ++ if (dev->net->phydev)
3204 ++ phy_stop(dev->net->phydev);
3205 ++
3206 + return 0;
3207 + }
3208 +
3209 +@@ -1965,7 +1962,7 @@ static const struct driver_info smsc95xx_info = {
3210 + .unbind = smsc95xx_unbind,
3211 + .link_reset = smsc95xx_link_reset,
3212 + .reset = smsc95xx_start_phy,
3213 +- .stop = smsc95xx_disconnect_phy,
3214 ++ .stop = smsc95xx_stop,
3215 + .rx_fixup = smsc95xx_rx_fixup,
3216 + .tx_fixup = smsc95xx_tx_fixup,
3217 + .status = smsc95xx_status,
3218 +diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
3219 +index 1dd1a0fe2e819..df7e033dd2732 100644
3220 +--- a/drivers/nvme/target/io-cmd-file.c
3221 ++++ b/drivers/nvme/target/io-cmd-file.c
3222 +@@ -8,6 +8,7 @@
3223 + #include <linux/uio.h>
3224 + #include <linux/falloc.h>
3225 + #include <linux/file.h>
3226 ++#include <linux/fs.h>
3227 + #include "nvmet.h"
3228 +
3229 + #define NVMET_MAX_MPOOL_BVEC 16
3230 +@@ -266,7 +267,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
3231 +
3232 + if (req->ns->buffered_io) {
3233 + if (likely(!req->f.mpool_alloc) &&
3234 +- nvmet_file_execute_io(req, IOCB_NOWAIT))
3235 ++ (req->ns->file->f_mode & FMODE_NOWAIT) &&
3236 ++ nvmet_file_execute_io(req, IOCB_NOWAIT))
3237 + return;
3238 + nvmet_file_submit_buffered_io(req);
3239 + } else
3240 +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
3241 +index 84c387e4bf431..2b8bab28417b8 100644
3242 +--- a/drivers/nvme/target/tcp.c
3243 ++++ b/drivers/nvme/target/tcp.c
3244 +@@ -700,10 +700,11 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
3245 + static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
3246 + {
3247 + struct nvmet_tcp_queue *queue = cmd->queue;
3248 ++ int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
3249 + struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
3250 + struct kvec iov = {
3251 + .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
3252 +- .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
3253 ++ .iov_len = left
3254 + };
3255 + int ret;
3256 +
3257 +@@ -717,6 +718,10 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
3258 + return ret;
3259 +
3260 + cmd->offset += ret;
3261 ++ left -= ret;
3262 ++
3263 ++ if (left)
3264 ++ return -EAGAIN;
3265 +
3266 + if (queue->nvme_sq.sqhd_disabled) {
3267 + cmd->queue->snd_cmd = NULL;
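
kernel_sendmsg(), like send(2), may transmit fewer bytes than requested; the hunk above tracks the shortfall and returns -EAGAIN so the digest keeps being sent from the updated offset instead of being treated as complete. The accounting in isolation, with a mock sender that writes at most two bytes per call:

#include <errno.h>
#include <stdio.h>

#define DIGEST_LEN 4

static const char digest[DIGEST_LEN] = "abcd";

static int mock_send(const char *buf, int len)
{
	int n = len > 2 ? 2 : len;

	printf("sent %.*s\n", n, buf);
	return n;
}

/* Returns 0 once the whole digest went out, -EAGAIN otherwise. */
static int try_send_ddgst(int *offset)
{
	int left = DIGEST_LEN - *offset;
	int ret = mock_send(digest + *offset, left);

	if (ret <= 0)
		return ret;

	*offset += ret;
	left -= ret;
	if (left)
		return -EAGAIN; /* caller retries from the new offset */
	return 0;
}

int main(void)
{
	int offset = 0, ret;

	while ((ret = try_send_ddgst(&offset)) == -EAGAIN)
		;
	return ret;
}
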
3268 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
3269 +index b051d127f0aff..c5300d49807a2 100644
3270 +--- a/drivers/pci/controller/pci-aardvark.c
3271 ++++ b/drivers/pci/controller/pci-aardvark.c
3272 +@@ -299,11 +299,6 @@ static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
3273 + return readl(pcie->base + reg);
3274 + }
3275 +
3276 +-static inline u16 advk_read16(struct advk_pcie *pcie, u64 reg)
3277 +-{
3278 +- return advk_readl(pcie, (reg & ~0x3)) >> ((reg & 0x3) * 8);
3279 +-}
3280 +-
3281 + static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
3282 + {
3283 + u32 val;
3284 +@@ -377,23 +372,9 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
3285 +
3286 + static void advk_pcie_issue_perst(struct advk_pcie *pcie)
3287 + {
3288 +- u32 reg;
3289 +-
3290 + if (!pcie->reset_gpio)
3291 + return;
3292 +
3293 +- /*
3294 +- * As required by PCI Express spec (PCI Express Base Specification, REV.
3295 +- * 4.0 PCI Express, February 19 2014, 6.6.1 Conventional Reset) a delay
3296 +- * for at least 100ms after de-asserting PERST# signal is needed before
3297 +- * link training is enabled. So ensure that link training is disabled
3298 +- * prior de-asserting PERST# signal to fulfill that PCI Express spec
3299 +- * requirement.
3300 +- */
3301 +- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
3302 +- reg &= ~LINK_TRAINING_EN;
3303 +- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
3304 +-
3305 + /* 10ms delay is needed for some cards */
3306 + dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
3307 + gpiod_set_value_cansleep(pcie->reset_gpio, 1);
3308 +@@ -401,53 +382,46 @@ static void advk_pcie_issue_perst(struct advk_pcie *pcie)
3309 + gpiod_set_value_cansleep(pcie->reset_gpio, 0);
3310 + }
3311 +
3312 +-static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen)
3313 ++static void advk_pcie_train_link(struct advk_pcie *pcie)
3314 + {
3315 +- int ret, neg_gen;
3316 ++ struct device *dev = &pcie->pdev->dev;
3317 + u32 reg;
3318 ++ int ret;
3319 +
3320 +- /* Setup link speed */
3321 ++ /*
3322 ++ * Setup PCIe rev / gen compliance based on device tree property
3323 ++ * 'max-link-speed' which also forces maximal link speed.
3324 ++ */
3325 + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
3326 + reg &= ~PCIE_GEN_SEL_MSK;
3327 +- if (gen == 3)
3328 ++ if (pcie->link_gen == 3)
3329 + reg |= SPEED_GEN_3;
3330 +- else if (gen == 2)
3331 ++ else if (pcie->link_gen == 2)
3332 + reg |= SPEED_GEN_2;
3333 + else
3334 + reg |= SPEED_GEN_1;
3335 + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
3336 +
3337 + /*
3338 +- * Enable link training. This is not needed in every call to this
3339 +- * function, just once suffices, but it does not break anything either.
3340 ++ * Set maximal link speed value also into PCIe Link Control 2 register.
3341 ++ * Armada 3700 Functional Specification says that default value is based
3342 ++ * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
3343 + */
3344 ++ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
3345 ++ reg &= ~PCI_EXP_LNKCTL2_TLS;
3346 ++ if (pcie->link_gen == 3)
3347 ++ reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
3348 ++ else if (pcie->link_gen == 2)
3349 ++ reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
3350 ++ else
3351 ++ reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
3352 ++ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
3353 ++
3354 ++ /* Enable link training after selecting PCIe generation */
3355 + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
3356 + reg |= LINK_TRAINING_EN;
3357 + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
3358 +
3359 +- /*
3360 +- * Start link training immediately after enabling it.
3361 +- * This solves problems for some buggy cards.
3362 +- */
3363 +- reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
3364 +- reg |= PCI_EXP_LNKCTL_RL;
3365 +- advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
3366 +-
3367 +- ret = advk_pcie_wait_for_link(pcie);
3368 +- if (ret)
3369 +- return ret;
3370 +-
3371 +- reg = advk_read16(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKSTA);
3372 +- neg_gen = reg & PCI_EXP_LNKSTA_CLS;
3373 +-
3374 +- return neg_gen;
3375 +-}
3376 +-
3377 +-static void advk_pcie_train_link(struct advk_pcie *pcie)
3378 +-{
3379 +- struct device *dev = &pcie->pdev->dev;
3380 +- int neg_gen = -1, gen;
3381 +-
3382 + /*
3383 + * Reset PCIe card via PERST# signal. Some cards are not detected
3384 + * during link training when they are in some non-initial state.
3385 +@@ -458,41 +432,18 @@ static void advk_pcie_train_link(struct advk_pcie *pcie)
3386 + * PERST# signal could have been asserted by pinctrl subsystem before
3387 + * probe() callback has been called or issued explicitly by reset gpio
3388 + * function advk_pcie_issue_perst(), making the endpoint going into
3389 +- * fundamental reset. As required by PCI Express spec a delay for at
3390 +- * least 100ms after such a reset before link training is needed.
3391 +- */
3392 +- msleep(PCI_PM_D3COLD_WAIT);
3393 +-
3394 +- /*
3395 +- * Try link training at link gen specified by device tree property
3396 +- * 'max-link-speed'. If this fails, iteratively train at lower gen.
3397 +- */
3398 +- for (gen = pcie->link_gen; gen > 0; --gen) {
3399 +- neg_gen = advk_pcie_train_at_gen(pcie, gen);
3400 +- if (neg_gen > 0)
3401 +- break;
3402 +- }
3403 +-
3404 +- if (neg_gen < 0)
3405 +- goto err;
3406 +-
3407 +- /*
3408 +- * After successful training if negotiated gen is lower than requested,
3409 +- * train again on negotiated gen. This solves some stability issues for
3410 +- * some buggy gen1 cards.
3411 ++ * fundamental reset. As required by PCI Express spec (PCI Express
3412 ++ * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
3413 ++ * Conventional Reset) a delay for at least 100ms after such a reset
3414 ++ * before sending a Configuration Request to the device is needed.
3415 ++ * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
3416 ++ * waits for link at least 900ms.
3417 + */
3418 +- if (neg_gen < gen) {
3419 +- gen = neg_gen;
3420 +- neg_gen = advk_pcie_train_at_gen(pcie, gen);
3421 +- }
3422 +-
3423 +- if (neg_gen == gen) {
3424 +- dev_info(dev, "link up at gen %i\n", gen);
3425 +- return;
3426 +- }
3427 +-
3428 +-err:
3429 +- dev_err(dev, "link never came up\n");
3430 ++ ret = advk_pcie_wait_for_link(pcie);
3431 ++ if (ret < 0)
3432 ++ dev_err(dev, "link never came up\n");
3433 ++ else
3434 ++ dev_info(dev, "link up\n");
3435 + }
3436 +
3437 + /*
3438 +@@ -692,6 +643,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
3439 + u32 reg;
3440 + unsigned int status;
3441 + char *strcomp_status, *str_posted;
3442 ++ int ret;
3443 +
3444 + reg = advk_readl(pcie, PIO_STAT);
3445 + status = (reg & PIO_COMPLETION_STATUS_MASK) >>
3446 +@@ -716,6 +668,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
3447 + case PIO_COMPLETION_STATUS_OK:
3448 + if (reg & PIO_ERR_STATUS) {
3449 + strcomp_status = "COMP_ERR";
3450 ++ ret = -EFAULT;
3451 + break;
3452 + }
3453 + /* Get the read result */
3454 +@@ -723,9 +676,11 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
3455 + *val = advk_readl(pcie, PIO_RD_DATA);
3456 + /* No error */
3457 + strcomp_status = NULL;
3458 ++ ret = 0;
3459 + break;
3460 + case PIO_COMPLETION_STATUS_UR:
3461 + strcomp_status = "UR";
3462 ++ ret = -EOPNOTSUPP;
3463 + break;
3464 + case PIO_COMPLETION_STATUS_CRS:
3465 + if (allow_crs && val) {
3466 +@@ -743,6 +698,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
3467 + */
3468 + *val = CFG_RD_CRS_VAL;
3469 + strcomp_status = NULL;
3470 ++ ret = 0;
3471 + break;
3472 + }
3473 + /* PCIe r4.0, sec 2.3.2, says:
3474 +@@ -758,21 +714,24 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
3475 + * Request and taking appropriate action, e.g., complete the
3476 + * Request to the host as a failed transaction.
3477 + *
3478 +- * To simplify implementation do not re-issue the Configuration
3479 +- * Request and complete the Request as a failed transaction.
3480 ++ * So return -EAGAIN and caller (pci-aardvark.c driver) will
3481 ++ * re-issue request again up to the PIO_RETRY_CNT retries.
3482 + */
3483 + strcomp_status = "CRS";
3484 ++ ret = -EAGAIN;
3485 + break;
3486 + case PIO_COMPLETION_STATUS_CA:
3487 + strcomp_status = "CA";
3488 ++ ret = -ECANCELED;
3489 + break;
3490 + default:
3491 + strcomp_status = "Unknown";
3492 ++ ret = -EINVAL;
3493 + break;
3494 + }
3495 +
3496 + if (!strcomp_status)
3497 +- return 0;
3498 ++ return ret;
3499 +
3500 + if (reg & PIO_NON_POSTED_REQ)
3501 + str_posted = "Non-posted";
3502 +@@ -782,7 +741,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
3503 + dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
3504 + str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
3505 +
3506 +- return -EFAULT;
3507 ++ return ret;
3508 + }
3509 +
3510 + static int advk_pcie_wait_pio(struct advk_pcie *pcie)
3511 +@@ -790,13 +749,13 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
3512 + struct device *dev = &pcie->pdev->dev;
3513 + int i;
3514 +
3515 +- for (i = 0; i < PIO_RETRY_CNT; i++) {
3516 ++ for (i = 1; i <= PIO_RETRY_CNT; i++) {
3517 + u32 start, isr;
3518 +
3519 + start = advk_readl(pcie, PIO_START);
3520 + isr = advk_readl(pcie, PIO_ISR);
3521 + if (!start && isr)
3522 +- return 0;
3523 ++ return i;
3524 + udelay(PIO_RETRY_DELAY);
3525 + }
3526 +
3527 +@@ -984,7 +943,6 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
3528 + static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
3529 + {
3530 + struct pci_bridge_emul *bridge = &pcie->bridge;
3531 +- int ret;
3532 +
3533 + bridge->conf.vendor =
3534 + cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
3535 +@@ -1004,19 +962,14 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
3536 + /* Support interrupt A for MSI feature */
3537 + bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
3538 +
3539 ++ /* Indicates support for Completion Retry Status */
3540 ++ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
3541 ++
3542 + bridge->has_pcie = true;
3543 + bridge->data = pcie;
3544 + bridge->ops = &advk_pci_bridge_emul_ops;
3545 +
3546 +- /* PCIe config space can be initialized after pci_bridge_emul_init() */
3547 +- ret = pci_bridge_emul_init(bridge, 0);
3548 +- if (ret < 0)
3549 +- return ret;
3550 +-
3551 +- /* Indicates supports for Completion Retry Status */
3552 +- bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
3553 +-
3554 +- return 0;
3555 ++ return pci_bridge_emul_init(bridge, 0);
3556 + }
3557 +
3558 + static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
3559 +@@ -1068,6 +1021,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
3560 + int where, int size, u32 *val)
3561 + {
3562 + struct advk_pcie *pcie = bus->sysdata;
3563 ++ int retry_count;
3564 + bool allow_crs;
3565 + u32 reg;
3566 + int ret;
3567 +@@ -1090,18 +1044,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
3568 + (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
3569 + PCI_EXP_RTCTL_CRSSVE);
3570 +
3571 +- if (advk_pcie_pio_is_running(pcie)) {
3572 +- /*
3573 +- * If it is possible return Completion Retry Status so caller
3574 +- * tries to issue the request again instead of failing.
3575 +- */
3576 +- if (allow_crs) {
3577 +- *val = CFG_RD_CRS_VAL;
3578 +- return PCIBIOS_SUCCESSFUL;
3579 +- }
3580 +- *val = 0xffffffff;
3581 +- return PCIBIOS_SET_FAILED;
3582 +- }
3583 ++ if (advk_pcie_pio_is_running(pcie))
3584 ++ goto try_crs;
3585 +
3586 + /* Program the control register */
3587 + reg = advk_readl(pcie, PIO_CTRL);
3588 +@@ -1120,30 +1064,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
3589 + /* Program the data strobe */
3590 + advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
3591 +
3592 +- /* Clear PIO DONE ISR and start the transfer */
3593 +- advk_writel(pcie, 1, PIO_ISR);
3594 +- advk_writel(pcie, 1, PIO_START);
3595 ++ retry_count = 0;
3596 ++ do {
3597 ++ /* Clear PIO DONE ISR and start the transfer */
3598 ++ advk_writel(pcie, 1, PIO_ISR);
3599 ++ advk_writel(pcie, 1, PIO_START);
3600 +
3601 +- ret = advk_pcie_wait_pio(pcie);
3602 +- if (ret < 0) {
3603 +- /*
3604 +- * If it is possible return Completion Retry Status so caller
3605 +- * tries to issue the request again instead of failing.
3606 +- */
3607 +- if (allow_crs) {
3608 +- *val = CFG_RD_CRS_VAL;
3609 +- return PCIBIOS_SUCCESSFUL;
3610 +- }
3611 +- *val = 0xffffffff;
3612 +- return PCIBIOS_SET_FAILED;
3613 +- }
3614 ++ ret = advk_pcie_wait_pio(pcie);
3615 ++ if (ret < 0)
3616 ++ goto try_crs;
3617 +
3618 +- /* Check PIO status and get the read result */
3619 +- ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
3620 +- if (ret < 0) {
3621 +- *val = 0xffffffff;
3622 +- return PCIBIOS_SET_FAILED;
3623 +- }
3624 ++ retry_count += ret;
3625 ++
3626 ++ /* Check PIO status and get the read result */
3627 ++ ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
3628 ++ } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
3629 ++
3630 ++ if (ret < 0)
3631 ++ goto fail;
3632 +
3633 + if (size == 1)
3634 + *val = (*val >> (8 * (where & 3))) & 0xff;
3635 +@@ -1151,6 +1089,20 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
3636 + *val = (*val >> (8 * (where & 3))) & 0xffff;
3637 +
3638 + return PCIBIOS_SUCCESSFUL;
3639 ++
3640 ++try_crs:
3641 ++ /*
3642 ++ * If it is possible, return Completion Retry Status so that caller
3643 ++ * tries to issue the request again instead of failing.
3644 ++ */
3645 ++ if (allow_crs) {
3646 ++ *val = CFG_RD_CRS_VAL;
3647 ++ return PCIBIOS_SUCCESSFUL;
3648 ++ }
3649 ++
3650 ++fail:
3651 ++ *val = 0xffffffff;
3652 ++ return PCIBIOS_SET_FAILED;
3653 + }
3654 +
3655 + static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
3656 +@@ -1159,6 +1111,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
3657 + struct advk_pcie *pcie = bus->sysdata;
3658 + u32 reg;
3659 + u32 data_strobe = 0x0;
3660 ++ int retry_count;
3661 + int offset;
3662 + int ret;
3663 +
3664 +@@ -1200,19 +1153,22 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
3665 + /* Program the data strobe */
3666 + advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
3667 +
3668 +- /* Clear PIO DONE ISR and start the transfer */
3669 +- advk_writel(pcie, 1, PIO_ISR);
3670 +- advk_writel(pcie, 1, PIO_START);
3671 ++ retry_count = 0;
3672 ++ do {
3673 ++ /* Clear PIO DONE ISR and start the transfer */
3674 ++ advk_writel(pcie, 1, PIO_ISR);
3675 ++ advk_writel(pcie, 1, PIO_START);
3676 +
3677 +- ret = advk_pcie_wait_pio(pcie);
3678 +- if (ret < 0)
3679 +- return PCIBIOS_SET_FAILED;
3680 ++ ret = advk_pcie_wait_pio(pcie);
3681 ++ if (ret < 0)
3682 ++ return PCIBIOS_SET_FAILED;
3683 +
3684 +- ret = advk_pcie_check_pio_status(pcie, false, NULL);
3685 +- if (ret < 0)
3686 +- return PCIBIOS_SET_FAILED;
3687 ++ retry_count += ret;
3688 +
3689 +- return PCIBIOS_SUCCESSFUL;
3690 ++ ret = advk_pcie_check_pio_status(pcie, false, NULL);
3691 ++ } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
3692 ++
3693 ++ return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
3694 + }
3695 +
3696 + static struct pci_ops advk_pcie_ops = {
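
The config-access rework above maps each PIO completion status to an errno and retries only on CRS (-EAGAIN), and advk_pcie_wait_pio() now returns the number of polling iterations it consumed, so waiting and retrying draw on a single PIO_RETRY_CNT budget. A standalone sketch of that bounded retry loop (the status sequence is simulated):

#include <errno.h>
#include <stdio.h>

#define PIO_RETRY_CNT 750

/* Simulated wait: pretend one polling iteration per transfer. */
static int wait_pio(void)
{
	return 1; /* iterations consumed */
}

/* Simulated completion status: CRS a few times, then success. */
static int check_status(int *crs_left)
{
	return (*crs_left)-- > 0 ? -EAGAIN : 0;
}

int main(void)
{
	int crs_left = 3;
	int retry_count = 0;
	int ret;

	do {
		/* start the transfer here ... */
		ret = wait_pio();
		if (ret < 0)
			return 1; /* hard failure: no retry */
		retry_count += ret; /* polling eats into the retry budget */

		ret = check_status(&crs_left);
	} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

	printf("done after %d iterations, ret=%d\n", retry_count, ret);
	return ret ? 1 : 0;
}
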
3697 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
3698 +index 27eb652b564f5..81dab9b82f79f 100644
3699 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
3700 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
3701 +@@ -639,8 +639,8 @@ static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
3702 + mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
3703 + current_time = ktime_get_real();
3704 + TimeStamp = ktime_to_ms(current_time);
3705 +- mpi_request->Reserved7 = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
3706 +- mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp >> 32);
3707 ++ mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32);
3708 ++ mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
3709 + init_completion(&ioc->scsih_cmds.done);
3710 + ioc->put_smid_default(ioc, smid);
3711 + dinitprintk(ioc, ioc_info(ioc,
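
The mpt3sas timestamp fix swaps which request field receives each half of the 64-bit millisecond value: ts >> 32 is the high 32-bit word (Reserved7) and ts & 0xFFFFFFFF the low word (IOCParameterValue). Checked directly:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t ts = 0x00000123456789ABULL; /* example ktime in ms */
	uint32_t hi = (uint32_t)(ts >> 32);         /* high word */
	uint32_t lo = (uint32_t)(ts & 0xFFFFFFFFu); /* low word */

	printf("hi=0x%08" PRIx32 " lo=0x%08" PRIx32 "\n", hi, lo);
	return 0;
}
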
3712 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
3713 +index f87c0911f66ad..1b3a44ce65aae 100644
3714 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
3715 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
3716 +@@ -142,6 +142,8 @@
3717 +
3718 + #define MPT_MAX_CALLBACKS 32
3719 +
3720 ++#define MPT_MAX_HBA_NUM_PHYS 32
3721 ++
3722 + #define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
3723 + /* reserved for issuing internally framed scsi io cmds */
3724 + #define INTERNAL_SCSIIO_CMDS_COUNT 3
3725 +@@ -798,6 +800,7 @@ struct _sas_phy {
3726 + * @enclosure_handle: handle for this a member of an enclosure
3727 + * @device_info: bitwise defining capabilities of this sas_host/expander
3728 + * @responding: used in _scsih_expander_device_mark_responding
3729 ++ * @nr_phys_allocated: number of phys for which memory has been allocated
3730 + * @phy: a list of phys that make up this sas_host/expander
3731 + * @sas_port_list: list of ports attached to this sas_host/expander
3732 + * @port: hba port entry containing node's port number info
3733 +@@ -813,6 +816,7 @@ struct _sas_node {
3734 + u16 enclosure_handle;
3735 + u64 enclosure_logical_id;
3736 + u8 responding;
3737 ++ u8 nr_phys_allocated;
3738 + struct hba_port *port;
3739 + struct _sas_phy *phy;
3740 + struct list_head sas_port_list;
3741 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
3742 +index ad1b6c2b37a74..c1f900c6ea003 100644
3743 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
3744 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
3745 +@@ -3869,7 +3869,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3746 +
3747 + shost_for_each_device(sdev, ioc->shost) {
3748 + sas_device_priv_data = sdev->hostdata;
3749 +- if (!sas_device_priv_data)
3750 ++ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
3751 + continue;
3752 + if (sas_device_priv_data->sas_target->sas_address
3753 + != sas_address)
3754 +@@ -6406,11 +6406,26 @@ _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
3755 + int i, j, count = 0, lcount = 0;
3756 + int ret;
3757 + u64 sas_addr;
3758 ++ u8 num_phys;
3759 +
3760 + drsprintk(ioc, ioc_info(ioc,
3761 + "updating ports for sas_host(0x%016llx)\n",
3762 + (unsigned long long)ioc->sas_hba.sas_address));
3763 +
3764 ++ mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
3765 ++ if (!num_phys) {
3766 ++ ioc_err(ioc, "failure at %s:%d/%s()!\n",
3767 ++ __FILE__, __LINE__, __func__);
3768 ++ return;
3769 ++ }
3770 ++
3771 ++ if (num_phys > ioc->sas_hba.nr_phys_allocated) {
3772 ++ ioc_err(ioc, "failure at %s:%d/%s()!\n",
3773 ++ __FILE__, __LINE__, __func__);
3774 ++ return;
3775 ++ }
3776 ++ ioc->sas_hba.num_phys = num_phys;
3777 ++
3778 + port_table = kcalloc(ioc->sas_hba.num_phys,
3779 + sizeof(struct hba_port), GFP_KERNEL);
3780 + if (!port_table)
3781 +@@ -6611,6 +6626,30 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
3782 + ioc->sas_hba.phy[i].hba_vphy = 1;
3783 + }
3784 +
3785 ++ /*
3786 ++ * Add new HBA phys to STL if these new phys got added as part
3787 ++ * of HBA Firmware upgrade/downgrade operation.
3788 ++ */
3789 ++ if (!ioc->sas_hba.phy[i].phy) {
3790 ++ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
3791 ++ &phy_pg0, i))) {
3792 ++ ioc_err(ioc, "failure at %s:%d/%s()!\n",
3793 ++ __FILE__, __LINE__, __func__);
3794 ++ continue;
3795 ++ }
3796 ++ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3797 ++ MPI2_IOCSTATUS_MASK;
3798 ++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3799 ++ ioc_err(ioc, "failure at %s:%d/%s()!\n",
3800 ++ __FILE__, __LINE__, __func__);
3801 ++ continue;
3802 ++ }
3803 ++ ioc->sas_hba.phy[i].phy_id = i;
3804 ++ mpt3sas_transport_add_host_phy(ioc,
3805 ++ &ioc->sas_hba.phy[i], phy_pg0,
3806 ++ ioc->sas_hba.parent_dev);
3807 ++ continue;
3808 ++ }
3809 + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
3810 + attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
3811 + AttachedDevHandle);
3812 +@@ -6622,6 +6661,19 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
3813 + attached_handle, i, link_rate,
3814 + ioc->sas_hba.phy[i].port);
3815 + }
3816 ++ /*
3817 ++ * Clear the phy details if this phy got disabled as part of
3818 ++ * HBA Firmware upgrade/downgrade operation.
3819 ++ */
3820 ++ for (i = ioc->sas_hba.num_phys;
3821 ++ i < ioc->sas_hba.nr_phys_allocated; i++) {
3822 ++ if (ioc->sas_hba.phy[i].phy &&
3823 ++ ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
3824 ++ SAS_LINK_RATE_1_5_GBPS)
3825 ++ mpt3sas_transport_update_links(ioc,
3826 ++ ioc->sas_hba.sas_address, 0, i,
3827 ++ MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
3828 ++ }
3829 + out:
3830 + kfree(sas_iounit_pg0);
3831 + }
3832 +@@ -6654,7 +6706,10 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
3833 + __FILE__, __LINE__, __func__);
3834 + return;
3835 + }
3836 +- ioc->sas_hba.phy = kcalloc(num_phys,
3837 ++
3838 ++ ioc->sas_hba.nr_phys_allocated = max_t(u8,
3839 ++ MPT_MAX_HBA_NUM_PHYS, num_phys);
3840 ++ ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
3841 + sizeof(struct _sas_phy), GFP_KERNEL);
3842 + if (!ioc->sas_hba.phy) {
3843 + ioc_err(ioc, "failure at %s:%d/%s()!\n",
3844 +diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
3845 +index 9240e788b011d..a04693498dc01 100644
3846 +--- a/drivers/scsi/qla2xxx/qla_edif.c
3847 ++++ b/drivers/scsi/qla2xxx/qla_edif.c
3848 +@@ -865,7 +865,7 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
3849 + "APP request entry - portid=%06x.\n", tdid.b24);
3850 +
3851 + /* Ran out of space */
3852 +- if (pcnt > app_req.num_ports)
3853 ++ if (pcnt >= app_req.num_ports)
3854 + break;
3855 +
3856 + if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
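
The qla2xxx change turns > into >=: with num_ports entries, index num_ports is already one past the end of the array, so the loop must stop before reaching it. The bound in isolation:

#include <stdio.h>

#define NUM_PORTS 4

int main(void)
{
	int filled[NUM_PORTS] = {0};
	unsigned int pcnt;

	for (pcnt = 0; ; pcnt++) {
		if (pcnt >= NUM_PORTS) /* '>' here would write one past the end */
			break;
		filled[pcnt] = 1;
	}
	printf("filled %u slots\n", pcnt);
	return 0;
}
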
3857 +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
3858 +index ead65cdfb522e..1b1a63a467816 100644
3859 +--- a/drivers/scsi/scsi_debug.c
3860 ++++ b/drivers/scsi/scsi_debug.c
3861 +@@ -4649,6 +4649,7 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip,
3862 + struct sdeb_zone_state *zsp)
3863 + {
3864 + enum sdebug_z_cond zc;
3865 ++ struct sdeb_store_info *sip = devip2sip(devip, false);
3866 +
3867 + if (zbc_zone_is_conv(zsp))
3868 + return;
3869 +@@ -4660,6 +4661,10 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip,
3870 + if (zsp->z_cond == ZC4_CLOSED)
3871 + devip->nr_closed--;
3872 +
3873 ++ if (zsp->z_wp > zsp->z_start)
3874 ++ memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
3875 ++ (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
3876 ++
3877 + zsp->z_non_seq_resource = false;
3878 + zsp->z_wp = zsp->z_start;
3879 + zsp->z_cond = ZC1_EMPTY;
3880 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
3881 +index 9527e734a999a..920aae661c5b2 100644
3882 +--- a/drivers/scsi/scsi_sysfs.c
3883 ++++ b/drivers/scsi/scsi_sysfs.c
3884 +@@ -817,7 +817,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
3885 +
3886 + mutex_lock(&sdev->state_mutex);
3887 + if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
3888 +- ret = count;
3889 ++ ret = 0;
3890 + } else {
3891 + ret = scsi_device_set_state(sdev, state);
3892 + if (ret == 0 && state == SDEV_RUNNING)
3893 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
3894 +index fce63335084ed..78ead3369779c 100644
3895 +--- a/drivers/scsi/sd.c
3896 ++++ b/drivers/scsi/sd.c
3897 +@@ -2607,6 +2607,13 @@ sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
3898 + unsigned char *buffer, int len, struct scsi_mode_data *data,
3899 + struct scsi_sense_hdr *sshdr)
3900 + {
3901 ++ /*
3902 ++ * If we must use MODE SENSE(10), make sure that the buffer length
3903 ++ * is at least 8 bytes so that the mode sense header fits.
3904 ++ */
3905 ++ if (sdkp->device->use_10_for_ms && len < 8)
3906 ++ len = 8;
3907 ++
3908 + return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
3909 + SD_TIMEOUT, sdkp->max_retries, data,
3910 + sshdr);
3911 +diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
3912 +index cf263a58a1489..6fd549a424d53 100644
3913 +--- a/drivers/staging/fbtft/fb_ssd1351.c
3914 ++++ b/drivers/staging/fbtft/fb_ssd1351.c
3915 +@@ -187,7 +187,6 @@ static struct fbtft_display display = {
3916 + },
3917 + };
3918 +
3919 +-#ifdef CONFIG_FB_BACKLIGHT
3920 + static int update_onboard_backlight(struct backlight_device *bd)
3921 + {
3922 + struct fbtft_par *par = bl_get_data(bd);
3923 +@@ -231,9 +230,6 @@ static void register_onboard_backlight(struct fbtft_par *par)
3924 + if (!par->fbtftops.unregister_backlight)
3925 + par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
3926 + }
3927 +-#else
3928 +-static void register_onboard_backlight(struct fbtft_par *par) { };
3929 +-#endif
3930 +
3931 + FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display);
3932 +
3933 +diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
3934 +index ed992ca605ebe..1690358b8f018 100644
3935 +--- a/drivers/staging/fbtft/fbtft-core.c
3936 ++++ b/drivers/staging/fbtft/fbtft-core.c
3937 +@@ -128,7 +128,6 @@ static int fbtft_request_gpios(struct fbtft_par *par)
3938 + return 0;
3939 + }
3940 +
3941 +-#ifdef CONFIG_FB_BACKLIGHT
3942 + static int fbtft_backlight_update_status(struct backlight_device *bd)
3943 + {
3944 + struct fbtft_par *par = bl_get_data(bd);
3945 +@@ -161,6 +160,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par)
3946 + par->info->bl_dev = NULL;
3947 + }
3948 + }
3949 ++EXPORT_SYMBOL(fbtft_unregister_backlight);
3950 +
3951 + static const struct backlight_ops fbtft_bl_ops = {
3952 + .get_brightness = fbtft_backlight_get_brightness,
3953 +@@ -198,12 +198,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
3954 + if (!par->fbtftops.unregister_backlight)
3955 + par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
3956 + }
3957 +-#else
3958 +-void fbtft_register_backlight(struct fbtft_par *par) { };
3959 +-void fbtft_unregister_backlight(struct fbtft_par *par) { };
3960 +-#endif
3961 + EXPORT_SYMBOL(fbtft_register_backlight);
3962 +-EXPORT_SYMBOL(fbtft_unregister_backlight);
3963 +
3964 + static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
3965 + int ye)
3966 +@@ -853,13 +848,11 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
3967 + fb_info->fix.smem_len >> 10, text1,
3968 + HZ / fb_info->fbdefio->delay, text2);
3969 +
3970 +-#ifdef CONFIG_FB_BACKLIGHT
3971 + /* Turn on backlight if available */
3972 + if (fb_info->bl_dev) {
3973 + fb_info->bl_dev->props.power = FB_BLANK_UNBLANK;
3974 + fb_info->bl_dev->ops->update_status(fb_info->bl_dev);
3975 + }
3976 +-#endif
3977 +
3978 + return 0;
3979 +
3980 +diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c
3981 +index 1ed4772d27715..843760675876a 100644
3982 +--- a/drivers/staging/greybus/audio_helper.c
3983 ++++ b/drivers/staging/greybus/audio_helper.c
3984 +@@ -192,7 +192,11 @@ int gbaudio_remove_component_controls(struct snd_soc_component *component,
3985 + unsigned int num_controls)
3986 + {
3987 + struct snd_card *card = component->card->snd_card;
3988 ++ int err;
3989 +
3990 +- return gbaudio_remove_controls(card, component->dev, controls,
3991 +- num_controls, component->name_prefix);
3992 ++ down_write(&card->controls_rwsem);
3993 ++ err = gbaudio_remove_controls(card, component->dev, controls,
3994 ++ num_controls, component->name_prefix);
3995 ++ up_write(&card->controls_rwsem);
3996 ++ return err;
3997 + }
3998 +diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
3999 +index 5a472a4954b0f..63d312d01171e 100644
4000 +--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
4001 ++++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
4002 +@@ -104,6 +104,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
4003 + {0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
4004 + {0x02}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
4005 + {0x01}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
4006 ++ {0x00}, /* 0x13 */
4007 + {0x02}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
4008 + {0x00}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
4009 + {0x00}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
4010 +@@ -115,6 +116,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
4011 + {0x00}, /* 0x1C, */
4012 + {0x00}, /* 0x1D, */
4013 + {0x00}, /* 0x1E, */
4014 ++ {0x00}, /* 0x1F, */
4015 + /* 0x20 ~ 0x7F , New Define ===== */
4016 + {0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
4017 + {0x01}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
4018 +@@ -7080,12 +7082,12 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
4019 + struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
4020 + struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
4021 +
4022 +- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
4023 ++ pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
4024 + if (!pcmd_obj)
4025 + return;
4026 +
4027 + cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
4028 +- pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
4029 ++ pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
4030 + if (!pevtcmd) {
4031 + kfree(pcmd_obj);
4032 + return;
4033 +diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
4034 +index 013c322b98a2d..0eccce57c63a6 100644
4035 +--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
4036 ++++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
4037 +@@ -2061,6 +2061,7 @@ static int rtw_wx_read32(struct net_device *dev,
4038 + u32 data32;
4039 + u32 bytes;
4040 + u8 *ptmp;
4041 ++ int ret;
4042 +
4043 + padapter = (struct adapter *)rtw_netdev_priv(dev);
4044 + p = &wrqu->data;
4045 +@@ -2093,12 +2094,17 @@ static int rtw_wx_read32(struct net_device *dev,
4046 + break;
4047 + default:
4048 + DBG_88E(KERN_INFO "%s: usage> read [bytes],[address(hex)]\n", __func__);
4049 +- return -EINVAL;
4050 ++ ret = -EINVAL;
4051 ++ goto err_free_ptmp;
4052 + }
4053 + DBG_88E(KERN_INFO "%s: addr = 0x%08X data =%s\n", __func__, addr, extra);
4054 +
4055 + kfree(ptmp);
4056 + return 0;
4057 ++
4058 ++err_free_ptmp:
4059 ++ kfree(ptmp);
4060 ++ return ret;
4061 + }
4062 +
4063 + static int rtw_wx_write32(struct net_device *dev,
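
The rtw_wx_read32() fix above routes the invalid-size path through a label that frees the temporary buffer, the usual single-exit cleanup idiom; the old direct return leaked ptmp. In miniature:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int read_op(int bytes)
{
	char *ptmp = malloc(64);
	int ret;

	if (!ptmp)
		return -ENOMEM;

	switch (bytes) {
	case 1:
	case 2:
	case 4:
		printf("read %d bytes\n", bytes);
		break;
	default:
		ret = -EINVAL;
		goto err_free_ptmp; /* a bare return here would leak ptmp */
	}

	free(ptmp);
	return 0;

err_free_ptmp:
	free(ptmp);
	return ret;
}

int main(void)
{
	read_op(4); /* valid size: success path frees the buffer */
	read_op(3); /* invalid size: error path frees it too */
	return 0;
}
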
4064 +diff --git a/drivers/staging/r8188eu/os_dep/mlme_linux.c b/drivers/staging/r8188eu/os_dep/mlme_linux.c
4065 +index e3ee9dc7ab900..b0d1e20edc4c2 100644
4066 +--- a/drivers/staging/r8188eu/os_dep/mlme_linux.c
4067 ++++ b/drivers/staging/r8188eu/os_dep/mlme_linux.c
4068 +@@ -114,7 +114,7 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
4069 +
4070 + buff = NULL;
4071 + if (authmode == _WPA_IE_ID_) {
4072 +- buff = kzalloc(IW_CUSTOM_MAX, GFP_KERNEL);
4073 ++ buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
4074 + if (!buff)
4075 + return;
4076 + p = buff;
4077 +diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
4078 +index a7dd1578b2c6a..616ab3c8fde4f 100644
4079 +--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
4080 ++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
4081 +@@ -2549,13 +2549,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
4082 + free_irq(dev->irq, dev);
4083 + priv->irq = 0;
4084 + }
4085 +- free_rtllib(dev);
4086 +
4087 + if (dev->mem_start != 0) {
4088 + iounmap((void __iomem *)dev->mem_start);
4089 + release_mem_region(pci_resource_start(pdev, 1),
4090 + pci_resource_len(pdev, 1));
4091 + }
4092 ++
4093 ++ free_rtllib(dev);
4094 + }
4095 +
4096 + pci_disable_device(pdev);
4097 +diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
4098 +index f1d100671ee6a..097142ffb1842 100644
4099 +--- a/drivers/usb/chipidea/ci_hdrc_imx.c
4100 ++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
4101 +@@ -420,15 +420,15 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
4102 + data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
4103 + if (IS_ERR(data->phy)) {
4104 + ret = PTR_ERR(data->phy);
4105 +- if (ret == -ENODEV) {
4106 +- data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
4107 +- if (IS_ERR(data->phy)) {
4108 +- ret = PTR_ERR(data->phy);
4109 +- if (ret == -ENODEV)
4110 +- data->phy = NULL;
4111 +- else
4112 +- goto err_clk;
4113 +- }
4114 ++ if (ret != -ENODEV)
4115 ++ goto err_clk;
4116 ++ data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
4117 ++ if (IS_ERR(data->phy)) {
4118 ++ ret = PTR_ERR(data->phy);
4119 ++ if (ret == -ENODEV)
4120 ++ data->phy = NULL;
4121 ++ else
4122 ++ goto err_clk;
4123 + }
4124 + }
4125 +
4126 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
4127 +index 86658a81d2844..00070a8a65079 100644
4128 +--- a/drivers/usb/core/hub.c
4129 ++++ b/drivers/usb/core/hub.c
4130 +@@ -4700,8 +4700,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
4131 + if (oldspeed == USB_SPEED_LOW)
4132 + delay = HUB_LONG_RESET_TIME;
4133 +
4134 +- mutex_lock(hcd->address0_mutex);
4135 +-
4136 + /* Reset the device; full speed may morph to high speed */
4137 + /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
4138 + retval = hub_port_reset(hub, port1, udev, delay, false);
4139 +@@ -5016,7 +5014,6 @@ fail:
4140 + hub_port_disable(hub, port1, 0);
4141 + update_devnum(udev, devnum); /* for disconnect processing */
4142 + }
4143 +- mutex_unlock(hcd->address0_mutex);
4144 + return retval;
4145 + }
4146 +
4147 +@@ -5191,6 +5188,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4148 + struct usb_port *port_dev = hub->ports[port1 - 1];
4149 + struct usb_device *udev = port_dev->child;
4150 + static int unreliable_port = -1;
4151 ++ bool retry_locked;
4152 +
4153 + /* Disconnect any existing devices under this port */
4154 + if (udev) {
4155 +@@ -5246,8 +5244,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4156 + unit_load = 100;
4157 +
4158 + status = 0;
4159 +- for (i = 0; i < PORT_INIT_TRIES; i++) {
4160 +
4161 ++ for (i = 0; i < PORT_INIT_TRIES; i++) {
4162 ++ usb_lock_port(port_dev);
4163 ++ mutex_lock(hcd->address0_mutex);
4164 ++ retry_locked = true;
4165 + /* reallocate for each attempt, since references
4166 + * to the previous one can escape in various ways
4167 + */
4168 +@@ -5255,6 +5256,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4169 + if (!udev) {
4170 + dev_err(&port_dev->dev,
4171 + "couldn't allocate usb_device\n");
4172 ++ mutex_unlock(hcd->address0_mutex);
4173 ++ usb_unlock_port(port_dev);
4174 + goto done;
4175 + }
4176 +
4177 +@@ -5276,12 +5279,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4178 + }
4179 +
4180 + /* reset (non-USB 3.0 devices) and get descriptor */
4181 +- usb_lock_port(port_dev);
4182 + status = hub_port_init(hub, udev, port1, i);
4183 +- usb_unlock_port(port_dev);
4184 + if (status < 0)
4185 + goto loop;
4186 +
4187 ++ mutex_unlock(hcd->address0_mutex);
4188 ++ usb_unlock_port(port_dev);
4189 ++ retry_locked = false;
4190 ++
4191 + if (udev->quirks & USB_QUIRK_DELAY_INIT)
4192 + msleep(2000);
4193 +
4194 +@@ -5374,6 +5379,10 @@ loop:
4195 + usb_ep0_reinit(udev);
4196 + release_devnum(udev);
4197 + hub_free_dev(udev);
4198 ++ if (retry_locked) {
4199 ++ mutex_unlock(hcd->address0_mutex);
4200 ++ usb_unlock_port(port_dev);
4201 ++ }
4202 + usb_put_dev(udev);
4203 + if ((status == -ENOTCONN) || (status == -ENOTSUPP))
4204 + break;
4205 +@@ -5915,6 +5924,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
4206 + bos = udev->bos;
4207 + udev->bos = NULL;
4208 +
4209 ++ mutex_lock(hcd->address0_mutex);
4210 ++
4211 + for (i = 0; i < PORT_INIT_TRIES; ++i) {
4212 +
4213 + /* ep0 maxpacket size may change; let the HCD know about it.
4214 +@@ -5924,6 +5935,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
4215 + if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
4216 + break;
4217 + }
4218 ++ mutex_unlock(hcd->address0_mutex);
4219 +
4220 + if (ret < 0)
4221 + goto re_enumerate;
4222 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
4223 +index 11d85a6e0b0dc..2190225bf3da2 100644
4224 +--- a/drivers/usb/dwc2/gadget.c
4225 ++++ b/drivers/usb/dwc2/gadget.c
4226 +@@ -1198,6 +1198,8 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
4227 + }
4228 + ctrl |= DXEPCTL_CNAK;
4229 + } else {
4230 ++ hs_req->req.frame_number = hs_ep->target_frame;
4231 ++ hs_req->req.actual = 0;
4232 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
4233 + return;
4234 + }
4235 +@@ -2857,9 +2859,12 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
4236 +
4237 + do {
4238 + hs_req = get_ep_head(hs_ep);
4239 +- if (hs_req)
4240 ++ if (hs_req) {
4241 ++ hs_req->req.frame_number = hs_ep->target_frame;
4242 ++ hs_req->req.actual = 0;
4243 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
4244 + -ENODATA);
4245 ++ }
4246 + dwc2_gadget_incr_frame_num(hs_ep);
4247 + /* Update current frame number value. */
4248 + hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
4249 +@@ -2912,8 +2917,11 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
4250 +
4251 + while (dwc2_gadget_target_frame_elapsed(ep)) {
4252 + hs_req = get_ep_head(ep);
4253 +- if (hs_req)
4254 ++ if (hs_req) {
4255 ++ hs_req->req.frame_number = ep->target_frame;
4256 ++ hs_req->req.actual = 0;
4257 + dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
4258 ++ }
4259 +
4260 + dwc2_gadget_incr_frame_num(ep);
4261 + /* Update current frame number value. */
4262 +@@ -3002,8 +3010,11 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
4263 +
4264 + while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
4265 + hs_req = get_ep_head(hs_ep);
4266 +- if (hs_req)
4267 ++ if (hs_req) {
4268 ++ hs_req->req.frame_number = hs_ep->target_frame;
4269 ++ hs_req->req.actual = 0;
4270 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
4271 ++ }
4272 +
4273 + dwc2_gadget_incr_frame_num(hs_ep);
4274 + /* Update current frame number value. */
4275 +diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
4276 +index 89a788326c562..24beff610cf2c 100644
4277 +--- a/drivers/usb/dwc2/hcd_queue.c
4278 ++++ b/drivers/usb/dwc2/hcd_queue.c
4279 +@@ -59,7 +59,7 @@
4280 + #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
4281 +
4282 + /* If we get a NAK, wait this long before retrying */
4283 +-#define DWC2_RETRY_WAIT_DELAY (1 * 1E6L)
4284 ++#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
4285 +
4286 + /**
4287 + * dwc2_periodic_channel_available() - Checks that a channel is available for a
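
The hcd_queue.c change above looks cosmetic but is not: 1E6L is a
floating-point constant (scientific notation plus the L suffix yields a long
double), so the old DWC2_RETRY_WAIT_DELAY silently pulled floating-point
arithmetic into nanosecond timer math, while NSEC_PER_MSEC is a plain
integer. A small standalone demo of the difference (C11 for _Generic):

    #include <stdio.h>

    #define OLD_DELAY (1 * 1E6L)      /* 1E6L is a *long double* constant */
    #define NSEC_PER_MSEC 1000000L    /* plain integer, as in the kernel */
    #define NEW_DELAY (1 * NSEC_PER_MSEC)

    #define TYPE_OF(x) _Generic((x),            \
            long double: "long double",         \
            long:        "long",                \
            default:     "something else")

    int main(void)
    {
            /* The old definition promotes any expression it appears in
             * to floating point; the new one stays integral, which is
             * what ktime/hrtimer arithmetic expects. */
            printf("old: %s\n", TYPE_OF(OLD_DELAY));   /* long double */
            printf("new: %s\n", TYPE_OF(NEW_DELAY));   /* long */
            return 0;
    }
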
4288 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
4289 +index 0104a80b185e1..357b7805896e7 100644
4290 +--- a/drivers/usb/dwc3/core.c
4291 ++++ b/drivers/usb/dwc3/core.c
4292 +@@ -1565,9 +1565,11 @@ static int dwc3_probe(struct platform_device *pdev)
4293 +
4294 + dwc3_get_properties(dwc);
4295 +
4296 +- ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
4297 +- if (ret)
4298 +- return ret;
4299 ++ if (!dwc->sysdev_is_parent) {
4300 ++ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
4301 ++ if (ret)
4302 ++ return ret;
4303 ++ }
4304 +
4305 + dwc->reset = devm_reset_control_array_get_optional_shared(dev);
4306 + if (IS_ERR(dwc->reset))
4307 +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
4308 +index 0c100901a7845..fd5d42ec53501 100644
4309 +--- a/drivers/usb/dwc3/core.h
4310 ++++ b/drivers/usb/dwc3/core.h
4311 +@@ -143,7 +143,7 @@
4312 + #define DWC3_GHWPARAMS8 0xc600
4313 + #define DWC3_GUCTL3 0xc60c
4314 + #define DWC3_GFLADJ 0xc630
4315 +-#define DWC3_GHWPARAMS9 0xc680
4316 ++#define DWC3_GHWPARAMS9 0xc6e0
4317 +
4318 + /* Device Registers */
4319 + #define DWC3_DCFG 0xc700
4320 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
4321 +index ed97e47d32613..4c16805a2b310 100644
4322 +--- a/drivers/usb/dwc3/gadget.c
4323 ++++ b/drivers/usb/dwc3/gadget.c
4324 +@@ -310,13 +310,24 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
4325 + if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
4326 + int link_state;
4327 +
4328 ++ /*
4329 ++ * Initiate remote wakeup if the link state is in U3 when
4330 ++ * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
4331 ++ * link state is in U1/U2, no remote wakeup is needed. The Start
4332 ++ * Transfer command will initiate the link recovery.
4333 ++ */
4334 + link_state = dwc3_gadget_get_link_state(dwc);
4335 +- if (link_state == DWC3_LINK_STATE_U1 ||
4336 +- link_state == DWC3_LINK_STATE_U2 ||
4337 +- link_state == DWC3_LINK_STATE_U3) {
4338 ++ switch (link_state) {
4339 ++ case DWC3_LINK_STATE_U2:
4340 ++ if (dwc->gadget->speed >= USB_SPEED_SUPER)
4341 ++ break;
4342 ++
4343 ++ fallthrough;
4344 ++ case DWC3_LINK_STATE_U3:
4345 + ret = __dwc3_gadget_wakeup(dwc);
4346 + dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
4347 + ret);
4348 ++ break;
4349 + }
4350 + }
4351 +
4352 +@@ -3252,6 +3263,9 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
4353 + struct dwc3 *dwc = dep->dwc;
4354 + bool no_started_trb = true;
4355 +
4356 ++ if (!dep->endpoint.desc)
4357 ++ return no_started_trb;
4358 ++
4359 + dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
4360 +
4361 + if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
4362 +@@ -3299,6 +3313,9 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
4363 + {
4364 + int status = 0;
4365 +
4366 ++ if (!dep->endpoint.desc)
4367 ++ return;
4368 ++
4369 + if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
4370 + dwc3_gadget_endpoint_frame_from_event(dep, event);
4371 +
4372 +@@ -3352,6 +3369,14 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
4373 + if (cmd != DWC3_DEPCMD_ENDTRANSFER)
4374 + return;
4375 +
4376 ++ /*
4377 ++ * The END_TRANSFER command will cause the controller to generate a
4378 ++ * NoStream Event, and it's not due to the host DP NoStream rejection.
4379 ++ * Ignore the next NoStream event.
4380 ++ */
4381 ++ if (dep->stream_capable)
4382 ++ dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
4383 ++
4384 + dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
4385 + dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
4386 + dwc3_gadget_ep_cleanup_cancelled_requests(dep);
4387 +@@ -3574,14 +3599,6 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
4388 + WARN_ON_ONCE(ret);
4389 + dep->resource_index = 0;
4390 +
4391 +- /*
4392 +- * The END_TRANSFER command will cause the controller to generate a
4393 +- * NoStream Event, and it's not due to the host DP NoStream rejection.
4394 +- * Ignore the next NoStream event.
4395 +- */
4396 +- if (dep->stream_capable)
4397 +- dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
4398 +-
4399 + if (!interrupt)
4400 + dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
4401 + else
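
The reworked link-state check in dwc3_send_gadget_ep_cmd() above replaces a
flat three-way if with a switch: U1/U2 on a SuperSpeed link no longer trigger
remote wakeup, since Start Transfer itself recovers the link, while the HS/FS
"U2" encoding (really L1 sleep) and U3 still do. A compact sketch of that
decision table, with invented enum values:

    #include <stdio.h>
    #include <stdbool.h>

    enum link_state { U1, U2, U3 };   /* illustrative subset */

    /* Should a Start Transfer be preceded by a remote-wakeup request?
     * Mirrors the switch added above: U3 always needs it; U2 only
     * below SuperSpeed, where that register state means L1 (sleep);
     * U1 never does. */
    static bool needs_wakeup(enum link_state ls, bool superspeed)
    {
            switch (ls) {
            case U2:
                    if (superspeed)
                            return false;
                    /* fall through: in HS/FS this state is L1 sleep */
            case U3:
                    return true;
            default:
                    return false;
            }
    }

    int main(void)
    {
            printf("U1/SS: %d\n", needs_wakeup(U1, true));   /* 0 */
            printf("U2/SS: %d\n", needs_wakeup(U2, true));   /* 0 */
            printf("U2/HS: %d\n", needs_wakeup(U2, false));  /* 1 */
            printf("U3/SS: %d\n", needs_wakeup(U3, true));   /* 1 */
            return 0;
    }
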
4402 +diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
4403 +index 1bf494b649bd2..c8af2cd2216d6 100644
4404 +--- a/drivers/usb/host/xhci-tegra.c
4405 ++++ b/drivers/usb/host/xhci-tegra.c
4406 +@@ -1400,6 +1400,7 @@ static void tegra_xusb_deinit_usb_phy(struct tegra_xusb *tegra)
4407 +
4408 + static int tegra_xusb_probe(struct platform_device *pdev)
4409 + {
4410 ++ struct of_phandle_args args;
4411 + struct tegra_xusb *tegra;
4412 + struct device_node *np;
4413 + struct resource *regs;
4414 +@@ -1454,10 +1455,17 @@ static int tegra_xusb_probe(struct platform_device *pdev)
4415 + goto put_padctl;
4416 + }
4417 +
4418 +- tegra->padctl_irq = of_irq_get(np, 0);
4419 +- if (tegra->padctl_irq <= 0) {
4420 +- err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq;
4421 +- goto put_padctl;
4422 ++ /* Older device-trees don't have padctrl interrupt */
4423 ++ err = of_irq_parse_one(np, 0, &args);
4424 ++ if (!err) {
4425 ++ tegra->padctl_irq = of_irq_get(np, 0);
4426 ++ if (tegra->padctl_irq <= 0) {
4427 ++ err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq;
4428 ++ goto put_padctl;
4429 ++ }
4430 ++ } else {
4431 ++ dev_dbg(&pdev->dev,
4432 ++ "%pOF is missing an interrupt, disabling PM support\n", np);
4433 + }
4434 +
4435 + tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host");
4436 +@@ -1696,11 +1704,15 @@ static int tegra_xusb_probe(struct platform_device *pdev)
4437 + goto remove_usb3;
4438 + }
4439 +
4440 +- err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq, NULL, tegra_xusb_padctl_irq,
4441 +- IRQF_ONESHOT, dev_name(&pdev->dev), tegra);
4442 +- if (err < 0) {
4443 +- dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err);
4444 +- goto remove_usb3;
4445 ++ if (tegra->padctl_irq) {
4446 ++ err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq,
4447 ++ NULL, tegra_xusb_padctl_irq,
4448 ++ IRQF_ONESHOT, dev_name(&pdev->dev),
4449 ++ tegra);
4450 ++ if (err < 0) {
4451 ++ dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err);
4452 ++ goto remove_usb3;
4453 ++ }
4454 + }
4455 +
4456 + err = tegra_xusb_enable_firmware_messages(tegra);
4457 +@@ -1718,13 +1730,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
4458 + /* Enable wake for both USB 2.0 and USB 3.0 roothubs */
4459 + device_init_wakeup(&tegra->hcd->self.root_hub->dev, true);
4460 + device_init_wakeup(&xhci->shared_hcd->self.root_hub->dev, true);
4461 +- device_init_wakeup(tegra->dev, true);
4462 +
4463 + pm_runtime_use_autosuspend(tegra->dev);
4464 + pm_runtime_set_autosuspend_delay(tegra->dev, 2000);
4465 + pm_runtime_mark_last_busy(tegra->dev);
4466 + pm_runtime_set_active(tegra->dev);
4467 +- pm_runtime_enable(tegra->dev);
4468 ++
4469 ++ if (tegra->padctl_irq) {
4470 ++ device_init_wakeup(tegra->dev, true);
4471 ++ pm_runtime_enable(tegra->dev);
4472 ++ }
4473 +
4474 + return 0;
4475 +
4476 +@@ -1772,7 +1787,9 @@ static int tegra_xusb_remove(struct platform_device *pdev)
4477 + dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
4478 + tegra->fw.phys);
4479 +
4480 +- pm_runtime_disable(&pdev->dev);
4481 ++ if (tegra->padctl_irq)
4482 ++ pm_runtime_disable(&pdev->dev);
4483 ++
4484 + pm_runtime_put(&pdev->dev);
4485 +
4486 + tegra_xusb_powergate_partitions(tegra);
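
The xhci-tegra hunks make the padctl interrupt optional: when
of_irq_parse_one() reports that the node has no interrupt (older device
trees), the driver now skips the IRQ request, wakeup, and runtime PM setup
instead of failing probe. A hedged userspace sketch of that control flow; all
helper names here are invented stand-ins:

    #include <stdio.h>

    /* Stand-ins for of_irq_parse_one()/of_irq_get(); return <0 when
     * the device tree simply has no interrupt for the node. */
    static int parse_irq(int dt_has_irq) { return dt_has_irq ? 0 : -22; }
    static int get_irq(void)             { return 42; }

    static int probe(int dt_has_irq)
    {
            int padctl_irq = 0;

            if (parse_irq(dt_has_irq) == 0) {
                    padctl_irq = get_irq();
                    if (padctl_irq <= 0)
                            return padctl_irq ? padctl_irq : -19; /* -ENODEV */
            } else {
                    printf("node is missing an interrupt, "
                           "disabling PM support\n");
            }

            /* ... common controller setup ... */

            if (padctl_irq)
                    printf("request IRQ %d, enable wakeup + runtime PM\n",
                           padctl_irq);
            return 0;
    }

    int main(void)
    {
            probe(1);
            probe(0);
            return 0;
    }
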
4487 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
4488 +index a484ff5e4ebf8..546fce4617a85 100644
4489 +--- a/drivers/usb/serial/option.c
4490 ++++ b/drivers/usb/serial/option.c
4491 +@@ -1267,6 +1267,8 @@ static const struct usb_device_id option_ids[] = {
4492 + .driver_info = NCTRL(2) },
4493 + { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
4494 + .driver_info = NCTRL(0) | ZLP },
4495 ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
4496 ++ .driver_info = NCTRL(0) | ZLP },
4497 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
4498 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
4499 + .driver_info = RSVD(1) },
4500 +@@ -2094,6 +2096,9 @@ static const struct usb_device_id option_ids[] = {
4501 + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
4502 + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
4503 + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
4504 ++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
4505 ++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
4506 ++ .driver_info = RSVD(4) },
4507 + { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
4508 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
4509 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
4510 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
4511 +index f45ca7ddf78ea..a70fd86f735ca 100644
4512 +--- a/drivers/usb/serial/pl2303.c
4513 ++++ b/drivers/usb/serial/pl2303.c
4514 +@@ -432,6 +432,7 @@ static int pl2303_detect_type(struct usb_serial *serial)
4515 + case 0x200:
4516 + switch (bcdDevice) {
4517 + case 0x100:
4518 ++ case 0x105:
4519 + case 0x305:
4520 + case 0x405:
4521 + /*
4522 +diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
4523 +index 7a2a17866a823..72f9001b07921 100644
4524 +--- a/drivers/usb/typec/tcpm/fusb302.c
4525 ++++ b/drivers/usb/typec/tcpm/fusb302.c
4526 +@@ -669,25 +669,27 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
4527 + ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
4528 + FUSB_REG_MASK_BC_LVL |
4529 + FUSB_REG_MASK_COMP_CHNG,
4530 +- FUSB_REG_MASK_COMP_CHNG);
4531 ++ FUSB_REG_MASK_BC_LVL);
4532 + if (ret < 0) {
4533 + fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
4534 + ret);
4535 + goto done;
4536 + }
4537 + chip->intr_comp_chng = true;
4538 ++ chip->intr_bc_lvl = false;
4539 + break;
4540 + case TYPEC_CC_RD:
4541 + ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
4542 + FUSB_REG_MASK_BC_LVL |
4543 + FUSB_REG_MASK_COMP_CHNG,
4544 +- FUSB_REG_MASK_BC_LVL);
4545 ++ FUSB_REG_MASK_COMP_CHNG);
4546 + if (ret < 0) {
4547 + fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
4548 + ret);
4549 + goto done;
4550 + }
4551 + chip->intr_bc_lvl = true;
4552 ++ chip->intr_comp_chng = false;
4553 + break;
4554 + default:
4555 + break;
4556 +diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
4557 +index 5f484fff8dbec..41b0cd17fcbac 100644
4558 +--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
4559 ++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
4560 +@@ -591,8 +591,11 @@ static void vdpasim_free(struct vdpa_device *vdpa)
4561 + vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
4562 + }
4563 +
4564 +- put_iova_domain(&vdpasim->iova);
4565 +- iova_cache_put();
4566 ++ if (vdpa_get_dma_dev(vdpa)) {
4567 ++ put_iova_domain(&vdpasim->iova);
4568 ++ iova_cache_put();
4569 ++ }
4570 ++
4571 + kvfree(vdpasim->buffer);
4572 + if (vdpasim->iommu)
4573 + vhost_iotlb_free(vdpasim->iommu);
4574 +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
4575 +index 938aefbc75ecc..4e3b95af7ee4d 100644
4576 +--- a/drivers/vhost/vsock.c
4577 ++++ b/drivers/vhost/vsock.c
4578 +@@ -554,7 +554,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
4579 + virtio_transport_free_pkt(pkt);
4580 +
4581 + len += sizeof(pkt->hdr);
4582 +- vhost_add_used(vq, head, len);
4583 ++ vhost_add_used(vq, head, 0);
4584 + total_len += len;
4585 + added = true;
4586 + } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
4587 +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
4588 +index bd003ca8acbe9..fe360c33ce717 100644
4589 +--- a/drivers/xen/xenbus/xenbus_probe.c
4590 ++++ b/drivers/xen/xenbus/xenbus_probe.c
4591 +@@ -909,7 +909,7 @@ static struct notifier_block xenbus_resume_nb = {
4592 +
4593 + static int __init xenbus_init(void)
4594 + {
4595 +- int err = 0;
4596 ++ int err;
4597 + uint64_t v = 0;
4598 + xen_store_domain_type = XS_UNKNOWN;
4599 +
4600 +@@ -949,6 +949,29 @@ static int __init xenbus_init(void)
4601 + err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
4602 + if (err)
4603 + goto out_error;
4604 ++ /*
4605 ++ * Uninitialized hvm_params are zero and return no error.
4606 ++ * Although it is theoretically possible to have
4607 ++ * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
4608 ++ * not zero when valid. If zero, it means that Xenstore hasn't
4609 ++ * been properly initialized. Instead of attempting to map a
4610 ++ * wrong guest physical address return error.
4611 ++ *
4612 ++ * Also recognize all bits set as an invalid value.
4613 ++ */
4614 ++ if (!v || !~v) {
4615 ++ err = -ENOENT;
4616 ++ goto out_error;
4617 ++ }
4618 ++ /* Avoid truncation on 32-bit. */
4619 ++#if BITS_PER_LONG == 32
4620 ++ if (v > ULONG_MAX) {
4621 ++ pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
4622 ++ __func__, v);
4623 ++ err = -EINVAL;
4624 ++ goto out_error;
4625 ++ }
4626 ++#endif
4627 + xen_store_gfn = (unsigned long)v;
4628 + xen_store_interface =
4629 + xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
4630 +@@ -983,8 +1006,10 @@ static int __init xenbus_init(void)
4631 + */
4632 + proc_create_mount_point("xen");
4633 + #endif
4634 ++ return 0;
4635 +
4636 + out_error:
4637 ++ xen_store_domain_type = XS_UNKNOWN;
4638 + return err;
4639 + }
4640 +
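
The xenbus_init() hunk adds sanity checks on HVM_PARAM_STORE_PFN before
mapping it: zero and all-ones are rejected as uninitialized or invalid, and
on 32-bit a value above ULONG_MAX would be truncated by the cast to unsigned
long, so it is refused as well. A standalone sketch of the same checks, with
a made-up parameter value:

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    static int check_store_pfn(uint64_t v)
    {
            /* Zero means Xenstore was never initialized; all bits set
             * is likewise treated as invalid. */
            if (!v || !~v)
                    return -2;          /* -ENOENT in the kernel */
    #if ULONG_MAX < UINT64_MAX
            /* On 32-bit, a value above ULONG_MAX would be truncated
             * when stored in an unsigned long, so refuse it. */
            if (v > ULONG_MAX)
                    return -22;         /* -EINVAL */
    #endif
            return 0;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   check_store_pfn(0),
                   check_store_pfn(~0ULL),
                   check_store_pfn(0x12345));
            return 0;
    }
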
4641 +diff --git a/fs/ceph/super.c b/fs/ceph/super.c
4642 +index fd8742bae8471..202ddde3d62ad 100644
4643 +--- a/fs/ceph/super.c
4644 ++++ b/fs/ceph/super.c
4645 +@@ -52,8 +52,7 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
4646 + struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
4647 + struct ceph_mon_client *monc = &fsc->client->monc;
4648 + struct ceph_statfs st;
4649 +- u64 fsid;
4650 +- int err;
4651 ++ int i, err;
4652 + u64 data_pool;
4653 +
4654 + if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
4655 +@@ -99,12 +98,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
4656 + buf->f_namelen = NAME_MAX;
4657 +
4658 + /* Must convert the fsid, for consistent values across arches */
4659 ++ buf->f_fsid.val[0] = 0;
4660 + mutex_lock(&monc->mutex);
4661 +- fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
4662 +- le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
4663 ++ for (i = 0 ; i < sizeof(monc->monmap->fsid) / sizeof(__le32) ; ++i)
4664 ++ buf->f_fsid.val[0] ^= le32_to_cpu(((__le32 *)&monc->monmap->fsid)[i]);
4665 + mutex_unlock(&monc->mutex);
4666 +
4667 +- buf->f_fsid = u64_to_fsid(fsid);
4668 ++ /* fold the fs_cluster_id into the upper bits */
4669 ++ buf->f_fsid.val[1] = monc->fs_cluster_id;
4670 +
4671 + return 0;
4672 + }
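
The ceph_statfs() hunk changes how the 16-byte cluster fsid is folded into
the statfs f_fsid: instead of XOR-ing two 64-bit halves into one value, it
XORs four little-endian 32-bit words into val[0] and stores the
fs_cluster_id in val[1], giving consistent values across architectures. A
userspace sketch of the folding with a made-up fsid; a little-endian host
stands in for le32_to_cpu():

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical 16-byte cluster fsid, as stored in the monmap. */
    static const uint8_t fsid[16] = {
            0xde, 0xad, 0xbe, 0xef, 0x01, 0x23, 0x45, 0x67,
            0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98,
    };

    int main(void)
    {
            uint32_t val0 = 0;
            uint32_t word;
            size_t i;

            /* XOR the fsid together 32 bits at a time, mirroring the
             * loop added in ceph_statfs() above. */
            for (i = 0; i < sizeof(fsid) / sizeof(word); i++) {
                    memcpy(&word, fsid + i * sizeof(word), sizeof(word));
                    val0 ^= word;   /* assumes a little-endian host */
            }
            printf("f_fsid.val[0] = 0x%08x\n", (unsigned)val0);
            return 0;
    }
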
4673 +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
4674 +index de2c12bcfa4bc..905a901f7f80b 100644
4675 +--- a/fs/cifs/cifs_debug.c
4676 ++++ b/fs/cifs/cifs_debug.c
4677 +@@ -358,6 +358,8 @@ skip_rdma:
4678 + seq_printf(m, " signed");
4679 + if (server->posix_ext_supported)
4680 + seq_printf(m, " posix");
4681 ++ if (server->nosharesock)
4682 ++ seq_printf(m, " nosharesock");
4683 +
4684 + if (server->rdma)
4685 + seq_printf(m, "\nRDMA ");
4686 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
4687 +index dea4c929d3f46..3e5b8e177cfa7 100644
4688 +--- a/fs/cifs/cifsglob.h
4689 ++++ b/fs/cifs/cifsglob.h
4690 +@@ -592,6 +592,7 @@ struct TCP_Server_Info {
4691 + struct list_head pending_mid_q;
4692 + bool noblocksnd; /* use blocking sendmsg */
4693 + bool noautotune; /* do not autotune send buf sizes */
4694 ++ bool nosharesock;
4695 + bool tcp_nodelay;
4696 + unsigned int credits; /* send no more requests at once */
4697 + unsigned int max_credits; /* can override large 32000 default at mnt */
4698 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
4699 +index e757ee52cc777..439f02f1886c1 100644
4700 +--- a/fs/cifs/connect.c
4701 ++++ b/fs/cifs/connect.c
4702 +@@ -1220,6 +1220,10 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
4703 + if (ctx->nosharesock)
4704 + return 0;
4705 +
4706 ++ /* this server does not share socket */
4707 ++ if (server->nosharesock)
4708 ++ return 0;
4709 ++
4710 + /* If multidialect negotiation see if existing sessions match one */
4711 + if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
4712 + if (server->vals->protocol_id < SMB30_PROT_ID)
4713 +@@ -1370,6 +1374,9 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx)
4714 + goto out_err;
4715 + }
4716 +
4717 ++ if (ctx->nosharesock)
4718 ++ tcp_ses->nosharesock = true;
4719 ++
4720 + tcp_ses->ops = ctx->ops;
4721 + tcp_ses->vals = ctx->vals;
4722 + cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
4723 +diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
4724 +index bd86067a63f7f..3ca703cd5b24a 100644
4725 +--- a/fs/erofs/utils.c
4726 ++++ b/fs/erofs/utils.c
4727 +@@ -141,7 +141,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
4728 + * however in order to avoid some race conditions, add a
4729 + * DBG_BUGON to observe this in advance.
4730 + */
4731 +- DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp);
4732 ++ DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
4733 +
4734 + /* last refcount should be connected with its managed pslot. */
4735 + erofs_workgroup_unfreeze(grp, 0);
4736 +@@ -156,15 +156,19 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
4737 + unsigned int freed = 0;
4738 + unsigned long index;
4739 +
4740 ++ xa_lock(&sbi->managed_pslots);
4741 + xa_for_each(&sbi->managed_pslots, index, grp) {
4742 + /* try to shrink each valid workgroup */
4743 + if (!erofs_try_to_release_workgroup(sbi, grp))
4744 + continue;
4745 ++ xa_unlock(&sbi->managed_pslots);
4746 +
4747 + ++freed;
4748 + if (!--nr_shrink)
4749 +- break;
4750 ++ return freed;
4751 ++ xa_lock(&sbi->managed_pslots);
4752 + }
4753 ++ xa_unlock(&sbi->managed_pslots);
4754 + return freed;
4755 + }
4756 +
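
The erofs shrinker hunks above move the erase under an externally held
xa_lock and drop that lock around each workgroup release, retaking it before
the scan continues; note the early-return path that deliberately leaves the
lock dropped. A userspace sketch of the drop/reacquire pattern, with a
pthread mutex in place of the xarray lock and a toy item array:

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t xa_lock = PTHREAD_MUTEX_INITIALIZER;
    static int items[5] = { 1, 1, 0, 1, 1 };   /* 1 = releasable */

    /* Shrink up to nr items. The lock is held while scanning but must
     * be dropped while "releasing" an item (which may sleep in the
     * kernel), then retaken before the scan continues; every return
     * path must leave it unlocked. */
    static unsigned long shrink(unsigned long nr)
    {
            unsigned long freed = 0;

            pthread_mutex_lock(&xa_lock);
            for (int i = 0; i < 5; i++) {
                    if (!items[i])
                            continue;
                    pthread_mutex_unlock(&xa_lock);

                    items[i] = 0;        /* the sleeping release work */
                    ++freed;
                    if (!--nr)
                            return freed; /* lock already dropped */
                    pthread_mutex_lock(&xa_lock);
            }
            pthread_mutex_unlock(&xa_lock);
            return freed;
    }

    int main(void)
    {
            printf("freed %lu\n", shrink(2));
            printf("freed %lu\n", shrink(10));
            return 0;
    }
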
4757 +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
4758 +index 83e9bc0f91ffd..7b02827242312 100644
4759 +--- a/fs/f2fs/checkpoint.c
4760 ++++ b/fs/f2fs/checkpoint.c
4761 +@@ -1162,7 +1162,8 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
4762 + if (!is_journalled_quota(sbi))
4763 + return false;
4764 +
4765 +- down_write(&sbi->quota_sem);
4766 ++ if (!down_write_trylock(&sbi->quota_sem))
4767 ++ return true;
4768 + if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
4769 + ret = false;
4770 + } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
4771 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
4772 +index e863136081b47..556fcd8457f3f 100644
4773 +--- a/fs/f2fs/node.c
4774 ++++ b/fs/f2fs/node.c
4775 +@@ -1443,6 +1443,7 @@ page_hit:
4776 + nid, nid_of_node(page), ino_of_node(page),
4777 + ofs_of_node(page), cpver_of_node(page),
4778 + next_blkaddr_of_node(page));
4779 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
4780 + err = -EINVAL;
4781 + out_err:
4782 + ClearPageUptodate(page);
4783 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
4784 +index 5a1f142bdb484..a9d21b33da9c4 100644
4785 +--- a/fs/fuse/dev.c
4786 ++++ b/fs/fuse/dev.c
4787 +@@ -847,17 +847,17 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
4788 +
4789 + replace_page_cache_page(oldpage, newpage);
4790 +
4791 ++ get_page(newpage);
4792 ++
4793 ++ if (!(buf->flags & PIPE_BUF_FLAG_LRU))
4794 ++ lru_cache_add(newpage);
4795 ++
4796 + /*
4797 + * Release while we have extra ref on stolen page. Otherwise
4798 + * anon_pipe_buf_release() might think the page can be reused.
4799 + */
4800 + pipe_buf_release(cs->pipe, buf);
4801 +
4802 +- get_page(newpage);
4803 +-
4804 +- if (!(buf->flags & PIPE_BUF_FLAG_LRU))
4805 +- lru_cache_add(newpage);
4806 +-
4807 + err = 0;
4808 + spin_lock(&cs->req->waitq.lock);
4809 + if (test_bit(FR_ABORTED, &cs->req->flags))
4810 +diff --git a/fs/io_uring.c b/fs/io_uring.c
4811 +index 365f8b350b7f0..f8ceddafb6fc4 100644
4812 +--- a/fs/io_uring.c
4813 ++++ b/fs/io_uring.c
4814 +@@ -1204,6 +1204,7 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
4815 +
4816 + static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
4817 + bool cancel_all)
4818 ++ __must_hold(&req->ctx->timeout_lock)
4819 + {
4820 + struct io_kiocb *req;
4821 +
4822 +@@ -1219,6 +1220,44 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
4823 + return false;
4824 + }
4825 +
4826 ++static bool io_match_linked(struct io_kiocb *head)
4827 ++{
4828 ++ struct io_kiocb *req;
4829 ++
4830 ++ io_for_each_link(req, head) {
4831 ++ if (req->flags & REQ_F_INFLIGHT)
4832 ++ return true;
4833 ++ }
4834 ++ return false;
4835 ++}
4836 ++
4837 ++/*
4838 ++ * As io_match_task() but protected against racing with linked timeouts.
4839 ++ * User must not hold timeout_lock.
4840 ++ */
4841 ++static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
4842 ++ bool cancel_all)
4843 ++{
4844 ++ bool matched;
4845 ++
4846 ++ if (task && head->task != task)
4847 ++ return false;
4848 ++ if (cancel_all)
4849 ++ return true;
4850 ++
4851 ++ if (head->flags & REQ_F_LINK_TIMEOUT) {
4852 ++ struct io_ring_ctx *ctx = head->ctx;
4853 ++
4854 ++ /* protect against races with linked timeouts */
4855 ++ spin_lock_irq(&ctx->timeout_lock);
4856 ++ matched = io_match_linked(head);
4857 ++ spin_unlock_irq(&ctx->timeout_lock);
4858 ++ } else {
4859 ++ matched = io_match_linked(head);
4860 ++ }
4861 ++ return matched;
4862 ++}
4863 ++
4864 + static inline void req_set_fail(struct io_kiocb *req)
4865 + {
4866 + req->flags |= REQ_F_FAIL;
4867 +@@ -1430,10 +1469,10 @@ static void io_prep_async_link(struct io_kiocb *req)
4868 + if (req->flags & REQ_F_LINK_TIMEOUT) {
4869 + struct io_ring_ctx *ctx = req->ctx;
4870 +
4871 +- spin_lock(&ctx->completion_lock);
4872 ++ spin_lock_irq(&ctx->timeout_lock);
4873 + io_for_each_link(cur, req)
4874 + io_prep_async_work(cur);
4875 +- spin_unlock(&ctx->completion_lock);
4876 ++ spin_unlock_irq(&ctx->timeout_lock);
4877 + } else {
4878 + io_for_each_link(cur, req)
4879 + io_prep_async_work(cur);
4880 +@@ -4304,6 +4343,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
4881 + kfree(nxt);
4882 + if (++i == nbufs)
4883 + return i;
4884 ++ cond_resched();
4885 + }
4886 + i++;
4887 + kfree(buf);
4888 +@@ -5702,7 +5742,7 @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
4889 +
4890 + list = &ctx->cancel_hash[i];
4891 + hlist_for_each_entry_safe(req, tmp, list, hash_node) {
4892 +- if (io_match_task(req, tsk, cancel_all))
4893 ++ if (io_match_task_safe(req, tsk, cancel_all))
4894 + posted += io_poll_remove_one(req);
4895 + }
4896 + }
4897 +@@ -6884,10 +6924,11 @@ static inline struct file *io_file_get(struct io_ring_ctx *ctx,
4898 + static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
4899 + {
4900 + struct io_kiocb *prev = req->timeout.prev;
4901 +- int ret;
4902 ++ int ret = -ENOENT;
4903 +
4904 + if (prev) {
4905 +- ret = io_try_cancel_userdata(req, prev->user_data);
4906 ++ if (!(req->task->flags & PF_EXITING))
4907 ++ ret = io_try_cancel_userdata(req, prev->user_data);
4908 + io_req_complete_post(req, ret ?: -ETIME, 0);
4909 + io_put_req(prev);
4910 + } else {
4911 +@@ -9209,10 +9250,8 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
4912 + struct io_buffer *buf;
4913 + unsigned long index;
4914 +
4915 +- xa_for_each(&ctx->io_buffers, index, buf) {
4916 ++ xa_for_each(&ctx->io_buffers, index, buf)
4917 + __io_remove_buffers(ctx, buf, index, -1U);
4918 +- cond_resched();
4919 +- }
4920 + }
4921 +
4922 + static void io_req_cache_free(struct list_head *list)
4923 +@@ -9517,19 +9556,8 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
4924 + {
4925 + struct io_kiocb *req = container_of(work, struct io_kiocb, work);
4926 + struct io_task_cancel *cancel = data;
4927 +- bool ret;
4928 +
4929 +- if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
4930 +- struct io_ring_ctx *ctx = req->ctx;
4931 +-
4932 +- /* protect against races with linked timeouts */
4933 +- spin_lock(&ctx->completion_lock);
4934 +- ret = io_match_task(req, cancel->task, cancel->all);
4935 +- spin_unlock(&ctx->completion_lock);
4936 +- } else {
4937 +- ret = io_match_task(req, cancel->task, cancel->all);
4938 +- }
4939 +- return ret;
4940 ++ return io_match_task_safe(req, cancel->task, cancel->all);
4941 + }
4942 +
4943 + static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
4944 +@@ -9540,7 +9568,7 @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
4945 +
4946 + spin_lock(&ctx->completion_lock);
4947 + list_for_each_entry_reverse(de, &ctx->defer_list, list) {
4948 +- if (io_match_task(de->req, task, cancel_all)) {
4949 ++ if (io_match_task_safe(de->req, task, cancel_all)) {
4950 + list_cut_position(&list, &ctx->defer_list, &de->list);
4951 + break;
4952 + }
4953 +diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
4954 +index 9cc5798423d12..97119ec3b8503 100644
4955 +--- a/fs/iomap/buffered-io.c
4956 ++++ b/fs/iomap/buffered-io.c
4957 +@@ -256,8 +256,13 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
4958 + unsigned poff, plen;
4959 + sector_t sector;
4960 +
4961 +- if (iomap->type == IOMAP_INLINE)
4962 +- return min(iomap_read_inline_data(iter, page), length);
4963 ++ if (iomap->type == IOMAP_INLINE) {
4964 ++ loff_t ret = iomap_read_inline_data(iter, page);
4965 ++
4966 ++ if (ret < 0)
4967 ++ return ret;
4968 ++ return 0;
4969 ++ }
4970 +
4971 + /* zero post-eof blocks as the page may be mapped */
4972 + iop = iomap_page_create(iter->inode, page);
4973 +@@ -370,6 +375,8 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
4974 + ctx->cur_page_in_bio = false;
4975 + }
4976 + ret = iomap_readpage_iter(iter, ctx, done);
4977 ++ if (ret <= 0)
4978 ++ return ret;
4979 + }
4980 +
4981 + return done;
4982 +diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
4983 +index 589694af4e951..ad0ea5d36f2ee 100644
4984 +--- a/fs/ksmbd/smb2pdu.c
4985 ++++ b/fs/ksmbd/smb2pdu.c
4986 +@@ -1700,8 +1700,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
4987 + negblob_off = le16_to_cpu(req->SecurityBufferOffset);
4988 + negblob_len = le16_to_cpu(req->SecurityBufferLength);
4989 + if (negblob_off < (offsetof(struct smb2_sess_setup_req, Buffer) - 4) ||
4990 +- negblob_len < offsetof(struct negotiate_message, NegotiateFlags))
4991 +- return -EINVAL;
4992 ++ negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
4993 ++ rc = -EINVAL;
4994 ++ goto out_err;
4995 ++ }
4996 +
4997 + negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId +
4998 + negblob_off);
4999 +@@ -4450,6 +4452,12 @@ static void get_file_stream_info(struct ksmbd_work *work,
5000 + &stat);
5001 + file_info = (struct smb2_file_stream_info *)rsp->Buffer;
5002 +
5003 ++ buf_free_len =
5004 ++ smb2_calc_max_out_buf_len(work, 8,
5005 ++ le32_to_cpu(req->OutputBufferLength));
5006 ++ if (buf_free_len < 0)
5007 ++ goto out;
5008 ++
5009 + xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
5010 + if (xattr_list_len < 0) {
5011 + goto out;
5012 +@@ -4458,12 +4466,6 @@ static void get_file_stream_info(struct ksmbd_work *work,
5013 + goto out;
5014 + }
5015 +
5016 +- buf_free_len =
5017 +- smb2_calc_max_out_buf_len(work, 8,
5018 +- le32_to_cpu(req->OutputBufferLength));
5019 +- if (buf_free_len < 0)
5020 +- goto out;
5021 +-
5022 + while (idx < xattr_list_len) {
5023 + stream_name = xattr_list + idx;
5024 + streamlen = strlen(stream_name);
5025 +@@ -4489,8 +4491,10 @@ static void get_file_stream_info(struct ksmbd_work *work,
5026 + ":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
5027 +
5028 + next = sizeof(struct smb2_file_stream_info) + streamlen * 2;
5029 +- if (next > buf_free_len)
5030 ++ if (next > buf_free_len) {
5031 ++ kfree(stream_buf);
5032 + break;
5033 ++ }
5034 +
5035 + file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes];
5036 + streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
5037 +@@ -4507,6 +4511,7 @@ static void get_file_stream_info(struct ksmbd_work *work,
5038 + file_info->NextEntryOffset = cpu_to_le32(next);
5039 + }
5040 +
5041 ++out:
5042 + if (!S_ISDIR(stat.mode) &&
5043 + buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) {
5044 + file_info = (struct smb2_file_stream_info *)
5045 +@@ -4515,14 +4520,13 @@ static void get_file_stream_info(struct ksmbd_work *work,
5046 + "::$DATA", 7, conn->local_nls, 0);
5047 + streamlen *= 2;
5048 + file_info->StreamNameLength = cpu_to_le32(streamlen);
5049 +- file_info->StreamSize = 0;
5050 +- file_info->StreamAllocationSize = 0;
5051 ++ file_info->StreamSize = cpu_to_le64(stat.size);
5052 ++ file_info->StreamAllocationSize = cpu_to_le64(stat.blocks << 9);
5053 + nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
5054 + }
5055 +
5056 + /* last entry offset should be 0 */
5057 + file_info->NextEntryOffset = 0;
5058 +-out:
5059 + kvfree(xattr_list);
5060 +
5061 + rsp->OutputBufferLength = cpu_to_le32(nbytes);
5062 +@@ -5060,7 +5064,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
5063 + if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
5064 + PROTECTED_DACL_SECINFO |
5065 + UNPROTECTED_DACL_SECINFO)) {
5066 +- pr_err("Unsupported addition info: 0x%x)\n",
5067 ++ ksmbd_debug(SMB, "Unsupported addition info: 0x%x)\n",
5068 + addition_info);
5069 +
5070 + pntsd->revision = cpu_to_le16(1);
5071 +diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
5072 +index a24349512ffe9..9865b5c37d889 100644
5073 +--- a/fs/nfs/nfs42proc.c
5074 ++++ b/fs/nfs/nfs42proc.c
5075 +@@ -285,7 +285,9 @@ static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
5076 + loff_t newsize = pos + len;
5077 + loff_t end = newsize - 1;
5078 +
5079 +- truncate_pagecache_range(inode, pos, end);
5080 ++ WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
5081 ++ pos >> PAGE_SHIFT, end >> PAGE_SHIFT));
5082 ++
5083 + spin_lock(&inode->i_lock);
5084 + if (newsize > i_size_read(inode))
5085 + i_size_write(inode, newsize);
5086 +diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
5087 +index c8bad735e4c19..271e5f92ed019 100644
5088 +--- a/fs/nfs/nfs42xdr.c
5089 ++++ b/fs/nfs/nfs42xdr.c
5090 +@@ -1434,8 +1434,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
5091 + status = decode_clone(xdr);
5092 + if (status)
5093 + goto out;
5094 +- status = decode_getfattr(xdr, res->dst_fattr, res->server);
5095 +-
5096 ++ decode_getfattr(xdr, res->dst_fattr, res->server);
5097 + out:
5098 + res->rpc_status = status;
5099 + return status;
5100 +diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
5101 +index 9a15334da2086..e5730986758fa 100644
5102 +--- a/fs/proc/vmcore.c
5103 ++++ b/fs/proc/vmcore.c
5104 +@@ -124,9 +124,13 @@ ssize_t read_from_oldmem(char *buf, size_t count,
5105 + nr_bytes = count;
5106 +
5107 + /* If pfn is not ram, return zeros for sparse dump files */
5108 +- if (pfn_is_ram(pfn) == 0)
5109 +- memset(buf, 0, nr_bytes);
5110 +- else {
5111 ++ if (pfn_is_ram(pfn) == 0) {
5112 ++ tmp = 0;
5113 ++ if (!userbuf)
5114 ++ memset(buf, 0, nr_bytes);
5115 ++ else if (clear_user(buf, nr_bytes))
5116 ++ tmp = -EFAULT;
5117 ++ } else {
5118 + if (encrypted)
5119 + tmp = copy_oldmem_page_encrypted(pfn, buf,
5120 + nr_bytes,
5121 +@@ -135,10 +139,10 @@ ssize_t read_from_oldmem(char *buf, size_t count,
5122 + else
5123 + tmp = copy_oldmem_page(pfn, buf, nr_bytes,
5124 + offset, userbuf);
5125 +-
5126 +- if (tmp < 0)
5127 +- return tmp;
5128 + }
5129 ++ if (tmp < 0)
5130 ++ return tmp;
5131 ++
5132 + *ppos += nr_bytes;
5133 + count -= nr_bytes;
5134 + buf += nr_bytes;
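
The read_from_oldmem() hunk fixes the non-RAM case: a user-space destination
cannot be memset() from the kernel, so it needs clear_user(), and a fault
there must now be propagated just like the copy_oldmem_page() errors. A
hedged userspace analogue of the dispatch; clear_user_buf() is a toy
stand-in for clear_user():

    #include <stdio.h>
    #include <string.h>

    /* Pretend the user pointer may fault. */
    static int clear_user_buf(char *ubuf, size_t n, int faults)
    {
            if (faults)
                    return -14;          /* -EFAULT */
            memset(ubuf, 0, n);          /* the kernel cannot do this
                                          * directly on a __user ptr */
            return 0;
    }

    static long read_zero_page(char *buf, size_t n, int userbuf, int faults)
    {
            long tmp = 0;

            if (!userbuf)
                    memset(buf, 0, n);   /* kernel destination */
            else
                    tmp = clear_user_buf(buf, n, faults);

            if (tmp < 0)
                    return tmp;          /* propagate -EFAULT */
            return (long)n;
    }

    int main(void)
    {
            char buf[8];

            printf("%ld\n", read_zero_page(buf, sizeof(buf), 0, 0)); /* 8 */
            printf("%ld\n", read_zero_page(buf, sizeof(buf), 1, 1)); /* -14 */
            return 0;
    }
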
5135 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
5136 +index c412dde4d67dc..83b8070d1cc93 100644
5137 +--- a/include/net/ip6_fib.h
5138 ++++ b/include/net/ip6_fib.h
5139 +@@ -485,6 +485,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
5140 + struct fib6_config *cfg, gfp_t gfp_flags,
5141 + struct netlink_ext_ack *extack);
5142 + void fib6_nh_release(struct fib6_nh *fib6_nh);
5143 ++void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);
5144 +
5145 + int call_fib6_entry_notifiers(struct net *net,
5146 + enum fib_event_type event_type,
5147 +diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
5148 +index afbce90c44808..45e0339be6fa4 100644
5149 +--- a/include/net/ipv6_stubs.h
5150 ++++ b/include/net/ipv6_stubs.h
5151 +@@ -47,6 +47,7 @@ struct ipv6_stub {
5152 + struct fib6_config *cfg, gfp_t gfp_flags,
5153 + struct netlink_ext_ack *extack);
5154 + void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
5155 ++ void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
5156 + void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
5157 + int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify);
5158 + void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
5159 +diff --git a/include/net/nl802154.h b/include/net/nl802154.h
5160 +index ddcee128f5d9a..145acb8f25095 100644
5161 +--- a/include/net/nl802154.h
5162 ++++ b/include/net/nl802154.h
5163 +@@ -19,6 +19,8 @@
5164 + *
5165 + */
5166 +
5167 ++#include <linux/types.h>
5168 ++
5169 + #define NL802154_GENL_NAME "nl802154"
5170 +
5171 + enum nl802154_commands {
5172 +@@ -150,10 +152,9 @@ enum nl802154_attrs {
5173 + };
5174 +
5175 + enum nl802154_iftype {
5176 +- /* for backwards compatibility TODO */
5177 +- NL802154_IFTYPE_UNSPEC = -1,
5178 ++ NL802154_IFTYPE_UNSPEC = (~(__u32)0),
5179 +
5180 +- NL802154_IFTYPE_NODE,
5181 ++ NL802154_IFTYPE_NODE = 0,
5182 + NL802154_IFTYPE_MONITOR,
5183 + NL802154_IFTYPE_COORD,
5184 +
5185 +diff --git a/kernel/cpu.c b/kernel/cpu.c
5186 +index 192e43a874076..407a2568f35eb 100644
5187 +--- a/kernel/cpu.c
5188 ++++ b/kernel/cpu.c
5189 +@@ -31,6 +31,7 @@
5190 + #include <linux/smpboot.h>
5191 + #include <linux/relay.h>
5192 + #include <linux/slab.h>
5193 ++#include <linux/scs.h>
5194 + #include <linux/percpu-rwsem.h>
5195 + #include <linux/cpuset.h>
5196 +
5197 +@@ -587,6 +588,12 @@ static int bringup_cpu(unsigned int cpu)
5198 + struct task_struct *idle = idle_thread_get(cpu);
5199 + int ret;
5200 +
5201 ++ /*
5202 ++ * Reset stale stack state from the last time this CPU was online.
5203 ++ */
5204 ++ scs_task_reset(idle);
5205 ++ kasan_unpoison_task_stack(idle);
5206 ++
5207 + /*
5208 + * Some architectures have to walk the irq descriptors to
5209 + * setup the vector space for the cpu which comes online.
5210 +diff --git a/kernel/events/core.c b/kernel/events/core.c
5211 +index 7162b600e7eaa..2931faf92a76f 100644
5212 +--- a/kernel/events/core.c
5213 ++++ b/kernel/events/core.c
5214 +@@ -9729,6 +9729,9 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
5215 + continue;
5216 + if (event->attr.config != entry->type)
5217 + continue;
5218 ++ /* Cannot deliver synchronous signal to other task. */
5219 ++ if (event->attr.sigtrap)
5220 ++ continue;
5221 + if (perf_tp_event_match(event, &data, regs))
5222 + perf_swevent_event(event, count, &data, regs);
5223 + }
5224 +diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
5225 +index 29eea50a3e678..e63f740c2cc84 100644
5226 +--- a/kernel/locking/rwsem.c
5227 ++++ b/kernel/locking/rwsem.c
5228 +@@ -106,9 +106,9 @@
5229 + * atomic_long_cmpxchg() will be used to obtain writer lock.
5230 + *
5231 + * There are three places where the lock handoff bit may be set or cleared.
5232 +- * 1) rwsem_mark_wake() for readers.
5233 +- * 2) rwsem_try_write_lock() for writers.
5234 +- * 3) Error path of rwsem_down_write_slowpath().
5235 ++ * 1) rwsem_mark_wake() for readers -- set, clear
5236 ++ * 2) rwsem_try_write_lock() for writers -- set, clear
5237 ++ * 3) rwsem_del_waiter() -- clear
5238 + *
5239 + * For all the above cases, wait_lock will be held. A writer must also
5240 + * be the first one in the wait_list to be eligible for setting the handoff
5241 +@@ -335,6 +335,9 @@ struct rwsem_waiter {
5242 + struct task_struct *task;
5243 + enum rwsem_waiter_type type;
5244 + unsigned long timeout;
5245 ++
5246 ++ /* Writer only, not initialized in reader */
5247 ++ bool handoff_set;
5248 + };
5249 + #define rwsem_first_waiter(sem) \
5250 + list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
5251 +@@ -345,12 +348,6 @@ enum rwsem_wake_type {
5252 + RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
5253 + };
5254 +
5255 +-enum writer_wait_state {
5256 +- WRITER_NOT_FIRST, /* Writer is not first in wait list */
5257 +- WRITER_FIRST, /* Writer is first in wait list */
5258 +- WRITER_HANDOFF /* Writer is first & handoff needed */
5259 +-};
5260 +-
5261 + /*
5262 + * The typical HZ value is either 250 or 1000. So set the minimum waiting
5263 + * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
5264 +@@ -366,6 +363,31 @@ enum writer_wait_state {
5265 + */
5266 + #define MAX_READERS_WAKEUP 0x100
5267 +
5268 ++static inline void
5269 ++rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
5270 ++{
5271 ++ lockdep_assert_held(&sem->wait_lock);
5272 ++ list_add_tail(&waiter->list, &sem->wait_list);
5273 ++ /* caller will set RWSEM_FLAG_WAITERS */
5274 ++}
5275 ++
5276 ++/*
5277 ++ * Remove a waiter from the wait_list and clear flags.
5278 ++ *
5279 ++ * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
5280 ++ * this function. Modify with care.
5281 ++ */
5282 ++static inline void
5283 ++rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
5284 ++{
5285 ++ lockdep_assert_held(&sem->wait_lock);
5286 ++ list_del(&waiter->list);
5287 ++ if (likely(!list_empty(&sem->wait_list)))
5288 ++ return;
5289 ++
5290 ++ atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
5291 ++}
5292 ++
5293 + /*
5294 + * handle the lock release when processes blocked on it that can now run
5295 + * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
5296 +@@ -377,6 +399,8 @@ enum writer_wait_state {
5297 + * preferably when the wait_lock is released
5298 + * - woken process blocks are discarded from the list after having task zeroed
5299 + * - writers are only marked woken if downgrading is false
5300 ++ *
5301 ++ * Implies rwsem_del_waiter() for all woken readers.
5302 + */
5303 + static void rwsem_mark_wake(struct rw_semaphore *sem,
5304 + enum rwsem_wake_type wake_type,
5305 +@@ -491,18 +515,25 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
5306 +
5307 + adjustment = woken * RWSEM_READER_BIAS - adjustment;
5308 + lockevent_cond_inc(rwsem_wake_reader, woken);
5309 ++
5310 ++ oldcount = atomic_long_read(&sem->count);
5311 + if (list_empty(&sem->wait_list)) {
5312 +- /* hit end of list above */
5313 ++ /*
5314 ++ * Combined with list_move_tail() above, this implies
5315 ++ * rwsem_del_waiter().
5316 ++ */
5317 + adjustment -= RWSEM_FLAG_WAITERS;
5318 ++ if (oldcount & RWSEM_FLAG_HANDOFF)
5319 ++ adjustment -= RWSEM_FLAG_HANDOFF;
5320 ++ } else if (woken) {
5321 ++ /*
5322 ++ * When we've woken a reader, we no longer need to force
5323 ++ * writers to give up the lock and we can clear HANDOFF.
5324 ++ */
5325 ++ if (oldcount & RWSEM_FLAG_HANDOFF)
5326 ++ adjustment -= RWSEM_FLAG_HANDOFF;
5327 + }
5328 +
5329 +- /*
5330 +- * When we've woken a reader, we no longer need to force writers
5331 +- * to give up the lock and we can clear HANDOFF.
5332 +- */
5333 +- if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
5334 +- adjustment -= RWSEM_FLAG_HANDOFF;
5335 +-
5336 + if (adjustment)
5337 + atomic_long_add(adjustment, &sem->count);
5338 +
5339 +@@ -533,12 +564,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
5340 + * race conditions between checking the rwsem wait list and setting the
5341 + * sem->count accordingly.
5342 + *
5343 +- * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
5344 +- * bit is set or the lock is acquired with handoff bit cleared.
5345 ++ * Implies rwsem_del_waiter() on success.
5346 + */
5347 + static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
5348 +- enum writer_wait_state wstate)
5349 ++ struct rwsem_waiter *waiter)
5350 + {
5351 ++ bool first = rwsem_first_waiter(sem) == waiter;
5352 + long count, new;
5353 +
5354 + lockdep_assert_held(&sem->wait_lock);
5355 +@@ -547,13 +578,19 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
5356 + do {
5357 + bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
5358 +
5359 +- if (has_handoff && wstate == WRITER_NOT_FIRST)
5360 +- return false;
5361 ++ if (has_handoff) {
5362 ++ if (!first)
5363 ++ return false;
5364 ++
5365 ++ /* First waiter inherits a previously set handoff bit */
5366 ++ waiter->handoff_set = true;
5367 ++ }
5368 +
5369 + new = count;
5370 +
5371 + if (count & RWSEM_LOCK_MASK) {
5372 +- if (has_handoff || (wstate != WRITER_HANDOFF))
5373 ++ if (has_handoff || (!rt_task(waiter->task) &&
5374 ++ !time_after(jiffies, waiter->timeout)))
5375 + return false;
5376 +
5377 + new |= RWSEM_FLAG_HANDOFF;
5378 +@@ -570,9 +607,17 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
5379 + * We have either acquired the lock with handoff bit cleared or
5380 + * set the handoff bit.
5381 + */
5382 +- if (new & RWSEM_FLAG_HANDOFF)
5383 ++ if (new & RWSEM_FLAG_HANDOFF) {
5384 ++ waiter->handoff_set = true;
5385 ++ lockevent_inc(rwsem_wlock_handoff);
5386 + return false;
5387 ++ }
5388 +
5389 ++ /*
5390 ++ * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
5391 ++ * success.
5392 ++ */
5393 ++ list_del(&waiter->list);
5394 + rwsem_set_owner(sem);
5395 + return true;
5396 + }
5397 +@@ -953,7 +998,7 @@ queue:
5398 + }
5399 + adjustment += RWSEM_FLAG_WAITERS;
5400 + }
5401 +- list_add_tail(&waiter.list, &sem->wait_list);
5402 ++ rwsem_add_waiter(sem, &waiter);
5403 +
5404 + /* we're now waiting on the lock, but no longer actively locking */
5405 + count = atomic_long_add_return(adjustment, &sem->count);
5406 +@@ -999,11 +1044,7 @@ queue:
5407 + return sem;
5408 +
5409 + out_nolock:
5410 +- list_del(&waiter.list);
5411 +- if (list_empty(&sem->wait_list)) {
5412 +- atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
5413 +- &sem->count);
5414 +- }
5415 ++ rwsem_del_waiter(sem, &waiter);
5416 + raw_spin_unlock_irq(&sem->wait_lock);
5417 + __set_current_state(TASK_RUNNING);
5418 + lockevent_inc(rwsem_rlock_fail);
5419 +@@ -1017,9 +1058,7 @@ static struct rw_semaphore *
5420 + rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
5421 + {
5422 + long count;
5423 +- enum writer_wait_state wstate;
5424 + struct rwsem_waiter waiter;
5425 +- struct rw_semaphore *ret = sem;
5426 + DEFINE_WAKE_Q(wake_q);
5427 +
5428 + /* do optimistic spinning and steal lock if possible */
5429 +@@ -1035,16 +1074,13 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
5430 + waiter.task = current;
5431 + waiter.type = RWSEM_WAITING_FOR_WRITE;
5432 + waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
5433 ++ waiter.handoff_set = false;
5434 +
5435 + raw_spin_lock_irq(&sem->wait_lock);
5436 +-
5437 +- /* account for this before adding a new element to the list */
5438 +- wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
5439 +-
5440 +- list_add_tail(&waiter.list, &sem->wait_list);
5441 ++ rwsem_add_waiter(sem, &waiter);
5442 +
5443 + /* we're now waiting on the lock */
5444 +- if (wstate == WRITER_NOT_FIRST) {
5445 ++ if (rwsem_first_waiter(sem) != &waiter) {
5446 + count = atomic_long_read(&sem->count);
5447 +
5448 + /*
5449 +@@ -1080,13 +1116,16 @@ wait:
5450 + /* wait until we successfully acquire the lock */
5451 + set_current_state(state);
5452 + for (;;) {
5453 +- if (rwsem_try_write_lock(sem, wstate)) {
5454 ++ if (rwsem_try_write_lock(sem, &waiter)) {
5455 + /* rwsem_try_write_lock() implies ACQUIRE on success */
5456 + break;
5457 + }
5458 +
5459 + raw_spin_unlock_irq(&sem->wait_lock);
5460 +
5461 ++ if (signal_pending_state(state, current))
5462 ++ goto out_nolock;
5463 ++
5464 + /*
5465 + * After setting the handoff bit and failing to acquire
5466 + * the lock, attempt to spin on owner to accelerate lock
5467 +@@ -1095,7 +1134,7 @@ wait:
5468 + * In this case, we attempt to acquire the lock again
5469 + * without sleeping.
5470 + */
5471 +- if (wstate == WRITER_HANDOFF) {
5472 ++ if (waiter.handoff_set) {
5473 + enum owner_state owner_state;
5474 +
5475 + preempt_disable();
5476 +@@ -1106,66 +1145,26 @@ wait:
5477 + goto trylock_again;
5478 + }
5479 +
5480 +- /* Block until there are no active lockers. */
5481 +- for (;;) {
5482 +- if (signal_pending_state(state, current))
5483 +- goto out_nolock;
5484 +-
5485 +- schedule();
5486 +- lockevent_inc(rwsem_sleep_writer);
5487 +- set_current_state(state);
5488 +- /*
5489 +- * If HANDOFF bit is set, unconditionally do
5490 +- * a trylock.
5491 +- */
5492 +- if (wstate == WRITER_HANDOFF)
5493 +- break;
5494 +-
5495 +- if ((wstate == WRITER_NOT_FIRST) &&
5496 +- (rwsem_first_waiter(sem) == &waiter))
5497 +- wstate = WRITER_FIRST;
5498 +-
5499 +- count = atomic_long_read(&sem->count);
5500 +- if (!(count & RWSEM_LOCK_MASK))
5501 +- break;
5502 +-
5503 +- /*
5504 +- * The setting of the handoff bit is deferred
5505 +- * until rwsem_try_write_lock() is called.
5506 +- */
5507 +- if ((wstate == WRITER_FIRST) && (rt_task(current) ||
5508 +- time_after(jiffies, waiter.timeout))) {
5509 +- wstate = WRITER_HANDOFF;
5510 +- lockevent_inc(rwsem_wlock_handoff);
5511 +- break;
5512 +- }
5513 +- }
5514 ++ schedule();
5515 ++ lockevent_inc(rwsem_sleep_writer);
5516 ++ set_current_state(state);
5517 + trylock_again:
5518 + raw_spin_lock_irq(&sem->wait_lock);
5519 + }
5520 + __set_current_state(TASK_RUNNING);
5521 +- list_del(&waiter.list);
5522 + raw_spin_unlock_irq(&sem->wait_lock);
5523 + lockevent_inc(rwsem_wlock);
5524 +-
5525 +- return ret;
5526 ++ return sem;
5527 +
5528 + out_nolock:
5529 + __set_current_state(TASK_RUNNING);
5530 + raw_spin_lock_irq(&sem->wait_lock);
5531 +- list_del(&waiter.list);
5532 +-
5533 +- if (unlikely(wstate == WRITER_HANDOFF))
5534 +- atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);
5535 +-
5536 +- if (list_empty(&sem->wait_list))
5537 +- atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
5538 +- else
5539 ++ rwsem_del_waiter(sem, &waiter);
5540 ++ if (!list_empty(&sem->wait_list))
5541 + rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
5542 + raw_spin_unlock_irq(&sem->wait_lock);
5543 + wake_up_q(&wake_q);
5544 + lockevent_inc(rwsem_wlock_fail);
5545 +-
5546 + return ERR_PTR(-EINTR);
5547 + }
5548 +
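
The rwsem rework above funnels all waiter removal through
rwsem_del_waiter(), whose invariant is worth stating plainly: whichever path
removes the last waiter (wakeup, signal, or handoff) must clear both the
WAITERS and HANDOFF bits from the count word together. A toy illustration of
that invariant, with invented flag values and a plain counter in place of
the wait list:

    #include <stdio.h>

    #define FLAG_WAITERS 0x1UL
    #define FLAG_HANDOFF 0x2UL

    struct sem { unsigned long count; int nwaiters; };

    /* Single removal helper: once the wait list goes empty, both
     * flags come off the count word in one step, whatever path
     * removed the waiter. */
    static void del_waiter(struct sem *s)
    {
            if (--s->nwaiters > 0)
                    return;
            s->count &= ~(FLAG_WAITERS | FLAG_HANDOFF);
    }

    int main(void)
    {
            struct sem s = { .count = FLAG_WAITERS | FLAG_HANDOFF,
                             .nwaiters = 2 };

            del_waiter(&s);
            printf("count=%#lx nwaiters=%d\n", s.count, s.nwaiters);
            del_waiter(&s);
            printf("count=%#lx nwaiters=%d\n", s.count, s.nwaiters);
            return 0;
    }
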
5549 +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
5550 +index 559acef3fddb8..b0888e9224da3 100644
5551 +--- a/kernel/power/hibernate.c
5552 ++++ b/kernel/power/hibernate.c
5553 +@@ -691,7 +691,7 @@ static int load_image_and_restore(void)
5554 + goto Unlock;
5555 +
5556 + error = swsusp_read(&flags);
5557 +- swsusp_close(FMODE_READ);
5558 ++ swsusp_close(FMODE_READ | FMODE_EXCL);
5559 + if (!error)
5560 + error = hibernation_restore(flags & SF_PLATFORM_MODE);
5561 +
5562 +@@ -981,7 +981,7 @@ static int software_resume(void)
5563 + /* The snapshot device should not be opened while we're running */
5564 + if (!hibernate_acquire()) {
5565 + error = -EBUSY;
5566 +- swsusp_close(FMODE_READ);
5567 ++ swsusp_close(FMODE_READ | FMODE_EXCL);
5568 + goto Unlock;
5569 + }
5570 +
5571 +@@ -1016,7 +1016,7 @@ static int software_resume(void)
5572 + pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
5573 + return error;
5574 + Close_Finish:
5575 +- swsusp_close(FMODE_READ);
5576 ++ swsusp_close(FMODE_READ | FMODE_EXCL);
5577 + goto Finish;
5578 + }
5579 +
5580 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
5581 +index 779f27a4b46ac..6f4625f8276f1 100644
5582 +--- a/kernel/sched/core.c
5583 ++++ b/kernel/sched/core.c
5584 +@@ -8641,9 +8641,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
5585 + idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
5586 + kthread_set_per_cpu(idle, cpu);
5587 +
5588 +- scs_task_reset(idle);
5589 +- kasan_unpoison_task_stack(idle);
5590 +-
5591 + #ifdef CONFIG_SMP
5592 + /*
5593 + * It's possible that init_idle() gets called multiple times on a task,
5594 +@@ -8799,7 +8796,6 @@ void idle_task_exit(void)
5595 + finish_arch_post_lock_switch();
5596 + }
5597 +
5598 +- scs_task_reset(current);
5599 + /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
5600 + }
5601 +
5602 +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
5603 +index 5c71d32b2860a..421374c304fc0 100644
5604 +--- a/kernel/trace/trace.h
5605 ++++ b/kernel/trace/trace.h
5606 +@@ -1360,14 +1360,26 @@ __event_trigger_test_discard(struct trace_event_file *file,
5607 + if (eflags & EVENT_FILE_FL_TRIGGER_COND)
5608 + *tt = event_triggers_call(file, buffer, entry, event);
5609 +
5610 +- if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
5611 +- (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
5612 +- !filter_match_preds(file->filter, entry))) {
5613 +- __trace_event_discard_commit(buffer, event);
5614 +- return true;
5615 +- }
5616 ++ if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
5617 ++ EVENT_FILE_FL_FILTERED |
5618 ++ EVENT_FILE_FL_PID_FILTER))))
5619 ++ return false;
5620 ++
5621 ++ if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
5622 ++ goto discard;
5623 ++
5624 ++ if (file->flags & EVENT_FILE_FL_FILTERED &&
5625 ++ !filter_match_preds(file->filter, entry))
5626 ++ goto discard;
5627 ++
5628 ++ if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
5629 ++ trace_event_ignore_this_pid(file))
5630 ++ goto discard;
5631 +
5632 + return false;
5633 ++ discard:
5634 ++ __trace_event_discard_commit(buffer, event);
5635 ++ return true;
5636 + }
5637 +
5638 + /**
5639 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
5640 +index bb1123ef2a021..44d031ffe5112 100644
5641 +--- a/kernel/trace/trace_events.c
5642 ++++ b/kernel/trace/trace_events.c
5643 +@@ -2678,12 +2678,22 @@ static struct trace_event_file *
5644 + trace_create_new_event(struct trace_event_call *call,
5645 + struct trace_array *tr)
5646 + {
5647 ++ struct trace_pid_list *no_pid_list;
5648 ++ struct trace_pid_list *pid_list;
5649 + struct trace_event_file *file;
5650 +
5651 + file = kmem_cache_alloc(file_cachep, GFP_TRACE);
5652 + if (!file)
5653 + return NULL;
5654 +
5655 ++ pid_list = rcu_dereference_protected(tr->filtered_pids,
5656 ++ lockdep_is_held(&event_mutex));
5657 ++ no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
5658 ++ lockdep_is_held(&event_mutex));
5659 ++
5660 ++ if (pid_list || no_pid_list)
5661 ++ file->flags |= EVENT_FILE_FL_PID_FILTER;
5662 ++
5663 + file->event_call = call;
5664 + file->tr = tr;
5665 + atomic_set(&file->sm_ref, 0);
5666 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
5667 +index 0a5c0db3137ee..f5f0039d31e5a 100644
5668 +--- a/kernel/trace/trace_uprobe.c
5669 ++++ b/kernel/trace/trace_uprobe.c
5670 +@@ -1313,6 +1313,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
5671 + return 0;
5672 +
5673 + list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
5674 ++ tu = container_of(pos, struct trace_uprobe, tp);
5675 + err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
5676 + if (err) {
5677 + uprobe_perf_close(call, event);
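
The one-line trace_uprobe.c fix above restores the container_of() step
inside the loop: the old code iterated pos over the probe list but kept
applying uprobe_apply() to a tu computed before the loop. A standalone
miniature of the same embedded-list walk, with toy struct names:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct node   { struct node *next; };
    struct probe  { struct node link; };              /* trace_probe  */
    struct uprobe { long inode; struct probe tp; };   /* trace_uprobe */

    int main(void)
    {
            struct uprobe a = { .inode = 100 }, b = { .inode = 200 };

            a.tp.link.next = &b.tp.link;
            b.tp.link.next = NULL;

            /* The bug fixed above: the loop walked 'pos' but kept
             * using a stale 'tu' computed before the loop. The fix
             * derives tu from pos on every iteration, as here. */
            for (struct node *pos = &a.tp.link; pos; pos = pos->next) {
                    struct probe  *p  = container_of(pos, struct probe, link);
                    struct uprobe *tu = container_of(p, struct uprobe, tp);

                    printf("inode=%ld\n", tu->inode);
            }
            return 0;
    }
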
5678 +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
5679 +index a3a0a5e994f5a..abaa5d96ded24 100644
5680 +--- a/net/8021q/vlan.c
5681 ++++ b/net/8021q/vlan.c
5682 +@@ -184,9 +184,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
5683 + if (err)
5684 + goto out_unregister_netdev;
5685 +
5686 +- /* Account for reference in struct vlan_dev_priv */
5687 +- dev_hold(real_dev);
5688 +-
5689 + vlan_stacked_transfer_operstate(real_dev, dev, vlan);
5690 + linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
5691 +
5692 +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
5693 +index aeeb5f90417b5..8602885c8a8e0 100644
5694 +--- a/net/8021q/vlan_dev.c
5695 ++++ b/net/8021q/vlan_dev.c
5696 +@@ -615,6 +615,9 @@ static int vlan_dev_init(struct net_device *dev)
5697 + if (!vlan->vlan_pcpu_stats)
5698 + return -ENOMEM;
5699 +
5700 ++ /* Get vlan's reference to real_dev */
5701 ++ dev_hold(real_dev);
5702 ++
5703 + return 0;
5704 + }
5705 +
5706 +diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
5707 +index f2abc31528883..e4983f473a3c5 100644
5708 +--- a/net/ethtool/ioctl.c
5709 ++++ b/net/ethtool/ioctl.c
5710 +@@ -1697,7 +1697,7 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
5711 + struct ethtool_coalesce coalesce;
5712 + int ret;
5713 +
5714 +- if (!dev->ethtool_ops->set_coalesce && !dev->ethtool_ops->get_coalesce)
5715 ++ if (!dev->ethtool_ops->set_coalesce || !dev->ethtool_ops->get_coalesce)
5716 + return -EOPNOTSUPP;
5717 +
5718 + ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce,
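
The ethtool one-liner swaps && for ||: ethtool_set_coalesce() calls
get_coalesce() before set_coalesce(), so it must bail out when either hook
is missing, not only when both are. A tiny demo of the two predicates on a
hypothetical ops table with only one hook populated:

    #include <stdio.h>
    #include <stdbool.h>

    struct ops { void *get_coalesce, *set_coalesce; };

    static bool rejected_old(const struct ops *o)
    {
            /* Old check: rejects only when BOTH hooks are missing. */
            return !o->set_coalesce && !o->get_coalesce;
    }

    static bool rejected_new(const struct ops *o)
    {
            /* Fixed check: reject when EITHER hook is missing, since
             * the handler dereferences get_coalesce() first. */
            return !o->set_coalesce || !o->get_coalesce;
    }

    int main(void)
    {
            struct ops half = { .get_coalesce = NULL,
                                .set_coalesce = (void *)1 };

            /* old: 0 (would NULL-deref get_coalesce),
             * new: 1 (returns -EOPNOTSUPP instead) */
            printf("old: %d, new: %d\n",
                   rejected_old(&half), rejected_new(&half));
            return 0;
    }
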
5719 +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
5720 +index 9e8100728d464..5dbd4b5505eba 100644
5721 +--- a/net/ipv4/nexthop.c
5722 ++++ b/net/ipv4/nexthop.c
5723 +@@ -1899,15 +1899,36 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
5724 + /* if any FIB entries reference this nexthop, any dst entries
5725 + * need to be regenerated
5726 + */
5727 +-static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
5728 ++static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
5729 ++ struct nexthop *replaced_nh)
5730 + {
5731 + struct fib6_info *f6i;
5732 ++ struct nh_group *nhg;
5733 ++ int i;
5734 +
5735 + if (!list_empty(&nh->fi_list))
5736 + rt_cache_flush(net);
5737 +
5738 + list_for_each_entry(f6i, &nh->f6i_list, nh_list)
5739 + ipv6_stub->fib6_update_sernum(net, f6i);
5740 ++
5741 ++ /* if an IPv6 group was replaced, we have to release all old
5742 ++ * dsts to make sure all refcounts are released
5743 ++ */
5744 ++ if (!replaced_nh->is_group)
5745 ++ return;
5746 ++
5747 ++ /* new dsts must use only the new nexthop group */
5748 ++ synchronize_net();
5749 ++
5750 ++ nhg = rtnl_dereference(replaced_nh->nh_grp);
5751 ++ for (i = 0; i < nhg->num_nh; i++) {
5752 ++ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
5753 ++ struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
5754 ++
5755 ++ if (nhi->family == AF_INET6)
5756 ++ ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
5757 ++ }
5758 + }
5759 +
5760 + static int replace_nexthop_grp(struct net *net, struct nexthop *old,
5761 +@@ -2247,7 +2268,7 @@ static int replace_nexthop(struct net *net, struct nexthop *old,
5762 + err = replace_nexthop_single(net, old, new, extack);
5763 +
5764 + if (!err) {
5765 +- nh_rt_cache_flush(net, old);
5766 ++ nh_rt_cache_flush(net, old, new);
5767 +
5768 + __remove_nexthop(net, new, NULL);
5769 + nexthop_put(new);
5770 +@@ -2544,11 +2565,15 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh,
5771 + /* sets nh_dev if successful */
5772 + err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
5773 + extack);
5774 +- if (err)
5775 ++ if (err) {
5776 ++ /* IPv6 is not enabled, don't call fib6_nh_release */
5777 ++ if (err == -EAFNOSUPPORT)
5778 ++ goto out;
5779 + ipv6_stub->fib6_nh_release(fib6_nh);
5780 +- else
5781 ++ } else {
5782 + nh->nh_flags = fib6_nh->fib_nh_flags;
5783 +-
5784 ++ }
5785 ++out:
5786 + return err;
5787 + }
5788 +
5789 +diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
5790 +index 4a30deaa9a37f..8d2d4d652f6d4 100644
5791 +--- a/net/ipv4/tcp_cubic.c
5792 ++++ b/net/ipv4/tcp_cubic.c
5793 +@@ -328,8 +328,6 @@ static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
5794 + return;
5795 +
5796 + if (tcp_in_slow_start(tp)) {
5797 +- if (hystart && after(ack, ca->end_seq))
5798 +- bictcp_hystart_reset(sk);
5799 + acked = tcp_slow_start(tp, acked);
5800 + if (!acked)
5801 + return;
5802 +@@ -389,6 +387,9 @@ static void hystart_update(struct sock *sk, u32 delay)
5803 + struct bictcp *ca = inet_csk_ca(sk);
5804 + u32 threshold;
5805 +
5806 ++ if (after(tp->snd_una, ca->end_seq))
5807 ++ bictcp_hystart_reset(sk);
5808 ++
5809 + if (hystart_detect & HYSTART_ACK_TRAIN) {
5810 + u32 now = bictcp_clock_us(sk);
5811 +
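With the change above, the round-end check lives in hystart_update() and compares tp->snd_una with ca->end_seq, so the per-round HyStart state is refreshed right before the detection logic runs. after() is the kernel's wraparound-safe sequence comparison; a self-contained sketch of the same arithmetic:

	#include <stdbool.h>
	#include <stdint.h>

	/* "seq1 is after seq2" for 32-bit sequence numbers: the unsigned
	 * difference reinterpreted as signed stays correct across wraparound.
	 */
	static bool seq_after(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) > 0;
	}
	/* e.g. seq_after(0x00000005, 0xfffffff0) is true: 5 lies after
	 * 0xfffffff0 once the sequence space wraps, despite 5 < 0xfffffff0.
	 */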
5812 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
5813 +index 0c4da163535ad..dab4a047590b7 100644
5814 +--- a/net/ipv6/af_inet6.c
5815 ++++ b/net/ipv6/af_inet6.c
5816 +@@ -1026,6 +1026,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
5817 + .ip6_mtu_from_fib6 = ip6_mtu_from_fib6,
5818 + .fib6_nh_init = fib6_nh_init,
5819 + .fib6_nh_release = fib6_nh_release,
5820 ++ .fib6_nh_release_dsts = fib6_nh_release_dsts,
5821 + .fib6_update_sernum = fib6_update_sernum_stub,
5822 + .fib6_rt_update = fib6_rt_update,
5823 + .ip6_del_rt = ip6_del_rt,
5824 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
5825 +index 2f044a49afa8c..ff4e83e2a5068 100644
5826 +--- a/net/ipv6/ip6_output.c
5827 ++++ b/net/ipv6/ip6_output.c
5828 +@@ -174,7 +174,7 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
5829 + #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
5830 + /* Policy lookup after SNAT yielded a new policy */
5831 + if (skb_dst(skb)->xfrm) {
5832 +- IPCB(skb)->flags |= IPSKB_REROUTED;
5833 ++ IP6CB(skb)->flags |= IP6SKB_REROUTED;
5834 + return dst_output(net, sk, skb);
5835 + }
5836 + #endif
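The IPCB to IP6CB fix above matters because the two macros overlay different structures on the same skb->cb[] scratch area; marking an IPv6 packet through the IPv4 view sets a bit at the wrong offset, so the IPv6 output path never sees the reroute marker. A reduced sketch of the control-block overlay idiom (hypothetical structs, not the kernel's layouts):

	struct buf { char cb[48]; };			/* stand-in for skb->cb[] */

	struct v4_parm { unsigned char flags; };	/* ~struct inet_skb_parm  */
	struct v6_parm { unsigned short frag_off;	/* ~struct inet6_skb_parm */
			 unsigned char flags; };

	#define V4CB(b) ((struct v4_parm *)((b)->cb))
	#define V6CB(b) ((struct v6_parm *)((b)->cb))

	/* V4CB(b)->flags and V6CB(b)->flags live at different offsets inside
	 * cb[], so each protocol must consistently use its own view; writing
	 * through the wrong macro flips an unrelated byte.
	 */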
5837 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
5838 +index 9b9ef09382ab9..79cb5e5a4948b 100644
5839 +--- a/net/ipv6/route.c
5840 ++++ b/net/ipv6/route.c
5841 +@@ -3680,6 +3680,25 @@ void fib6_nh_release(struct fib6_nh *fib6_nh)
5842 + fib_nh_common_release(&fib6_nh->nh_common);
5843 + }
5844 +
5845 ++void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
5846 ++{
5847 ++ int cpu;
5848 ++
5849 ++ if (!fib6_nh->rt6i_pcpu)
5850 ++ return;
5851 ++
5852 ++ for_each_possible_cpu(cpu) {
5853 ++ struct rt6_info *pcpu_rt, **ppcpu_rt;
5854 ++
5855 ++ ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
5856 ++ pcpu_rt = xchg(ppcpu_rt, NULL);
5857 ++ if (pcpu_rt) {
5858 ++ dst_dev_put(&pcpu_rt->dst);
5859 ++ dst_release(&pcpu_rt->dst);
5860 ++ }
5861 ++ }
5862 ++}
5863 ++
5864 + static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
5865 + gfp_t gfp_flags,
5866 + struct netlink_ext_ack *extack)
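fib6_nh_release_dsts() above drains the per-cpu cached routes (rt6i_pcpu) with xchg(): atomically swapping NULL in and taking the old pointer out guarantees each cached dst is put exactly once even if another CPU touches the same slot concurrently. A userspace-compilable sketch of the claim-and-clear pattern (C11 atomics standing in for the kernel's xchg()):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct dst { int refcnt; };

	static void dst_put(struct dst *d)
	{
		if (--d->refcnt == 0)
			free(d);
	}

	static void drain(_Atomic(struct dst *) *slots, size_t n)
	{
		for (size_t i = 0; i < n; i++) {
			/* claim the slot atomically; racing drainers get NULL */
			struct dst *old = atomic_exchange(&slots[i], NULL);

			if (old)
				dst_put(old);	/* released exactly once */
		}
	}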
5867 +diff --git a/net/mptcp/options.c b/net/mptcp/options.c
5868 +index 350348f070700..0966855a7c251 100644
5869 +--- a/net/mptcp/options.c
5870 ++++ b/net/mptcp/options.c
5871 +@@ -422,28 +422,6 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
5872 + return false;
5873 + }
5874 +
5875 +-/* MP_JOIN client subflow must wait for 4th ack before sending any data:
5876 +- * TCP can't schedule delack timer before the subflow is fully established.
5877 +- * MPTCP uses the delack timer to do 3rd ack retransmissions
5878 +- */
5879 +-static void schedule_3rdack_retransmission(struct sock *sk)
5880 +-{
5881 +- struct inet_connection_sock *icsk = inet_csk(sk);
5882 +- struct tcp_sock *tp = tcp_sk(sk);
5883 +- unsigned long timeout;
5884 +-
5885 +- /* reschedule with a timeout above RTT, as we must look only for drop */
5886 +- if (tp->srtt_us)
5887 +- timeout = tp->srtt_us << 1;
5888 +- else
5889 +- timeout = TCP_TIMEOUT_INIT;
5890 +-
5891 +- WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
5892 +- icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
5893 +- icsk->icsk_ack.timeout = timeout;
5894 +- sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
5895 +-}
5896 +-
5897 + static void clear_3rdack_retransmission(struct sock *sk)
5898 + {
5899 + struct inet_connection_sock *icsk = inet_csk(sk);
5900 +@@ -526,7 +504,15 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
5901 + *size = TCPOLEN_MPTCP_MPJ_ACK;
5902 + pr_debug("subflow=%p", subflow);
5903 +
5904 +- schedule_3rdack_retransmission(sk);
5905 ++ /* we can use the full delegate action helper only from BH context.
5906 ++ * If we are in process context - sk is flushing the backlog at
5907 ++ * socket lock release time - just set the appropriate flag; it will
5908 ++ * be handled by the release callback.
5909 ++ */
5910 ++ if (sock_owned_by_user(sk))
5911 ++ set_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status);
5912 ++ else
5913 ++ mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_ACK);
5914 + return true;
5915 + }
5916 + return false;
5917 +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
5918 +index 4379d69aead7e..421fa62ce5cdf 100644
5919 +--- a/net/mptcp/protocol.c
5920 ++++ b/net/mptcp/protocol.c
5921 +@@ -1621,7 +1621,8 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
5922 + if (!xmit_ssk)
5923 + goto out;
5924 + if (xmit_ssk != ssk) {
5925 +- mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
5926 ++ mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
5927 ++ MPTCP_DELEGATE_SEND);
5928 + goto out;
5929 + }
5930 +
5931 +@@ -2959,7 +2960,7 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
5932 + if (xmit_ssk == ssk)
5933 + __mptcp_subflow_push_pending(sk, ssk);
5934 + else if (xmit_ssk)
5935 +- mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
5936 ++ mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
5937 + } else {
5938 + set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
5939 + }
5940 +@@ -3013,18 +3014,50 @@ static void mptcp_release_cb(struct sock *sk)
5941 + __mptcp_update_rmem(sk);
5942 + }
5943 +
5944 ++/* MP_JOIN client subflow must wait for 4th ack before sending any data:
5945 ++ * TCP can't schedule delack timer before the subflow is fully established.
5946 ++ * MPTCP uses the delack timer to do 3rd ack retransmissions
5947 ++ */
5948 ++static void schedule_3rdack_retransmission(struct sock *ssk)
5949 ++{
5950 ++ struct inet_connection_sock *icsk = inet_csk(ssk);
5951 ++ struct tcp_sock *tp = tcp_sk(ssk);
5952 ++ unsigned long timeout;
5953 ++
5954 ++ if (mptcp_subflow_ctx(ssk)->fully_established)
5955 ++ return;
5956 ++
5957 ++ /* reschedule with a timeout above RTT, as we must look only for drop */
5958 ++ if (tp->srtt_us)
5959 ++ timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
5960 ++ else
5961 ++ timeout = TCP_TIMEOUT_INIT;
5962 ++ timeout += jiffies;
5963 ++
5964 ++ WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
5965 ++ icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
5966 ++ icsk->icsk_ack.timeout = timeout;
5967 ++ sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
5968 ++}
5969 ++
5970 + void mptcp_subflow_process_delegated(struct sock *ssk)
5971 + {
5972 + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
5973 + struct sock *sk = subflow->conn;
5974 +
5975 +- mptcp_data_lock(sk);
5976 +- if (!sock_owned_by_user(sk))
5977 +- __mptcp_subflow_push_pending(sk, ssk);
5978 +- else
5979 +- set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
5980 +- mptcp_data_unlock(sk);
5981 +- mptcp_subflow_delegated_done(subflow);
5982 ++ if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
5983 ++ mptcp_data_lock(sk);
5984 ++ if (!sock_owned_by_user(sk))
5985 ++ __mptcp_subflow_push_pending(sk, ssk);
5986 ++ else
5987 ++ set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
5988 ++ mptcp_data_unlock(sk);
5989 ++ mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
5990 ++ }
5991 ++ if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
5992 ++ schedule_3rdack_retransmission(ssk);
5993 ++ mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
5994 ++ }
5995 + }
5996 +
5997 + static int mptcp_hash(struct sock *sk)
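Besides moving the 3rd-ack scheduling to the delegated-action path, the rewrite above fixes a units bug: tp->srtt_us holds the smoothed RTT in microseconds scaled by 8 (<< 3), and the removed code fed srtt_us << 1 straight to the timer as if it were an absolute jiffies value. Worked example, assuming HZ = 1000:

	/* real RTT                = 1000 us
	 * tp->srtt_us             = 1000 << 3 = 8000   (EWMA kept at 8x scale)
	 * 8000 >> (3 - 1)         = 2000 us            (= 2 * RTT)
	 * usecs_to_jiffies(2000)  = 2 jiffies; adding jiffies yields the expiry
	 *
	 * The removed code would have armed the timer at the absolute tick
	 * 16000 (srtt_us << 1), neither converted to jiffies nor offset from
	 * the current time.
	 */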
5998 +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
5999 +index dc984676c5eb1..82c5dc4d6b49d 100644
6000 +--- a/net/mptcp/protocol.h
6001 ++++ b/net/mptcp/protocol.h
6002 +@@ -401,6 +401,7 @@ struct mptcp_delegated_action {
6003 + DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
6004 +
6005 + #define MPTCP_DELEGATE_SEND 0
6006 ++#define MPTCP_DELEGATE_ACK 1
6007 +
6008 + /* MPTCP subflow context */
6009 + struct mptcp_subflow_context {
6010 +@@ -506,23 +507,23 @@ static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
6011 +
6012 + void mptcp_subflow_process_delegated(struct sock *ssk);
6013 +
6014 +-static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow)
6015 ++static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action)
6016 + {
6017 + struct mptcp_delegated_action *delegated;
6018 + bool schedule;
6019 +
6020 ++ /* the caller held the subflow bh socket lock */
6021 ++ lockdep_assert_in_softirq();
6022 ++
6023 + /* The implied barrier pairs with mptcp_subflow_delegated_done(), and
6024 + * ensures the below list check sees list updates done prior to status
6025 + * bit changes
6026 + */
6027 +- if (!test_and_set_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
6028 ++ if (!test_and_set_bit(action, &subflow->delegated_status)) {
6029 + /* still on delegated list from previous scheduling */
6030 + if (!list_empty(&subflow->delegated_node))
6031 + return;
6032 +
6033 +- /* the caller held the subflow bh socket lock */
6034 +- lockdep_assert_in_softirq();
6035 +-
6036 + delegated = this_cpu_ptr(&mptcp_delegated_actions);
6037 + schedule = list_empty(&delegated->head);
6038 + list_add_tail(&subflow->delegated_node, &delegated->head);
6039 +@@ -547,16 +548,16 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
6040 +
6041 + static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
6042 + {
6043 +- return test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
6044 ++ return !!READ_ONCE(subflow->delegated_status);
6045 + }
6046 +
6047 +-static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow)
6048 ++static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow, int action)
6049 + {
6050 + /* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before
6051 + * touching the status bit
6052 + */
6053 + smp_wmb();
6054 +- clear_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
6055 ++ clear_bit(action, &subflow->delegated_status);
6056 + }
6057 +
6058 + int mptcp_is_enabled(const struct net *net);
6059 +diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
6060 +index ba9ae482141b0..dda8b76b77988 100644
6061 +--- a/net/ncsi/ncsi-cmd.c
6062 ++++ b/net/ncsi/ncsi-cmd.c
6063 +@@ -18,6 +18,8 @@
6064 + #include "internal.h"
6065 + #include "ncsi-pkt.h"
6066 +
6067 ++static const int padding_bytes = 26;
6068 ++
6069 + u32 ncsi_calculate_checksum(unsigned char *data, int len)
6070 + {
6071 + u32 checksum = 0;
6072 +@@ -213,12 +215,17 @@ static int ncsi_cmd_handler_oem(struct sk_buff *skb,
6073 + {
6074 + struct ncsi_cmd_oem_pkt *cmd;
6075 + unsigned int len;
6076 ++ int payload;
6077 ++ /* NC-SI spec DSP_0222_1.2.0, section 8.2.2.2
6078 ++ * requires the payload to be padded with 0 to a
6079 ++ * 32-bit boundary before the checksum field.
6080 ++ * Ensure the padding bytes are accounted for in
6081 ++ * the skb allocation.
6082 ++ */
6083 +
6084 ++ payload = ALIGN(nca->payload, 4);
6085 + len = sizeof(struct ncsi_cmd_pkt_hdr) + 4;
6086 +- if (nca->payload < 26)
6087 +- len += 26;
6088 +- else
6089 +- len += nca->payload;
6090 ++ len += max(payload, padding_bytes);
6091 +
6092 + cmd = skb_put_zero(skb, len);
6093 + memcpy(&cmd->mfr_id, nca->data, nca->payload);
6094 +@@ -272,6 +279,7 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
6095 + struct net_device *dev = nd->dev;
6096 + int hlen = LL_RESERVED_SPACE(dev);
6097 + int tlen = dev->needed_tailroom;
6098 ++ int payload;
6099 + int len = hlen + tlen;
6100 + struct sk_buff *skb;
6101 + struct ncsi_request *nr;
6102 +@@ -281,14 +289,14 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
6103 + return NULL;
6104 +
6105 + /* NCSI command packet has 16-bytes header, payload, 4 bytes checksum.
6106 ++ * The payload needs padding so that the checksum field following it is
6107 ++ * aligned to a 32-bit boundary.
6108 + * The packet needs padding if its payload is less than 26 bytes to
6109 + * meet 64 bytes minimal ethernet frame length.
6110 + */
6111 + len += sizeof(struct ncsi_cmd_pkt_hdr) + 4;
6112 +- if (nca->payload < 26)
6113 +- len += 26;
6114 +- else
6115 +- len += nca->payload;
6116 ++ payload = ALIGN(nca->payload, 4);
6117 ++ len += max(payload, padding_bytes);
6118 +
6119 + /* Allocate skb */
6120 + skb = alloc_skb(len, GFP_ATOMIC);
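The new length computation satisfies both framing constraints in one expression: ALIGN(nca->payload, 4) pads the payload up to the 32-bit boundary the checksum must start on, and max(..., padding_bytes) still enforces the 26 bytes needed to reach the 64-byte minimum Ethernet frame. A few worked values, given ALIGN(x, a) == ((x) + (a) - 1) & ~((a) - 1) for power-of-two a:

	/* nca->payload =  4  -> ALIGN =  4, max( 4, 26) = 26  (frame minimum wins)
	 * nca->payload = 27  -> ALIGN = 28, max(28, 26) = 28  (alignment wins)
	 * nca->payload = 64  -> ALIGN = 64, max(64, 26) = 64  (already aligned)
	 *
	 * The old code handled the first case but left e.g. a 27-byte payload
	 * unpadded at 27, misaligning the checksum that follows it.
	 */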
6121 +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
6122 +index 128690c512dff..393058a43aa73 100644
6123 +--- a/net/netfilter/ipvs/ip_vs_core.c
6124 ++++ b/net/netfilter/ipvs/ip_vs_core.c
6125 +@@ -1964,7 +1964,6 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
6126 + struct ip_vs_proto_data *pd;
6127 + struct ip_vs_conn *cp;
6128 + int ret, pkts;
6129 +- int conn_reuse_mode;
6130 + struct sock *sk;
6131 +
6132 + /* Already marked as IPVS request or reply? */
6133 +@@ -2041,15 +2040,16 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
6134 + cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
6135 + ipvs, af, skb, &iph);
6136 +
6137 +- conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
6138 +- if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
6139 ++ if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
6140 ++ int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
6141 + bool old_ct = false, resched = false;
6142 +
6143 + if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
6144 + unlikely(!atomic_read(&cp->dest->weight))) {
6145 + resched = true;
6146 + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
6147 +- } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
6148 ++ } else if (conn_reuse_mode &&
6149 ++ is_new_conn_expected(cp, conn_reuse_mode)) {
6150 + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
6151 + if (!atomic_read(&cp->n_control)) {
6152 + resched = true;
6153 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
6154 +index f1e5443fe7c74..c7708bde057cb 100644
6155 +--- a/net/netfilter/nf_conntrack_netlink.c
6156 ++++ b/net/netfilter/nf_conntrack_netlink.c
6157 +@@ -1011,11 +1011,9 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
6158 + CTA_TUPLE_REPLY,
6159 + filter->family,
6160 + &filter->zone,
6161 +- filter->orig_flags);
6162 +- if (err < 0) {
6163 +- err = -EINVAL;
6164 ++ filter->reply_flags);
6165 ++ if (err < 0)
6166 + goto err_filter;
6167 +- }
6168 + }
6169 +
6170 + return filter;
6171 +diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
6172 +index d6bf1b2cd541b..b561e0a44a45f 100644
6173 +--- a/net/netfilter/nf_flow_table_offload.c
6174 ++++ b/net/netfilter/nf_flow_table_offload.c
6175 +@@ -65,11 +65,11 @@ static void nf_flow_rule_lwt_match(struct nf_flow_match *match,
6176 + sizeof(struct in6_addr));
6177 + if (memcmp(&key->enc_ipv6.src, &in6addr_any,
6178 + sizeof(struct in6_addr)))
6179 +- memset(&key->enc_ipv6.src, 0xff,
6180 ++ memset(&mask->enc_ipv6.src, 0xff,
6181 + sizeof(struct in6_addr));
6182 + if (memcmp(&key->enc_ipv6.dst, &in6addr_any,
6183 + sizeof(struct in6_addr)))
6184 +- memset(&key->enc_ipv6.dst, 0xff,
6185 ++ memset(&mask->enc_ipv6.dst, 0xff,
6186 + sizeof(struct in6_addr));
6187 + enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS);
6188 + key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
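The two memset fixes above address a classic key/mask mix-up: flow-dissector matching compares (field & mask) == (key & mask), so writing the all-ones pattern into the key while the mask stays zero makes the rule match any address. A tiny self-contained illustration:

	#include <stdbool.h>
	#include <stdint.h>

	struct match { uint32_t key; uint32_t mask; };

	/* only bits set in mask participate in the comparison */
	static bool flow_matches(const struct match *m, uint32_t field)
	{
		return (field & m->mask) == (m->key & m->mask);
	}
	/* with key = 0x0a000001 and mask = 0 (the bug), every field matches;
	 * mask = 0xffffffff turns it into an exact match on the key.
	 */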
6189 +diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
6190 +index 1f857ffd1ac23..92a686807971b 100644
6191 +--- a/net/sched/sch_ets.c
6192 ++++ b/net/sched/sch_ets.c
6193 +@@ -667,12 +667,14 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
6194 + q->classes[i].deficit = quanta[i];
6195 + }
6196 + }
6197 ++ for (i = q->nbands; i < oldbands; i++) {
6198 ++ qdisc_tree_flush_backlog(q->classes[i].qdisc);
6199 ++ if (i >= q->nstrict)
6200 ++ list_del(&q->classes[i].alist);
6201 ++ }
6202 + q->nstrict = nstrict;
6203 + memcpy(q->prio2band, priomap, sizeof(priomap));
6204 +
6205 +- for (i = q->nbands; i < oldbands; i++)
6206 +- qdisc_tree_flush_backlog(q->classes[i].qdisc);
6207 +-
6208 + for (i = 0; i < q->nbands; i++)
6209 + q->classes[i].quantum = quanta[i];
6210 +
6211 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
6212 +index 32c1c7ce856d3..3d8219e3b0264 100644
6213 +--- a/net/smc/af_smc.c
6214 ++++ b/net/smc/af_smc.c
6215 +@@ -1918,8 +1918,10 @@ static int smc_listen(struct socket *sock, int backlog)
6216 + smc->clcsock->sk->sk_user_data =
6217 + (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
6218 + rc = kernel_listen(smc->clcsock, backlog);
6219 +- if (rc)
6220 ++ if (rc) {
6221 ++ smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
6222 + goto out;
6223 ++ }
6224 + sk->sk_max_ack_backlog = backlog;
6225 + sk->sk_ack_backlog = 0;
6226 + sk->sk_state = SMC_LISTEN;
6227 +@@ -2152,8 +2154,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
6228 + static int smc_shutdown(struct socket *sock, int how)
6229 + {
6230 + struct sock *sk = sock->sk;
6231 ++ bool do_shutdown = true;
6232 + struct smc_sock *smc;
6233 + int rc = -EINVAL;
6234 ++ int old_state;
6235 + int rc1 = 0;
6236 +
6237 + smc = smc_sk(sk);
6238 +@@ -2180,7 +2184,11 @@ static int smc_shutdown(struct socket *sock, int how)
6239 + }
6240 + switch (how) {
6241 + case SHUT_RDWR: /* shutdown in both directions */
6242 ++ old_state = sk->sk_state;
6243 + rc = smc_close_active(smc);
6244 ++ if (old_state == SMC_ACTIVE &&
6245 ++ sk->sk_state == SMC_PEERCLOSEWAIT1)
6246 ++ do_shutdown = false;
6247 + break;
6248 + case SHUT_WR:
6249 + rc = smc_close_shutdown_write(smc);
6250 +@@ -2190,7 +2198,7 @@ static int smc_shutdown(struct socket *sock, int how)
6251 + /* nothing more to do because peer is not involved */
6252 + break;
6253 + }
6254 +- if (smc->clcsock)
6255 ++ if (do_shutdown && smc->clcsock)
6256 + rc1 = kernel_sock_shutdown(smc->clcsock, how);
6257 + /* map sock_shutdown_cmd constants to sk_shutdown value range */
6258 + sk->sk_shutdown |= how + 1;
6259 +diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
6260 +index 0f9ffba07d268..04620b53b74a7 100644
6261 +--- a/net/smc/smc_close.c
6262 ++++ b/net/smc/smc_close.c
6263 +@@ -228,6 +228,12 @@ again:
6264 + /* send close request */
6265 + rc = smc_close_final(conn);
6266 + sk->sk_state = SMC_PEERCLOSEWAIT1;
6267 ++
6268 ++ /* actively shut down clcsock before the peer closes it,
6269 ++ * to prevent the peer from entering TIME_WAIT state.
6270 ++ */
6271 ++ if (smc->clcsock && smc->clcsock->sk)
6272 ++ rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
6273 + } else {
6274 + /* peer event has changed the state */
6275 + goto again;
6276 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
6277 +index d672c0f0e247f..508a14fc4f587 100644
6278 +--- a/net/smc/smc_core.c
6279 ++++ b/net/smc/smc_core.c
6280 +@@ -1596,14 +1596,26 @@ static void smc_link_down_work(struct work_struct *work)
6281 + mutex_unlock(&lgr->llc_conf_mutex);
6282 + }
6283 +
6284 +-/* Determine vlan of internal TCP socket.
6285 +- * @vlan_id: address to store the determined vlan id into
6286 +- */
6287 ++static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
6288 ++ struct netdev_nested_priv *priv)
6289 ++{
6290 ++ unsigned short *vlan_id = (unsigned short *)priv->data;
6291 ++
6292 ++ if (is_vlan_dev(lower_dev)) {
6293 ++ *vlan_id = vlan_dev_vlan_id(lower_dev);
6294 ++ return 1;
6295 ++ }
6296 ++
6297 ++ return 0;
6298 ++}
6299 ++
6300 ++/* Determine vlan of internal TCP socket. */
6301 + int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
6302 + {
6303 + struct dst_entry *dst = sk_dst_get(clcsock->sk);
6304 ++ struct netdev_nested_priv priv;
6305 + struct net_device *ndev;
6306 +- int i, nest_lvl, rc = 0;
6307 ++ int rc = 0;
6308 +
6309 + ini->vlan_id = 0;
6310 + if (!dst) {
6311 +@@ -1621,20 +1633,9 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
6312 + goto out_rel;
6313 + }
6314 +
6315 ++ priv.data = (void *)&ini->vlan_id;
6316 + rtnl_lock();
6317 +- nest_lvl = ndev->lower_level;
6318 +- for (i = 0; i < nest_lvl; i++) {
6319 +- struct list_head *lower = &ndev->adj_list.lower;
6320 +-
6321 +- if (list_empty(lower))
6322 +- break;
6323 +- lower = lower->next;
6324 +- ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
6325 +- if (is_vlan_dev(ndev)) {
6326 +- ini->vlan_id = vlan_dev_vlan_id(ndev);
6327 +- break;
6328 +- }
6329 +- }
6330 ++ netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
6331 + rtnl_unlock();
6332 +
6333 + out_rel:
6334 +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
6335 +index 9ab81db8a6545..9aac9c60d786d 100644
6336 +--- a/net/tls/tls_main.c
6337 ++++ b/net/tls/tls_main.c
6338 +@@ -61,7 +61,7 @@ static DEFINE_MUTEX(tcpv6_prot_mutex);
6339 + static const struct proto *saved_tcpv4_prot;
6340 + static DEFINE_MUTEX(tcpv4_prot_mutex);
6341 + static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
6342 +-static struct proto_ops tls_sw_proto_ops;
6343 ++static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
6344 + static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
6345 + const struct proto *base);
6346 +
6347 +@@ -71,6 +71,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
6348 +
6349 + WRITE_ONCE(sk->sk_prot,
6350 + &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
6351 ++ WRITE_ONCE(sk->sk_socket->ops,
6352 ++ &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
6353 + }
6354 +
6355 + int wait_on_pending_writer(struct sock *sk, long *timeo)
6356 +@@ -581,8 +583,6 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
6357 + if (tx) {
6358 + ctx->sk_write_space = sk->sk_write_space;
6359 + sk->sk_write_space = tls_write_space;
6360 +- } else {
6361 +- sk->sk_socket->ops = &tls_sw_proto_ops;
6362 + }
6363 + goto out;
6364 +
6365 +@@ -640,6 +640,39 @@ struct tls_context *tls_ctx_create(struct sock *sk)
6366 + return ctx;
6367 + }
6368 +
6369 ++static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
6370 ++ const struct proto_ops *base)
6371 ++{
6372 ++ ops[TLS_BASE][TLS_BASE] = *base;
6373 ++
6374 ++ ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
6375 ++ ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;
6376 ++
6377 ++ ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE];
6378 ++ ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read;
6379 ++
6380 ++ ops[TLS_SW ][TLS_SW ] = ops[TLS_SW ][TLS_BASE];
6381 ++ ops[TLS_SW ][TLS_SW ].splice_read = tls_sw_splice_read;
6382 ++
6383 ++#ifdef CONFIG_TLS_DEVICE
6384 ++ ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
6385 ++ ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL;
6386 ++
6387 ++ ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ];
6388 ++ ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL;
6389 ++
6390 ++ ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ];
6391 ++
6392 ++ ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ];
6393 ++
6394 ++ ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ];
6395 ++ ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL;
6396 ++#endif
6397 ++#ifdef CONFIG_TLS_TOE
6398 ++ ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
6399 ++#endif
6400 ++}
6401 ++
6402 + static void tls_build_proto(struct sock *sk)
6403 + {
6404 + int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
6405 +@@ -651,6 +684,8 @@ static void tls_build_proto(struct sock *sk)
6406 + mutex_lock(&tcpv6_prot_mutex);
6407 + if (likely(prot != saved_tcpv6_prot)) {
6408 + build_protos(tls_prots[TLSV6], prot);
6409 ++ build_proto_ops(tls_proto_ops[TLSV6],
6410 ++ sk->sk_socket->ops);
6411 + smp_store_release(&saved_tcpv6_prot, prot);
6412 + }
6413 + mutex_unlock(&tcpv6_prot_mutex);
6414 +@@ -661,6 +696,8 @@ static void tls_build_proto(struct sock *sk)
6415 + mutex_lock(&tcpv4_prot_mutex);
6416 + if (likely(prot != saved_tcpv4_prot)) {
6417 + build_protos(tls_prots[TLSV4], prot);
6418 ++ build_proto_ops(tls_proto_ops[TLSV4],
6419 ++ sk->sk_socket->ops);
6420 + smp_store_release(&saved_tcpv4_prot, prot);
6421 + }
6422 + mutex_unlock(&tcpv4_prot_mutex);
6423 +@@ -871,10 +908,6 @@ static int __init tls_register(void)
6424 + if (err)
6425 + return err;
6426 +
6427 +- tls_sw_proto_ops = inet_stream_ops;
6428 +- tls_sw_proto_ops.splice_read = tls_sw_splice_read;
6429 +- tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked;
6430 +-
6431 + tls_device_init();
6432 + tcp_register_ulp(&tcp_tls_ulp_ops);
6433 +
6434 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
6435 +index 1b08b877a8900..b0cdcea101806 100644
6436 +--- a/net/tls/tls_sw.c
6437 ++++ b/net/tls/tls_sw.c
6438 +@@ -1993,6 +1993,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
6439 + struct sock *sk = sock->sk;
6440 + struct sk_buff *skb;
6441 + ssize_t copied = 0;
6442 ++ bool from_queue;
6443 + int err = 0;
6444 + long timeo;
6445 + int chunk;
6446 +@@ -2002,25 +2003,28 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
6447 +
6448 + timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
6449 +
6450 +- skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
6451 +- if (!skb)
6452 +- goto splice_read_end;
6453 +-
6454 +- if (!ctx->decrypted) {
6455 +- err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
6456 +-
6457 +- /* splice does not support reading control messages */
6458 +- if (ctx->control != TLS_RECORD_TYPE_DATA) {
6459 +- err = -EINVAL;
6460 ++ from_queue = !skb_queue_empty(&ctx->rx_list);
6461 ++ if (from_queue) {
6462 ++ skb = __skb_dequeue(&ctx->rx_list);
6463 ++ } else {
6464 ++ skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo,
6465 ++ &err);
6466 ++ if (!skb)
6467 + goto splice_read_end;
6468 +- }
6469 +
6470 ++ err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
6471 + if (err < 0) {
6472 + tls_err_abort(sk, -EBADMSG);
6473 + goto splice_read_end;
6474 + }
6475 +- ctx->decrypted = 1;
6476 + }
6477 ++
6478 ++ /* splice does not support reading control messages */
6479 ++ if (ctx->control != TLS_RECORD_TYPE_DATA) {
6480 ++ err = -EINVAL;
6481 ++ goto splice_read_end;
6482 ++ }
6483 ++
6484 + rxm = strp_msg(skb);
6485 +
6486 + chunk = min_t(unsigned int, rxm->full_len, len);
6487 +@@ -2028,7 +2032,17 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
6488 + if (copied < 0)
6489 + goto splice_read_end;
6490 +
6491 +- tls_sw_advance_skb(sk, skb, copied);
6492 ++ if (!from_queue) {
6493 ++ ctx->recv_pkt = NULL;
6494 ++ __strp_unpause(&ctx->strp);
6495 ++ }
6496 ++ if (chunk < rxm->full_len) {
6497 ++ __skb_queue_head(&ctx->rx_list, skb);
6498 ++ rxm->offset += len;
6499 ++ rxm->full_len -= len;
6500 ++ } else {
6501 ++ consume_skb(skb);
6502 ++ }
6503 +
6504 + splice_read_end:
6505 + release_sock(sk);
6506 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
6507 +index 78e08e82c08c4..b0bfc78e421ce 100644
6508 +--- a/net/unix/af_unix.c
6509 ++++ b/net/unix/af_unix.c
6510 +@@ -2882,9 +2882,6 @@ static int unix_shutdown(struct socket *sock, int mode)
6511 +
6512 + unix_state_lock(sk);
6513 + sk->sk_shutdown |= mode;
6514 +- if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
6515 +- mode == SHUTDOWN_MASK)
6516 +- sk->sk_state = TCP_CLOSE;
6517 + other = unix_peer(sk);
6518 + if (other)
6519 + sock_hold(other);
6520 +diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
6521 +index b9ac9e9e45a48..10a0bffc3cf6c 100644
6522 +--- a/sound/hda/intel-dsp-config.c
6523 ++++ b/sound/hda/intel-dsp-config.c
6524 +@@ -299,6 +299,15 @@ static const struct config_entry config_table[] = {
6525 + },
6526 + #endif
6527 +
6528 ++/* JasperLake */
6529 ++#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
6530 ++ {
6531 ++ .flags = FLAG_SOF,
6532 ++ .device = 0x4dc8,
6533 ++ .codec_hid = "ESSX8336",
6534 ++ },
6535 ++#endif
6536 ++
6537 + /* Tigerlake */
6538 + #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
6539 + {
6540 +diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
6541 +index da6e6350ceafa..d074727c3e21d 100644
6542 +--- a/sound/pci/ctxfi/ctamixer.c
6543 ++++ b/sound/pci/ctxfi/ctamixer.c
6544 +@@ -23,16 +23,15 @@
6545 +
6546 + #define BLANK_SLOT 4094
6547 +
6548 +-static int amixer_master(struct rsc *rsc)
6549 ++static void amixer_master(struct rsc *rsc)
6550 + {
6551 + rsc->conj = 0;
6552 +- return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
6553 ++ rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
6554 + }
6555 +
6556 +-static int amixer_next_conj(struct rsc *rsc)
6557 ++static void amixer_next_conj(struct rsc *rsc)
6558 + {
6559 + rsc->conj++;
6560 +- return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
6561 + }
6562 +
6563 + static int amixer_index(const struct rsc *rsc)
6564 +@@ -331,16 +330,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
6565 +
6566 + /* SUM resource management */
6567 +
6568 +-static int sum_master(struct rsc *rsc)
6569 ++static void sum_master(struct rsc *rsc)
6570 + {
6571 + rsc->conj = 0;
6572 +- return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
6573 ++ rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
6574 + }
6575 +
6576 +-static int sum_next_conj(struct rsc *rsc)
6577 ++static void sum_next_conj(struct rsc *rsc)
6578 + {
6579 + rsc->conj++;
6580 +- return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
6581 + }
6582 +
6583 + static int sum_index(const struct rsc *rsc)
6584 +diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
6585 +index f589da0453424..7fc720046ce29 100644
6586 +--- a/sound/pci/ctxfi/ctdaio.c
6587 ++++ b/sound/pci/ctxfi/ctdaio.c
6588 +@@ -51,12 +51,12 @@ static const struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
6589 + [SPDIFIO] = {.left = 0x05, .right = 0x85},
6590 + };
6591 +
6592 +-static int daio_master(struct rsc *rsc)
6593 ++static void daio_master(struct rsc *rsc)
6594 + {
6595 + /* Actually, this is not the resource index of DAIO.
6596 + * For DAO, it is the input mapper index. And, for DAI,
6597 + * it is the output time-slot index. */
6598 +- return rsc->conj = rsc->idx;
6599 ++ rsc->conj = rsc->idx;
6600 + }
6601 +
6602 + static int daio_index(const struct rsc *rsc)
6603 +@@ -64,19 +64,19 @@ static int daio_index(const struct rsc *rsc)
6604 + return rsc->conj;
6605 + }
6606 +
6607 +-static int daio_out_next_conj(struct rsc *rsc)
6608 ++static void daio_out_next_conj(struct rsc *rsc)
6609 + {
6610 +- return rsc->conj += 2;
6611 ++ rsc->conj += 2;
6612 + }
6613 +
6614 +-static int daio_in_next_conj_20k1(struct rsc *rsc)
6615 ++static void daio_in_next_conj_20k1(struct rsc *rsc)
6616 + {
6617 +- return rsc->conj += 0x200;
6618 ++ rsc->conj += 0x200;
6619 + }
6620 +
6621 +-static int daio_in_next_conj_20k2(struct rsc *rsc)
6622 ++static void daio_in_next_conj_20k2(struct rsc *rsc)
6623 + {
6624 +- return rsc->conj += 0x100;
6625 ++ rsc->conj += 0x100;
6626 + }
6627 +
6628 + static const struct rsc_ops daio_out_rsc_ops = {
6629 +diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
6630 +index 81ad269345182..be1d3e61309ce 100644
6631 +--- a/sound/pci/ctxfi/ctresource.c
6632 ++++ b/sound/pci/ctxfi/ctresource.c
6633 +@@ -109,18 +109,17 @@ static int audio_ring_slot(const struct rsc *rsc)
6634 + return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
6635 + }
6636 +
6637 +-static int rsc_next_conj(struct rsc *rsc)
6638 ++static void rsc_next_conj(struct rsc *rsc)
6639 + {
6640 + unsigned int i;
6641 + for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
6642 + i++;
6643 + rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
6644 +- return rsc->conj;
6645 + }
6646 +
6647 +-static int rsc_master(struct rsc *rsc)
6648 ++static void rsc_master(struct rsc *rsc)
6649 + {
6650 +- return rsc->conj = rsc->idx;
6651 ++ rsc->conj = rsc->idx;
6652 + }
6653 +
6654 + static const struct rsc_ops rsc_generic_ops = {
6655 +diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h
6656 +index fdbfd808816d3..58553bda44f43 100644
6657 +--- a/sound/pci/ctxfi/ctresource.h
6658 ++++ b/sound/pci/ctxfi/ctresource.h
6659 +@@ -39,8 +39,8 @@ struct rsc {
6660 + };
6661 +
6662 + struct rsc_ops {
6663 +- int (*master)(struct rsc *rsc); /* Move to master resource */
6664 +- int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
6665 ++ void (*master)(struct rsc *rsc); /* Move to master resource */
6666 ++ void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
6667 + int (*index)(const struct rsc *rsc); /* Return the index of resource */
6668 + /* Return the output slot number */
6669 + int (*output_slot)(const struct rsc *rsc);
6670 +diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
6671 +index bd4697b442334..4a94b4708a77e 100644
6672 +--- a/sound/pci/ctxfi/ctsrc.c
6673 ++++ b/sound/pci/ctxfi/ctsrc.c
6674 +@@ -590,16 +590,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
6675 +
6676 + /* SRCIMP resource manager operations */
6677 +
6678 +-static int srcimp_master(struct rsc *rsc)
6679 ++static void srcimp_master(struct rsc *rsc)
6680 + {
6681 + rsc->conj = 0;
6682 +- return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
6683 ++ rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
6684 + }
6685 +
6686 +-static int srcimp_next_conj(struct rsc *rsc)
6687 ++static void srcimp_next_conj(struct rsc *rsc)
6688 + {
6689 + rsc->conj++;
6690 +- return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
6691 + }
6692 +
6693 + static int srcimp_index(const struct rsc *rsc)
6694 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6695 +index 2f1727faec698..9ce7457533c96 100644
6696 +--- a/sound/pci/hda/patch_realtek.c
6697 ++++ b/sound/pci/hda/patch_realtek.c
6698 +@@ -6521,6 +6521,27 @@ static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *co
6699 + alc_write_coef_idx(codec, 0x45, 0x5089);
6700 + }
6701 +
6702 ++static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = {
6703 ++ WRITE_COEF(0x1a, 0x9003), WRITE_COEF(0x1b, 0x0e2b), WRITE_COEF(0x37, 0xfe06),
6704 ++ WRITE_COEF(0x38, 0x4981), WRITE_COEF(0x45, 0xd489), WRITE_COEF(0x46, 0x0074),
6705 ++ WRITE_COEF(0x49, 0x0149),
6706 ++ {}
6707 ++};
6708 ++
6709 ++static void alc233_fixup_no_audio_jack(struct hda_codec *codec,
6710 ++ const struct hda_fixup *fix,
6711 ++ int action)
6712 ++{
6713 ++ /*
6714 ++ * The audio jack input and output are not detected on the ASRock NUC Box
6715 ++ * 1100 series when cold booting without this fix. Warm rebooting from a
6716 ++ * certain other OS makes the audio functional, as COEF settings are
6717 ++ * preserved in this case. This fix sets these altered COEF values as
6718 ++ * the default.
6719 ++ */
6720 ++ alc_process_coef_fw(codec, alc233_fixup_no_audio_jack_coefs);
6721 ++}
6722 ++
6723 + enum {
6724 + ALC269_FIXUP_GPIO2,
6725 + ALC269_FIXUP_SONY_VAIO,
6726 +@@ -6740,6 +6761,7 @@ enum {
6727 + ALC287_FIXUP_13S_GEN2_SPEAKERS,
6728 + ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
6729 + ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
6730 ++ ALC233_FIXUP_NO_AUDIO_JACK,
6731 + };
6732 +
6733 + static const struct hda_fixup alc269_fixups[] = {
6734 +@@ -8460,6 +8482,10 @@ static const struct hda_fixup alc269_fixups[] = {
6735 + .chained = true,
6736 + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
6737 + },
6738 ++ [ALC233_FIXUP_NO_AUDIO_JACK] = {
6739 ++ .type = HDA_FIXUP_FUNC,
6740 ++ .v.func = alc233_fixup_no_audio_jack,
6741 ++ },
6742 + };
6743 +
6744 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6745 +@@ -8639,6 +8665,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6746 + SND_PCI_QUIRK(0x103c, 0x8728, "HP EliteBook 840 G7", ALC285_FIXUP_HP_GPIO_LED),
6747 + SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
6748 + SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
6749 ++ SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
6750 + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
6751 + SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
6752 + SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
6753 +@@ -8894,6 +8921,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6754 + SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6755 + SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6756 + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
6757 ++ SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
6758 + SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
6759 + SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
6760 + SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
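The new fixup reuses the driver's {}-terminated COEF table convention: alc_process_coef_fw() walks entries until it reaches the zero-initialized sentinel. A simplified, hypothetical sketch of that table-walking pattern (the real struct coef_fw carries more fields):

	#include <stdint.h>
	#include <stdio.h>

	struct reg_write { uint8_t idx; uint16_t val; };

	static void codec_write(uint8_t idx, uint16_t val)
	{
		printf("coef 0x%02x <- 0x%04x\n", idx, val);	/* HW access elided */
	}

	static void apply_table(const struct reg_write *w)
	{
		for (; w->idx; w++)	/* the {} sentinel has idx == 0 */
			codec_write(w->idx, w->val);
	}

	static const struct reg_write fixup[] = {
		{ 0x1a, 0x9003 }, { 0x1b, 0x0e2b },
		{}			/* terminator */
	};
	/* usage: apply_table(fixup); */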
6761 +diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
6762 +index 196b06898eeb2..07894ec5e7a61 100644
6763 +--- a/sound/soc/codecs/lpass-rx-macro.c
6764 ++++ b/sound/soc/codecs/lpass-rx-macro.c
6765 +@@ -2188,7 +2188,7 @@ static int rx_macro_config_classh(struct snd_soc_component *component,
6766 + snd_soc_component_update_bits(component,
6767 + CDC_RX_CLSH_DECAY_CTRL,
6768 + CDC_RX_CLSH_DECAY_RATE_MASK, 0x0);
6769 +- snd_soc_component_update_bits(component,
6770 ++ snd_soc_component_write_field(component,
6771 + CDC_RX_RX1_RX_PATH_CFG0,
6772 + CDC_RX_RXn_CLSH_EN_MASK, 0x1);
6773 + break;
6774 +diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
6775 +index c496b359f2f40..4f568abd59e24 100644
6776 +--- a/sound/soc/codecs/wcd934x.c
6777 ++++ b/sound/soc/codecs/wcd934x.c
6778 +@@ -1896,9 +1896,8 @@ static int wcd934x_hw_params(struct snd_pcm_substream *substream,
6779 + }
6780 +
6781 + wcd->dai[dai->id].sconfig.rate = params_rate(params);
6782 +- wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
6783 +
6784 +- return 0;
6785 ++ return wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
6786 + }
6787 +
6788 + static int wcd934x_hw_free(struct snd_pcm_substream *substream,
6789 +diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
6790 +index 52de7d14b1398..67151c7770c65 100644
6791 +--- a/sound/soc/codecs/wcd938x.c
6792 ++++ b/sound/soc/codecs/wcd938x.c
6793 +@@ -1174,6 +1174,9 @@ static bool wcd938x_readonly_register(struct device *dev, unsigned int reg)
6794 + case WCD938X_DIGITAL_INTR_STATUS_0:
6795 + case WCD938X_DIGITAL_INTR_STATUS_1:
6796 + case WCD938X_DIGITAL_INTR_STATUS_2:
6797 ++ case WCD938X_DIGITAL_INTR_CLEAR_0:
6798 ++ case WCD938X_DIGITAL_INTR_CLEAR_1:
6799 ++ case WCD938X_DIGITAL_INTR_CLEAR_2:
6800 + case WCD938X_DIGITAL_SWR_HM_TEST_0:
6801 + case WCD938X_DIGITAL_SWR_HM_TEST_1:
6802 + case WCD938X_DIGITAL_EFUSE_T_DATA_0:
6803 +diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
6804 +index 46f365528d501..b74b67720ef43 100644
6805 +--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
6806 ++++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
6807 +@@ -269,9 +269,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
6808 +
6809 + if (ret < 0) {
6810 + dev_err(dev, "%s: q6asm_open_write failed\n", __func__);
6811 +- q6asm_audio_client_free(prtd->audio_client);
6812 +- prtd->audio_client = NULL;
6813 +- return -ENOMEM;
6814 ++ goto open_err;
6815 + }
6816 +
6817 + prtd->session_id = q6asm_get_session_id(prtd->audio_client);
6818 +@@ -279,7 +277,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
6819 + prtd->session_id, substream->stream);
6820 + if (ret) {
6821 + dev_err(dev, "%s: stream reg failed ret:%d\n", __func__, ret);
6822 +- return ret;
6823 ++ goto routing_err;
6824 + }
6825 +
6826 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
6827 +@@ -301,10 +299,19 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
6828 + }
6829 + if (ret < 0)
6830 + dev_info(dev, "%s: CMD Format block failed\n", __func__);
6831 ++ else
6832 ++ prtd->state = Q6ASM_STREAM_RUNNING;
6833 +
6834 +- prtd->state = Q6ASM_STREAM_RUNNING;
6835 ++ return ret;
6836 +
6837 +- return 0;
6838 ++routing_err:
6839 ++ q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE);
6840 ++open_err:
6841 ++ q6asm_unmap_memory_regions(substream->stream, prtd->audio_client);
6842 ++ q6asm_audio_client_free(prtd->audio_client);
6843 ++ prtd->audio_client = NULL;
6844 ++
6845 ++ return ret;
6846 + }
6847 +
6848 + static int q6asm_dai_trigger(struct snd_soc_component *component,
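The reworked q6asm_dai_prepare() error handling above is the standard kernel goto-unwind idiom: each failure jumps to a label that releases everything acquired so far, in reverse order, leaving a single cleanup sequence to keep correct. A reduced, compilable sketch (hypothetical resource names):

	static int res_open(void)     { return 0; }	/* ~q6asm_open_write()   */
	static void res_close(void)   { }		/* ~q6asm_cmd(CMD_CLOSE) */
	static int res_register(void) { return 0; }	/* ~routing registration */
	static void res_cleanup(void) { }		/* ~unmap + client free  */

	static int prepare(void)
	{
		int ret;

		ret = res_open();
		if (ret)
			goto open_err;
		ret = res_register();
		if (ret)
			goto reg_err;	/* unwind in reverse acquisition order */
		return 0;

	reg_err:
		res_close();		/* undo res_open() */
	open_err:
		res_cleanup();		/* release earlier allocations */
		return ret;
	}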
6849 +diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
6850 +index 3390ebef9549d..243b8179e59df 100644
6851 +--- a/sound/soc/qcom/qdsp6/q6routing.c
6852 ++++ b/sound/soc/qcom/qdsp6/q6routing.c
6853 +@@ -495,7 +495,11 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
6854 + session->port_id = be_id;
6855 + snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
6856 + } else {
6857 +- session->port_id = -1;
6858 ++ if (session->port_id == be_id) {
6859 ++ session->port_id = -1;
6860 ++ return 0;
6861 ++ }
6862 ++
6863 + snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
6864 + }
6865 +
6866 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
6867 +index f6e5ac3e03140..7459956d62b99 100644
6868 +--- a/sound/soc/soc-topology.c
6869 ++++ b/sound/soc/soc-topology.c
6870 +@@ -2674,6 +2674,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);
6871 + /* remove dynamic controls from the component driver */
6872 + int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
6873 + {
6874 ++ struct snd_card *card = comp->card->snd_card;
6875 + struct snd_soc_dobj *dobj, *next_dobj;
6876 + int pass = SOC_TPLG_PASS_END;
6877 +
6878 +@@ -2681,6 +2682,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
6879 + while (pass >= SOC_TPLG_PASS_START) {
6880 +
6881 + /* remove mixer controls */
6882 ++ down_write(&card->controls_rwsem);
6883 + list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
6884 + list) {
6885 +
6886 +@@ -2719,6 +2721,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
6887 + break;
6888 + }
6889 + }
6890 ++ up_write(&card->controls_rwsem);
6891 + pass--;
6892 + }
6893 +
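Wrapping the removal loop in down_write(&card->controls_rwsem) above makes topology teardown exclusive against concurrent control readers, such as ioctl lookups walking the same list. A userspace analogy of the writer-side locking, using a pthread rwlock in place of the kernel rwsem:

	#include <pthread.h>

	static pthread_rwlock_t controls_rwsem = PTHREAD_RWLOCK_INITIALIZER;

	static void remove_dynamic_controls(void)
	{
		pthread_rwlock_wrlock(&controls_rwsem);	/* ~down_write() */
		/* unlink and free dynamic controls here; readers are blocked */
		pthread_rwlock_unlock(&controls_rwsem);	/* ~up_write() */
	}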
6894 +diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c
6895 +index 30025d3c16b6e..0862ff8b66273 100644
6896 +--- a/sound/soc/sof/intel/hda-bus.c
6897 ++++ b/sound/soc/sof/intel/hda-bus.c
6898 +@@ -10,6 +10,8 @@
6899 + #include <linux/io.h>
6900 + #include <sound/hdaudio.h>
6901 + #include <sound/hda_i915.h>
6902 ++#include <sound/hda_codec.h>
6903 ++#include <sound/hda_register.h>
6904 + #include "../sof-priv.h"
6905 + #include "hda.h"
6906 +
6907 +@@ -21,6 +23,18 @@
6908 + #endif
6909 +
6910 + #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
6911 ++static void update_codec_wake_enable(struct hdac_bus *bus, unsigned int addr, bool link_power)
6912 ++{
6913 ++ unsigned int mask = snd_hdac_chip_readw(bus, WAKEEN);
6914 ++
6915 ++ if (link_power)
6916 ++ mask &= ~BIT(addr);
6917 ++ else
6918 ++ mask |= BIT(addr);
6919 ++
6920 ++ snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
6921 ++}
6922 ++
6923 + static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable)
6924 + {
6925 + struct hdac_bus *bus = codec->bus;
6926 +@@ -41,6 +55,9 @@ static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable)
6927 + */
6928 + if (codec->addr == HDA_IDISP_ADDR && !enable)
6929 + snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
6930 ++
6931 ++ /* WAKEEN needs to be set for disabled links */
6932 ++ update_codec_wake_enable(bus, codec->addr, enable);
6933 + }
6934 +
6935 + static const struct hdac_bus_ops bus_core_ops = {
6936 +diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
6937 +index 623cf291e2074..262a70791a8f8 100644
6938 +--- a/sound/soc/sof/intel/hda-dsp.c
6939 ++++ b/sound/soc/sof/intel/hda-dsp.c
6940 +@@ -623,8 +623,7 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
6941 + hda_dsp_ipc_int_disable(sdev);
6942 +
6943 + #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
6944 +- if (runtime_suspend)
6945 +- hda_codec_jack_wake_enable(sdev, true);
6946 ++ hda_codec_jack_wake_enable(sdev, runtime_suspend);
6947 +
6948 + /* power down all hda link */
6949 + snd_hdac_ext_bus_link_power_down_all(bus);
6950 +diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
6951 +index f60e2c57d3d0c..ef92cca7ae01e 100644
6952 +--- a/sound/soc/sof/intel/hda.c
6953 ++++ b/sound/soc/sof/intel/hda.c
6954 +@@ -696,6 +696,20 @@ skip_soundwire:
6955 + return 0;
6956 + }
6957 +
6958 ++static void hda_check_for_state_change(struct snd_sof_dev *sdev)
6959 ++{
6960 ++#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
6961 ++ struct hdac_bus *bus = sof_to_bus(sdev);
6962 ++ unsigned int codec_mask;
6963 ++
6964 ++ codec_mask = snd_hdac_chip_readw(bus, STATESTS);
6965 ++ if (codec_mask) {
6966 ++ hda_codec_jack_check(sdev);
6967 ++ snd_hdac_chip_writew(bus, STATESTS, codec_mask);
6968 ++ }
6969 ++#endif
6970 ++}
6971 ++
6972 + static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context)
6973 + {
6974 + struct snd_sof_dev *sdev = context;
6975 +@@ -737,6 +751,8 @@ static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context)
6976 + if (hda_sdw_check_wakeen_irq(sdev))
6977 + hda_sdw_process_wakeen(sdev);
6978 +
6979 ++ hda_check_for_state_change(sdev);
6980 ++
6981 + /* enable GIE interrupt */
6982 + snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
6983 + SOF_HDA_INTCTL,
6984 +diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c
6985 +index 6254bacad6eb7..717f45a83445c 100644
6986 +--- a/sound/soc/stm/stm32_i2s.c
6987 ++++ b/sound/soc/stm/stm32_i2s.c
6988 +@@ -700,7 +700,7 @@ static int stm32_i2s_configure_clock(struct snd_soc_dai *cpu_dai,
6989 + if (ret < 0)
6990 + return ret;
6991 +
6992 +- nb_bits = frame_len * ((cgfr & I2S_CGFR_CHLEN) + 1);
6993 ++ nb_bits = frame_len * (FIELD_GET(I2S_CGFR_CHLEN, cgfr) + 1);
6994 + ret = stm32_i2s_calc_clk_div(i2s, i2s_clock_rate,
6995 + (nb_bits * rate));
6996 + if (ret)