
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Wed, 29 Jun 2022 11:08:16
Message-Id: 1656500878.9bac66606b7e8270e924cc491ee71b0b54ea9ceb.mpagano@gentoo
1 commit: 9bac66606b7e8270e924cc491ee71b0b54ea9ceb
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Jun 29 11:07:58 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Jun 29 11:07:58 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9bac6660
7
8 Linux patch 5.15.51
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1050_linux-5.15.51.patch | 4991 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4995 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index b568efe1..55250c51 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -243,6 +243,10 @@ Patch: 1049_linux-5.15.50.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.15.50
23
24 +Patch: 1050_linux-5.15.51.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.15.51
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1050_linux-5.15.51.patch b/1050_linux-5.15.51.patch
33 new file mode 100644
34 index 00000000..1114e2c3
35 --- /dev/null
36 +++ b/1050_linux-5.15.51.patch
37 @@ -0,0 +1,4991 @@
38 +diff --git a/Documentation/ABI/testing/sysfs-bus-iio-vf610 b/Documentation/ABI/testing/sysfs-bus-iio-vf610
39 +index 308a6756d3bf3..491ead8044888 100644
40 +--- a/Documentation/ABI/testing/sysfs-bus-iio-vf610
41 ++++ b/Documentation/ABI/testing/sysfs-bus-iio-vf610
42 +@@ -1,4 +1,4 @@
43 +-What: /sys/bus/iio/devices/iio:deviceX/conversion_mode
44 ++What: /sys/bus/iio/devices/iio:deviceX/in_conversion_mode
45 + KernelVersion: 4.2
46 + Contact: linux-iio@×××××××××××.org
47 + Description:
48 +diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
49 +index 8913497624de2..cb5da1df8d405 100644
50 +--- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml
51 ++++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
52 +@@ -135,7 +135,8 @@ properties:
53 + Phandle of a companion.
54 +
55 + phys:
56 +- maxItems: 1
57 ++ minItems: 1
58 ++ maxItems: 3
59 +
60 + phy-names:
61 + const: usb
62 +diff --git a/Documentation/devicetree/bindings/usb/generic-ohci.yaml b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
63 +index acbf94fa5f74a..d5fd3aa53ed29 100644
64 +--- a/Documentation/devicetree/bindings/usb/generic-ohci.yaml
65 ++++ b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
66 +@@ -102,7 +102,8 @@ properties:
67 + Overrides the detected port count
68 +
69 + phys:
70 +- maxItems: 1
71 ++ minItems: 1
72 ++ maxItems: 3
73 +
74 + phy-names:
75 + const: usb
76 +diff --git a/MAINTAINERS b/MAINTAINERS
77 +index 942e0b173f2cc..393706e85ba27 100644
78 +--- a/MAINTAINERS
79 ++++ b/MAINTAINERS
80 +@@ -434,6 +434,7 @@ ACPI VIOT DRIVER
81 + M: Jean-Philippe Brucker <jean-philippe@××××××.org>
82 + L: linux-acpi@×××××××××××.org
83 + L: iommu@××××××××××××××××××××××.org
84 ++L: iommu@×××××××××××.dev
85 + S: Maintained
86 + F: drivers/acpi/viot.c
87 + F: include/linux/acpi_viot.h
88 +@@ -941,6 +942,7 @@ AMD IOMMU (AMD-VI)
89 + M: Joerg Roedel <joro@××××××.org>
90 + R: Suravee Suthikulpanit <suravee.suthikulpanit@×××.com>
91 + L: iommu@××××××××××××××××××××××.org
92 ++L: iommu@×××××××××××.dev
93 + S: Maintained
94 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
95 + F: drivers/iommu/amd/
96 +@@ -5602,6 +5604,7 @@ M: Christoph Hellwig <hch@×××.de>
97 + M: Marek Szyprowski <m.szyprowski@×××××××.com>
98 + R: Robin Murphy <robin.murphy@×××.com>
99 + L: iommu@××××××××××××××××××××××.org
100 ++L: iommu@×××××××××××.dev
101 + S: Supported
102 + W: http://git.infradead.org/users/hch/dma-mapping.git
103 + T: git git://git.infradead.org/users/hch/dma-mapping.git
104 +@@ -5614,6 +5617,7 @@ F: kernel/dma/
105 + DMA MAPPING BENCHMARK
106 + M: Barry Song <song.bao.hua@×××××××××.com>
107 + L: iommu@××××××××××××××××××××××.org
108 ++L: iommu@×××××××××××.dev
109 + F: kernel/dma/map_benchmark.c
110 + F: tools/testing/selftests/dma/
111 +
112 +@@ -7115,6 +7119,7 @@ F: drivers/gpu/drm/exynos/exynos_dp*
113 + EXYNOS SYSMMU (IOMMU) driver
114 + M: Marek Szyprowski <m.szyprowski@×××××××.com>
115 + L: iommu@××××××××××××××××××××××.org
116 ++L: iommu@×××××××××××.dev
117 + S: Maintained
118 + F: drivers/iommu/exynos-iommu.c
119 +
120 +@@ -9457,6 +9462,7 @@ INTEL IOMMU (VT-d)
121 + M: David Woodhouse <dwmw2@×××××××××.org>
122 + M: Lu Baolu <baolu.lu@×××××××××××.com>
123 + L: iommu@××××××××××××××××××××××.org
124 ++L: iommu@×××××××××××.dev
125 + S: Supported
126 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
127 + F: drivers/iommu/intel/
128 +@@ -9793,6 +9799,7 @@ IOMMU DRIVERS
129 + M: Joerg Roedel <joro@××××××.org>
130 + M: Will Deacon <will@××××××.org>
131 + L: iommu@××××××××××××××××××××××.org
132 ++L: iommu@×××××××××××.dev
133 + S: Maintained
134 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
135 + F: Documentation/devicetree/bindings/iommu/
136 +@@ -11795,6 +11802,7 @@ F: drivers/i2c/busses/i2c-mt65xx.c
137 + MEDIATEK IOMMU DRIVER
138 + M: Yong Wu <yong.wu@××××××××.com>
139 + L: iommu@××××××××××××××××××××××.org
140 ++L: iommu@×××××××××××.dev
141 + L: linux-mediatek@×××××××××××××××.org (moderated for non-subscribers)
142 + S: Supported
143 + F: Documentation/devicetree/bindings/iommu/mediatek*
144 +@@ -15554,6 +15562,7 @@ F: drivers/i2c/busses/i2c-qcom-cci.c
145 + QUALCOMM IOMMU
146 + M: Rob Clark <robdclark@×××××.com>
147 + L: iommu@××××××××××××××××××××××.org
148 ++L: iommu@×××××××××××.dev
149 + L: linux-arm-msm@×××××××××××.org
150 + S: Maintained
151 + F: drivers/iommu/arm/arm-smmu/qcom_iommu.c
152 +@@ -17982,6 +17991,7 @@ F: arch/x86/boot/video*
153 + SWIOTLB SUBSYSTEM
154 + M: Christoph Hellwig <hch@×××××××××.org>
155 + L: iommu@××××××××××××××××××××××.org
156 ++L: iommu@×××××××××××.dev
157 + S: Supported
158 + W: http://git.infradead.org/users/hch/dma-mapping.git
159 + T: git git://git.infradead.org/users/hch/dma-mapping.git
160 +@@ -20562,6 +20572,7 @@ M: Juergen Gross <jgross@××××.com>
161 + M: Stefano Stabellini <sstabellini@××××××.org>
162 + L: xen-devel@××××××××××××××××.org (moderated for non-subscribers)
163 + L: iommu@××××××××××××××××××××××.org
164 ++L: iommu@×××××××××××.dev
165 + S: Supported
166 + F: arch/x86/xen/*swiotlb*
167 + F: drivers/xen/*swiotlb*
168 +diff --git a/Makefile b/Makefile
169 +index 03b3a493fcca1..b3bc9d907bed3 100644
170 +--- a/Makefile
171 ++++ b/Makefile
172 +@@ -1,7 +1,7 @@
173 + # SPDX-License-Identifier: GPL-2.0
174 + VERSION = 5
175 + PATCHLEVEL = 15
176 +-SUBLEVEL = 50
177 ++SUBLEVEL = 51
178 + EXTRAVERSION =
179 + NAME = Trick or Treat
180 +
181 +@@ -1163,7 +1163,7 @@ KBUILD_MODULES := 1
182 +
183 + autoksyms_recursive: descend modules.order
184 + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
185 +- "$(MAKE) -f $(srctree)/Makefile vmlinux"
186 ++ "$(MAKE) -f $(srctree)/Makefile autoksyms_recursive"
187 + endif
188 +
189 + autoksyms_h := $(if $(CONFIG_TRIM_UNUSED_KSYMS), include/generated/autoksyms.h)
190 +diff --git a/arch/arm/boot/dts/bcm2711-rpi-400.dts b/arch/arm/boot/dts/bcm2711-rpi-400.dts
191 +index f4d2fc20397c7..c53d9eb0b8027 100644
192 +--- a/arch/arm/boot/dts/bcm2711-rpi-400.dts
193 ++++ b/arch/arm/boot/dts/bcm2711-rpi-400.dts
194 +@@ -28,12 +28,12 @@
195 + &expgpio {
196 + gpio-line-names = "BT_ON",
197 + "WL_ON",
198 +- "",
199 ++ "PWR_LED_OFF",
200 + "GLOBAL_RESET",
201 + "VDD_SD_IO_SEL",
202 +- "CAM_GPIO",
203 ++ "GLOBAL_SHUTDOWN",
204 + "SD_PWR_ON",
205 +- "SD_OC_N";
206 ++ "SHUTDOWN_REQUEST";
207 + };
208 +
209 + &genet_mdio {
210 +diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
211 +index 89c342f3a7c2f..8520ffc1779b6 100644
212 +--- a/arch/arm/boot/dts/imx6qdl.dtsi
213 ++++ b/arch/arm/boot/dts/imx6qdl.dtsi
214 +@@ -763,7 +763,7 @@
215 + regulator-name = "vddpu";
216 + regulator-min-microvolt = <725000>;
217 + regulator-max-microvolt = <1450000>;
218 +- regulator-enable-ramp-delay = <150>;
219 ++ regulator-enable-ramp-delay = <380>;
220 + anatop-reg-offset = <0x140>;
221 + anatop-vol-bit-shift = <9>;
222 + anatop-vol-bit-width = <5>;
223 +diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
224 +index 1843fc0538709..95f22513a7c02 100644
225 +--- a/arch/arm/boot/dts/imx7s.dtsi
226 ++++ b/arch/arm/boot/dts/imx7s.dtsi
227 +@@ -104,6 +104,7 @@
228 + compatible = "usb-nop-xceiv";
229 + clocks = <&clks IMX7D_USB_HSIC_ROOT_CLK>;
230 + clock-names = "main_clk";
231 ++ power-domains = <&pgc_hsic_phy>;
232 + #phy-cells = <0>;
233 + };
234 +
235 +@@ -1135,7 +1136,6 @@
236 + compatible = "fsl,imx7d-usb", "fsl,imx27-usb";
237 + reg = <0x30b30000 0x200>;
238 + interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
239 +- power-domains = <&pgc_hsic_phy>;
240 + clocks = <&clks IMX7D_USB_CTRL_CLK>;
241 + fsl,usbphy = <&usbphynop3>;
242 + fsl,usbmisc = <&usbmisc3 0>;
243 +diff --git a/arch/arm/mach-axxia/platsmp.c b/arch/arm/mach-axxia/platsmp.c
244 +index 512943eae30a5..2e203626eda52 100644
245 +--- a/arch/arm/mach-axxia/platsmp.c
246 ++++ b/arch/arm/mach-axxia/platsmp.c
247 +@@ -39,6 +39,7 @@ static int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle)
248 + return -ENOENT;
249 +
250 + syscon = of_iomap(syscon_np, 0);
251 ++ of_node_put(syscon_np);
252 + if (!syscon)
253 + return -ENOMEM;
254 +
255 +diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
256 +index e4f4b20b83a2d..3fc4ec830e3a3 100644
257 +--- a/arch/arm/mach-cns3xxx/core.c
258 ++++ b/arch/arm/mach-cns3xxx/core.c
259 +@@ -372,6 +372,7 @@ static void __init cns3xxx_init(void)
260 + /* De-Asscer SATA Reset */
261 + cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SATA));
262 + }
263 ++ of_node_put(dn);
264 +
265 + dn = of_find_compatible_node(NULL, NULL, "cavium,cns3420-sdhci");
266 + if (of_device_is_available(dn)) {
267 +@@ -385,6 +386,7 @@ static void __init cns3xxx_init(void)
268 + cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(SDIO));
269 + cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SDIO));
270 + }
271 ++ of_node_put(dn);
272 +
273 + pm_power_off = cns3xxx_power_off;
274 +
275 +diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
276 +index 8b48326be9fd5..51a247ca4da8c 100644
277 +--- a/arch/arm/mach-exynos/exynos.c
278 ++++ b/arch/arm/mach-exynos/exynos.c
279 +@@ -149,6 +149,7 @@ static void exynos_map_pmu(void)
280 + np = of_find_matching_node(NULL, exynos_dt_pmu_match);
281 + if (np)
282 + pmu_base_addr = of_iomap(np, 0);
283 ++ of_node_put(np);
284 + }
285 +
286 + static void __init exynos_init_irq(void)
287 +diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
288 +index 86291f3469f15..d195b97ab2eef 100644
289 +--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
290 ++++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
291 +@@ -456,13 +456,11 @@
292 + clock-names = "clk_ahb", "clk_xin";
293 + mmc-ddr-1_8v;
294 + mmc-hs200-1_8v;
295 +- mmc-hs400-1_8v;
296 + ti,trm-icp = <0x2>;
297 + ti,otap-del-sel-legacy = <0x0>;
298 + ti,otap-del-sel-mmc-hs = <0x0>;
299 + ti,otap-del-sel-ddr52 = <0x6>;
300 + ti,otap-del-sel-hs200 = <0x7>;
301 +- ti,otap-del-sel-hs400 = <0x4>;
302 + };
303 +
304 + sdhci1: mmc@fa00000 {
305 +diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
306 +index a30c036577a32..f181527f9d436 100644
307 +--- a/arch/arm64/kvm/arm.c
308 ++++ b/arch/arm64/kvm/arm.c
309 +@@ -2011,11 +2011,11 @@ static int finalize_hyp_mode(void)
310 + return 0;
311 +
312 + /*
313 +- * Exclude HYP BSS from kmemleak so that it doesn't get peeked
314 +- * at, which would end badly once the section is inaccessible.
315 +- * None of other sections should ever be introspected.
316 ++ * Exclude HYP sections from kmemleak so that they don't get peeked
317 ++ * at, which would end badly once inaccessible.
318 + */
319 + kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
320 ++ kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size);
321 + return pkvm_drop_host_privileges();
322 + }
323 +
324 +diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
325 +index 7b7f25b4b057e..9240bcdbe74e4 100644
326 +--- a/arch/mips/vr41xx/common/icu.c
327 ++++ b/arch/mips/vr41xx/common/icu.c
328 +@@ -640,8 +640,6 @@ static int icu_get_irq(unsigned int irq)
329 +
330 + printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
331 +
332 +- atomic_inc(&irq_err_count);
333 +-
334 + return -1;
335 + }
336 +
337 +diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
338 +index 27a8b49af11fc..5dccf01a9e172 100644
339 +--- a/arch/parisc/Kconfig
340 ++++ b/arch/parisc/Kconfig
341 +@@ -9,6 +9,7 @@ config PARISC
342 + select ARCH_WANT_FRAME_POINTERS
343 + select ARCH_HAS_ELF_RANDOMIZE
344 + select ARCH_HAS_STRICT_KERNEL_RWX
345 ++ select ARCH_HAS_STRICT_MODULE_RWX
346 + select ARCH_HAS_UBSAN_SANITIZE_ALL
347 + select ARCH_NO_SG_CHAIN
348 + select ARCH_SUPPORTS_HUGETLBFS if PA20
349 +diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h
350 +index d63a2acb91f2b..55d29c4f716e6 100644
351 +--- a/arch/parisc/include/asm/fb.h
352 ++++ b/arch/parisc/include/asm/fb.h
353 +@@ -12,7 +12,7 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
354 + pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
355 + }
356 +
357 +-#if defined(CONFIG_STI_CONSOLE) || defined(CONFIG_FB_STI)
358 ++#if defined(CONFIG_FB_STI)
359 + int fb_is_primary_device(struct fb_info *info);
360 + #else
361 + static inline int fb_is_primary_device(struct fb_info *info)
362 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
363 +index 39a0a13a3a277..c590e12199132 100644
364 +--- a/arch/powerpc/kernel/process.c
365 ++++ b/arch/powerpc/kernel/process.c
366 +@@ -1818,7 +1818,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
367 + tm_reclaim_current(0);
368 + #endif
369 +
370 +- memset(regs->gpr, 0, sizeof(regs->gpr));
371 ++ memset(&regs->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0]));
372 + regs->ctr = 0;
373 + regs->link = 0;
374 + regs->xer = 0;
375 +diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
376 +index 3f58140370357..e8f44084d2512 100644
377 +--- a/arch/powerpc/kernel/rtas.c
378 ++++ b/arch/powerpc/kernel/rtas.c
379 +@@ -983,7 +983,7 @@ static struct rtas_filter rtas_filters[] __ro_after_init = {
380 + { "get-time-of-day", -1, -1, -1, -1, -1 },
381 + { "ibm,get-vpd", -1, 0, -1, 1, 2 },
382 + { "ibm,lpar-perftools", -1, 2, 3, -1, -1 },
383 +- { "ibm,platform-dump", -1, 4, 5, -1, -1 },
384 ++ { "ibm,platform-dump", -1, 4, 5, -1, -1 }, /* Special cased */
385 + { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 },
386 + { "ibm,scan-log-dump", -1, 0, 1, -1, -1 },
387 + { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 },
388 +@@ -1032,6 +1032,15 @@ static bool block_rtas_call(int token, int nargs,
389 + size = 1;
390 +
391 + end = base + size - 1;
392 ++
393 ++ /*
394 ++ * Special case for ibm,platform-dump - NULL buffer
395 ++ * address is used to indicate end of dump processing
396 ++ */
397 ++ if (!strcmp(f->name, "ibm,platform-dump") &&
398 ++ base == 0)
399 ++ return false;
400 ++
401 + if (!in_rmo_buf(base, end))
402 + goto err;
403 + }
404 +diff --git a/arch/powerpc/platforms/microwatt/microwatt.h b/arch/powerpc/platforms/microwatt/microwatt.h
405 +new file mode 100644
406 +index 0000000000000..335417e95e66f
407 +--- /dev/null
408 ++++ b/arch/powerpc/platforms/microwatt/microwatt.h
409 +@@ -0,0 +1,7 @@
410 ++/* SPDX-License-Identifier: GPL-2.0 */
411 ++#ifndef _MICROWATT_H
412 ++#define _MICROWATT_H
413 ++
414 ++void microwatt_rng_init(void);
415 ++
416 ++#endif /* _MICROWATT_H */
417 +diff --git a/arch/powerpc/platforms/microwatt/rng.c b/arch/powerpc/platforms/microwatt/rng.c
418 +index 3d8ee6eb7dada..8cb161533e6aa 100644
419 +--- a/arch/powerpc/platforms/microwatt/rng.c
420 ++++ b/arch/powerpc/platforms/microwatt/rng.c
421 +@@ -11,6 +11,7 @@
422 + #include <asm/archrandom.h>
423 + #include <asm/cputable.h>
424 + #include <asm/machdep.h>
425 ++#include "microwatt.h"
426 +
427 + #define DARN_ERR 0xFFFFFFFFFFFFFFFFul
428 +
429 +@@ -29,7 +30,7 @@ int microwatt_get_random_darn(unsigned long *v)
430 + return 1;
431 + }
432 +
433 +-static __init int rng_init(void)
434 ++void __init microwatt_rng_init(void)
435 + {
436 + unsigned long val;
437 + int i;
438 +@@ -37,12 +38,7 @@ static __init int rng_init(void)
439 + for (i = 0; i < 10; i++) {
440 + if (microwatt_get_random_darn(&val)) {
441 + ppc_md.get_random_seed = microwatt_get_random_darn;
442 +- return 0;
443 ++ return;
444 + }
445 + }
446 +-
447 +- pr_warn("Unable to use DARN for get_random_seed()\n");
448 +-
449 +- return -EIO;
450 + }
451 +-machine_subsys_initcall(, rng_init);
452 +diff --git a/arch/powerpc/platforms/microwatt/setup.c b/arch/powerpc/platforms/microwatt/setup.c
453 +index 0b02603bdb747..6b32539395a48 100644
454 +--- a/arch/powerpc/platforms/microwatt/setup.c
455 ++++ b/arch/powerpc/platforms/microwatt/setup.c
456 +@@ -16,6 +16,8 @@
457 + #include <asm/xics.h>
458 + #include <asm/udbg.h>
459 +
460 ++#include "microwatt.h"
461 ++
462 + static void __init microwatt_init_IRQ(void)
463 + {
464 + xics_init();
465 +@@ -32,10 +34,16 @@ static int __init microwatt_populate(void)
466 + }
467 + machine_arch_initcall(microwatt, microwatt_populate);
468 +
469 ++static void __init microwatt_setup_arch(void)
470 ++{
471 ++ microwatt_rng_init();
472 ++}
473 ++
474 + define_machine(microwatt) {
475 + .name = "microwatt",
476 + .probe = microwatt_probe,
477 + .init_IRQ = microwatt_init_IRQ,
478 ++ .setup_arch = microwatt_setup_arch,
479 + .progress = udbg_progress,
480 + .calibrate_decr = generic_calibrate_decr,
481 + };
482 +diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
483 +index 11df4e16a1cc3..528946ee7a777 100644
484 +--- a/arch/powerpc/platforms/powernv/powernv.h
485 ++++ b/arch/powerpc/platforms/powernv/powernv.h
486 +@@ -42,4 +42,6 @@ ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count);
487 + u32 memcons_get_size(struct memcons *mc);
488 + struct memcons *memcons_init(struct device_node *node, const char *mc_prop_name);
489 +
490 ++void pnv_rng_init(void);
491 ++
492 + #endif /* _POWERNV_H */
493 +diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
494 +index 69c344c8884f3..2b5a1a41234cc 100644
495 +--- a/arch/powerpc/platforms/powernv/rng.c
496 ++++ b/arch/powerpc/platforms/powernv/rng.c
497 +@@ -17,6 +17,7 @@
498 + #include <asm/prom.h>
499 + #include <asm/machdep.h>
500 + #include <asm/smp.h>
501 ++#include "powernv.h"
502 +
503 + #define DARN_ERR 0xFFFFFFFFFFFFFFFFul
504 +
505 +@@ -28,7 +29,6 @@ struct powernv_rng {
506 +
507 + static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng);
508 +
509 +-
510 + int powernv_hwrng_present(void)
511 + {
512 + struct powernv_rng *rng;
513 +@@ -98,9 +98,6 @@ static int initialise_darn(void)
514 + return 0;
515 + }
516 + }
517 +-
518 +- pr_warn("Unable to use DARN for get_random_seed()\n");
519 +-
520 + return -EIO;
521 + }
522 +
523 +@@ -163,32 +160,55 @@ static __init int rng_create(struct device_node *dn)
524 +
525 + rng_init_per_cpu(rng, dn);
526 +
527 +- pr_info_once("Registering arch random hook.\n");
528 +-
529 + ppc_md.get_random_seed = powernv_get_random_long;
530 +
531 + return 0;
532 + }
533 +
534 +-static __init int rng_init(void)
535 ++static int __init pnv_get_random_long_early(unsigned long *v)
536 + {
537 + struct device_node *dn;
538 +- int rc;
539 ++
540 ++ if (!slab_is_available())
541 ++ return 0;
542 ++
543 ++ if (cmpxchg(&ppc_md.get_random_seed, pnv_get_random_long_early,
544 ++ NULL) != pnv_get_random_long_early)
545 ++ return 0;
546 +
547 + for_each_compatible_node(dn, NULL, "ibm,power-rng") {
548 +- rc = rng_create(dn);
549 +- if (rc) {
550 +- pr_err("Failed creating rng for %pOF (%d).\n",
551 +- dn, rc);
552 ++ if (rng_create(dn))
553 + continue;
554 +- }
555 +-
556 + /* Create devices for hwrng driver */
557 + of_platform_device_create(dn, NULL, NULL);
558 + }
559 +
560 +- initialise_darn();
561 ++ if (!ppc_md.get_random_seed)
562 ++ return 0;
563 ++ return ppc_md.get_random_seed(v);
564 ++}
565 ++
566 ++void __init pnv_rng_init(void)
567 ++{
568 ++ struct device_node *dn;
569 +
570 ++ /* Prefer darn over the rest. */
571 ++ if (!initialise_darn())
572 ++ return;
573 ++
574 ++ dn = of_find_compatible_node(NULL, NULL, "ibm,power-rng");
575 ++ if (dn)
576 ++ ppc_md.get_random_seed = pnv_get_random_long_early;
577 ++
578 ++ of_node_put(dn);
579 ++}
580 ++
581 ++static int __init pnv_rng_late_init(void)
582 ++{
583 ++ unsigned long v;
584 ++ /* In case it wasn't called during init for some other reason. */
585 ++ if (ppc_md.get_random_seed == pnv_get_random_long_early)
586 ++ pnv_get_random_long_early(&v);
587 + return 0;
588 + }
589 +-machine_subsys_initcall(powernv, rng_init);
590 ++machine_subsys_initcall(powernv, pnv_rng_late_init);
591 +diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
592 +index a8db3f1530639..1b3c7e04a7af5 100644
593 +--- a/arch/powerpc/platforms/powernv/setup.c
594 ++++ b/arch/powerpc/platforms/powernv/setup.c
595 +@@ -190,6 +190,8 @@ static void __init pnv_setup_arch(void)
596 + pnv_check_guarded_cores();
597 +
598 + /* XXX PMCS */
599 ++
600 ++ pnv_rng_init();
601 + }
602 +
603 + static void __init pnv_init(void)
604 +diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
605 +index 3544778e06d01..2a97cc20fe8fe 100644
606 +--- a/arch/powerpc/platforms/pseries/pseries.h
607 ++++ b/arch/powerpc/platforms/pseries/pseries.h
608 +@@ -115,4 +115,6 @@ extern u32 pseries_security_flavor;
609 + void pseries_setup_security_mitigations(void);
610 + void pseries_lpar_read_hblkrm_characteristics(void);
611 +
612 ++void pseries_rng_init(void);
613 ++
614 + #endif /* _PSERIES_PSERIES_H */
615 +diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
616 +index 6268545947b83..6ddfdeaace9ef 100644
617 +--- a/arch/powerpc/platforms/pseries/rng.c
618 ++++ b/arch/powerpc/platforms/pseries/rng.c
619 +@@ -10,6 +10,7 @@
620 + #include <asm/archrandom.h>
621 + #include <asm/machdep.h>
622 + #include <asm/plpar_wrappers.h>
623 ++#include "pseries.h"
624 +
625 +
626 + static int pseries_get_random_long(unsigned long *v)
627 +@@ -24,19 +25,13 @@ static int pseries_get_random_long(unsigned long *v)
628 + return 0;
629 + }
630 +
631 +-static __init int rng_init(void)
632 ++void __init pseries_rng_init(void)
633 + {
634 + struct device_node *dn;
635 +
636 + dn = of_find_compatible_node(NULL, NULL, "ibm,random");
637 + if (!dn)
638 +- return -ENODEV;
639 +-
640 +- pr_info("Registering arch random hook.\n");
641 +-
642 ++ return;
643 + ppc_md.get_random_seed = pseries_get_random_long;
644 +-
645 + of_node_put(dn);
646 +- return 0;
647 + }
648 +-machine_subsys_initcall(pseries, rng_init);
649 +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
650 +index f79126f16258a..c2b3752684b5f 100644
651 +--- a/arch/powerpc/platforms/pseries/setup.c
652 ++++ b/arch/powerpc/platforms/pseries/setup.c
653 +@@ -840,6 +840,8 @@ static void __init pSeries_setup_arch(void)
654 +
655 + if (swiotlb_force == SWIOTLB_FORCE)
656 + ppc_swiotlb_enable = 1;
657 ++
658 ++ pseries_rng_init();
659 + }
660 +
661 + static void pseries_panic(char *str)
662 +diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
663 +index cceb8ec707e4b..d2a2a18b55808 100644
664 +--- a/arch/s390/kernel/perf_cpum_cf.c
665 ++++ b/arch/s390/kernel/perf_cpum_cf.c
666 +@@ -516,6 +516,26 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
667 + return err;
668 + }
669 +
670 ++/* Events CPU_CYLCES and INSTRUCTIONS can be submitted with two different
671 ++ * attribute::type values:
672 ++ * - PERF_TYPE_HARDWARE:
673 ++ * - pmu->type:
674 ++ * Handle both type of invocations identical. They address the same hardware.
675 ++ * The result is different when event modifiers exclude_kernel and/or
676 ++ * exclude_user are also set.
677 ++ */
678 ++static int cpumf_pmu_event_type(struct perf_event *event)
679 ++{
680 ++ u64 ev = event->attr.config;
681 ++
682 ++ if (cpumf_generic_events_basic[PERF_COUNT_HW_CPU_CYCLES] == ev ||
683 ++ cpumf_generic_events_basic[PERF_COUNT_HW_INSTRUCTIONS] == ev ||
684 ++ cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev ||
685 ++ cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev)
686 ++ return PERF_TYPE_HARDWARE;
687 ++ return PERF_TYPE_RAW;
688 ++}
689 ++
690 + static int cpumf_pmu_event_init(struct perf_event *event)
691 + {
692 + unsigned int type = event->attr.type;
693 +@@ -525,7 +545,7 @@ static int cpumf_pmu_event_init(struct perf_event *event)
694 + err = __hw_perf_event_init(event, type);
695 + else if (event->pmu->type == type)
696 + /* Registered as unknown PMU */
697 +- err = __hw_perf_event_init(event, PERF_TYPE_RAW);
698 ++ err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
699 + else
700 + return -ENOENT;
701 +
702 +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
703 +index d99434dc215c2..8dca2bcbb0eae 100644
704 +--- a/arch/x86/net/bpf_jit_comp.c
705 ++++ b/arch/x86/net/bpf_jit_comp.c
706 +@@ -1440,8 +1440,9 @@ st: if (is_imm8(insn->off))
707 + case BPF_JMP | BPF_CALL:
708 + func = (u8 *) __bpf_call_base + imm32;
709 + if (tail_call_reachable) {
710 ++ /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
711 + EMIT3_off32(0x48, 0x8B, 0x85,
712 +- -(bpf_prog->aux->stack_depth + 8));
713 ++ -round_up(bpf_prog->aux->stack_depth, 8) - 8);
714 + if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
715 + return -EINVAL;
716 + } else {
717 +diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
718 +index e8ceb15286081..16b8a6273772c 100644
719 +--- a/arch/xtensa/kernel/time.c
720 ++++ b/arch/xtensa/kernel/time.c
721 +@@ -154,6 +154,7 @@ static void __init calibrate_ccount(void)
722 + cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
723 + if (cpu) {
724 + clk = of_clk_get(cpu, 0);
725 ++ of_node_put(cpu);
726 + if (!IS_ERR(clk)) {
727 + ccount_freq = clk_get_rate(clk);
728 + return;
729 +diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
730 +index 538e6748e85a7..c79c1d09ea863 100644
731 +--- a/arch/xtensa/platforms/xtfpga/setup.c
732 ++++ b/arch/xtensa/platforms/xtfpga/setup.c
733 +@@ -133,6 +133,7 @@ static int __init machine_setup(void)
734 +
735 + if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
736 + update_local_mac(eth);
737 ++ of_node_put(eth);
738 + return 0;
739 + }
740 + arch_initcall(machine_setup);
741 +diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
742 +index 4a446259a184e..3aac960ae30ab 100644
743 +--- a/drivers/base/regmap/regmap-irq.c
744 ++++ b/drivers/base/regmap/regmap-irq.c
745 +@@ -252,6 +252,7 @@ static void regmap_irq_enable(struct irq_data *data)
746 + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
747 + struct regmap *map = d->map;
748 + const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
749 ++ unsigned int reg = irq_data->reg_offset / map->reg_stride;
750 + unsigned int mask, type;
751 +
752 + type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
753 +@@ -268,14 +269,14 @@ static void regmap_irq_enable(struct irq_data *data)
754 + * at the corresponding offset in regmap_irq_set_type().
755 + */
756 + if (d->chip->type_in_mask && type)
757 +- mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
758 ++ mask = d->type_buf[reg] & irq_data->mask;
759 + else
760 + mask = irq_data->mask;
761 +
762 + if (d->chip->clear_on_unmask)
763 + d->clear_status = true;
764 +
765 +- d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
766 ++ d->mask_buf[reg] &= ~mask;
767 + }
768 +
769 + static void regmap_irq_disable(struct irq_data *data)
770 +@@ -386,6 +387,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
771 + subreg = &chip->sub_reg_offsets[b];
772 + for (i = 0; i < subreg->num_regs; i++) {
773 + unsigned int offset = subreg->offset[i];
774 ++ unsigned int index = offset / map->reg_stride;
775 +
776 + if (chip->not_fixed_stride)
777 + ret = regmap_read(map,
778 +@@ -394,7 +396,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
779 + else
780 + ret = regmap_read(map,
781 + chip->status_base + offset,
782 +- &data->status_buf[offset]);
783 ++ &data->status_buf[index]);
784 +
785 + if (ret)
786 + break;
787 +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
788 +index 390817cf12212..d7a9bf43fb32e 100644
789 +--- a/drivers/block/xen-blkfront.c
790 ++++ b/drivers/block/xen-blkfront.c
791 +@@ -2140,9 +2140,11 @@ static void blkfront_closing(struct blkfront_info *info)
792 + return;
793 +
794 + /* No more blkif_request(). */
795 +- blk_mq_stop_hw_queues(info->rq);
796 +- blk_mark_disk_dead(info->gd);
797 +- set_capacity(info->gd, 0);
798 ++ if (info->rq && info->gd) {
799 ++ blk_mq_stop_hw_queues(info->rq);
800 ++ blk_mark_disk_dead(info->gd);
801 ++ set_capacity(info->gd, 0);
802 ++ }
803 +
804 + for_each_rinfo(info, rinfo, i) {
805 + /* No more gnttab callback work. */
806 +@@ -2478,16 +2480,19 @@ static int blkfront_remove(struct xenbus_device *xbdev)
807 +
808 + dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
809 +
810 +- del_gendisk(info->gd);
811 ++ if (info->gd)
812 ++ del_gendisk(info->gd);
813 +
814 + mutex_lock(&blkfront_mutex);
815 + list_del(&info->info_list);
816 + mutex_unlock(&blkfront_mutex);
817 +
818 + blkif_free(info, 0);
819 +- xlbd_release_minors(info->gd->first_minor, info->gd->minors);
820 +- blk_cleanup_disk(info->gd);
821 +- blk_mq_free_tag_set(&info->tag_set);
822 ++ if (info->gd) {
823 ++ xlbd_release_minors(info->gd->first_minor, info->gd->minors);
824 ++ blk_cleanup_disk(info->gd);
825 ++ blk_mq_free_tag_set(&info->tag_set);
826 ++ }
827 +
828 + kfree(info);
829 + return 0;
830 +diff --git a/drivers/char/random.c b/drivers/char/random.c
831 +index 7f6585c49380b..7bd6eb15d432e 100644
832 +--- a/drivers/char/random.c
833 ++++ b/drivers/char/random.c
834 +@@ -88,7 +88,7 @@ static RAW_NOTIFIER_HEAD(random_ready_chain);
835 +
836 + /* Control how we warn userspace. */
837 + static struct ratelimit_state urandom_warning =
838 +- RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
839 ++ RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
840 + static int ratelimit_disable __read_mostly =
841 + IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
842 + module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
843 +@@ -452,7 +452,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
844 +
845 + /*
846 + * Immediately overwrite the ChaCha key at index 4 with random
847 +- * bytes, in case userspace causes copy_to_user() below to sleep
848 ++ * bytes, in case userspace causes copy_to_iter() below to sleep
849 + * forever, so that we still retain forward secrecy in that case.
850 + */
851 + crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
852 +@@ -1000,7 +1000,7 @@ void add_interrupt_randomness(int irq)
853 + if (new_count & MIX_INFLIGHT)
854 + return;
855 +
856 +- if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ))
857 ++ if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
858 + return;
859 +
860 + if (unlikely(!fast_pool->mix.func))
861 +diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
862 +index e7330684d3b82..9631f2fd2faf7 100644
863 +--- a/drivers/dma-buf/udmabuf.c
864 ++++ b/drivers/dma-buf/udmabuf.c
865 +@@ -32,8 +32,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
866 + {
867 + struct vm_area_struct *vma = vmf->vma;
868 + struct udmabuf *ubuf = vma->vm_private_data;
869 ++ pgoff_t pgoff = vmf->pgoff;
870 +
871 +- vmf->page = ubuf->pages[vmf->pgoff];
872 ++ if (pgoff >= ubuf->pagecount)
873 ++ return VM_FAULT_SIGBUS;
874 ++ vmf->page = ubuf->pages[pgoff];
875 + get_page(vmf->page);
876 + return 0;
877 + }
878 +diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
879 +index 98cd715ccc33c..8d09b619c1669 100644
880 +--- a/drivers/gpio/gpio-vr41xx.c
881 ++++ b/drivers/gpio/gpio-vr41xx.c
882 +@@ -217,8 +217,6 @@ static int giu_get_irq(unsigned int irq)
883 + printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
884 + maskl, pendl, maskh, pendh);
885 +
886 +- atomic_inc(&irq_err_count);
887 +-
888 + return -EINVAL;
889 + }
890 +
891 +diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
892 +index 7f8f5b02e31d5..4b61d975cc0ec 100644
893 +--- a/drivers/gpio/gpio-winbond.c
894 ++++ b/drivers/gpio/gpio-winbond.c
895 +@@ -385,12 +385,13 @@ static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset)
896 + unsigned long *base = gpiochip_get_data(gc);
897 + const struct winbond_gpio_info *info;
898 + bool val;
899 ++ int ret;
900 +
901 + winbond_gpio_get_info(&offset, &info);
902 +
903 +- val = winbond_sio_enter(*base);
904 +- if (val)
905 +- return val;
906 ++ ret = winbond_sio_enter(*base);
907 ++ if (ret)
908 ++ return ret;
909 +
910 + winbond_sio_select_logical(*base, info->dev);
911 +
912 +diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
913 +index 5c91d125a3371..3dfa600fb86d6 100644
914 +--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
915 ++++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
916 +@@ -2434,7 +2434,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
917 + }
918 +
919 + /*
920 +- * Display WA #22010492432: ehl, tgl, adl-p
921 ++ * Display WA #22010492432: ehl, tgl, adl-s, adl-p
922 + * Program half of the nominal DCO divider fraction value.
923 + */
924 + static bool
925 +@@ -2442,7 +2442,7 @@ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
926 + {
927 + return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
928 + IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
929 +- IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) &&
930 ++ IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
931 + i915->dpll.ref_clks.nssc == 38400;
932 + }
933 +
934 +diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
935 +index 748665232d296..bba68776cb25d 100644
936 +--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
937 ++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
938 +@@ -958,7 +958,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
939 + for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
940 + release_firmware(adreno_gpu->fw[i]);
941 +
942 +- pm_runtime_disable(&priv->gpu_pdev->dev);
943 ++ if (pm_runtime_enabled(&priv->gpu_pdev->dev))
944 ++ pm_runtime_disable(&priv->gpu_pdev->dev);
945 +
946 + msm_gpu_cleanup(&adreno_gpu->base);
947 + }
948 +diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
949 +index cdcaf470f1480..97ae68182f3ed 100644
950 +--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
951 ++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
952 +@@ -223,6 +223,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
953 + encoder = mdp4_lcdc_encoder_init(dev, panel_node);
954 + if (IS_ERR(encoder)) {
955 + DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
956 ++ of_node_put(panel_node);
957 + return PTR_ERR(encoder);
958 + }
959 +
960 +@@ -232,6 +233,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
961 + connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
962 + if (IS_ERR(connector)) {
963 + DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
964 ++ of_node_put(panel_node);
965 + return PTR_ERR(connector);
966 + }
967 +
968 +diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
969 +index 1992347537e65..b6f4ce2a48afe 100644
970 +--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
971 ++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
972 +@@ -1348,60 +1348,49 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
973 + return ret;
974 + }
975 +
976 +-int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
977 ++void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
978 ++{
979 ++ struct dp_ctrl_private *ctrl;
980 ++
981 ++ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
982 ++
983 ++ dp_catalog_ctrl_reset(ctrl->catalog);
984 ++
985 ++ /*
986 ++ * all dp controller programmable registers will not
987 ++ * be reset to default value after DP_SW_RESET
988 ++ * therefore interrupt mask bits have to be updated
989 ++ * to enable/disable interrupts
990 ++ */
991 ++ dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
992 ++}
993 ++
994 ++void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
995 + {
996 + struct dp_ctrl_private *ctrl;
997 + struct dp_io *dp_io;
998 + struct phy *phy;
999 +
1000 +- if (!dp_ctrl) {
1001 +- DRM_ERROR("Invalid input data\n");
1002 +- return -EINVAL;
1003 +- }
1004 +-
1005 + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
1006 + dp_io = &ctrl->parser->io;
1007 + phy = dp_io->phy;
1008 +
1009 +- ctrl->dp_ctrl.orientation = flip;
1010 +-
1011 +- if (reset)
1012 +- dp_catalog_ctrl_reset(ctrl->catalog);
1013 +-
1014 +- DRM_DEBUG_DP("flip=%d\n", flip);
1015 + dp_catalog_ctrl_phy_reset(ctrl->catalog);
1016 + phy_init(phy);
1017 +- dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
1018 +-
1019 +- return 0;
1020 + }
1021 +
1022 +-/**
1023 +- * dp_ctrl_host_deinit() - Uninitialize DP controller
1024 +- * @dp_ctrl: Display Port Driver data
1025 +- *
1026 +- * Perform required steps to uninitialize DP controller
1027 +- * and its resources.
1028 +- */
1029 +-void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
1030 ++void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl)
1031 + {
1032 + struct dp_ctrl_private *ctrl;
1033 + struct dp_io *dp_io;
1034 + struct phy *phy;
1035 +
1036 +- if (!dp_ctrl) {
1037 +- DRM_ERROR("Invalid input data\n");
1038 +- return;
1039 +- }
1040 +-
1041 + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
1042 + dp_io = &ctrl->parser->io;
1043 + phy = dp_io->phy;
1044 +
1045 +- dp_catalog_ctrl_enable_irq(ctrl->catalog, false);
1046 ++ dp_catalog_ctrl_phy_reset(ctrl->catalog);
1047 + phy_exit(phy);
1048 +-
1049 +- DRM_DEBUG_DP("Host deinitialized successfully\n");
1050 + }
1051 +
1052 + static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
1053 +@@ -1471,7 +1460,10 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
1054 + }
1055 +
1056 + phy_power_off(phy);
1057 ++
1058 ++ /* aux channel down, reinit phy */
1059 + phy_exit(phy);
1060 ++ phy_init(phy);
1061 +
1062 + return 0;
1063 + }
1064 +@@ -1501,6 +1493,8 @@ end:
1065 + return ret;
1066 + }
1067 +
1068 ++static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl);
1069 ++
1070 + static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
1071 + {
1072 + int ret = 0;
1073 +@@ -1523,7 +1517,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
1074 +
1075 + ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
1076 + if (!ret)
1077 +- ret = dp_ctrl_on_stream(&ctrl->dp_ctrl);
1078 ++ ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl);
1079 + else
1080 + DRM_ERROR("failed to enable DP link controller\n");
1081 +
1082 +@@ -1778,7 +1772,27 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
1083 + return dp_ctrl_setup_main_link(ctrl, &training_step);
1084 + }
1085 +
1086 +-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
1087 ++static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl)
1088 ++{
1089 ++ int ret;
1090 ++ struct dp_ctrl_private *ctrl;
1091 ++
1092 ++ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
1093 ++
1094 ++ ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
1095 ++
1096 ++ ret = dp_ctrl_enable_stream_clocks(ctrl);
1097 ++ if (ret) {
1098 ++ DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
1099 ++ return ret;
1100 ++ }
1101 ++
1102 ++ dp_ctrl_send_phy_test_pattern(ctrl);
1103 ++
1104 ++ return 0;
1105 ++}
1106 ++
1107 ++int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
1108 + {
1109 + int ret = 0;
1110 + bool mainlink_ready = false;
1111 +@@ -1809,12 +1823,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
1112 + goto end;
1113 + }
1114 +
1115 +- if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
1116 +- dp_ctrl_send_phy_test_pattern(ctrl);
1117 +- return 0;
1118 +- }
1119 +-
1120 +- if (!dp_ctrl_channel_eq_ok(ctrl))
1121 ++ if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
1122 + dp_ctrl_link_retrain(ctrl);
1123 +
1124 + /* stop txing train pattern to end link training */
1125 +@@ -1877,8 +1886,14 @@ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
1126 + return ret;
1127 + }
1128 +
1129 ++ DRM_DEBUG_DP("Before, phy=%x init_count=%d power_on=%d\n",
1130 ++ (u32)(uintptr_t)phy, phy->init_count, phy->power_count);
1131 ++
1132 + phy_power_off(phy);
1133 +
1134 ++ DRM_DEBUG_DP("After, phy=%x init_count=%d power_on=%d\n",
1135 ++ (u32)(uintptr_t)phy, phy->init_count, phy->power_count);
1136 ++
1137 + /* aux channel down, reinit phy */
1138 + phy_exit(phy);
1139 + phy_init(phy);
1140 +@@ -1887,23 +1902,6 @@ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
1141 + return ret;
1142 + }
1143 +
1144 +-void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl)
1145 +-{
1146 +- struct dp_ctrl_private *ctrl;
1147 +- struct dp_io *dp_io;
1148 +- struct phy *phy;
1149 +-
1150 +- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
1151 +- dp_io = &ctrl->parser->io;
1152 +- phy = dp_io->phy;
1153 +-
1154 +- dp_catalog_ctrl_reset(ctrl->catalog);
1155 +-
1156 +- phy_exit(phy);
1157 +-
1158 +- DRM_DEBUG_DP("DP off phy done\n");
1159 +-}
1160 +-
1161 + int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
1162 + {
1163 + struct dp_ctrl_private *ctrl;
1164 +@@ -1931,10 +1929,14 @@ int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
1165 + DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
1166 + }
1167 +
1168 ++ DRM_DEBUG_DP("Before, phy=%x init_count=%d power_on=%d\n",
1169 ++ (u32)(uintptr_t)phy, phy->init_count, phy->power_count);
1170 ++
1171 + phy_power_off(phy);
1172 +- phy_exit(phy);
1173 +
1174 +- DRM_DEBUG_DP("DP off done\n");
1175 ++ DRM_DEBUG_DP("After, phy=%x init_count=%d power_on=%d\n",
1176 ++ (u32)(uintptr_t)phy, phy->init_count, phy->power_count);
1177 ++
1178 + return ret;
1179 + }
1180 +
1181 +diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
1182 +index 2363a2df9597b..dcc7af21a5f05 100644
1183 +--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
1184 ++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
1185 +@@ -19,12 +19,9 @@ struct dp_ctrl {
1186 + u32 pixel_rate;
1187 + };
1188 +
1189 +-int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
1190 +-void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
1191 + int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
1192 +-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
1193 ++int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
1194 + int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
1195 +-void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl);
1196 + int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
1197 + void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
1198 + void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
1199 +@@ -34,4 +31,9 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
1200 + struct dp_power *power, struct dp_catalog *catalog,
1201 + struct dp_parser *parser);
1202 +
1203 ++void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable);
1204 ++void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl);
1205 ++void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl);
1206 ++void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl);
1207 ++
1208 + #endif /* _DP_CTRL_H_ */
1209 +diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
1210 +index 7b624191abf19..b141ccb527b00 100644
1211 +--- a/drivers/gpu/drm/msm/dp/dp_display.c
1212 ++++ b/drivers/gpu/drm/msm/dp/dp_display.c
1213 +@@ -81,6 +81,7 @@ struct dp_display_private {
1214 +
1215 + /* state variables */
1216 + bool core_initialized;
1217 ++ bool phy_initialized;
1218 + bool hpd_irq_on;
1219 + bool audio_supported;
1220 +
1221 +@@ -260,7 +261,8 @@ static void dp_display_unbind(struct device *dev, struct device *master,
1222 + struct dp_display_private, dp_display);
1223 +
1224 + /* disable all HPD interrupts */
1225 +- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
1226 ++ if (dp->core_initialized)
1227 ++ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
1228 +
1229 + kthread_stop(dp->ev_tsk);
1230 +
1231 +@@ -361,36 +363,45 @@ end:
1232 + return rc;
1233 + }
1234 +
1235 +-static void dp_display_host_init(struct dp_display_private *dp, int reset)
1236 ++static void dp_display_host_phy_init(struct dp_display_private *dp)
1237 + {
1238 +- bool flip = false;
1239 ++ DRM_DEBUG_DP("core_init=%d phy_init=%d\n",
1240 ++ dp->core_initialized, dp->phy_initialized);
1241 +
1242 +- DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized);
1243 +- if (dp->core_initialized) {
1244 +- DRM_DEBUG_DP("DP core already initialized\n");
1245 +- return;
1246 ++ if (!dp->phy_initialized) {
1247 ++ dp_ctrl_phy_init(dp->ctrl);
1248 ++ dp->phy_initialized = true;
1249 ++ }
1250 ++}
1251 ++
1252 ++static void dp_display_host_phy_exit(struct dp_display_private *dp)
1253 ++{
1254 ++ DRM_DEBUG_DP("core_init=%d phy_init=%d\n",
1255 ++ dp->core_initialized, dp->phy_initialized);
1256 ++
1257 ++ if (dp->phy_initialized) {
1258 ++ dp_ctrl_phy_exit(dp->ctrl);
1259 ++ dp->phy_initialized = false;
1260 + }
1261 ++}
1262 +
1263 +- if (dp->usbpd->orientation == ORIENTATION_CC2)
1264 +- flip = true;
1265 ++static void dp_display_host_init(struct dp_display_private *dp)
1266 ++{
1267 ++ DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized);
1268 +
1269 +- dp_power_init(dp->power, flip);
1270 +- dp_ctrl_host_init(dp->ctrl, flip, reset);
1271 ++ dp_power_init(dp->power, false);
1272 ++ dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
1273 + dp_aux_init(dp->aux);
1274 + dp->core_initialized = true;
1275 + }
1276 +
1277 + static void dp_display_host_deinit(struct dp_display_private *dp)
1278 + {
1279 +- if (!dp->core_initialized) {
1280 +- DRM_DEBUG_DP("DP core not initialized\n");
1281 +- return;
1282 +- }
1283 ++ DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized);
1284 +
1285 +- dp_ctrl_host_deinit(dp->ctrl);
1286 ++ dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
1287 + dp_aux_deinit(dp->aux);
1288 + dp_power_deinit(dp->power);
1289 +-
1290 + dp->core_initialized = false;
1291 + }
1292 +
1293 +@@ -408,7 +419,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
1294 + dp = container_of(g_dp_display,
1295 + struct dp_display_private, dp_display);
1296 +
1297 +- dp_display_host_init(dp, false);
1298 ++ dp_display_host_phy_init(dp);
1299 +
1300 + rc = dp_display_process_hpd_high(dp);
1301 + end:
1302 +@@ -546,17 +557,9 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
1303 +
1304 + dp->hpd_state = ST_CONNECT_PENDING;
1305 +
1306 +- hpd->hpd_high = 1;
1307 +-
1308 + ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
1309 + if (ret) { /* link train failed */
1310 +- hpd->hpd_high = 0;
1311 + dp->hpd_state = ST_DISCONNECTED;
1312 +-
1313 +- if (ret == -ECONNRESET) { /* cable unplugged */
1314 +- dp->core_initialized = false;
1315 +- }
1316 +-
1317 + } else {
1318 + /* start sentinel checking in case of missing uevent */
1319 + dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
1320 +@@ -626,9 +629,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
1321 + if (state == ST_DISCONNECTED) {
1322 + /* triggered by irq_hdp with sink_count = 0 */
1323 + if (dp->link->sink_count == 0) {
1324 +- dp_ctrl_off_phy(dp->ctrl);
1325 +- hpd->hpd_high = 0;
1326 +- dp->core_initialized = false;
1327 ++ dp_display_host_phy_exit(dp);
1328 + }
1329 + mutex_unlock(&dp->event_mutex);
1330 + return 0;
1331 +@@ -651,8 +652,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
1332 + /* disable HPD plug interrupts */
1333 + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
1334 +
1335 +- hpd->hpd_high = 0;
1336 +-
1337 + /*
1338 + * We don't need separate work for disconnect as
1339 + * connect/attention interrupts are disabled
1340 +@@ -692,7 +691,6 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
1341 + static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
1342 + {
1343 + u32 state;
1344 +- int ret;
1345 +
1346 + mutex_lock(&dp->event_mutex);
1347 +
1348 +@@ -717,10 +715,8 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
1349 + return 0;
1350 + }
1351 +
1352 +- ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
1353 +- if (ret == -ECONNRESET) { /* cable unplugged */
1354 +- dp->core_initialized = false;
1355 +- }
1356 ++ dp_display_usbpd_attention_cb(&dp->pdev->dev);
1357 ++
1358 + DRM_DEBUG_DP("hpd_state=%d\n", state);
1359 +
1360 + mutex_unlock(&dp->event_mutex);
1361 +@@ -869,7 +865,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
1362 + return 0;
1363 + }
1364 +
1365 +- rc = dp_ctrl_on_stream(dp->ctrl);
1366 ++ rc = dp_ctrl_on_stream(dp->ctrl, data);
1367 + if (!rc)
1368 + dp_display->power_on = true;
1369 +
1370 +@@ -915,12 +911,19 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
1371 +
1372 + dp_display->audio_enabled = false;
1373 +
1374 +- /* triggered by irq_hpd with sink_count = 0 */
1375 + if (dp->link->sink_count == 0) {
1376 ++ /*
1377 ++ * irq_hpd with sink_count = 0
1378 ++ * hdmi unplugged out of dongle
1379 ++ */
1380 + dp_ctrl_off_link_stream(dp->ctrl);
1381 + } else {
1382 ++ /*
1383 ++ * unplugged interrupt
1384 ++ * dongle unplugged out of DUT
1385 ++ */
1386 + dp_ctrl_off(dp->ctrl);
1387 +- dp->core_initialized = false;
1388 ++ dp_display_host_phy_exit(dp);
1389 + }
1390 +
1391 + dp_display->power_on = false;
1392 +@@ -1050,7 +1053,7 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
1393 + static void dp_display_config_hpd(struct dp_display_private *dp)
1394 + {
1395 +
1396 +- dp_display_host_init(dp, true);
1397 ++ dp_display_host_init(dp);
1398 + dp_catalog_ctrl_hpd_config(dp->catalog);
1399 +
1400 + /* Enable interrupt first time
1401 +@@ -1317,20 +1320,23 @@ static int dp_pm_resume(struct device *dev)
1402 + dp->hpd_state = ST_DISCONNECTED;
1403 +
1404 + /* turn on dp ctrl/phy */
1405 +- dp_display_host_init(dp, true);
1406 ++ dp_display_host_init(dp);
1407 +
1408 + dp_catalog_ctrl_hpd_config(dp->catalog);
1409 +
1410 +- /*
1411 +- * set sink to normal operation mode -- D0
1412 +- * before dpcd read
1413 +- */
1414 +- dp_link_psm_config(dp->link, &dp->panel->link_info, false);
1415 +
1416 + if (dp_catalog_link_is_connected(dp->catalog)) {
1417 ++ /*
1418 ++ * set sink to normal operation mode -- D0
1419 ++ * before dpcd read
1420 ++ */
1421 ++ dp_display_host_phy_init(dp);
1422 ++ dp_link_psm_config(dp->link, &dp->panel->link_info, false);
1423 + sink_count = drm_dp_read_sink_count(dp->aux);
1424 + if (sink_count < 0)
1425 + sink_count = 0;
1426 ++
1427 ++ dp_display_host_phy_exit(dp);
1428 + }
1429 +
1430 + dp->link->sink_count = sink_count;
1431 +@@ -1369,18 +1375,16 @@ static int dp_pm_suspend(struct device *dev)
1432 + DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
1433 + dp->core_initialized, dp_display->power_on);
1434 +
1435 +- if (dp->core_initialized == true) {
1436 +- /* mainlink enabled */
1437 +- if (dp_power_clk_status(dp->power, DP_CTRL_PM))
1438 +- dp_ctrl_off_link_stream(dp->ctrl);
1439 ++ /* mainlink enabled */
1440 ++ if (dp_power_clk_status(dp->power, DP_CTRL_PM))
1441 ++ dp_ctrl_off_link_stream(dp->ctrl);
1442 +
1443 +- dp_display_host_deinit(dp);
1444 +- }
1445 +-
1446 +- dp->hpd_state = ST_SUSPENDED;
1447 ++ dp_display_host_phy_exit(dp);
1448 +
1449 + /* host_init will be called at pm_resume */
1450 +- dp->core_initialized = false;
1451 ++ dp_display_host_deinit(dp);
1452 ++
1453 ++ dp->hpd_state = ST_SUSPENDED;
1454 +
1455 + DRM_DEBUG_DP("After, core_inited=%d power_on=%d\n",
1456 + dp->core_initialized, dp_display->power_on);
1457 +@@ -1508,6 +1512,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
1458 + int rc = 0;
1459 + struct dp_display_private *dp_display;
1460 + u32 state;
1461 ++ bool force_link_train = false;
1462 +
1463 + dp_display = container_of(dp, struct dp_display_private, dp_display);
1464 + if (!dp_display->dp_mode.drm_mode.clock) {
1465 +@@ -1536,10 +1541,12 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
1466 +
1467 + state = dp_display->hpd_state;
1468 +
1469 +- if (state == ST_DISPLAY_OFF)
1470 +- dp_display_host_init(dp_display, true);
1471 ++ if (state == ST_DISPLAY_OFF) {
1472 ++ dp_display_host_phy_init(dp_display);
1473 ++ force_link_train = true;
1474 ++ }
1475 +
1476 +- dp_display_enable(dp_display, 0);
1477 ++ dp_display_enable(dp_display, force_link_train);
1478 +
1479 + rc = dp_display_post_enable(dp);
1480 + if (rc) {
1481 +@@ -1548,10 +1555,6 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
1482 + dp_display_unprepare(dp);
1483 + }
1484 +
1485 +- /* manual kick off plug event to train link */
1486 +- if (state == ST_DISPLAY_OFF)
1487 +- dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
1488 +-
1489 + /* completed connection */
1490 + dp_display->hpd_state = ST_CONNECTED;
1491 +
1492 +diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
1493 +index e1c90fa47411f..db98a1d431eb6 100644
1494 +--- a/drivers/gpu/drm/msm/dp/dp_hpd.c
1495 ++++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
1496 +@@ -32,8 +32,6 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
1497 + hpd_priv = container_of(dp_usbpd, struct dp_hpd_private,
1498 + dp_usbpd);
1499 +
1500 +- dp_usbpd->hpd_high = hpd;
1501 +-
1502 + if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
1503 + || !hpd_priv->dp_cb->disconnect) {
1504 + pr_err("hpd dp_cb not initialized\n");
1505 +diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h
1506 +index 5bc5bb64680fb..8feec5aa50271 100644
1507 +--- a/drivers/gpu/drm/msm/dp/dp_hpd.h
1508 ++++ b/drivers/gpu/drm/msm/dp/dp_hpd.h
1509 +@@ -26,7 +26,6 @@ enum plug_orientation {
1510 + * @multi_func: multi-function preferred
1511 + * @usb_config_req: request to switch to usb
1512 + * @exit_dp_mode: request exit from displayport mode
1513 +- * @hpd_high: Hot Plug Detect signal is high.
1514 + * @hpd_irq: Change in the status since last message
1515 + * @alt_mode_cfg_done: bool to specify alt mode status
1516 + * @debug_en: bool to specify debug mode
1517 +@@ -39,7 +38,6 @@ struct dp_usbpd {
1518 + bool multi_func;
1519 + bool usb_config_req;
1520 + bool exit_dp_mode;
1521 +- bool hpd_high;
1522 + bool hpd_irq;
1523 + bool alt_mode_cfg_done;
1524 + bool debug_en;
1525 +diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
1526 +index a5bdfc5029dee..d4d31e5bda070 100644
1527 +--- a/drivers/gpu/drm/msm/dp/dp_link.c
1528 ++++ b/drivers/gpu/drm/msm/dp/dp_link.c
1529 +@@ -737,18 +737,25 @@ static int dp_link_parse_sink_count(struct dp_link *dp_link)
1530 + return 0;
1531 + }
1532 +
1533 +-static void dp_link_parse_sink_status_field(struct dp_link_private *link)
1534 ++static int dp_link_parse_sink_status_field(struct dp_link_private *link)
1535 + {
1536 + int len = 0;
1537 +
1538 + link->prev_sink_count = link->dp_link.sink_count;
1539 +- dp_link_parse_sink_count(&link->dp_link);
1540 ++ len = dp_link_parse_sink_count(&link->dp_link);
1541 ++ if (len < 0) {
1542 ++ DRM_ERROR("DP parse sink count failed\n");
1543 ++ return len;
1544 ++ }
1545 +
1546 + len = drm_dp_dpcd_read_link_status(link->aux,
1547 + link->link_status);
1548 +- if (len < DP_LINK_STATUS_SIZE)
1549 ++ if (len < DP_LINK_STATUS_SIZE) {
1550 + DRM_ERROR("DP link status read failed\n");
1551 +- dp_link_parse_request(link);
1552 ++ return len;
1553 ++ }
1554 ++
1555 ++ return dp_link_parse_request(link);
1556 + }
1557 +
1558 + /**
1559 +@@ -1023,7 +1030,9 @@ int dp_link_process_request(struct dp_link *dp_link)
1560 +
1561 + dp_link_reset_data(link);
1562 +
1563 +- dp_link_parse_sink_status_field(link);
1564 ++ ret = dp_link_parse_sink_status_field(link);
1565 ++ if (ret)
1566 ++ return ret;
1567 +
1568 + if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
1569 + dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
1570 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
1571 +index 9712582886aac..916361c30d774 100644
1572 +--- a/drivers/gpu/drm/msm/msm_drv.c
1573 ++++ b/drivers/gpu/drm/msm/msm_drv.c
1574 +@@ -1102,7 +1102,7 @@ static const struct drm_driver msm_driver = {
1575 + .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1576 + .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1577 + .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
1578 +- .gem_prime_mmap = drm_gem_prime_mmap,
1579 ++ .gem_prime_mmap = msm_gem_prime_mmap,
1580 + #ifdef CONFIG_DEBUG_FS
1581 + .debugfs_init = msm_debugfs_init,
1582 + #endif
1583 +diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
1584 +index c552f0c3890c1..bd5132bb9bde2 100644
1585 +--- a/drivers/gpu/drm/msm/msm_drv.h
1586 ++++ b/drivers/gpu/drm/msm/msm_drv.h
1587 +@@ -298,6 +298,7 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t
1588 + void msm_gem_shrinker_init(struct drm_device *dev);
1589 + void msm_gem_shrinker_cleanup(struct drm_device *dev);
1590 +
1591 ++int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
1592 + struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
1593 + int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
1594 + void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
1595 +diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
1596 +index 8a2d94bd5df28..02c70a0b2a036 100644
1597 +--- a/drivers/gpu/drm/msm/msm_gem_prime.c
1598 ++++ b/drivers/gpu/drm/msm/msm_gem_prime.c
1599 +@@ -11,6 +11,21 @@
1600 + #include "msm_drv.h"
1601 + #include "msm_gem.h"
1602 +
1603 ++int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1604 ++{
1605 ++ int ret;
1606 ++
1607 ++	/* Ensure the mmap offset is initialized. It is created lazily,
1608 ++	 * so if the buffer has not first been mmap'd directly as a GEM
1609 ++	 * object, the offset will not exist yet.
1610 ++ */
1611 ++ ret = drm_gem_create_mmap_offset(obj);
1612 ++ if (ret)
1613 ++ return ret;
1614 ++
1615 ++ return drm_gem_prime_mmap(obj, vma);
1616 ++}
1617 ++
1618 + struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
1619 + {
1620 + struct msm_gem_object *msm_obj = to_msm_bo(obj);
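
A note on why the msm_gem_prime.c fix above is safe on every path:
drm_gem_create_mmap_offset() is idempotent (it returns 0 when the
offset already exists), so calling it unconditionally in the prime
mmap hook only creates the offset for buffers that were never mmap'd
directly as GEM objects. A minimal sketch of the same guard pattern,
using a hypothetical my_gem_prime_mmap() rather than the actual msm
code:

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

static int my_gem_prime_mmap(struct drm_gem_object *obj,
			     struct vm_area_struct *vma)
{
	int ret;

	/* No-op if the offset was already created by a direct mmap. */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		return ret;

	return drm_gem_prime_mmap(obj, vma);
}
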
1621 +diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
1622 +index 2c46cd968ac4c..b01d0a521c908 100644
1623 +--- a/drivers/gpu/drm/msm/msm_gpu.c
1624 ++++ b/drivers/gpu/drm/msm/msm_gpu.c
1625 +@@ -658,7 +658,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
1626 + msm_submit_retire(submit);
1627 +
1628 + pm_runtime_mark_last_busy(&gpu->pdev->dev);
1629 +- pm_runtime_put_autosuspend(&gpu->pdev->dev);
1630 +
1631 + spin_lock_irqsave(&ring->submit_lock, flags);
1632 + list_del(&submit->node);
1633 +@@ -672,6 +671,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
1634 + msm_devfreq_idle(gpu);
1635 + mutex_unlock(&gpu->active_lock);
1636 +
1637 ++ pm_runtime_put_autosuspend(&gpu->pdev->dev);
1638 ++
1639 + msm_gem_submit_put(submit);
1640 + }
1641 +
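
The msm_gpu.c hunk above reorders the runtime-PM put so that the
reference taken at submit time stays held across all retirement
bookkeeping and is dropped only once the submit is off the ring. A
sketch of that bracketing, with hypothetical my_* helpers standing in
for the driver's own:

#include <linux/device.h>
#include <linux/pm_runtime.h>

struct my_job { int id; };

static void my_remove_from_ring(struct my_job *job) { /* ... */ }
static void my_update_idle_stats(struct device *dev) { /* ... */ }

static void my_retire_job(struct device *dev, struct my_job *job)
{
	pm_runtime_mark_last_busy(dev);

	/* Bookkeeping runs while the PM reference is still held... */
	my_remove_from_ring(job);
	my_update_idle_stats(dev);

	/* ...and only then may the device autosuspend. */
	pm_runtime_put_autosuspend(dev);
}
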
1642 +diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
1643 +index bcaddbba564df..a54ed354578b5 100644
1644 +--- a/drivers/gpu/drm/msm/msm_iommu.c
1645 ++++ b/drivers/gpu/drm/msm/msm_iommu.c
1646 +@@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
1647 + u64 addr = iova;
1648 + unsigned int i;
1649 +
1650 +- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
1651 ++ for_each_sgtable_sg(sgt, sg, i) {
1652 + size_t size = sg->length;
1653 + phys_addr_t phys = sg_phys(sg);
1654 +
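
In the msm_iommu.c hunk, for_each_sg(sgt->sgl, sg, sgt->nents, i)
walks only the DMA-mapped entries (nents), which can be fewer than
the CPU-side entries once an IOMMU coalesces them;
for_each_sgtable_sg() iterates orig_nents instead, which is what a
page-table builder needs. A small illustration, assuming a
hypothetical my_total_cpu_bytes() helper:

#include <linux/scatterlist.h>

static size_t my_total_cpu_bytes(struct sg_table *sgt)
{
	struct scatterlist *sg;
	size_t total = 0;
	unsigned int i;

	/* Visits all orig_nents CPU entries, correct even when DMA
	 * mapping coalesced them and sgt->nents < sgt->orig_nents.
	 */
	for_each_sgtable_sg(sgt, sg, i)
		total += sg->length;

	return total;
}
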
1655 +diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
1656 +index 54dd562e294ce..5b7061e2bca4d 100644
1657 +--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
1658 ++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
1659 +@@ -72,7 +72,6 @@ static int sun4i_drv_bind(struct device *dev)
1660 + goto free_drm;
1661 + }
1662 +
1663 +- dev_set_drvdata(dev, drm);
1664 + drm->dev_private = drv;
1665 + INIT_LIST_HEAD(&drv->frontend_list);
1666 + INIT_LIST_HEAD(&drv->engine_list);
1667 +@@ -113,6 +112,8 @@ static int sun4i_drv_bind(struct device *dev)
1668 +
1669 + drm_fbdev_generic_setup(drm, 32);
1670 +
1671 ++ dev_set_drvdata(dev, drm);
1672 ++
1673 + return 0;
1674 +
1675 + finish_poll:
1676 +@@ -129,6 +130,7 @@ static void sun4i_drv_unbind(struct device *dev)
1677 + {
1678 + struct drm_device *drm = dev_get_drvdata(dev);
1679 +
1680 ++ dev_set_drvdata(dev, NULL);
1681 + drm_dev_unregister(drm);
1682 + drm_kms_helper_poll_fini(drm);
1683 + drm_atomic_helper_shutdown(drm);
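
The sun4i_drv.c hunks apply a publish-last/retract-first discipline:
dev_set_drvdata() runs only once bind has fully succeeded, and is
cleared before unbind tears anything down, so a concurrent
dev_get_drvdata() caller never sees a half-initialized drm_device.
Roughly, with hypothetical my_setup()/my_teardown():

#include <linux/device.h>

struct my_state { int ready; };

static int my_setup(struct my_state *st) { st->ready = 1; return 0; }
static void my_teardown(struct my_state *st) { st->ready = 0; }

static int my_bind(struct device *dev, struct my_state *st)
{
	int ret = my_setup(st);

	if (ret)
		return ret;

	dev_set_drvdata(dev, st);	/* publish only when ready */
	return 0;
}

static void my_unbind(struct device *dev)
{
	struct my_state *st = dev_get_drvdata(dev);

	dev_set_drvdata(dev, NULL);	/* retract before teardown */
	my_teardown(st);
}
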
1684 +diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
1685 +index 2edfcb4819b7d..3a1f47c7288ff 100644
1686 +--- a/drivers/iio/accel/bma180.c
1687 ++++ b/drivers/iio/accel/bma180.c
1688 +@@ -1006,11 +1006,12 @@ static int bma180_probe(struct i2c_client *client,
1689 +
1690 + data->trig->ops = &bma180_trigger_ops;
1691 + iio_trigger_set_drvdata(data->trig, indio_dev);
1692 +- indio_dev->trig = iio_trigger_get(data->trig);
1693 +
1694 + ret = iio_trigger_register(data->trig);
1695 + if (ret)
1696 + goto err_trigger_free;
1697 ++
1698 ++ indio_dev->trig = iio_trigger_get(data->trig);
1699 + }
1700 +
1701 + ret = iio_triggered_buffer_setup(indio_dev, NULL,
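
This bma180 hunk, and the kxcjk-1013, mxc4005, ccs811 and hts221
hunks below, all enforce the same ordering rule: take the
iio_trigger_get() reference for indio_dev->trig only after
iio_trigger_register() has succeeded, so that a registration failure
leaves no extra reference to unwind. The corrected order, as a
driver-neutral sketch:

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

static int my_setup_trigger(struct iio_dev *indio_dev,
			    struct iio_trigger *trig)
{
	int ret;

	iio_trigger_set_drvdata(trig, indio_dev);

	ret = iio_trigger_register(trig);
	if (ret)
		return ret;	/* nothing extra to release */

	/* Reference for indio_dev->trig is taken only on success. */
	indio_dev->trig = iio_trigger_get(trig);
	return 0;
}
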
1702 +diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
1703 +index ba6c8ca488b1a..594a383169c75 100644
1704 +--- a/drivers/iio/accel/kxcjk-1013.c
1705 ++++ b/drivers/iio/accel/kxcjk-1013.c
1706 +@@ -1553,12 +1553,12 @@ static int kxcjk1013_probe(struct i2c_client *client,
1707 +
1708 + data->dready_trig->ops = &kxcjk1013_trigger_ops;
1709 + iio_trigger_set_drvdata(data->dready_trig, indio_dev);
1710 +- indio_dev->trig = data->dready_trig;
1711 +- iio_trigger_get(indio_dev->trig);
1712 + ret = iio_trigger_register(data->dready_trig);
1713 + if (ret)
1714 + goto err_poweroff;
1715 +
1716 ++ indio_dev->trig = iio_trigger_get(data->dready_trig);
1717 ++
1718 + data->motion_trig->ops = &kxcjk1013_trigger_ops;
1719 + iio_trigger_set_drvdata(data->motion_trig, indio_dev);
1720 + ret = iio_trigger_register(data->motion_trig);
1721 +diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
1722 +index 21a99467f3646..373b59557afe9 100644
1723 +--- a/drivers/iio/accel/mma8452.c
1724 ++++ b/drivers/iio/accel/mma8452.c
1725 +@@ -1493,10 +1493,14 @@ static int mma8452_reset(struct i2c_client *client)
1726 + int i;
1727 + int ret;
1728 +
1729 +- ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2,
1730 ++ /*
1731 ++	 * On the fxls8471, the device resets immediately after the reset
1732 ++	 * bit is set and does not ACK the transfer, so do not check the
1733 ++	 * return value. The loop below reads the reset register back to
1734 ++	 * verify that the reset actually took effect.
1735 ++ */
1736 ++ i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2,
1737 + MMA8452_CTRL_REG2_RST);
1738 +- if (ret < 0)
1739 +- return ret;
1740 +
1741 + for (i = 0; i < 10; i++) {
1742 + usleep_range(100, 200);
1743 +@@ -1539,11 +1543,13 @@ static int mma8452_probe(struct i2c_client *client,
1744 + mutex_init(&data->lock);
1745 +
1746 + data->chip_info = device_get_match_data(&client->dev);
1747 +- if (!data->chip_info && id) {
1748 +- data->chip_info = &mma_chip_info_table[id->driver_data];
1749 +- } else {
1750 +- dev_err(&client->dev, "unknown device model\n");
1751 +- return -ENODEV;
1752 ++ if (!data->chip_info) {
1753 ++ if (id) {
1754 ++ data->chip_info = &mma_chip_info_table[id->driver_data];
1755 ++ } else {
1756 ++ dev_err(&client->dev, "unknown device model\n");
1757 ++ return -ENODEV;
1758 ++ }
1759 + }
1760 +
1761 + data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
1762 +diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
1763 +index b3afbf0649152..df600d2917c0a 100644
1764 +--- a/drivers/iio/accel/mxc4005.c
1765 ++++ b/drivers/iio/accel/mxc4005.c
1766 +@@ -456,8 +456,6 @@ static int mxc4005_probe(struct i2c_client *client,
1767 +
1768 + data->dready_trig->ops = &mxc4005_trigger_ops;
1769 + iio_trigger_set_drvdata(data->dready_trig, indio_dev);
1770 +- indio_dev->trig = data->dready_trig;
1771 +- iio_trigger_get(indio_dev->trig);
1772 + ret = devm_iio_trigger_register(&client->dev,
1773 + data->dready_trig);
1774 + if (ret) {
1775 +@@ -465,6 +463,8 @@ static int mxc4005_probe(struct i2c_client *client,
1776 + "failed to register trigger\n");
1777 + return ret;
1778 + }
1779 ++
1780 ++ indio_dev->trig = iio_trigger_get(data->dready_trig);
1781 + }
1782 +
1783 + return devm_iio_device_register(&client->dev, indio_dev);
1784 +diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
1785 +index a73e3c2d212fa..a9e655e69eaa2 100644
1786 +--- a/drivers/iio/adc/adi-axi-adc.c
1787 ++++ b/drivers/iio/adc/adi-axi-adc.c
1788 +@@ -322,16 +322,19 @@ static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
1789 +
1790 + if (!try_module_get(cl->dev->driver->owner)) {
1791 + mutex_unlock(&registered_clients_lock);
1792 ++ of_node_put(cln);
1793 + return ERR_PTR(-ENODEV);
1794 + }
1795 +
1796 + get_device(cl->dev);
1797 + cl->info = info;
1798 + mutex_unlock(&registered_clients_lock);
1799 ++ of_node_put(cln);
1800 + return cl;
1801 + }
1802 +
1803 + mutex_unlock(&registered_clients_lock);
1804 ++ of_node_put(cln);
1805 +
1806 + return ERR_PTR(-EPROBE_DEFER);
1807 + }
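
The adi-axi-adc hunk plugs a device-tree reference leak: the node
obtained earlier in the function must be put on every exit path, not
only on one of them. The balanced pattern, sketched with a
hypothetical my_use_node() consumer:

#include <linux/of.h>

static int my_use_node(struct device_node *n) { return 0; }

static int my_lookup(struct device_node *np)
{
	struct device_node *child;
	int ret;

	child = of_parse_phandle(np, "some-phandle", 0);
	if (!child)
		return -ENOENT;

	ret = my_use_node(child);

	of_node_put(child);	/* reference dropped on every path */
	return ret;
}
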
1808 +diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
1809 +index 5f5e8b39e4d22..84dbe9e2f0eff 100644
1810 +--- a/drivers/iio/adc/axp288_adc.c
1811 ++++ b/drivers/iio/adc/axp288_adc.c
1812 +@@ -196,6 +196,14 @@ static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
1813 + },
1814 + .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
1815 + },
1816 ++ {
1817 ++ /* Nuvision Solo 10 Draw */
1818 ++ .matches = {
1819 ++ DMI_MATCH(DMI_SYS_VENDOR, "TMAX"),
1820 ++ DMI_MATCH(DMI_PRODUCT_NAME, "TM101W610L"),
1821 ++ },
1822 ++ .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
1823 ++ },
1824 + {}
1825 + };
1826 +
1827 +diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c
1828 +index 32fbf57c362fa..2fa41b90bcfa9 100644
1829 +--- a/drivers/iio/adc/rzg2l_adc.c
1830 ++++ b/drivers/iio/adc/rzg2l_adc.c
1831 +@@ -334,11 +334,15 @@ static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l
1832 + i = 0;
1833 + device_for_each_child_node(&pdev->dev, fwnode) {
1834 + ret = fwnode_property_read_u32(fwnode, "reg", &channel);
1835 +- if (ret)
1836 ++ if (ret) {
1837 ++ fwnode_handle_put(fwnode);
1838 + return ret;
1839 ++ }
1840 +
1841 +- if (channel >= RZG2L_ADC_MAX_CHANNELS)
1842 ++ if (channel >= RZG2L_ADC_MAX_CHANNELS) {
1843 ++ fwnode_handle_put(fwnode);
1844 + return -EINVAL;
1845 ++ }
1846 +
1847 + chan_array[i].type = IIO_VOLTAGE;
1848 + chan_array[i].indexed = 1;
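
device_for_each_child_node() holds a reference on the child it hands
to each iteration and releases it when advancing, so any early return
from the loop body has to drop the current child's reference itself.
This rzg2l hunk does that inline; the ads131e08 hunk further down
routes the same fix through a common fwnode_handle_put() exit label.
A sketch:

#include <linux/property.h>

static int my_count_channels(struct device *dev, u32 max)
{
	struct fwnode_handle *child;
	u32 reg;
	int ret, n = 0;

	device_for_each_child_node(dev, child) {
		ret = fwnode_property_read_u32(child, "reg", &reg);
		if (ret) {
			fwnode_handle_put(child);  /* drop ref on bail-out */
			return ret;
		}
		if (reg >= max) {
			fwnode_handle_put(child);
			return -EINVAL;
		}
		n++;
	}

	return n;
}
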
1849 +diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
1850 +index c088cb990193c..42faca457ace8 100644
1851 +--- a/drivers/iio/adc/stm32-adc-core.c
1852 ++++ b/drivers/iio/adc/stm32-adc-core.c
1853 +@@ -64,6 +64,7 @@ struct stm32_adc_priv;
1854 + * @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
1855 + * @has_syscfg: SYSCFG capability flags
1856 + * @num_irqs: number of interrupt lines
1857 ++ * @num_adcs: maximum number of ADC instances in the common registers
1858 + */
1859 + struct stm32_adc_priv_cfg {
1860 + const struct stm32_adc_common_regs *regs;
1861 +@@ -71,6 +72,7 @@ struct stm32_adc_priv_cfg {
1862 + u32 max_clk_rate_hz;
1863 + unsigned int has_syscfg;
1864 + unsigned int num_irqs;
1865 ++ unsigned int num_adcs;
1866 + };
1867 +
1868 + /**
1869 +@@ -352,7 +354,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
1870 + * before invoking the interrupt handler (e.g. call ISR only for
1871 + * IRQ-enabled ADCs).
1872 + */
1873 +- for (i = 0; i < priv->cfg->num_irqs; i++) {
1874 ++ for (i = 0; i < priv->cfg->num_adcs; i++) {
1875 + if ((status & priv->cfg->regs->eoc_msk[i] &&
1876 + stm32_adc_eoc_enabled(priv, i)) ||
1877 + (status & priv->cfg->regs->ovr_msk[i]))
1878 +@@ -796,6 +798,7 @@ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
1879 + .clk_sel = stm32f4_adc_clk_sel,
1880 + .max_clk_rate_hz = 36000000,
1881 + .num_irqs = 1,
1882 ++ .num_adcs = 3,
1883 + };
1884 +
1885 + static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
1886 +@@ -804,14 +807,16 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
1887 + .max_clk_rate_hz = 36000000,
1888 + .has_syscfg = HAS_VBOOSTER,
1889 + .num_irqs = 1,
1890 ++ .num_adcs = 2,
1891 + };
1892 +
1893 + static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
1894 + .regs = &stm32h7_adc_common_regs,
1895 + .clk_sel = stm32h7_adc_clk_sel,
1896 +- .max_clk_rate_hz = 40000000,
1897 ++ .max_clk_rate_hz = 36000000,
1898 + .has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD,
1899 + .num_irqs = 2,
1900 ++ .num_adcs = 2,
1901 + };
1902 +
1903 + static const struct of_device_id stm32_adc_of_match[] = {
1904 +diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
1905 +index e3e75413b49e7..ef5b54ed96614 100644
1906 +--- a/drivers/iio/adc/stm32-adc.c
1907 ++++ b/drivers/iio/adc/stm32-adc.c
1908 +@@ -1259,7 +1259,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
1909 + struct stm32_adc *adc = iio_priv(indio_dev);
1910 + const struct stm32_adc_regspec *regs = adc->cfg->regs;
1911 + u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
1912 +- u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
1913 +
1914 + /* Check ovr status right now, as ovr mask should be already disabled */
1915 + if (status & regs->isr_ovr.mask) {
1916 +@@ -1274,11 +1273,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
1917 + return IRQ_HANDLED;
1918 + }
1919 +
1920 +- if (!(status & mask))
1921 +- dev_err_ratelimited(&indio_dev->dev,
1922 +- "Unexpected IRQ: IER=0x%08x, ISR=0x%08x\n",
1923 +- mask, status);
1924 +-
1925 + return IRQ_NONE;
1926 + }
1927 +
1928 +@@ -1288,10 +1282,6 @@ static irqreturn_t stm32_adc_isr(int irq, void *data)
1929 + struct stm32_adc *adc = iio_priv(indio_dev);
1930 + const struct stm32_adc_regspec *regs = adc->cfg->regs;
1931 + u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
1932 +- u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
1933 +-
1934 +- if (!(status & mask))
1935 +- return IRQ_WAKE_THREAD;
1936 +
1937 + if (status & regs->isr_ovr.mask) {
1938 + /*
1939 +diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
1940 +index 0c2025a225750..80a09817c1194 100644
1941 +--- a/drivers/iio/adc/ti-ads131e08.c
1942 ++++ b/drivers/iio/adc/ti-ads131e08.c
1943 +@@ -739,7 +739,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
1944 + device_for_each_child_node(dev, node) {
1945 + ret = fwnode_property_read_u32(node, "reg", &channel);
1946 + if (ret)
1947 +- return ret;
1948 ++ goto err_child_out;
1949 +
1950 + ret = fwnode_property_read_u32(node, "ti,gain", &tmp);
1951 + if (ret) {
1952 +@@ -747,7 +747,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
1953 + } else {
1954 + ret = ads131e08_pga_gain_to_field_value(st, tmp);
1955 + if (ret < 0)
1956 +- return ret;
1957 ++ goto err_child_out;
1958 +
1959 + channel_config[i].pga_gain = tmp;
1960 + }
1961 +@@ -758,7 +758,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
1962 + } else {
1963 + ret = ads131e08_validate_channel_mux(st, tmp);
1964 + if (ret)
1965 +- return ret;
1966 ++ goto err_child_out;
1967 +
1968 + channel_config[i].mux = tmp;
1969 + }
1970 +@@ -784,6 +784,10 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
1971 + st->channel_config = channel_config;
1972 +
1973 + return 0;
1974 ++
1975 ++err_child_out:
1976 ++ fwnode_handle_put(node);
1977 ++ return ret;
1978 + }
1979 +
1980 + static void ads131e08_regulator_disable(void *data)
1981 +diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
1982 +index 271d73e420c42..cc28713b0dc8b 100644
1983 +--- a/drivers/iio/afe/iio-rescale.c
1984 ++++ b/drivers/iio/afe/iio-rescale.c
1985 +@@ -148,7 +148,7 @@ static int rescale_configure_channel(struct device *dev,
1986 + chan->ext_info = rescale->ext_info;
1987 + chan->type = rescale->cfg->type;
1988 +
1989 +- if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) ||
1990 ++ if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) &&
1991 + iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) {
1992 + dev_info(dev, "using raw+scale source channel\n");
1993 + } else if (iio_channel_has_info(schan, IIO_CHAN_INFO_PROCESSED)) {
1994 +diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
1995 +index 847194fa1e464..80ef1aa9aae3b 100644
1996 +--- a/drivers/iio/chemical/ccs811.c
1997 ++++ b/drivers/iio/chemical/ccs811.c
1998 +@@ -499,11 +499,11 @@ static int ccs811_probe(struct i2c_client *client,
1999 +
2000 + data->drdy_trig->ops = &ccs811_trigger_ops;
2001 + iio_trigger_set_drvdata(data->drdy_trig, indio_dev);
2002 +- indio_dev->trig = data->drdy_trig;
2003 +- iio_trigger_get(indio_dev->trig);
2004 + ret = iio_trigger_register(data->drdy_trig);
2005 + if (ret)
2006 + goto err_poweroff;
2007 ++
2008 ++ indio_dev->trig = iio_trigger_get(data->drdy_trig);
2009 + }
2010 +
2011 + ret = iio_triggered_buffer_setup(indio_dev, NULL,
2012 +diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
2013 +index 3225de1f023b3..5311bee5475ff 100644
2014 +--- a/drivers/iio/gyro/mpu3050-core.c
2015 ++++ b/drivers/iio/gyro/mpu3050-core.c
2016 +@@ -876,6 +876,7 @@ static int mpu3050_power_up(struct mpu3050 *mpu3050)
2017 + ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
2018 + MPU3050_PWR_MGM_SLEEP, 0);
2019 + if (ret) {
2020 ++ regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
2021 + dev_err(mpu3050->dev, "error setting power mode\n");
2022 + return ret;
2023 + }
2024 +diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
2025 +index f29692b9d2db0..66b32413cf5e2 100644
2026 +--- a/drivers/iio/humidity/hts221_buffer.c
2027 ++++ b/drivers/iio/humidity/hts221_buffer.c
2028 +@@ -135,9 +135,12 @@ int hts221_allocate_trigger(struct iio_dev *iio_dev)
2029 +
2030 + iio_trigger_set_drvdata(hw->trig, iio_dev);
2031 + hw->trig->ops = &hts221_trigger_ops;
2032 ++
2033 ++ err = devm_iio_trigger_register(hw->dev, hw->trig);
2034 ++
2035 + iio_dev->trig = iio_trigger_get(hw->trig);
2036 +
2037 +- return devm_iio_trigger_register(hw->dev, hw->trig);
2038 ++ return err;
2039 + }
2040 +
2041 + static int hts221_buffer_preenable(struct iio_dev *iio_dev)
2042 +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
2043 +index c0f5059b13b31..995a9dc06521d 100644
2044 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
2045 ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
2046 +@@ -17,6 +17,7 @@
2047 + #include "inv_icm42600_buffer.h"
2048 +
2049 + enum inv_icm42600_chip {
2050 ++ INV_CHIP_INVALID,
2051 + INV_CHIP_ICM42600,
2052 + INV_CHIP_ICM42602,
2053 + INV_CHIP_ICM42605,
2054 +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
2055 +index 86858da9cc38f..ca85fccc98393 100644
2056 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
2057 ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
2058 +@@ -565,7 +565,7 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
2059 + bool open_drain;
2060 + int ret;
2061 +
2062 +- if (chip < 0 || chip >= INV_CHIP_NB) {
2063 ++ if (chip <= INV_CHIP_INVALID || chip >= INV_CHIP_NB) {
2064 + dev_err(dev, "invalid chip = %d\n", chip);
2065 + return -ENODEV;
2066 + }
2067 +diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
2068 +index 9ff7b0e56cf67..b2bc637150bfa 100644
2069 +--- a/drivers/iio/magnetometer/yamaha-yas530.c
2070 ++++ b/drivers/iio/magnetometer/yamaha-yas530.c
2071 +@@ -639,7 +639,7 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx)
2072 + dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data);
2073 +
2074 + /* Sanity check, is this all zeroes? */
2075 +- if (memchr_inv(data, 0x00, 13)) {
2076 ++ if (memchr_inv(data, 0x00, 13) == NULL) {
2077 + if (!(data[13] & BIT(7)))
2078 + dev_warn(yas5xx->dev, "calibration is blank!\n");
2079 + }
2080 +diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
2081 +index e9adfff45b39b..bec9b94e088b9 100644
2082 +--- a/drivers/iio/trigger/iio-trig-sysfs.c
2083 ++++ b/drivers/iio/trigger/iio-trig-sysfs.c
2084 +@@ -195,6 +195,7 @@ static int iio_sysfs_trigger_remove(int id)
2085 + }
2086 +
2087 + iio_trigger_unregister(t->trig);
2088 ++ irq_work_sync(&t->work);
2089 + iio_trigger_free(t->trig);
2090 +
2091 + list_del(&t->l);
2092 +diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
2093 +index 2a78f68741431..a56df45366059 100644
2094 +--- a/drivers/md/dm-era-target.c
2095 ++++ b/drivers/md/dm-era-target.c
2096 +@@ -1400,7 +1400,7 @@ static void start_worker(struct era *era)
2097 + static void stop_worker(struct era *era)
2098 + {
2099 + atomic_set(&era->suspended, 1);
2100 +- flush_workqueue(era->wq);
2101 ++ drain_workqueue(era->wq);
2102 + }
2103 +
2104 + /*----------------------------------------------------------------
2105 +@@ -1570,6 +1570,12 @@ static void era_postsuspend(struct dm_target *ti)
2106 + }
2107 +
2108 + stop_worker(era);
2109 ++
2110 ++ r = metadata_commit(era->md);
2111 ++ if (r) {
2112 ++ DMERR("%s: metadata_commit failed", __func__);
2113 ++ /* FIXME: fail mode */
2114 ++ }
2115 + }
2116 +
2117 + static int era_preresume(struct dm_target *ti)
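
flush_workqueue() only waits for work already queued when the flush
starts; a work item that requeues itself, as the era worker does, can
still be running afterwards. drain_workqueue() keeps flushing until
the queue stays empty. Stopping a self-rearming worker, sketched with
illustrative names and assuming the stop flag is set before draining:

#include <linux/atomic.h>
#include <linux/workqueue.h>

struct my_worker {
	struct workqueue_struct *wq;
	struct work_struct work;
	atomic_t stop;
};

static void my_work_fn(struct work_struct *w)
{
	struct my_worker *mw = container_of(w, struct my_worker, work);

	/* ... one unit of work ... */

	if (!atomic_read(&mw->stop))
		queue_work(mw->wq, &mw->work);	/* rearm */
}

static void my_stop(struct my_worker *mw)
{
	atomic_set(&mw->stop, 1);
	/* flush_workqueue() could miss a rearm racing with the flag;
	 * drain_workqueue() waits until nothing requeues itself.
	 */
	drain_workqueue(mw->wq);
}
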
2118 +diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
2119 +index ccdd65148498f..b40741bedfd43 100644
2120 +--- a/drivers/md/dm-log.c
2121 ++++ b/drivers/md/dm-log.c
2122 +@@ -615,7 +615,7 @@ static int disk_resume(struct dm_dirty_log *log)
2123 + log_clear_bit(lc, lc->clean_bits, i);
2124 +
2125 + /* clear any old bits -- device has shrunk */
2126 +- for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
2127 ++ for (i = lc->region_count; i % BITS_PER_LONG; i++)
2128 + log_clear_bit(lc, lc->clean_bits, i);
2129 +
2130 + /* copy clean across to sync */
2131 +diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
2132 +index 4733e7898ffe5..c491cd549644f 100644
2133 +--- a/drivers/memory/samsung/exynos5422-dmc.c
2134 ++++ b/drivers/memory/samsung/exynos5422-dmc.c
2135 +@@ -1187,33 +1187,39 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc)
2136 +
2137 + dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
2138 + sizeof(u32), GFP_KERNEL);
2139 +- if (!dmc->timing_row)
2140 +- return -ENOMEM;
2141 ++ if (!dmc->timing_row) {
2142 ++ ret = -ENOMEM;
2143 ++ goto put_node;
2144 ++ }
2145 +
2146 + dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
2147 + sizeof(u32), GFP_KERNEL);
2148 +- if (!dmc->timing_data)
2149 +- return -ENOMEM;
2150 ++ if (!dmc->timing_data) {
2151 ++ ret = -ENOMEM;
2152 ++ goto put_node;
2153 ++ }
2154 +
2155 + dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
2156 + sizeof(u32), GFP_KERNEL);
2157 +- if (!dmc->timing_power)
2158 +- return -ENOMEM;
2159 ++ if (!dmc->timing_power) {
2160 ++ ret = -ENOMEM;
2161 ++ goto put_node;
2162 ++ }
2163 +
2164 + dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev,
2165 + DDR_TYPE_LPDDR3,
2166 + &dmc->timings_arr_size);
2167 + if (!dmc->timings) {
2168 +- of_node_put(np_ddr);
2169 + dev_warn(dmc->dev, "could not get timings from DT\n");
2170 +- return -EINVAL;
2171 ++ ret = -EINVAL;
2172 ++ goto put_node;
2173 + }
2174 +
2175 + dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev);
2176 + if (!dmc->min_tck) {
2177 +- of_node_put(np_ddr);
2178 + dev_warn(dmc->dev, "could not get tck from DT\n");
2179 +- return -EINVAL;
2180 ++ ret = -EINVAL;
2181 ++ goto put_node;
2182 + }
2183 +
2184 + /* Sorted array of OPPs with frequency ascending */
2185 +@@ -1227,13 +1233,14 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc)
2186 + clk_period_ps);
2187 + }
2188 +
2189 +- of_node_put(np_ddr);
2190 +
2191 + /* Take the highest frequency's timings as 'bypass' */
2192 + dmc->bypass_timing_row = dmc->timing_row[idx - 1];
2193 + dmc->bypass_timing_data = dmc->timing_data[idx - 1];
2194 + dmc->bypass_timing_power = dmc->timing_power[idx - 1];
2195 +
2196 ++put_node:
2197 ++ of_node_put(np_ddr);
2198 + return ret;
2199 + }
2200 +
2201 +diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
2202 +index 1ac92015992ed..f9b2897569bb4 100644
2203 +--- a/drivers/mmc/host/mtk-sd.c
2204 ++++ b/drivers/mmc/host/mtk-sd.c
2205 +@@ -1355,7 +1355,7 @@ static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq)
2206 + msdc_request_done(host, mrq);
2207 + }
2208 +
2209 +-static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
2210 ++static void msdc_data_xfer_done(struct msdc_host *host, u32 events,
2211 + struct mmc_request *mrq, struct mmc_data *data)
2212 + {
2213 + struct mmc_command *stop;
2214 +@@ -1375,7 +1375,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
2215 + spin_unlock_irqrestore(&host->lock, flags);
2216 +
2217 + if (done)
2218 +- return true;
2219 ++ return;
2220 + stop = data->stop;
2221 +
2222 + if (check_data || (stop && stop->error)) {
2223 +@@ -1384,12 +1384,15 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
2224 + sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP,
2225 + 1);
2226 +
2227 ++ ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CTRL, val,
2228 ++ !(val & MSDC_DMA_CTRL_STOP), 1, 20000);
2229 ++ if (ret)
2230 ++ dev_dbg(host->dev, "DMA stop timed out\n");
2231 ++
2232 + ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val,
2233 + !(val & MSDC_DMA_CFG_STS), 1, 20000);
2234 +- if (ret) {
2235 +- dev_dbg(host->dev, "DMA stop timed out\n");
2236 +- return false;
2237 +- }
2238 ++ if (ret)
2239 ++ dev_dbg(host->dev, "DMA inactive timed out\n");
2240 +
2241 + sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
2242 + dev_dbg(host->dev, "DMA stop\n");
2243 +@@ -1414,9 +1417,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
2244 + }
2245 +
2246 + msdc_data_xfer_next(host, mrq);
2247 +- done = true;
2248 + }
2249 +- return done;
2250 + }
2251 +
2252 + static void msdc_set_buswidth(struct msdc_host *host, u32 width)
2253 +@@ -2347,6 +2348,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
2254 + if (recovery) {
2255 + sdr_set_field(host->base + MSDC_DMA_CTRL,
2256 + MSDC_DMA_CTRL_STOP, 1);
2257 ++ if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CTRL, val,
2258 ++ !(val & MSDC_DMA_CTRL_STOP), 1, 3000)))
2259 ++ return;
2260 + if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val,
2261 + !(val & MSDC_DMA_CFG_STS), 1, 3000)))
2262 + return;
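
The mtk-sd hunk first polls MSDC_DMA_CTRL until the self-clearing
STOP bit drops, and only then checks MSDC_DMA_CFG for the engine
going idle. readl_poll_timeout_atomic(addr, val, cond, delay_us,
timeout_us) is the stock busy-wait helper for such bits in atomic
context; a sketch with hypothetical register names:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_DMA_CTRL		0x90	/* illustrative offset */
#define MY_DMA_CTRL_STOP	BIT(0)	/* self-clearing stop bit */

static int my_dma_stop(void __iomem *base)
{
	u32 val;

	/* Poll every 1 us, give up after 20 ms; returns -ETIMEDOUT
	 * if the bit never clears.
	 */
	return readl_poll_timeout_atomic(base + MY_DMA_CTRL, val,
					 !(val & MY_DMA_CTRL_STOP),
					 1, 20000);
}
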
2263 +diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
2264 +index 51d55a87aebef..059034e832c92 100644
2265 +--- a/drivers/mmc/host/sdhci-pci-o2micro.c
2266 ++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
2267 +@@ -147,6 +147,8 @@ static int sdhci_o2_get_cd(struct mmc_host *mmc)
2268 +
2269 + if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS))
2270 + sdhci_o2_enable_internal_clock(host);
2271 ++ else
2272 ++ sdhci_o2_wait_card_detect_stable(host);
2273 +
2274 + return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2275 + }
2276 +diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
2277 +index b72b387c08ef7..6eb0677777037 100644
2278 +--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
2279 ++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
2280 +@@ -685,7 +685,7 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
2281 + hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
2282 + BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
2283 + BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
2284 +- hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
2285 ++ hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096));
2286 +
2287 + /*
2288 + * Derive NFC ideal delay from {3}:
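
The gpmi-nand fix works because the BUSY_TIMEOUT field of TIMING1
counts in units of 4096 GPMI cycles, so a cycle count must be divided
by 4096 and rounded up; the old code multiplied instead, programming
a hugely oversized (and field-overflowing) value. For example, 10000
cycles becomes DIV_ROUND_UP(10000, 4096) = 3 units, an effective
12288-cycle timeout, slightly longer than requested, which is the
safe direction to round. As a sketch:

#include <linux/kernel.h>

static u32 my_busy_timeout_field(u32 busy_timeout_cycles)
{
	/* e.g. 10000 cycles -> 3 units -> 12288 actual cycles */
	return DIV_ROUND_UP(busy_timeout_cycles, 4096);
}
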
2289 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2290 +index 2e75b7e8f70b3..cd0d7b24f0140 100644
2291 +--- a/drivers/net/bonding/bond_main.c
2292 ++++ b/drivers/net/bonding/bond_main.c
2293 +@@ -3474,9 +3474,11 @@ re_arm:
2294 + if (!rtnl_trylock())
2295 + return;
2296 +
2297 +- if (should_notify_peers)
2298 ++ if (should_notify_peers) {
2299 ++ bond->send_peer_notif--;
2300 + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
2301 + bond->dev);
2302 ++ }
2303 + if (should_notify_rtnl) {
2304 + bond_slave_state_notify(bond);
2305 + bond_slave_link_notify(bond);
2306 +diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
2307 +index 19f115402969e..982db894754f7 100644
2308 +--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
2309 ++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
2310 +@@ -2150,6 +2150,42 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
2311 + return err;
2312 + }
2313 +
2314 ++/**
2315 ++ * ice_set_phy_type_from_speed - set phy_types based on speeds
2316 ++ * and advertised modes
2317 ++ * @ks: ethtool link ksettings struct
2318 ++ * @phy_type_low: pointer to the lower part of phy_type
2319 ++ * @phy_type_high: pointer to the higher part of phy_type
2320 ++ * @adv_link_speed: targeted link speeds bitmap
2321 ++ */
2322 ++static void
2323 ++ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
2324 ++ u64 *phy_type_low, u64 *phy_type_high,
2325 ++ u16 adv_link_speed)
2326 ++{
2327 ++ /* Handle 1000M speed in a special way because ice_update_phy_type
2328 ++ * enables all link modes, but having mixed copper and optical
2329 ++ * standards is not supported.
2330 ++ */
2331 ++ adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;
2332 ++
2333 ++ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2334 ++ 1000baseT_Full))
2335 ++ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
2336 ++ ICE_PHY_TYPE_LOW_1G_SGMII;
2337 ++
2338 ++ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2339 ++ 1000baseKX_Full))
2340 ++ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;
2341 ++
2342 ++ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2343 ++ 1000baseX_Full))
2344 ++ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
2345 ++ ICE_PHY_TYPE_LOW_1000BASE_LX;
2346 ++
2347 ++ ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
2348 ++}
2349 ++
2350 + /**
2351 + * ice_set_link_ksettings - Set Speed and Duplex
2352 + * @netdev: network interface device structure
2353 +@@ -2286,7 +2322,8 @@ ice_set_link_ksettings(struct net_device *netdev,
2354 + adv_link_speed = curr_link_speed;
2355 +
2356 + /* Convert the advertise link speeds to their corresponded PHY_TYPE */
2357 +- ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed);
2358 ++ ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
2359 ++ adv_link_speed);
2360 +
2361 + if (!autoneg_changed && adv_link_speed == curr_link_speed) {
2362 + netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
2363 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
2364 +index b883033514843..db11a1c278f69 100644
2365 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
2366 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
2367 +@@ -4819,8 +4819,11 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2368 + while (i != tx_ring->next_to_use) {
2369 + union e1000_adv_tx_desc *eop_desc, *tx_desc;
2370 +
2371 +- /* Free all the Tx ring sk_buffs */
2372 +- dev_kfree_skb_any(tx_buffer->skb);
2373 ++ /* Free all the Tx ring sk_buffs or xdp frames */
2374 ++ if (tx_buffer->type == IGB_TYPE_SKB)
2375 ++ dev_kfree_skb_any(tx_buffer->skb);
2376 ++ else
2377 ++ xdp_return_frame(tx_buffer->xdpf);
2378 +
2379 + /* unmap skb header data */
2380 + dma_unmap_single(tx_ring->dev,
2381 +@@ -9820,11 +9823,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
2382 + struct e1000_hw *hw = &adapter->hw;
2383 + u32 dmac_thr;
2384 + u16 hwm;
2385 ++ u32 reg;
2386 +
2387 + if (hw->mac.type > e1000_82580) {
2388 + if (adapter->flags & IGB_FLAG_DMAC) {
2389 +- u32 reg;
2390 +-
2391 + /* force threshold to 0. */
2392 + wr32(E1000_DMCTXTH, 0);
2393 +
2394 +@@ -9857,7 +9859,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
2395 + /* Disable BMC-to-OS Watchdog Enable */
2396 + if (hw->mac.type != e1000_i354)
2397 + reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
2398 +-
2399 + wr32(E1000_DMACR, reg);
2400 +
2401 + /* no lower threshold to disable
2402 +@@ -9874,12 +9875,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
2403 + */
2404 + wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
2405 + (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
2406 ++ }
2407 +
2408 +- /* make low power state decision controlled
2409 +- * by DMA coal
2410 +- */
2411 ++ if (hw->mac.type >= e1000_i210 ||
2412 ++ (adapter->flags & IGB_FLAG_DMAC)) {
2413 + reg = rd32(E1000_PCIEMISC);
2414 +- reg &= ~E1000_PCIEMISC_LX_DECISION;
2415 ++ reg |= E1000_PCIEMISC_LX_DECISION;
2416 + wr32(E1000_PCIEMISC, reg);
2417 + } /* endif adapter->dmac is not disabled */
2418 + } else if (hw->mac.type == e1000_82580) {
2419 +diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
2420 +index 968dd43a2b1e0..3221224525ac9 100644
2421 +--- a/drivers/net/phy/aquantia_main.c
2422 ++++ b/drivers/net/phy/aquantia_main.c
2423 +@@ -34,6 +34,8 @@
2424 + #define MDIO_AN_VEND_PROV 0xc400
2425 + #define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15)
2426 + #define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
2427 ++#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11)
2428 ++#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10)
2429 + #define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
2430 + #define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
2431 + #define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
2432 +@@ -231,9 +233,20 @@ static int aqr_config_aneg(struct phy_device *phydev)
2433 + phydev->advertising))
2434 + reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
2435 +
2436 ++ /* Handle the case when the 2.5G and 5G speeds are not advertised */
2437 ++ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
2438 ++ phydev->advertising))
2439 ++ reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
2440 ++
2441 ++ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
2442 ++ phydev->advertising))
2443 ++ reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
2444 ++
2445 + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
2446 + MDIO_AN_VEND_PROV_1000BASET_HALF |
2447 +- MDIO_AN_VEND_PROV_1000BASET_FULL, reg);
2448 ++ MDIO_AN_VEND_PROV_1000BASET_FULL |
2449 ++ MDIO_AN_VEND_PROV_2500BASET_FULL |
2450 ++ MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
2451 + if (ret < 0)
2452 + return ret;
2453 + if (ret > 0)
2454 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2455 +index 73aba760e10c6..468d0ffc266b4 100644
2456 +--- a/drivers/net/virtio_net.c
2457 ++++ b/drivers/net/virtio_net.c
2458 +@@ -2431,7 +2431,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
2459 + static void virtnet_freeze_down(struct virtio_device *vdev)
2460 + {
2461 + struct virtnet_info *vi = vdev->priv;
2462 +- int i;
2463 +
2464 + /* Make sure no work handler is accessing the device */
2465 + flush_work(&vi->config_work);
2466 +@@ -2439,14 +2438,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
2467 + netif_tx_lock_bh(vi->dev);
2468 + netif_device_detach(vi->dev);
2469 + netif_tx_unlock_bh(vi->dev);
2470 +- cancel_delayed_work_sync(&vi->refill);
2471 +-
2472 +- if (netif_running(vi->dev)) {
2473 +- for (i = 0; i < vi->max_queue_pairs; i++) {
2474 +- napi_disable(&vi->rq[i].napi);
2475 +- virtnet_napi_tx_disable(&vi->sq[i].napi);
2476 +- }
2477 +- }
2478 ++ if (netif_running(vi->dev))
2479 ++ virtnet_close(vi->dev);
2480 + }
2481 +
2482 + static int init_vqs(struct virtnet_info *vi);
2483 +@@ -2454,7 +2447,7 @@ static int init_vqs(struct virtnet_info *vi);
2484 + static int virtnet_restore_up(struct virtio_device *vdev)
2485 + {
2486 + struct virtnet_info *vi = vdev->priv;
2487 +- int err, i;
2488 ++ int err;
2489 +
2490 + err = init_vqs(vi);
2491 + if (err)
2492 +@@ -2463,15 +2456,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
2493 + virtio_device_ready(vdev);
2494 +
2495 + if (netif_running(vi->dev)) {
2496 +- for (i = 0; i < vi->curr_queue_pairs; i++)
2497 +- if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2498 +- schedule_delayed_work(&vi->refill, 0);
2499 +-
2500 +- for (i = 0; i < vi->max_queue_pairs; i++) {
2501 +- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2502 +- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2503 +- &vi->sq[i].napi);
2504 +- }
2505 ++ err = virtnet_open(vi->dev);
2506 ++ if (err)
2507 ++ return err;
2508 + }
2509 +
2510 + netif_tx_lock_bh(vi->dev);
2511 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2512 +index 9bc9f6d225bd0..19054b791c672 100644
2513 +--- a/drivers/nvme/host/core.c
2514 ++++ b/drivers/nvme/host/core.c
2515 +@@ -2475,6 +2475,34 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
2516 + .vid = 0x14a4,
2517 + .fr = "22301111",
2518 + .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2519 ++ },
2520 ++ {
2521 ++ /*
2522 ++ * This Kioxia CD6-V Series / HPE PE8030 device times out and
2523 ++	 * aborts I/O under any load, though it is most easily
2524 ++	 * reproduced with discards (fstrim).
2525 ++ *
2526 ++ * The device is left in a state where it is also not possible
2527 ++ * to use "nvme set-feature" to disable APST, but booting with
2528 ++ * nvme_core.default_ps_max_latency=0 works.
2529 ++ */
2530 ++ .vid = 0x1e0f,
2531 ++ .mn = "KCD6XVUL6T40",
2532 ++ .quirks = NVME_QUIRK_NO_APST,
2533 ++ },
2534 ++ {
2535 ++ /*
2536 ++ * The external Samsung X5 SSD fails initialization without a
2537 ++ * delay before checking if it is ready and has a whole set of
2538 ++ * other problems. To make this even more interesting, it
2539 ++	 * shares the PCI ID with the internal Samsung 970 Evo Plus, which
2540 ++ * does not need or want these quirks.
2541 ++ */
2542 ++ .vid = 0x144d,
2543 ++ .mn = "Samsung Portable SSD X5",
2544 ++ .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
2545 ++ NVME_QUIRK_NO_DEEPEST_PS |
2546 ++ NVME_QUIRK_IGNORE_DEV_SUBNQN,
2547 + }
2548 + };
2549 +
2550 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2551 +index 3ddd24a42043d..58b8461b2b0fb 100644
2552 +--- a/drivers/nvme/host/pci.c
2553 ++++ b/drivers/nvme/host/pci.c
2554 +@@ -3380,10 +3380,6 @@ static const struct pci_device_id nvme_id_table[] = {
2555 + NVME_QUIRK_128_BYTES_SQES |
2556 + NVME_QUIRK_SHARED_TAGS |
2557 + NVME_QUIRK_SKIP_CID_GEN },
2558 +- { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */
2559 +- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
2560 +- NVME_QUIRK_NO_DEEPEST_PS |
2561 +- NVME_QUIRK_IGNORE_DEV_SUBNQN, },
2562 + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2563 + { 0, }
2564 + };
2565 +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
2566 +index 01f79991bf4a2..b3531065a4387 100644
2567 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c
2568 ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
2569 +@@ -160,8 +160,8 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *);
2570 + static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
2571 + static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
2572 +
2573 +-static void ibmvfc_release_sub_crqs(struct ibmvfc_host *);
2574 +-static void ibmvfc_init_sub_crqs(struct ibmvfc_host *);
2575 ++static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *);
2576 ++static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *);
2577 +
2578 + static const char *unknown_error = "unknown error";
2579 +
2580 +@@ -917,7 +917,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
2581 + struct vio_dev *vdev = to_vio_dev(vhost->dev);
2582 + unsigned long flags;
2583 +
2584 +- ibmvfc_release_sub_crqs(vhost);
2585 ++ ibmvfc_dereg_sub_crqs(vhost);
2586 +
2587 + /* Re-enable the CRQ */
2588 + do {
2589 +@@ -936,7 +936,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
2590 + spin_unlock(vhost->crq.q_lock);
2591 + spin_unlock_irqrestore(vhost->host->host_lock, flags);
2592 +
2593 +- ibmvfc_init_sub_crqs(vhost);
2594 ++ ibmvfc_reg_sub_crqs(vhost);
2595 +
2596 + return rc;
2597 + }
2598 +@@ -955,7 +955,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
2599 + struct vio_dev *vdev = to_vio_dev(vhost->dev);
2600 + struct ibmvfc_queue *crq = &vhost->crq;
2601 +
2602 +- ibmvfc_release_sub_crqs(vhost);
2603 ++ ibmvfc_dereg_sub_crqs(vhost);
2604 +
2605 + /* Close the CRQ */
2606 + do {
2607 +@@ -988,7 +988,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
2608 + spin_unlock(vhost->crq.q_lock);
2609 + spin_unlock_irqrestore(vhost->host->host_lock, flags);
2610 +
2611 +- ibmvfc_init_sub_crqs(vhost);
2612 ++ ibmvfc_reg_sub_crqs(vhost);
2613 +
2614 + return rc;
2615 + }
2616 +@@ -5680,6 +5680,8 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
2617 + queue->cur = 0;
2618 + queue->fmt = fmt;
2619 + queue->size = PAGE_SIZE / fmt_size;
2620 ++
2621 ++ queue->vhost = vhost;
2622 + return 0;
2623 + }
2624 +
2625 +@@ -5755,9 +5757,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
2626 +
2627 + ENTER;
2628 +
2629 +- if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT))
2630 +- return -ENOMEM;
2631 +-
2632 + rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
2633 + &scrq->cookie, &scrq->hw_irq);
2634 +
2635 +@@ -5788,7 +5787,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
2636 + }
2637 +
2638 + scrq->hwq_id = index;
2639 +- scrq->vhost = vhost;
2640 +
2641 + LEAVE;
2642 + return 0;
2643 +@@ -5798,7 +5796,6 @@ irq_failed:
2644 + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
2645 + } while (rtas_busy_delay(rc));
2646 + reg_failed:
2647 +- ibmvfc_free_queue(vhost, scrq);
2648 + LEAVE;
2649 + return rc;
2650 + }
2651 +@@ -5824,12 +5821,50 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
2652 + if (rc)
2653 + dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
2654 +
2655 +- ibmvfc_free_queue(vhost, scrq);
2656 ++ /* Clean out the queue */
2657 ++ memset(scrq->msgs.crq, 0, PAGE_SIZE);
2658 ++ scrq->cur = 0;
2659 ++
2660 ++ LEAVE;
2661 ++}
2662 ++
2663 ++static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
2664 ++{
2665 ++ int i, j;
2666 ++
2667 ++ ENTER;
2668 ++ if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
2669 ++ return;
2670 ++
2671 ++ for (i = 0; i < nr_scsi_hw_queues; i++) {
2672 ++ if (ibmvfc_register_scsi_channel(vhost, i)) {
2673 ++ for (j = i; j > 0; j--)
2674 ++ ibmvfc_deregister_scsi_channel(vhost, j - 1);
2675 ++ vhost->do_enquiry = 0;
2676 ++ return;
2677 ++ }
2678 ++ }
2679 ++
2680 ++ LEAVE;
2681 ++}
2682 ++
2683 ++static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost)
2684 ++{
2685 ++ int i;
2686 ++
2687 ++ ENTER;
2688 ++ if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
2689 ++ return;
2690 ++
2691 ++ for (i = 0; i < nr_scsi_hw_queues; i++)
2692 ++ ibmvfc_deregister_scsi_channel(vhost, i);
2693 ++
2694 + LEAVE;
2695 + }
2696 +
2697 + static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
2698 + {
2699 ++ struct ibmvfc_queue *scrq;
2700 + int i, j;
2701 +
2702 + ENTER;
2703 +@@ -5845,30 +5880,41 @@ static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
2704 + }
2705 +
2706 + for (i = 0; i < nr_scsi_hw_queues; i++) {
2707 +- if (ibmvfc_register_scsi_channel(vhost, i)) {
2708 +- for (j = i; j > 0; j--)
2709 +- ibmvfc_deregister_scsi_channel(vhost, j - 1);
2710 ++ scrq = &vhost->scsi_scrqs.scrqs[i];
2711 ++ if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) {
2712 ++ for (j = i; j > 0; j--) {
2713 ++ scrq = &vhost->scsi_scrqs.scrqs[j - 1];
2714 ++ ibmvfc_free_queue(vhost, scrq);
2715 ++ }
2716 + kfree(vhost->scsi_scrqs.scrqs);
2717 + vhost->scsi_scrqs.scrqs = NULL;
2718 + vhost->scsi_scrqs.active_queues = 0;
2719 + vhost->do_enquiry = 0;
2720 +- break;
2721 ++ vhost->mq_enabled = 0;
2722 ++ return;
2723 + }
2724 + }
2725 +
2726 ++ ibmvfc_reg_sub_crqs(vhost);
2727 ++
2728 + LEAVE;
2729 + }
2730 +
2731 + static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
2732 + {
2733 ++ struct ibmvfc_queue *scrq;
2734 + int i;
2735 +
2736 + ENTER;
2737 + if (!vhost->scsi_scrqs.scrqs)
2738 + return;
2739 +
2740 +- for (i = 0; i < nr_scsi_hw_queues; i++)
2741 +- ibmvfc_deregister_scsi_channel(vhost, i);
2742 ++ ibmvfc_dereg_sub_crqs(vhost);
2743 ++
2744 ++ for (i = 0; i < nr_scsi_hw_queues; i++) {
2745 ++ scrq = &vhost->scsi_scrqs.scrqs[i];
2746 ++ ibmvfc_free_queue(vhost, scrq);
2747 ++ }
2748 +
2749 + kfree(vhost->scsi_scrqs.scrqs);
2750 + vhost->scsi_scrqs.scrqs = NULL;
2751 +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
2752 +index 3718406e09887..c39a245f43d02 100644
2753 +--- a/drivers/scsi/ibmvscsi/ibmvfc.h
2754 ++++ b/drivers/scsi/ibmvscsi/ibmvfc.h
2755 +@@ -789,6 +789,7 @@ struct ibmvfc_queue {
2756 + spinlock_t _lock;
2757 + spinlock_t *q_lock;
2758 +
2759 ++ struct ibmvfc_host *vhost;
2760 + struct ibmvfc_event_pool evt_pool;
2761 + struct list_head sent;
2762 + struct list_head free;
2763 +@@ -797,7 +798,6 @@ struct ibmvfc_queue {
2764 + union ibmvfc_iu cancel_rsp;
2765 +
2766 + /* Sub-CRQ fields */
2767 +- struct ibmvfc_host *vhost;
2768 + unsigned long cookie;
2769 + unsigned long vios_cookie;
2770 + unsigned long hw_irq;
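
Taken together, the ibmvfc changes split sub-CRQ setup into an
allocate-once phase (ibmvfc_init_sub_crqs() /
ibmvfc_release_sub_crqs()) and a register-per-reset phase (the new
ibmvfc_reg_sub_crqs() / ibmvfc_dereg_sub_crqs()), so a CRQ reset
re-registers the queues with the hypervisor without freeing and
reallocating their memory. The shape of that split, with purely
illustrative names rather than the ibmvfc API:

#include <linux/types.h>

struct my_queue {
	void *msgs;	/* pages: allocated once, reused across resets */
	bool registered;
};

static int my_register_queue(struct my_queue *q)
{
	q->registered = true;	/* per-reset hypervisor handshake */
	return 0;
}

static void my_deregister_queue(struct my_queue *q)
{
	q->registered = false;	/* q->msgs is deliberately kept */
}

static int my_reset(struct my_queue *q)
{
	my_deregister_queue(q);
	/* ... reset the channel with the hypervisor ... */
	return my_register_queue(q);	/* reuse the same pages */
}
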
2771 +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
2772 +index cfeadd5f61f18..747e1cbb7ec91 100644
2773 +--- a/drivers/scsi/scsi_debug.c
2774 ++++ b/drivers/scsi/scsi_debug.c
2775 +@@ -2747,6 +2747,24 @@ static void zbc_open_zone(struct sdebug_dev_info *devip,
2776 + }
2777 + }
2778 +
2779 ++static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2780 ++ struct sdeb_zone_state *zsp)
2781 ++{
2782 ++ switch (zsp->z_cond) {
2783 ++ case ZC2_IMPLICIT_OPEN:
2784 ++ devip->nr_imp_open--;
2785 ++ break;
2786 ++ case ZC3_EXPLICIT_OPEN:
2787 ++ devip->nr_exp_open--;
2788 ++ break;
2789 ++ default:
2790 ++ WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2791 ++ zsp->z_start, zsp->z_cond);
2792 ++ break;
2793 ++ }
2794 ++ zsp->z_cond = ZC5_FULL;
2795 ++}
2796 ++
2797 + static void zbc_inc_wp(struct sdebug_dev_info *devip,
2798 + unsigned long long lba, unsigned int num)
2799 + {
2800 +@@ -2759,7 +2777,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
2801 + if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2802 + zsp->z_wp += num;
2803 + if (zsp->z_wp >= zend)
2804 +- zsp->z_cond = ZC5_FULL;
2805 ++ zbc_set_zone_full(devip, zsp);
2806 + return;
2807 + }
2808 +
2809 +@@ -2778,7 +2796,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
2810 + n = num;
2811 + }
2812 + if (zsp->z_wp >= zend)
2813 +- zsp->z_cond = ZC5_FULL;
2814 ++ zbc_set_zone_full(devip, zsp);
2815 +
2816 + num -= n;
2817 + lba += n;
2818 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
2819 +index bcdfcb25349ad..5947b9d5746e1 100644
2820 +--- a/drivers/scsi/scsi_transport_iscsi.c
2821 ++++ b/drivers/scsi/scsi_transport_iscsi.c
2822 +@@ -213,7 +213,12 @@ iscsi_create_endpoint(int dd_size)
2823 + return NULL;
2824 +
2825 + mutex_lock(&iscsi_ep_idr_mutex);
2826 +- id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
2827 ++
2828 ++ /*
2829 ++ * First endpoint id should be 1 to comply with user space
2830 ++ * applications (iscsid).
2831 ++ */
2832 ++ id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO);
2833 + if (id < 0) {
2834 + mutex_unlock(&iscsi_ep_idr_mutex);
2835 + printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
2836 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
2837 +index 9eb1b88a29dde..71c7f7b435c4a 100644
2838 +--- a/drivers/scsi/storvsc_drv.c
2839 ++++ b/drivers/scsi/storvsc_drv.c
2840 +@@ -1907,7 +1907,7 @@ static struct scsi_host_template scsi_driver = {
2841 + .cmd_per_lun = 2048,
2842 + .this_id = -1,
2843 + /* Ensure there are no gaps in presented sgls */
2844 +- .virt_boundary_mask = PAGE_SIZE-1,
2845 ++ .virt_boundary_mask = HV_HYP_PAGE_SIZE - 1,
2846 + .no_write_same = 1,
2847 + .track_queue_depth = 1,
2848 + .change_queue_depth = storvsc_change_queue_depth,
2849 +@@ -1961,6 +1961,7 @@ static int storvsc_probe(struct hv_device *device,
2850 + int max_targets;
2851 + int max_channels;
2852 + int max_sub_channels = 0;
2853 ++ u32 max_xfer_bytes;
2854 +
2855 + /*
2856 + * Based on the windows host we are running on,
2857 +@@ -2049,12 +2050,28 @@ static int storvsc_probe(struct hv_device *device,
2858 + }
2859 + /* max cmd length */
2860 + host->max_cmd_len = STORVSC_MAX_CMD_LEN;
2861 +-
2862 + /*
2863 +- * set the table size based on the info we got
2864 +- * from the host.
2865 ++	 * Any reasonable Hyper-V configuration should provide a
2866 ++	 * max_transfer_bytes value aligned to HV_HYP_PAGE_SIZE; round
2867 ++	 * down to guard against any odd value from the host.
2868 ++ */
2869 ++ max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
2870 ++ /* max_hw_sectors_kb */
2871 ++ host->max_sectors = max_xfer_bytes >> 9;
2872 ++ /*
2873 ++ * There are 2 requirements for Hyper-V storvsc sgl segments,
2874 ++ * based on which the below calculation for max segments is
2875 ++ * done:
2876 ++ *
2877 ++	 * 1. Except for the first and last, every sgl segment should be
2878 ++	 *    aligned to HV_HYP_PAGE_SIZE, which also means the maximum
2879 ++	 *    number of segments in an sgl can be calculated by dividing
2880 ++	 *    the total max transfer length by HV_HYP_PAGE_SIZE.
2881 ++	 *
2882 ++	 * 2. Except for the first and last, each entry in the SGL must
2883 ++	 *    have an offset that is a multiple of HV_HYP_PAGE_SIZE.
2884 + */
2885 +- host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
2886 ++ host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;
2887 + /*
2888 + * For non-IDE disks, the host supports multiple channels.
2889 + * Set the number of HW queues we are supporting.
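
To put concrete numbers on the storvsc sizing math: assuming the host
reports max_transfer_bytes of 512 KiB (an example value, not taken
from the patch) and HV_HYP_PAGE_SIZE of 4096, max_sectors comes out
to 1024 (512-byte sectors) and sg_tablesize to 128 + 1 = 129, the
extra entry covering an unaligned first segment:

#include <stdio.h>

#define HV_HYP_PAGE_SHIFT	12
#define HV_HYP_PAGE_SIZE	(1u << HV_HYP_PAGE_SHIFT)

int main(void)
{
	unsigned int max_transfer_bytes = 512 * 1024;	/* example host value */
	/* round_down() to the Hyper-V page size */
	unsigned int max_xfer_bytes =
		max_transfer_bytes / HV_HYP_PAGE_SIZE * HV_HYP_PAGE_SIZE;
	unsigned int max_sectors = max_xfer_bytes >> 9;
	unsigned int sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;

	/* prints: max_sectors=1024 sg_tablesize=129 */
	printf("max_sectors=%u sg_tablesize=%u\n", max_sectors, sg_tablesize);
	return 0;
}
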
2890 +diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
2891 +index 3cbb165d6e309..70ad0f3dce283 100644
2892 +--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
2893 ++++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
2894 +@@ -783,6 +783,7 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
2895 + }
2896 +
2897 + ret = brcmstb_init_sram(dn);
2898 ++ of_node_put(dn);
2899 + if (ret) {
2900 + pr_err("error setting up SRAM for PM\n");
2901 + return ret;
2902 +diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
2903 +index 8834ca6137219..aacc37736db6e 100644
2904 +--- a/drivers/usb/chipidea/udc.c
2905 ++++ b/drivers/usb/chipidea/udc.c
2906 +@@ -1040,6 +1040,9 @@ isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
2907 + struct ci_hdrc *ci = req->context;
2908 + unsigned long flags;
2909 +
2910 ++ if (req->status < 0)
2911 ++ return;
2912 ++
2913 + if (ci->setaddr) {
2914 + hw_usb_set_address(ci, ci->address);
2915 + ci->setaddr = false;
2916 +diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
2917 +index 3427ce37a5c5b..2869bda642292 100644
2918 +--- a/drivers/usb/gadget/legacy/raw_gadget.c
2919 ++++ b/drivers/usb/gadget/legacy/raw_gadget.c
2920 +@@ -11,6 +11,7 @@
2921 + #include <linux/ctype.h>
2922 + #include <linux/debugfs.h>
2923 + #include <linux/delay.h>
2924 ++#include <linux/idr.h>
2925 + #include <linux/kref.h>
2926 + #include <linux/miscdevice.h>
2927 + #include <linux/module.h>
2928 +@@ -36,6 +37,9 @@ MODULE_LICENSE("GPL");
2929 +
2930 + /*----------------------------------------------------------------------*/
2931 +
2932 ++static DEFINE_IDA(driver_id_numbers);
2933 ++#define DRIVER_DRIVER_NAME_LENGTH_MAX 32
2934 ++
2935 + #define RAW_EVENT_QUEUE_SIZE 16
2936 +
2937 + struct raw_event_queue {
2938 +@@ -161,6 +165,9 @@ struct raw_dev {
2939 + /* Reference to misc device: */
2940 + struct device *dev;
2941 +
2942 ++ /* Make driver names unique */
2943 ++ int driver_id_number;
2944 ++
2945 + /* Protected by lock: */
2946 + enum dev_state state;
2947 + bool gadget_registered;
2948 +@@ -189,6 +196,7 @@ static struct raw_dev *dev_new(void)
2949 + spin_lock_init(&dev->lock);
2950 + init_completion(&dev->ep0_done);
2951 + raw_event_queue_init(&dev->queue);
2952 ++ dev->driver_id_number = -1;
2953 + return dev;
2954 + }
2955 +
2956 +@@ -199,6 +207,9 @@ static void dev_free(struct kref *kref)
2957 +
2958 + kfree(dev->udc_name);
2959 + kfree(dev->driver.udc_name);
2960 ++ kfree(dev->driver.driver.name);
2961 ++ if (dev->driver_id_number >= 0)
2962 ++ ida_free(&driver_id_numbers, dev->driver_id_number);
2963 + if (dev->req) {
2964 + if (dev->ep0_urb_queued)
2965 + usb_ep_dequeue(dev->gadget->ep0, dev->req);
2966 +@@ -419,9 +430,11 @@ out_put:
2967 + static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
2968 + {
2969 + int ret = 0;
2970 ++ int driver_id_number;
2971 + struct usb_raw_init arg;
2972 + char *udc_driver_name;
2973 + char *udc_device_name;
2974 ++ char *driver_driver_name;
2975 + unsigned long flags;
2976 +
2977 + if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
2978 +@@ -440,36 +453,43 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
2979 + return -EINVAL;
2980 + }
2981 +
2982 ++ driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL);
2983 ++ if (driver_id_number < 0)
2984 ++ return driver_id_number;
2985 ++
2986 ++ driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL);
2987 ++ if (!driver_driver_name) {
2988 ++ ret = -ENOMEM;
2989 ++ goto out_free_driver_id_number;
2990 ++ }
2991 ++ snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX,
2992 ++ DRIVER_NAME ".%d", driver_id_number);
2993 ++
2994 + udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
2995 +- if (!udc_driver_name)
2996 +- return -ENOMEM;
2997 ++ if (!udc_driver_name) {
2998 ++ ret = -ENOMEM;
2999 ++ goto out_free_driver_driver_name;
3000 ++ }
3001 + ret = strscpy(udc_driver_name, &arg.driver_name[0],
3002 + UDC_NAME_LENGTH_MAX);
3003 +- if (ret < 0) {
3004 +- kfree(udc_driver_name);
3005 +- return ret;
3006 +- }
3007 ++ if (ret < 0)
3008 ++ goto out_free_udc_driver_name;
3009 + ret = 0;
3010 +
3011 + udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
3012 + if (!udc_device_name) {
3013 +- kfree(udc_driver_name);
3014 +- return -ENOMEM;
3015 ++ ret = -ENOMEM;
3016 ++ goto out_free_udc_driver_name;
3017 + }
3018 + ret = strscpy(udc_device_name, &arg.device_name[0],
3019 + UDC_NAME_LENGTH_MAX);
3020 +- if (ret < 0) {
3021 +- kfree(udc_driver_name);
3022 +- kfree(udc_device_name);
3023 +- return ret;
3024 +- }
3025 ++ if (ret < 0)
3026 ++ goto out_free_udc_device_name;
3027 + ret = 0;
3028 +
3029 + spin_lock_irqsave(&dev->lock, flags);
3030 + if (dev->state != STATE_DEV_OPENED) {
3031 + dev_dbg(dev->dev, "fail, device is not opened\n");
3032 +- kfree(udc_driver_name);
3033 +- kfree(udc_device_name);
3034 + ret = -EINVAL;
3035 + goto out_unlock;
3036 + }
3037 +@@ -484,14 +504,25 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
3038 + dev->driver.suspend = gadget_suspend;
3039 + dev->driver.resume = gadget_resume;
3040 + dev->driver.reset = gadget_reset;
3041 +- dev->driver.driver.name = DRIVER_NAME;
3042 ++ dev->driver.driver.name = driver_driver_name;
3043 + dev->driver.udc_name = udc_device_name;
3044 + dev->driver.match_existing_only = 1;
3045 ++ dev->driver_id_number = driver_id_number;
3046 +
3047 + dev->state = STATE_DEV_INITIALIZED;
3048 ++ spin_unlock_irqrestore(&dev->lock, flags);
3049 ++ return ret;
3050 +
3051 + out_unlock:
3052 + spin_unlock_irqrestore(&dev->lock, flags);
3053 ++out_free_udc_device_name:
3054 ++ kfree(udc_device_name);
3055 ++out_free_udc_driver_name:
3056 ++ kfree(udc_driver_name);
3057 ++out_free_driver_driver_name:
3058 ++ kfree(driver_driver_name);
3059 ++out_free_driver_id_number:
3060 ++ ida_free(&driver_id_numbers, driver_id_number);
3061 + return ret;
3062 + }
3063 +
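
Registering two gadget drivers under the same driver core name fails,
so each raw-gadget instance now mints a unique "DRIVER_NAME.N" style
name from an IDA and releases both the id and the string in
dev_free(). The allocation half of that pattern, as a stand-alone
sketch (my_unique_name() is illustrative):

#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static DEFINE_IDA(my_id_numbers);
#define MY_NAME_MAX 32

/* Caller must eventually kfree() the name and ida_free() *out_id. */
static char *my_unique_name(const char *base, int *out_id)
{
	char *name;
	int id;

	id = ida_alloc(&my_id_numbers, GFP_KERNEL);
	if (id < 0)
		return NULL;

	name = kmalloc(MY_NAME_MAX, GFP_KERNEL);
	if (!name) {
		ida_free(&my_id_numbers, id);
		return NULL;
	}
	snprintf(name, MY_NAME_MAX, "%s.%d", base, id);

	*out_id = id;
	return name;
}
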
3064 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
3065 +index f65f1ba2b5929..fc322a9526c8c 100644
3066 +--- a/drivers/usb/host/xhci-hub.c
3067 ++++ b/drivers/usb/host/xhci-hub.c
3068 +@@ -652,7 +652,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
3069 + * It will release and re-acquire the lock while calling ACPI
3070 + * method.
3071 + */
3072 +-static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
3073 ++void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
3074 + u16 index, bool on, unsigned long *flags)
3075 + __must_hold(&xhci->lock)
3076 + {
3077 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3078 +index 7d13ddff6b33f..352626f9e451b 100644
3079 +--- a/drivers/usb/host/xhci-pci.c
3080 ++++ b/drivers/usb/host/xhci-pci.c
3081 +@@ -61,6 +61,8 @@
3082 + #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
3083 + #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI 0x464e
3084 + #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
3085 ++#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI 0xa71e
3086 ++#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI 0x7ec0
3087 +
3088 + #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
3089 + #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
3090 +@@ -270,7 +272,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3091 + pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
3092 + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
3093 + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI ||
3094 +- pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI))
3095 ++ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI ||
3096 ++ pdev->device == PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI ||
3097 ++ pdev->device == PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI))
3098 + xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
3099 +
3100 + if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
3101 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3102 +index 90f5a3ce7c348..94fe7d64e762b 100644
3103 +--- a/drivers/usb/host/xhci.c
3104 ++++ b/drivers/usb/host/xhci.c
3105 +@@ -774,6 +774,8 @@ static void xhci_stop(struct usb_hcd *hcd)
3106 + void xhci_shutdown(struct usb_hcd *hcd)
3107 + {
3108 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3109 ++ unsigned long flags;
3110 ++ int i;
3111 +
3112 + if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
3113 + usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
3114 +@@ -789,12 +791,21 @@ void xhci_shutdown(struct usb_hcd *hcd)
3115 + del_timer_sync(&xhci->shared_hcd->rh_timer);
3116 + }
3117 +
3118 +- spin_lock_irq(&xhci->lock);
3119 ++ spin_lock_irqsave(&xhci->lock, flags);
3120 + xhci_halt(xhci);
3121 ++
3122 ++ /* Power off USB2 ports */
3123 ++ for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
3124 ++ xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags);
3125 ++
3126 ++ /* Power off USB3 ports */
3127 ++ for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
3128 ++ xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags);
3129 ++
3130 + /* Workaround for spurious wakeups at shutdown with HSW */
3131 + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
3132 + xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
3133 +- spin_unlock_irq(&xhci->lock);
3134 ++ spin_unlock_irqrestore(&xhci->lock, flags);
3135 +
3136 + xhci_cleanup_msix(xhci);
3137 +
3138 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3139 +index bc0789229527f..79fa34f1e31c4 100644
3140 +--- a/drivers/usb/host/xhci.h
3141 ++++ b/drivers/usb/host/xhci.h
3142 +@@ -2174,6 +2174,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
3143 + int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
3144 + int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
3145 + struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
3146 ++void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index,
3147 ++ bool on, unsigned long *flags);
3148 +
3149 + void xhci_hc_died(struct xhci_hcd *xhci);
3150 +
3151 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3152 +index ed1e50d83ccab..de59fa919540a 100644
3153 +--- a/drivers/usb/serial/option.c
3154 ++++ b/drivers/usb/serial/option.c
3155 +@@ -252,10 +252,12 @@ static void option_instat_callback(struct urb *urb);
3156 + #define QUECTEL_PRODUCT_EG95 0x0195
3157 + #define QUECTEL_PRODUCT_BG96 0x0296
3158 + #define QUECTEL_PRODUCT_EP06 0x0306
3159 ++#define QUECTEL_PRODUCT_EM05G 0x030a
3160 + #define QUECTEL_PRODUCT_EM12 0x0512
3161 + #define QUECTEL_PRODUCT_RM500Q 0x0800
3162 + #define QUECTEL_PRODUCT_EC200S_CN 0x6002
3163 + #define QUECTEL_PRODUCT_EC200T 0x6026
3164 ++#define QUECTEL_PRODUCT_RM500K 0x7001
3165 +
3166 + #define CMOTECH_VENDOR_ID 0x16d8
3167 + #define CMOTECH_PRODUCT_6001 0x6001
3168 +@@ -1134,6 +1136,8 @@ static const struct usb_device_id option_ids[] = {
3169 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
3170 + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
3171 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
3172 ++ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
3173 ++ .driver_info = RSVD(6) | ZLP },
3174 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
3175 + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
3176 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
3177 +@@ -1147,6 +1151,7 @@ static const struct usb_device_id option_ids[] = {
3178 + .driver_info = ZLP },
3179 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
3180 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
3181 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
3182 +
3183 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
3184 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
3185 +@@ -1279,6 +1284,7 @@ static const struct usb_device_id option_ids[] = {
3186 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
3187 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */
3188 + .driver_info = NCTRL(2) | RSVD(3) },
3189 ++ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x1250, 0xff, 0x00, 0x00) }, /* Telit LE910Cx (rmnet) */
3190 + { USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
3191 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
3192 + { USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
3193 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
3194 +index 3506c47e1eef0..40b1ab3d284dc 100644
3195 +--- a/drivers/usb/serial/pl2303.c
3196 ++++ b/drivers/usb/serial/pl2303.c
3197 +@@ -436,22 +436,27 @@ static int pl2303_detect_type(struct usb_serial *serial)
3198 + break;
3199 + case 0x200:
3200 + switch (bcdDevice) {
3201 +- case 0x100:
3202 ++ case 0x100: /* GC */
3203 + case 0x105:
3204 ++ return TYPE_HXN;
3205 ++ case 0x300: /* GT / TA */
3206 ++ if (pl2303_supports_hx_status(serial))
3207 ++ return TYPE_TA;
3208 ++ fallthrough;
3209 + case 0x305:
3210 ++ case 0x400: /* GL */
3211 + case 0x405:
3212 ++ return TYPE_HXN;
3213 ++ case 0x500: /* GE / TB */
3214 ++ if (pl2303_supports_hx_status(serial))
3215 ++ return TYPE_TB;
3216 ++ fallthrough;
3217 ++ case 0x505:
3218 ++ case 0x600: /* GS */
3219 + case 0x605:
3220 +- /*
3221 +- * Assume it's an HXN-type if the device doesn't
3222 +- * support the old read request value.
3223 +- */
3224 +- if (!pl2303_supports_hx_status(serial))
3225 +- return TYPE_HXN;
3226 +- break;
3227 +- case 0x300:
3228 +- return TYPE_TA;
3229 +- case 0x500:
3230 +- return TYPE_TB;
3231 ++ case 0x700: /* GR */
3232 ++ case 0x705:
3233 ++ return TYPE_HXN;
3234 + }
3235 + break;
3236 + }
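The pl2303_detect_type() rework above folds the special cases into one grouped switch: bcdDevice values 0x300 and 0x500 first probe for the newer TA/TB types and fall through to the shared TYPE_HXN return when the probe fails. The fallthrough pattern in miniature (a sketch only; probe() stands in for pl2303_supports_hx_status()):

#include <stdio.h>

enum chip { TYPE_HXN, TYPE_TA, TYPE_TB };

static int probe(void) { return 0; }    /* pretend the HX probe failed */

static enum chip detect(unsigned int bcd)
{
        switch (bcd) {
        case 0x300:
                if (probe())
                        return TYPE_TA;
                /* fall through */
        case 0x305:
        case 0x400:
        case 0x405:
                return TYPE_HXN;
        case 0x500:
                if (probe())
                        return TYPE_TB;
                /* fall through */
        case 0x505:
        default:
                return TYPE_HXN;
        }
}

int main(void)
{
        printf("0x300 -> %d\n", detect(0x300));     /* TYPE_HXN here */
        return 0;
}

The kernel itself uses the fallthrough pseudo-keyword rather than a comment, so the compiler can verify that every implicit fall-through is intentional.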
3237 +diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
3238 +index 557f392fe24da..073fd2ea5e0bb 100644
3239 +--- a/drivers/usb/typec/tcpm/Kconfig
3240 ++++ b/drivers/usb/typec/tcpm/Kconfig
3241 +@@ -56,7 +56,6 @@ config TYPEC_WCOVE
3242 + tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
3243 + depends on ACPI
3244 + depends on MFD_INTEL_PMC_BXT
3245 +- depends on INTEL_SOC_PMIC
3246 + depends on BXT_WC_PMIC_OPREGION
3247 + help
3248 + This driver adds support for USB Type-C on Intel Broxton platforms
3249 +diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
3250 +index 6a947ff96d6eb..19fd3389946d9 100644
3251 +--- a/drivers/video/console/sticore.c
3252 ++++ b/drivers/video/console/sticore.c
3253 +@@ -1127,6 +1127,7 @@ int sti_call(const struct sti_struct *sti, unsigned long func,
3254 + return ret;
3255 + }
3256 +
3257 ++#if defined(CONFIG_FB_STI)
3258 + /* check if given fb_info is the primary device */
3259 + int fb_is_primary_device(struct fb_info *info)
3260 + {
3261 +@@ -1142,6 +1143,7 @@ int fb_is_primary_device(struct fb_info *info)
3262 + return (sti->info == info);
3263 + }
3264 + EXPORT_SYMBOL(fb_is_primary_device);
3265 ++#endif
3266 +
3267 + MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer");
3268 + MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines");
3269 +diff --git a/drivers/xen/features.c b/drivers/xen/features.c
3270 +index 7b591443833c9..87f1828d40d5e 100644
3271 +--- a/drivers/xen/features.c
3272 ++++ b/drivers/xen/features.c
3273 +@@ -42,7 +42,7 @@ void xen_setup_features(void)
3274 + if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
3275 + break;
3276 + for (j = 0; j < 32; j++)
3277 +- xen_features[i * 32 + j] = !!(fi.submap & 1<<j);
3278 ++ xen_features[i * 32 + j] = !!(fi.submap & 1U << j);
3279 + }
3280 +
3281 + if (xen_pv_domain()) {
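The one-character xen_setup_features() change above is a classic shift fix: fi.submap was tested with 1 << j, and for j == 31 shifting a signed int into its sign bit is undefined behaviour, while 1U << j is well defined for j in 0..31. A standalone illustration (the values are made up):

#include <stdio.h>

int main(void)
{
        unsigned int submap = 0x80000000u;      /* only bit 31 set */
        int j = 31;

        /* (submap & 1 << j) would be UB: the literal 1 is a signed int. */
        int set = !!(submap & 1U << j);         /* well defined */

        printf("feature bit %d: %d\n", j, set);
        return 0;
}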
3282 +diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
3283 +index 20d7d059dadb5..40ef379c28ab0 100644
3284 +--- a/drivers/xen/gntdev-common.h
3285 ++++ b/drivers/xen/gntdev-common.h
3286 +@@ -16,6 +16,7 @@
3287 + #include <linux/mmu_notifier.h>
3288 + #include <linux/types.h>
3289 + #include <xen/interface/event_channel.h>
3290 ++#include <xen/grant_table.h>
3291 +
3292 + struct gntdev_dmabuf_priv;
3293 +
3294 +@@ -56,6 +57,7 @@ struct gntdev_grant_map {
3295 + struct gnttab_unmap_grant_ref *unmap_ops;
3296 + struct gnttab_map_grant_ref *kmap_ops;
3297 + struct gnttab_unmap_grant_ref *kunmap_ops;
3298 ++ bool *being_removed;
3299 + struct page **pages;
3300 + unsigned long pages_vm_start;
3301 +
3302 +@@ -73,6 +75,11 @@ struct gntdev_grant_map {
3303 + /* Needed to avoid allocation in gnttab_dma_free_pages(). */
3304 + xen_pfn_t *frames;
3305 + #endif
3306 ++
3307 ++ /* Number of live grants */
3308 ++ atomic_t live_grants;
3309 ++ /* Needed to avoid allocation in __unmap_grant_pages */
3310 ++ struct gntab_unmap_queue_data unmap_data;
3311 + };
3312 +
3313 + struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
3314 +diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
3315 +index 59ffea8000791..4b56c39f766d4 100644
3316 +--- a/drivers/xen/gntdev.c
3317 ++++ b/drivers/xen/gntdev.c
3318 +@@ -35,6 +35,7 @@
3319 + #include <linux/slab.h>
3320 + #include <linux/highmem.h>
3321 + #include <linux/refcount.h>
3322 ++#include <linux/workqueue.h>
3323 +
3324 + #include <xen/xen.h>
3325 + #include <xen/grant_table.h>
3326 +@@ -60,10 +61,11 @@ module_param(limit, uint, 0644);
3327 + MODULE_PARM_DESC(limit,
3328 + "Maximum number of grants that may be mapped by one mapping request");
3329 +
3330 ++/* True in PV mode, false otherwise */
3331 + static int use_ptemod;
3332 +
3333 +-static int unmap_grant_pages(struct gntdev_grant_map *map,
3334 +- int offset, int pages);
3335 ++static void unmap_grant_pages(struct gntdev_grant_map *map,
3336 ++ int offset, int pages);
3337 +
3338 + static struct miscdevice gntdev_miscdev;
3339 +
3340 +@@ -120,6 +122,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map)
3341 + kvfree(map->unmap_ops);
3342 + kvfree(map->kmap_ops);
3343 + kvfree(map->kunmap_ops);
3344 ++ kvfree(map->being_removed);
3345 + kfree(map);
3346 + }
3347 +
3348 +@@ -140,10 +143,13 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
3349 + add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]),
3350 + GFP_KERNEL);
3351 + add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
3352 ++ add->being_removed =
3353 ++ kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
3354 + if (NULL == add->grants ||
3355 + NULL == add->map_ops ||
3356 + NULL == add->unmap_ops ||
3357 +- NULL == add->pages)
3358 ++ NULL == add->pages ||
3359 ++ NULL == add->being_removed)
3360 + goto err;
3361 + if (use_ptemod) {
3362 + add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
3363 +@@ -250,9 +256,36 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
3364 + if (!refcount_dec_and_test(&map->users))
3365 + return;
3366 +
3367 +- if (map->pages && !use_ptemod)
3368 ++ if (map->pages && !use_ptemod) {
3369 ++ /*
3370 ++ * Increment the reference count. This ensures that the
3371 ++ * subsequent call to unmap_grant_pages() will not wind up
3372 ++ * re-entering itself. It *can* wind up calling
3373 ++ * gntdev_put_map() recursively, but such calls will be with a
3374 ++ * reference count greater than 1, so they will return before
3375 ++ * this code is reached. The recursion depth is thus limited to
3376 ++ * 1. Do NOT use refcount_inc() here, as it will detect that
3377 ++ * the reference count is zero and WARN().
3378 ++ */
3379 ++ refcount_set(&map->users, 1);
3380 ++
3381 ++ /*
3382 ++ * Unmap the grants. This may or may not be asynchronous, so it
3383 ++ * is possible that the reference count is 1 on return, but it
3384 ++ * could also be greater than 1.
3385 ++ */
3386 + unmap_grant_pages(map, 0, map->count);
3387 +
3388 ++ /* Check if the memory now needs to be freed */
3389 ++ if (!refcount_dec_and_test(&map->users))
3390 ++ return;
3391 ++
3392 ++ /*
3393 ++ * All pages have been returned to the hypervisor, so free the
3394 ++ * map.
3395 ++ */
3396 ++ }
3397 ++
3398 + if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
3399 + notify_remote_via_evtchn(map->notify.event);
3400 + evtchn_put(map->notify.event);
3401 +@@ -283,6 +316,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
3402 +
3403 + int gntdev_map_grant_pages(struct gntdev_grant_map *map)
3404 + {
3405 ++ size_t alloced = 0;
3406 + int i, err = 0;
3407 +
3408 + if (!use_ptemod) {
3409 +@@ -331,97 +365,116 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
3410 + map->count);
3411 +
3412 + for (i = 0; i < map->count; i++) {
3413 +- if (map->map_ops[i].status == GNTST_okay)
3414 ++ if (map->map_ops[i].status == GNTST_okay) {
3415 + map->unmap_ops[i].handle = map->map_ops[i].handle;
3416 +- else if (!err)
3417 ++ if (!use_ptemod)
3418 ++ alloced++;
3419 ++ } else if (!err)
3420 + err = -EINVAL;
3421 +
3422 + if (map->flags & GNTMAP_device_map)
3423 + map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
3424 +
3425 + if (use_ptemod) {
3426 +- if (map->kmap_ops[i].status == GNTST_okay)
3427 ++ if (map->kmap_ops[i].status == GNTST_okay) {
3428 ++ if (map->map_ops[i].status == GNTST_okay)
3429 ++ alloced++;
3430 + map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
3431 +- else if (!err)
3432 ++ } else if (!err)
3433 + err = -EINVAL;
3434 + }
3435 + }
3436 ++ atomic_add(alloced, &map->live_grants);
3437 + return err;
3438 + }
3439 +
3440 +-static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
3441 +- int pages)
3442 ++static void __unmap_grant_pages_done(int result,
3443 ++ struct gntab_unmap_queue_data *data)
3444 + {
3445 +- int i, err = 0;
3446 +- struct gntab_unmap_queue_data unmap_data;
3447 +-
3448 +- if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
3449 +- int pgno = (map->notify.addr >> PAGE_SHIFT);
3450 +- if (pgno >= offset && pgno < offset + pages) {
3451 +- /* No need for kmap, pages are in lowmem */
3452 +- uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
3453 +- tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
3454 +- map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
3455 +- }
3456 +- }
3457 +-
3458 +- unmap_data.unmap_ops = map->unmap_ops + offset;
3459 +- unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
3460 +- unmap_data.pages = map->pages + offset;
3461 +- unmap_data.count = pages;
3462 +-
3463 +- err = gnttab_unmap_refs_sync(&unmap_data);
3464 +- if (err)
3465 +- return err;
3466 ++ unsigned int i;
3467 ++ struct gntdev_grant_map *map = data->data;
3468 ++ unsigned int offset = data->unmap_ops - map->unmap_ops;
3469 +
3470 +- for (i = 0; i < pages; i++) {
3471 +- if (map->unmap_ops[offset+i].status)
3472 +- err = -EINVAL;
3473 ++ for (i = 0; i < data->count; i++) {
3474 ++ WARN_ON(map->unmap_ops[offset+i].status);
3475 + pr_debug("unmap handle=%d st=%d\n",
3476 + map->unmap_ops[offset+i].handle,
3477 + map->unmap_ops[offset+i].status);
3478 + map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
3479 + if (use_ptemod) {
3480 +- if (map->kunmap_ops[offset+i].status)
3481 +- err = -EINVAL;
3482 ++ WARN_ON(map->kunmap_ops[offset+i].status);
3483 + pr_debug("kunmap handle=%u st=%d\n",
3484 + map->kunmap_ops[offset+i].handle,
3485 + map->kunmap_ops[offset+i].status);
3486 + map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
3487 + }
3488 + }
3489 +- return err;
3490 ++ /*
3491 ++ * Decrease the live-grant counter. This must happen after the loop to
3492 ++ * prevent premature reuse of the grants by gnttab_mmap().
3493 ++ */
3494 ++ atomic_sub(data->count, &map->live_grants);
3495 ++
3496 ++ /* Release reference taken by __unmap_grant_pages */
3497 ++ gntdev_put_map(NULL, map);
3498 ++}
3499 ++
3500 ++static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
3501 ++ int pages)
3502 ++{
3503 ++ if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
3504 ++ int pgno = (map->notify.addr >> PAGE_SHIFT);
3505 ++
3506 ++ if (pgno >= offset && pgno < offset + pages) {
3507 ++ /* No need for kmap, pages are in lowmem */
3508 ++ uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
3509 ++
3510 ++ tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
3511 ++ map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
3512 ++ }
3513 ++ }
3514 ++
3515 ++ map->unmap_data.unmap_ops = map->unmap_ops + offset;
3516 ++ map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
3517 ++ map->unmap_data.pages = map->pages + offset;
3518 ++ map->unmap_data.count = pages;
3519 ++ map->unmap_data.done = __unmap_grant_pages_done;
3520 ++ map->unmap_data.data = map;
3521 ++ refcount_inc(&map->users); /* to keep map alive during async call below */
3522 ++
3523 ++ gnttab_unmap_refs_async(&map->unmap_data);
3524 + }
3525 +
3526 +-static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
3527 +- int pages)
3528 ++static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
3529 ++ int pages)
3530 + {
3531 +- int range, err = 0;
3532 ++ int range;
3533 ++
3534 ++ if (atomic_read(&map->live_grants) == 0)
3535 ++ return; /* Nothing to do */
3536 +
3537 + pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
3538 +
3539 + /* It is possible the requested range will have a "hole" where we
3540 + * already unmapped some of the grants. Only unmap valid ranges.
3541 + */
3542 +- while (pages && !err) {
3543 +- while (pages &&
3544 +- map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) {
3545 ++ while (pages) {
3546 ++ while (pages && map->being_removed[offset]) {
3547 + offset++;
3548 + pages--;
3549 + }
3550 + range = 0;
3551 + while (range < pages) {
3552 +- if (map->unmap_ops[offset + range].handle ==
3553 +- INVALID_GRANT_HANDLE)
3554 ++ if (map->being_removed[offset + range])
3555 + break;
3556 ++ map->being_removed[offset + range] = true;
3557 + range++;
3558 + }
3559 +- err = __unmap_grant_pages(map, offset, range);
3560 ++ if (range)
3561 ++ __unmap_grant_pages(map, offset, range);
3562 + offset += range;
3563 + pages -= range;
3564 + }
3565 +-
3566 +- return err;
3567 + }
3568 +
3569 + /* ------------------------------------------------------------------ */
3570 +@@ -473,7 +526,6 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
3571 + struct gntdev_grant_map *map =
3572 + container_of(mn, struct gntdev_grant_map, notifier);
3573 + unsigned long mstart, mend;
3574 +- int err;
3575 +
3576 + if (!mmu_notifier_range_blockable(range))
3577 + return false;
3578 +@@ -494,10 +546,9 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
3579 + map->index, map->count,
3580 + map->vma->vm_start, map->vma->vm_end,
3581 + range->start, range->end, mstart, mend);
3582 +- err = unmap_grant_pages(map,
3583 ++ unmap_grant_pages(map,
3584 + (mstart - map->vma->vm_start) >> PAGE_SHIFT,
3585 + (mend - mstart) >> PAGE_SHIFT);
3586 +- WARN_ON(err);
3587 +
3588 + return true;
3589 + }
3590 +@@ -985,6 +1036,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
3591 + goto unlock_out;
3592 + if (use_ptemod && map->vma)
3593 + goto unlock_out;
3594 ++ if (atomic_read(&map->live_grants)) {
3595 ++ err = -EAGAIN;
3596 ++ goto unlock_out;
3597 ++ }
3598 + refcount_inc(&map->users);
3599 +
3600 + vma->vm_ops = &gntdev_vmops;
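The gntdev_put_map() hunk above relies on a deliberate trick: when the user count reaches zero it is reset to 1 before unmap_grant_pages() runs, so any nested put triggered by the (possibly asynchronous) unmap returns early instead of re-entering teardown, and a second decrement afterwards decides who frees the map. A compressed user-space model of that shape (illustrative only: C11 atomics in place of the kernel's refcount_t, and a synchronous unmap):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct map { atomic_int users; };

static void put_map(struct map *m);

/* Models __unmap_grant_pages(): takes a reference for the completion
 * handler, which drops it when the unmap finishes (here: immediately). */
static void unmap(struct map *m)
{
        atomic_fetch_add(&m->users, 1);
        put_map(m);                     /* the completion's put */
}

static void put_map(struct map *m)
{
        if (atomic_fetch_sub(&m->users, 1) != 1)
                return;                 /* other holders remain */

        /* Re-arm so unmap()'s nested put_map() bails out above. */
        atomic_store(&m->users, 1);
        unmap(m);

        if (atomic_fetch_sub(&m->users, 1) != 1)
                return;                 /* completion still pending */

        free(m);                        /* last reference gone */
}

int main(void)
{
        struct map *m = malloc(sizeof(*m));

        if (!m)
                return 1;
        atomic_init(&m->users, 1);
        put_map(m);                     /* frees m exactly once */
        return 0;
}

In the real driver the recursion depth is further bounded because the second pass finds live_grants == 0 and unmap_grant_pages() returns immediately.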
3601 +diff --git a/fs/9p/fid.c b/fs/9p/fid.c
3602 +index e8a3b891b0366..c702a336837dd 100644
3603 +--- a/fs/9p/fid.c
3604 ++++ b/fs/9p/fid.c
3605 +@@ -151,7 +151,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
3606 + const unsigned char **wnames, *uname;
3607 + int i, n, l, clone, access;
3608 + struct v9fs_session_info *v9ses;
3609 +- struct p9_fid *fid, *old_fid = NULL;
3610 ++ struct p9_fid *fid, *old_fid;
3611 +
3612 + v9ses = v9fs_dentry2v9ses(dentry);
3613 + access = v9ses->flags & V9FS_ACCESS_MASK;
3614 +@@ -193,13 +193,12 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
3615 + if (IS_ERR(fid))
3616 + return fid;
3617 +
3618 ++ refcount_inc(&fid->count);
3619 + v9fs_fid_add(dentry->d_sb->s_root, fid);
3620 + }
3621 + /* If we are root ourself just return that */
3622 +- if (dentry->d_sb->s_root == dentry) {
3623 +- refcount_inc(&fid->count);
3624 ++ if (dentry->d_sb->s_root == dentry)
3625 + return fid;
3626 +- }
3627 + /*
3628 + * Do a multipath walk with attached root.
3629 + * When walking parent we need to make sure we
3630 +@@ -211,6 +210,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
3631 + fid = ERR_PTR(n);
3632 + goto err_out;
3633 + }
3634 ++ old_fid = fid;
3635 + clone = 1;
3636 + i = 0;
3637 + while (i < n) {
3638 +@@ -220,19 +220,15 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
3639 + * walk to ensure none of the path components change
3640 + */
3641 + fid = p9_client_walk(fid, l, &wnames[i], clone);
3642 ++ /* non-cloning walk will return the same fid */
3643 ++ if (fid != old_fid) {
3644 ++ p9_client_clunk(old_fid);
3645 ++ old_fid = fid;
3646 ++ }
3647 + if (IS_ERR(fid)) {
3648 +- if (old_fid) {
3649 +- /*
3650 +- * If we fail, clunk fid which are mapping
3651 +- * to path component and not the last component
3652 +- * of the path.
3653 +- */
3654 +- p9_client_clunk(old_fid);
3655 +- }
3656 + kfree(wnames);
3657 + goto err_out;
3658 + }
3659 +- old_fid = fid;
3660 + i += l;
3661 + clone = 0;
3662 + }
3663 +diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
3664 +index 08f48b70a7414..15d9492536cf5 100644
3665 +--- a/fs/9p/vfs_inode.c
3666 ++++ b/fs/9p/vfs_inode.c
3667 +@@ -1228,15 +1228,15 @@ static const char *v9fs_vfs_get_link(struct dentry *dentry,
3668 + return ERR_PTR(-ECHILD);
3669 +
3670 + v9ses = v9fs_dentry2v9ses(dentry);
3671 +- fid = v9fs_fid_lookup(dentry);
3672 ++ if (!v9fs_proto_dotu(v9ses))
3673 ++ return ERR_PTR(-EBADF);
3674 ++
3675 + p9_debug(P9_DEBUG_VFS, "%pd\n", dentry);
3676 ++ fid = v9fs_fid_lookup(dentry);
3677 +
3678 + if (IS_ERR(fid))
3679 + return ERR_CAST(fid);
3680 +
3681 +- if (!v9fs_proto_dotu(v9ses))
3682 +- return ERR_PTR(-EBADF);
3683 +-
3684 + st = p9_client_stat(fid);
3685 + p9_client_clunk(fid);
3686 + if (IS_ERR(st))
3687 +diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
3688 +index a61df2e0ae521..833c638437a72 100644
3689 +--- a/fs/9p/vfs_inode_dotl.c
3690 ++++ b/fs/9p/vfs_inode_dotl.c
3691 +@@ -276,6 +276,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
3692 + if (IS_ERR(ofid)) {
3693 + err = PTR_ERR(ofid);
3694 + p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
3695 ++ p9_client_clunk(dfid);
3696 + goto out;
3697 + }
3698 +
3699 +@@ -287,6 +288,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
3700 + if (err) {
3701 + p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n",
3702 + err);
3703 ++ p9_client_clunk(dfid);
3704 + goto error;
3705 + }
3706 + err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
3707 +@@ -294,6 +296,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
3708 + if (err < 0) {
3709 + p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n",
3710 + err);
3711 ++ p9_client_clunk(dfid);
3712 + goto error;
3713 + }
3714 + v9fs_invalidate_inode_attr(dir);
3715 +diff --git a/fs/afs/inode.c b/fs/afs/inode.c
3716 +index a47666ba48f56..785bacb972da5 100644
3717 +--- a/fs/afs/inode.c
3718 ++++ b/fs/afs/inode.c
3719 +@@ -733,7 +733,8 @@ int afs_getattr(struct user_namespace *mnt_userns, const struct path *path,
3720 +
3721 + _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
3722 +
3723 +- if (!(query_flags & AT_STATX_DONT_SYNC) &&
3724 ++ if (vnode->volume &&
3725 ++ !(query_flags & AT_STATX_DONT_SYNC) &&
3726 + !test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
3727 + key = afs_request_key(vnode->volume->cell);
3728 + if (IS_ERR(key))
3729 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
3730 +index 9ab2792055271..233d894f6feba 100644
3731 +--- a/fs/btrfs/disk-io.c
3732 ++++ b/fs/btrfs/disk-io.c
3733 +@@ -4360,6 +4360,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
3734 + int ret;
3735 +
3736 + set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3737 ++
3738 ++ /*
3739 ++ * We may have the reclaim task running and relocating a data block group,
3740 ++ * in which case it may create delayed iputs. So stop it before we park
3741 ++ * the cleaner kthread otherwise we can get new delayed iputs after
3742 ++ * parking the cleaner, and that can make the async reclaim task hang
3743 ++ * if it's waiting for delayed iputs to complete, since the cleaner is
3744 ++ * parked and cannot run delayed iputs - this will make us hang when
3745 ++ * trying to stop the async reclaim task.
3746 ++ */
3747 ++ cancel_work_sync(&fs_info->reclaim_bgs_work);
3748 + /*
3749 + * We don't want the cleaner to start new transactions, add more delayed
3750 + * iputs, etc. while we're closing. We can't use kthread_stop() yet
3751 +@@ -4400,8 +4411,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
3752 + cancel_work_sync(&fs_info->async_data_reclaim_work);
3753 + cancel_work_sync(&fs_info->preempt_reclaim_work);
3754 +
3755 +- cancel_work_sync(&fs_info->reclaim_bgs_work);
3756 +-
3757 + /* Cancel or finish ongoing discard work */
3758 + btrfs_discard_cleanup(fs_info);
3759 +
3760 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
3761 +index ff578c934bbcf..a06c8366a8f4e 100644
3762 +--- a/fs/btrfs/file.c
3763 ++++ b/fs/btrfs/file.c
3764 +@@ -2337,25 +2337,62 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
3765 + */
3766 + btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3767 +
3768 +- if (ret != BTRFS_NO_LOG_SYNC) {
3769 ++ if (ret == BTRFS_NO_LOG_SYNC) {
3770 ++ ret = btrfs_end_transaction(trans);
3771 ++ goto out;
3772 ++ }
3773 ++
3774 ++ /* We successfully logged the inode, attempt to sync the log. */
3775 ++ if (!ret) {
3776 ++ ret = btrfs_sync_log(trans, root, &ctx);
3777 + if (!ret) {
3778 +- ret = btrfs_sync_log(trans, root, &ctx);
3779 +- if (!ret) {
3780 +- ret = btrfs_end_transaction(trans);
3781 +- goto out;
3782 +- }
3783 +- }
3784 +- if (!full_sync) {
3785 +- ret = btrfs_wait_ordered_range(inode, start, len);
3786 +- if (ret) {
3787 +- btrfs_end_transaction(trans);
3788 +- goto out;
3789 +- }
3790 ++ ret = btrfs_end_transaction(trans);
3791 ++ goto out;
3792 + }
3793 +- ret = btrfs_commit_transaction(trans);
3794 +- } else {
3795 ++ }
3796 ++
3797 ++ /*
3798 ++ * At this point we need to commit the transaction because we had
3799 ++ * btrfs_need_log_full_commit() or some other error.
3800 ++ *
3801 ++ * If we didn't do a full sync we have to stop the trans handle, wait on
3802 ++ * the ordered extents, start it again and commit the transaction. If
3803 ++ * we attempt to wait on the ordered extents here we could deadlock with
3804 ++ * something like fallocate() that is holding the extent lock trying to
3805 ++ * start a transaction while some other thread is trying to commit the
3806 ++ * transaction while we (fsync) are currently holding the transaction
3807 ++ * open.
3808 ++ */
3809 ++ if (!full_sync) {
3810 + ret = btrfs_end_transaction(trans);
3811 ++ if (ret)
3812 ++ goto out;
3813 ++ ret = btrfs_wait_ordered_range(inode, start, len);
3814 ++ if (ret)
3815 ++ goto out;
3816 ++
3817 ++ /*
3818 ++ * This is safe to use here because we're only interested in
3819 ++ * making sure the transaction that had the ordered extents is
3820 ++ * committed. We aren't waiting on anything past this point,
3821 ++ * we're purely getting the transaction and committing it.
3822 ++ */
3823 ++ trans = btrfs_attach_transaction_barrier(root);
3824 ++ if (IS_ERR(trans)) {
3825 ++ ret = PTR_ERR(trans);
3826 ++
3827 ++ /*
3828 ++ * We committed the transaction and there's no currently
3829 ++ * running transaction, this means everything we care
3830 ++ * about made it to disk and we are done.
3831 ++ */
3832 ++ if (ret == -ENOENT)
3833 ++ ret = 0;
3834 ++ goto out;
3835 ++ }
3836 + }
3837 ++
3838 ++ ret = btrfs_commit_transaction(trans);
3839 + out:
3840 + ASSERT(list_empty(&ctx.list));
3841 + err = file_check_and_advance_wb_err(file);
3842 +diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
3843 +index 313d9d685adb7..33461b4f9c8b5 100644
3844 +--- a/fs/btrfs/locking.c
3845 ++++ b/fs/btrfs/locking.c
3846 +@@ -45,7 +45,6 @@ void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting ne
3847 + start_ns = ktime_get_ns();
3848 +
3849 + down_read_nested(&eb->lock, nest);
3850 +- eb->lock_owner = current->pid;
3851 + trace_btrfs_tree_read_lock(eb, start_ns);
3852 + }
3853 +
3854 +@@ -62,7 +61,6 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
3855 + int btrfs_try_tree_read_lock(struct extent_buffer *eb)
3856 + {
3857 + if (down_read_trylock(&eb->lock)) {
3858 +- eb->lock_owner = current->pid;
3859 + trace_btrfs_try_tree_read_lock(eb);
3860 + return 1;
3861 + }
3862 +@@ -90,7 +88,6 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
3863 + void btrfs_tree_read_unlock(struct extent_buffer *eb)
3864 + {
3865 + trace_btrfs_tree_read_unlock(eb);
3866 +- eb->lock_owner = 0;
3867 + up_read(&eb->lock);
3868 + }
3869 +
3870 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
3871 +index 7f91d62c2225a..969bf0724fdfe 100644
3872 +--- a/fs/btrfs/super.c
3873 ++++ b/fs/btrfs/super.c
3874 +@@ -712,6 +712,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
3875 + compress_force = false;
3876 + no_compress++;
3877 + } else {
3878 ++ btrfs_err(info, "unrecognized compression value %s",
3879 ++ args[0].from);
3880 + ret = -EINVAL;
3881 + goto out;
3882 + }
3883 +@@ -770,8 +772,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
3884 + case Opt_thread_pool:
3885 + ret = match_int(&args[0], &intarg);
3886 + if (ret) {
3887 ++ btrfs_err(info, "unrecognized thread_pool value %s",
3888 ++ args[0].from);
3889 + goto out;
3890 + } else if (intarg == 0) {
3891 ++ btrfs_err(info, "invalid value 0 for thread_pool");
3892 + ret = -EINVAL;
3893 + goto out;
3894 + }
3895 +@@ -832,8 +837,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
3896 + break;
3897 + case Opt_ratio:
3898 + ret = match_int(&args[0], &intarg);
3899 +- if (ret)
3900 ++ if (ret) {
3901 ++ btrfs_err(info, "unrecognized metadata_ratio value %s",
3902 ++ args[0].from);
3903 + goto out;
3904 ++ }
3905 + info->metadata_ratio = intarg;
3906 + btrfs_info(info, "metadata ratio %u",
3907 + info->metadata_ratio);
3908 +@@ -850,6 +858,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
3909 + btrfs_set_and_info(info, DISCARD_ASYNC,
3910 + "turning on async discard");
3911 + } else {
3912 ++ btrfs_err(info, "unrecognized discard mode value %s",
3913 ++ args[0].from);
3914 + ret = -EINVAL;
3915 + goto out;
3916 + }
3917 +@@ -874,6 +884,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
3918 + btrfs_set_and_info(info, FREE_SPACE_TREE,
3919 + "enabling free space tree");
3920 + } else {
3921 ++ btrfs_err(info, "unrecognized space_cache value %s",
3922 ++ args[0].from);
3923 + ret = -EINVAL;
3924 + goto out;
3925 + }
3926 +@@ -943,8 +955,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
3927 + break;
3928 + case Opt_check_integrity_print_mask:
3929 + ret = match_int(&args[0], &intarg);
3930 +- if (ret)
3931 ++ if (ret) {
3932 ++ btrfs_err(info,
3933 ++ "unrecognized check_integrity_print_mask value %s",
3934 ++ args[0].from);
3935 + goto out;
3936 ++ }
3937 + info->check_integrity_print_mask = intarg;
3938 + btrfs_info(info, "check_integrity_print_mask 0x%x",
3939 + info->check_integrity_print_mask);
3940 +@@ -959,13 +975,15 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
3941 + goto out;
3942 + #endif
3943 + case Opt_fatal_errors:
3944 +- if (strcmp(args[0].from, "panic") == 0)
3945 ++ if (strcmp(args[0].from, "panic") == 0) {
3946 + btrfs_set_opt(info->mount_opt,
3947 + PANIC_ON_FATAL_ERROR);
3948 +- else if (strcmp(args[0].from, "bug") == 0)
3949 ++ } else if (strcmp(args[0].from, "bug") == 0) {
3950 + btrfs_clear_opt(info->mount_opt,
3951 + PANIC_ON_FATAL_ERROR);
3952 +- else {
3953 ++ } else {
3954 ++ btrfs_err(info, "unrecognized fatal_errors value %s",
3955 ++ args[0].from);
3956 + ret = -EINVAL;
3957 + goto out;
3958 + }
3959 +@@ -973,8 +991,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
3960 + case Opt_commit_interval:
3961 + intarg = 0;
3962 + ret = match_int(&args[0], &intarg);
3963 +- if (ret)
3964 ++ if (ret) {
3965 ++ btrfs_err(info, "unrecognized commit_interval value %s",
3966 ++ args[0].from);
3967 ++ ret = -EINVAL;
3968 + goto out;
3969 ++ }
3970 + if (intarg == 0) {
3971 + btrfs_info(info,
3972 + "using default commit interval %us",
3973 +@@ -988,8 +1010,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
3974 + break;
3975 + case Opt_rescue:
3976 + ret = parse_rescue_options(info, args[0].from);
3977 +- if (ret < 0)
3978 ++ if (ret < 0) {
3979 ++ btrfs_err(info, "unrecognized rescue value %s",
3980 ++ args[0].from);
3981 + goto out;
3982 ++ }
3983 + break;
3984 + #ifdef CONFIG_BTRFS_DEBUG
3985 + case Opt_fragment_all:
3986 +@@ -1917,6 +1942,14 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
3987 + if (ret)
3988 + goto restore;
3989 +
3990 ++ /* V1 cache is not supported for subpage mount. */
3991 ++ if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
3992 ++ btrfs_warn(fs_info,
3993 ++ "v1 space cache is not supported for page size %lu with sectorsize %u",
3994 ++ PAGE_SIZE, fs_info->sectorsize);
3995 ++ ret = -EINVAL;
3996 ++ goto restore;
3997 ++ }
3998 + btrfs_remount_begin(fs_info, old_opts, *flags);
3999 + btrfs_resize_thread_pool(fs_info,
4000 + fs_info->thread_pool_size, old_thread_pool_size);
4001 +diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
4002 +index e4b25ef871b33..7a86a8dcf4f1c 100644
4003 +--- a/fs/f2fs/namei.c
4004 ++++ b/fs/f2fs/namei.c
4005 +@@ -91,8 +91,6 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
4006 + if (test_opt(sbi, INLINE_XATTR))
4007 + set_inode_flag(inode, FI_INLINE_XATTR);
4008 +
4009 +- if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
4010 +- set_inode_flag(inode, FI_INLINE_DATA);
4011 + if (f2fs_may_inline_dentry(inode))
4012 + set_inode_flag(inode, FI_INLINE_DENTRY);
4013 +
4014 +@@ -109,10 +107,6 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
4015 +
4016 + f2fs_init_extent_tree(inode, NULL);
4017 +
4018 +- stat_inc_inline_xattr(inode);
4019 +- stat_inc_inline_inode(inode);
4020 +- stat_inc_inline_dir(inode);
4021 +-
4022 + F2FS_I(inode)->i_flags =
4023 + f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
4024 +
4025 +@@ -129,6 +123,14 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
4026 + set_compress_context(inode);
4027 + }
4028 +
4029 ++ /* Should enable inline_data after compression set */
4030 ++ if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
4031 ++ set_inode_flag(inode, FI_INLINE_DATA);
4032 ++
4033 ++ stat_inc_inline_xattr(inode);
4034 ++ stat_inc_inline_inode(inode);
4035 ++ stat_inc_inline_dir(inode);
4036 ++
4037 + f2fs_set_inode_flags(inode);
4038 +
4039 + trace_f2fs_new_inode(inode, 0);
4040 +@@ -327,6 +329,9 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
4041 + if (!is_extension_exist(name, ext[i], false))
4042 + continue;
4043 +
4044 ++ /* Do not use inline_data with compression */
4045 ++ stat_dec_inline_inode(inode);
4046 ++ clear_inode_flag(inode, FI_INLINE_DATA);
4047 + set_compress_context(inode);
4048 + return;
4049 + }
4050 +diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
4051 +index b676aa419eef8..f0e535f199bef 100644
4052 +--- a/include/linux/ratelimit_types.h
4053 ++++ b/include/linux/ratelimit_types.h
4054 +@@ -23,12 +23,16 @@ struct ratelimit_state {
4055 + unsigned long flags;
4056 + };
4057 +
4058 +-#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
4059 +- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
4060 +- .interval = interval_init, \
4061 +- .burst = burst_init, \
4062 ++#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \
4063 ++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
4064 ++ .interval = interval_init, \
4065 ++ .burst = burst_init, \
4066 ++ .flags = flags_init, \
4067 + }
4068 +
4069 ++#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
4070 ++ RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, 0)
4071 ++
4072 + #define RATELIMIT_STATE_INIT_DISABLED \
4073 + RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
4074 +
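The ratelimit change above introduces RATELIMIT_STATE_INIT_FLAGS() and rebases the old RATELIMIT_STATE_INIT() on top of it, so every existing user keeps compiling while new callers can pass flags through a single, non-duplicated initializer. The layering pattern in miniature (hypothetical struct and macro names, not the kernel's):

#include <stdio.h>

struct rl_state {
        int interval;
        int burst;
        unsigned long flags;
};

#define RL_INIT_FLAGS(interval_init, burst_init, flags_init) { \
        .interval = interval_init,                             \
        .burst    = burst_init,                                \
        .flags    = flags_init,                                \
}

/* The old macro becomes a thin wrapper: one definition to maintain. */
#define RL_INIT(interval_init, burst_init) \
        RL_INIT_FLAGS(interval_init, burst_init, 0)

int main(void)
{
        static struct rl_state rs = RL_INIT(5, 10);

        printf("%d %d %lu\n", rs.interval, rs.burst, rs.flags);
        return 0;
}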
4075 +diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
4076 +index 9e1111f5915bd..d81b7f85819ed 100644
4077 +--- a/include/net/inet_sock.h
4078 ++++ b/include/net/inet_sock.h
4079 +@@ -252,6 +252,11 @@ struct inet_sock {
4080 + #define IP_CMSG_CHECKSUM BIT(7)
4081 + #define IP_CMSG_RECVFRAGSIZE BIT(8)
4082 +
4083 ++static inline bool sk_is_inet(struct sock *sk)
4084 ++{
4085 ++ return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
4086 ++}
4087 ++
4088 + /**
4089 + * sk_to_full_sk - Access to a full socket
4090 + * @sk: pointer to a socket
4091 +diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h
4092 +index ab69434e2329e..72e785a903b65 100644
4093 +--- a/include/trace/events/libata.h
4094 ++++ b/include/trace/events/libata.h
4095 +@@ -249,6 +249,7 @@ DECLARE_EVENT_CLASS(ata_qc_complete_template,
4096 + __entry->hob_feature = qc->result_tf.hob_feature;
4097 + __entry->nsect = qc->result_tf.nsect;
4098 + __entry->hob_nsect = qc->result_tf.hob_nsect;
4099 ++ __entry->flags = qc->flags;
4100 + ),
4101 +
4102 + TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \
4103 +diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
4104 +index 854d6df969de2..ed5dd9e023241 100644
4105 +--- a/kernel/dma/direct.c
4106 ++++ b/kernel/dma/direct.c
4107 +@@ -323,7 +323,7 @@ void dma_direct_free(struct device *dev, size_t size,
4108 + } else {
4109 + if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
4110 + arch_dma_clear_uncached(cpu_addr, size);
4111 +- if (dma_set_encrypted(dev, cpu_addr, 1 << page_order))
4112 ++ if (dma_set_encrypted(dev, cpu_addr, size))
4113 + return;
4114 + }
4115 +
4116 +@@ -360,7 +360,6 @@ void dma_direct_free_pages(struct device *dev, size_t size,
4117 + struct page *page, dma_addr_t dma_addr,
4118 + enum dma_data_direction dir)
4119 + {
4120 +- unsigned int page_order = get_order(size);
4121 + void *vaddr = page_address(page);
4122 +
4123 + /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
4124 +@@ -368,7 +367,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
4125 + dma_free_from_pool(dev, vaddr, size))
4126 + return;
4127 +
4128 +- if (dma_set_encrypted(dev, vaddr, 1 << page_order))
4129 ++ if (dma_set_encrypted(dev, vaddr, size))
4130 + return;
4131 + __dma_direct_free_pages(dev, page, size);
4132 + }
4133 +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
4134 +index 39ee60725519b..6a9c1ef15d5d8 100644
4135 +--- a/kernel/trace/trace_kprobe.c
4136 ++++ b/kernel/trace/trace_kprobe.c
4137 +@@ -1733,8 +1733,17 @@ static int
4138 + kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
4139 + {
4140 + struct kretprobe *rp = get_kretprobe(ri);
4141 +- struct trace_kprobe *tk = container_of(rp, struct trace_kprobe, rp);
4142 ++ struct trace_kprobe *tk;
4143 ++
4144 ++ /*
4145 ++ * There is a small chance that get_kretprobe(ri) returns NULL when
4146 ++ * the kretprobe is unregistered on another CPU between kretprobe's
4147 ++ * trampoline_handler and this function.
4148 ++ */
4149 ++ if (unlikely(!rp))
4150 ++ return 0;
4151 +
4152 ++ tk = container_of(rp, struct trace_kprobe, rp);
4153 + raw_cpu_inc(*tk->nhit);
4154 +
4155 + if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
4156 +diff --git a/net/core/dev.c b/net/core/dev.c
4157 +index b9731b267d073..6111506a41053 100644
4158 +--- a/net/core/dev.c
4159 ++++ b/net/core/dev.c
4160 +@@ -365,12 +365,12 @@ static void list_netdevice(struct net_device *dev)
4161 +
4162 + ASSERT_RTNL();
4163 +
4164 +- write_lock_bh(&dev_base_lock);
4165 ++ write_lock(&dev_base_lock);
4166 + list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
4167 + netdev_name_node_add(net, dev->name_node);
4168 + hlist_add_head_rcu(&dev->index_hlist,
4169 + dev_index_hash(net, dev->ifindex));
4170 +- write_unlock_bh(&dev_base_lock);
4171 ++ write_unlock(&dev_base_lock);
4172 +
4173 + dev_base_seq_inc(net);
4174 + }
4175 +@@ -378,16 +378,18 @@ static void list_netdevice(struct net_device *dev)
4176 + /* Device list removal
4177 + * caller must respect a RCU grace period before freeing/reusing dev
4178 + */
4179 +-static void unlist_netdevice(struct net_device *dev)
4180 ++static void unlist_netdevice(struct net_device *dev, bool lock)
4181 + {
4182 + ASSERT_RTNL();
4183 +
4184 + /* Unlink dev from the device chain */
4185 +- write_lock_bh(&dev_base_lock);
4186 ++ if (lock)
4187 ++ write_lock(&dev_base_lock);
4188 + list_del_rcu(&dev->dev_list);
4189 + netdev_name_node_del(dev->name_node);
4190 + hlist_del_rcu(&dev->index_hlist);
4191 +- write_unlock_bh(&dev_base_lock);
4192 ++ if (lock)
4193 ++ write_unlock(&dev_base_lock);
4194 +
4195 + dev_base_seq_inc(dev_net(dev));
4196 + }
4197 +@@ -1266,15 +1268,15 @@ rollback:
4198 +
4199 + netdev_adjacent_rename_links(dev, oldname);
4200 +
4201 +- write_lock_bh(&dev_base_lock);
4202 ++ write_lock(&dev_base_lock);
4203 + netdev_name_node_del(dev->name_node);
4204 +- write_unlock_bh(&dev_base_lock);
4205 ++ write_unlock(&dev_base_lock);
4206 +
4207 + synchronize_rcu();
4208 +
4209 +- write_lock_bh(&dev_base_lock);
4210 ++ write_lock(&dev_base_lock);
4211 + netdev_name_node_add(net, dev->name_node);
4212 +- write_unlock_bh(&dev_base_lock);
4213 ++ write_unlock(&dev_base_lock);
4214 +
4215 + ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
4216 + ret = notifier_to_errno(ret);
4217 +@@ -10319,11 +10321,11 @@ int register_netdevice(struct net_device *dev)
4218 + goto err_uninit;
4219 +
4220 + ret = netdev_register_kobject(dev);
4221 +- if (ret) {
4222 +- dev->reg_state = NETREG_UNREGISTERED;
4223 ++ write_lock(&dev_base_lock);
4224 ++ dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
4225 ++ write_unlock(&dev_base_lock);
4226 ++ if (ret)
4227 + goto err_uninit;
4228 +- }
4229 +- dev->reg_state = NETREG_REGISTERED;
4230 +
4231 + __netdev_update_features(dev);
4232 +
4233 +@@ -10483,8 +10485,6 @@ static void netdev_wait_allrefs(struct net_device *dev)
4234 + unsigned long rebroadcast_time, warning_time;
4235 + int wait = 0, refcnt;
4236 +
4237 +- linkwatch_forget_dev(dev);
4238 +-
4239 + rebroadcast_time = warning_time = jiffies;
4240 + refcnt = netdev_refcnt_read(dev);
4241 +
4242 +@@ -10598,7 +10598,10 @@ void netdev_run_todo(void)
4243 + continue;
4244 + }
4245 +
4246 ++ write_lock(&dev_base_lock);
4247 + dev->reg_state = NETREG_UNREGISTERED;
4248 ++ write_unlock(&dev_base_lock);
4249 ++ linkwatch_forget_dev(dev);
4250 +
4251 + netdev_wait_allrefs(dev);
4252 +
4253 +@@ -11043,9 +11046,10 @@ void unregister_netdevice_many(struct list_head *head)
4254 +
4255 + list_for_each_entry(dev, head, unreg_list) {
4256 + /* And unlink it from device chain. */
4257 +- unlist_netdevice(dev);
4258 +-
4259 ++ write_lock(&dev_base_lock);
4260 ++ unlist_netdevice(dev, false);
4261 + dev->reg_state = NETREG_UNREGISTERING;
4262 ++ write_unlock(&dev_base_lock);
4263 + }
4264 + flush_all_backlogs();
4265 +
4266 +@@ -11190,7 +11194,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
4267 + dev_close(dev);
4268 +
4269 + /* And unlink it from device chain */
4270 +- unlist_netdevice(dev);
4271 ++ unlist_netdevice(dev, true);
4272 +
4273 + synchronize_net();
4274 +
4275 +diff --git a/net/core/filter.c b/net/core/filter.c
4276 +index 0816468c545cd..d1e2ef77ce4c1 100644
4277 +--- a/net/core/filter.c
4278 ++++ b/net/core/filter.c
4279 +@@ -6209,10 +6209,21 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
4280 + ifindex, proto, netns_id, flags);
4281 +
4282 + if (sk) {
4283 +- sk = sk_to_full_sk(sk);
4284 +- if (!sk_fullsock(sk)) {
4285 ++ struct sock *sk2 = sk_to_full_sk(sk);
4286 ++
4287 ++ /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
4288 ++ * sock refcnt is decremented to prevent a request_sock leak.
4289 ++ */
4290 ++ if (!sk_fullsock(sk2))
4291 ++ sk2 = NULL;
4292 ++ if (sk2 != sk) {
4293 + sock_gen_put(sk);
4294 +- return NULL;
4295 ++ /* Ensure there is no need to bump sk2 refcnt */
4296 ++ if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
4297 ++ WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
4298 ++ return NULL;
4299 ++ }
4300 ++ sk = sk2;
4301 + }
4302 + }
4303 +
4304 +@@ -6246,10 +6257,21 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
4305 + flags);
4306 +
4307 + if (sk) {
4308 +- sk = sk_to_full_sk(sk);
4309 +- if (!sk_fullsock(sk)) {
4310 ++ struct sock *sk2 = sk_to_full_sk(sk);
4311 ++
4312 ++ /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
4313 ++ * sock refcnt is decremented to prevent a request_sock leak.
4314 ++ */
4315 ++ if (!sk_fullsock(sk2))
4316 ++ sk2 = NULL;
4317 ++ if (sk2 != sk) {
4318 + sock_gen_put(sk);
4319 +- return NULL;
4320 ++ /* Ensure there is no need to bump sk2 refcnt */
4321 ++ if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
4322 ++ WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
4323 ++ return NULL;
4324 ++ }
4325 ++ sk = sk2;
4326 + }
4327 + }
4328 +
4329 +diff --git a/net/core/link_watch.c b/net/core/link_watch.c
4330 +index 1a455847da54f..9599afd0862da 100644
4331 +--- a/net/core/link_watch.c
4332 ++++ b/net/core/link_watch.c
4333 +@@ -55,7 +55,7 @@ static void rfc2863_policy(struct net_device *dev)
4334 + if (operstate == dev->operstate)
4335 + return;
4336 +
4337 +- write_lock_bh(&dev_base_lock);
4338 ++ write_lock(&dev_base_lock);
4339 +
4340 + switch(dev->link_mode) {
4341 + case IF_LINK_MODE_TESTING:
4342 +@@ -74,7 +74,7 @@ static void rfc2863_policy(struct net_device *dev)
4343 +
4344 + dev->operstate = operstate;
4345 +
4346 +- write_unlock_bh(&dev_base_lock);
4347 ++ write_unlock(&dev_base_lock);
4348 + }
4349 +
4350 +
4351 +diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
4352 +index 9e5657f632453..e9ea0695efb42 100644
4353 +--- a/net/core/net-sysfs.c
4354 ++++ b/net/core/net-sysfs.c
4355 +@@ -32,6 +32,7 @@ static const char fmt_dec[] = "%d\n";
4356 + static const char fmt_ulong[] = "%lu\n";
4357 + static const char fmt_u64[] = "%llu\n";
4358 +
4359 ++/* Caller holds RTNL or dev_base_lock */
4360 + static inline int dev_isalive(const struct net_device *dev)
4361 + {
4362 + return dev->reg_state <= NETREG_REGISTERED;
4363 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4364 +index 9c0e8ccf9bc58..8c85e93daa739 100644
4365 +--- a/net/core/rtnetlink.c
4366 ++++ b/net/core/rtnetlink.c
4367 +@@ -842,9 +842,9 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
4368 + }
4369 +
4370 + if (dev->operstate != operstate) {
4371 +- write_lock_bh(&dev_base_lock);
4372 ++ write_lock(&dev_base_lock);
4373 + dev->operstate = operstate;
4374 +- write_unlock_bh(&dev_base_lock);
4375 ++ write_unlock(&dev_base_lock);
4376 + netdev_state_change(dev);
4377 + }
4378 + }
4379 +@@ -2781,11 +2781,11 @@ static int do_setlink(const struct sk_buff *skb,
4380 + if (tb[IFLA_LINKMODE]) {
4381 + unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
4382 +
4383 +- write_lock_bh(&dev_base_lock);
4384 ++ write_lock(&dev_base_lock);
4385 + if (dev->link_mode ^ value)
4386 + status |= DO_SETLINK_NOTIFY;
4387 + dev->link_mode = value;
4388 +- write_unlock_bh(&dev_base_lock);
4389 ++ write_unlock(&dev_base_lock);
4390 + }
4391 +
4392 + if (tb[IFLA_VFINFO_LIST]) {
4393 +diff --git a/net/core/skmsg.c b/net/core/skmsg.c
4394 +index cc381165ea080..ede0af308f404 100644
4395 +--- a/net/core/skmsg.c
4396 ++++ b/net/core/skmsg.c
4397 +@@ -695,6 +695,11 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
4398 +
4399 + write_lock_bh(&sk->sk_callback_lock);
4400 +
4401 ++ if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
4402 ++ psock = ERR_PTR(-EINVAL);
4403 ++ goto out;
4404 ++ }
4405 ++
4406 + if (sk->sk_user_data) {
4407 + psock = ERR_PTR(-EBUSY);
4408 + goto out;
4409 +diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c
4410 +index 7e6b37a54add3..1c94bb8ea03f2 100644
4411 +--- a/net/ethtool/eeprom.c
4412 ++++ b/net/ethtool/eeprom.c
4413 +@@ -36,7 +36,7 @@ static int fallback_set_params(struct eeprom_req_info *request,
4414 + if (request->page)
4415 + offset = request->page * ETH_MODULE_EEPROM_PAGE_LEN + offset;
4416 +
4417 +- if (modinfo->type == ETH_MODULE_SFF_8079 &&
4418 ++ if (modinfo->type == ETH_MODULE_SFF_8472 &&
4419 + request->i2c_address == 0x51)
4420 + offset += ETH_MODULE_EEPROM_PAGE_LEN * 2;
4421 +
4422 +diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
4423 +index 26c32407f0290..ea7b96e296ef0 100644
4424 +--- a/net/hsr/hsr_device.c
4425 ++++ b/net/hsr/hsr_device.c
4426 +@@ -30,13 +30,13 @@ static bool is_slave_up(struct net_device *dev)
4427 +
4428 + static void __hsr_set_operstate(struct net_device *dev, int transition)
4429 + {
4430 +- write_lock_bh(&dev_base_lock);
4431 ++ write_lock(&dev_base_lock);
4432 + if (dev->operstate != transition) {
4433 + dev->operstate = transition;
4434 +- write_unlock_bh(&dev_base_lock);
4435 ++ write_unlock(&dev_base_lock);
4436 + netdev_state_change(dev);
4437 + } else {
4438 +- write_unlock_bh(&dev_base_lock);
4439 ++ write_unlock(&dev_base_lock);
4440 + }
4441 + }
4442 +
4443 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
4444 +index f23528c775395..fc74a3e3b3e12 100644
4445 +--- a/net/ipv4/ip_gre.c
4446 ++++ b/net/ipv4/ip_gre.c
4447 +@@ -524,7 +524,6 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
4448 + int tunnel_hlen;
4449 + int version;
4450 + int nhoff;
4451 +- int thoff;
4452 +
4453 + tun_info = skb_tunnel_info(skb);
4454 + if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
4455 +@@ -558,10 +557,16 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
4456 + (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
4457 + truncate = true;
4458 +
4459 +- thoff = skb_transport_header(skb) - skb_mac_header(skb);
4460 +- if (skb->protocol == htons(ETH_P_IPV6) &&
4461 +- (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
4462 +- truncate = true;
4463 ++ if (skb->protocol == htons(ETH_P_IPV6)) {
4464 ++ int thoff;
4465 ++
4466 ++ if (skb_transport_header_was_set(skb))
4467 ++ thoff = skb_transport_header(skb) - skb_mac_header(skb);
4468 ++ else
4469 ++ thoff = nhoff + sizeof(struct ipv6hdr);
4470 ++ if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
4471 ++ truncate = true;
4472 ++ }
4473 +
4474 + if (version == 1) {
4475 + erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
4476 +diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
4477 +index 1cdcb4df0eb7e..2c597a4e429ab 100644
4478 +--- a/net/ipv4/tcp_bpf.c
4479 ++++ b/net/ipv4/tcp_bpf.c
4480 +@@ -612,9 +612,6 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
4481 + return 0;
4482 + }
4483 +
4484 +- if (inet_csk_has_ulp(sk))
4485 +- return -EINVAL;
4486 +-
4487 + if (sk->sk_family == AF_INET6) {
4488 + if (tcp_bpf_assert_proto_ops(psock->sk_proto))
4489 + return -EINVAL;
4490 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
4491 +index a817ac6d97598..70ef4d4ebff48 100644
4492 +--- a/net/ipv6/ip6_gre.c
4493 ++++ b/net/ipv6/ip6_gre.c
4494 +@@ -944,7 +944,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
4495 + __be16 proto;
4496 + __u32 mtu;
4497 + int nhoff;
4498 +- int thoff;
4499 +
4500 + if (!pskb_inet_may_pull(skb))
4501 + goto tx_err;
4502 +@@ -965,10 +964,16 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
4503 + (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
4504 + truncate = true;
4505 +
4506 +- thoff = skb_transport_header(skb) - skb_mac_header(skb);
4507 +- if (skb->protocol == htons(ETH_P_IPV6) &&
4508 +- (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
4509 +- truncate = true;
4510 ++ if (skb->protocol == htons(ETH_P_IPV6)) {
4511 ++ int thoff;
4512 ++
4513 ++ if (skb_transport_header_was_set(skb))
4514 ++ thoff = skb_transport_header(skb) - skb_mac_header(skb);
4515 ++ else
4516 ++ thoff = nhoff + sizeof(struct ipv6hdr);
4517 ++ if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
4518 ++ truncate = true;
4519 ++ }
4520 +
4521 + if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
4522 + goto tx_err;
4523 +diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
4524 +index a7e01e9952f17..44d9b38e5f90c 100644
4525 +--- a/net/netfilter/nft_meta.c
4526 ++++ b/net/netfilter/nft_meta.c
4527 +@@ -14,6 +14,7 @@
4528 + #include <linux/in.h>
4529 + #include <linux/ip.h>
4530 + #include <linux/ipv6.h>
4531 ++#include <linux/random.h>
4532 + #include <linux/smp.h>
4533 + #include <linux/static_key.h>
4534 + #include <net/dst.h>
4535 +@@ -32,8 +33,6 @@
4536 + #define NFT_META_SECS_PER_DAY 86400
4537 + #define NFT_META_DAYS_PER_WEEK 7
4538 +
4539 +-static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state);
4540 +-
4541 + static u8 nft_meta_weekday(void)
4542 + {
4543 + time64_t secs = ktime_get_real_seconds();
4544 +@@ -267,13 +266,6 @@ static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest,
4545 + return true;
4546 + }
4547 +
4548 +-static noinline u32 nft_prandom_u32(void)
4549 +-{
4550 +- struct rnd_state *state = this_cpu_ptr(&nft_prandom_state);
4551 +-
4552 +- return prandom_u32_state(state);
4553 +-}
4554 +-
4555 + #ifdef CONFIG_IP_ROUTE_CLASSID
4556 + static noinline bool
4557 + nft_meta_get_eval_rtclassid(const struct sk_buff *skb, u32 *dest)
4558 +@@ -385,7 +377,7 @@ void nft_meta_get_eval(const struct nft_expr *expr,
4559 + break;
4560 + #endif
4561 + case NFT_META_PRANDOM:
4562 +- *dest = nft_prandom_u32();
4563 ++ *dest = get_random_u32();
4564 + break;
4565 + #ifdef CONFIG_XFRM
4566 + case NFT_META_SECPATH:
4567 +@@ -514,7 +506,6 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
4568 + len = IFNAMSIZ;
4569 + break;
4570 + case NFT_META_PRANDOM:
4571 +- prandom_init_once(&nft_prandom_state);
4572 + len = sizeof(u32);
4573 + break;
4574 + #ifdef CONFIG_XFRM
4575 +diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
4576 +index 722cac1e90e0e..4e43214e88def 100644
4577 +--- a/net/netfilter/nft_numgen.c
4578 ++++ b/net/netfilter/nft_numgen.c
4579 +@@ -9,12 +9,11 @@
4580 + #include <linux/netlink.h>
4581 + #include <linux/netfilter.h>
4582 + #include <linux/netfilter/nf_tables.h>
4583 ++#include <linux/random.h>
4584 + #include <linux/static_key.h>
4585 + #include <net/netfilter/nf_tables.h>
4586 + #include <net/netfilter/nf_tables_core.h>
4587 +
4588 +-static DEFINE_PER_CPU(struct rnd_state, nft_numgen_prandom_state);
4589 +-
4590 + struct nft_ng_inc {
4591 + u8 dreg;
4592 + u32 modulus;
4593 +@@ -104,12 +103,9 @@ struct nft_ng_random {
4594 + u32 offset;
4595 + };
4596 +
4597 +-static u32 nft_ng_random_gen(struct nft_ng_random *priv)
4598 ++static u32 nft_ng_random_gen(const struct nft_ng_random *priv)
4599 + {
4600 +- struct rnd_state *state = this_cpu_ptr(&nft_numgen_prandom_state);
4601 +-
4602 +- return reciprocal_scale(prandom_u32_state(state), priv->modulus) +
4603 +- priv->offset;
4604 ++ return reciprocal_scale(get_random_u32(), priv->modulus) + priv->offset;
4605 + }
4606 +
4607 + static void nft_ng_random_eval(const struct nft_expr *expr,
4608 +@@ -137,8 +133,6 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
4609 + if (priv->offset + priv->modulus - 1 < priv->offset)
4610 + return -EOVERFLOW;
4611 +
4612 +- prandom_init_once(&nft_numgen_prandom_state);
4613 +-
4614 + return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
4615 + NULL, NFT_DATA_VALUE, sizeof(u32));
4616 + }
4617 +diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
4618 +index 02096f2ec6784..1b81d71bac3cf 100644
4619 +--- a/net/openvswitch/flow.c
4620 ++++ b/net/openvswitch/flow.c
4621 +@@ -266,7 +266,7 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
4622 + if (flags & IP6_FH_F_FRAG) {
4623 + if (frag_off) {
4624 + key->ip.frag = OVS_FRAG_TYPE_LATER;
4625 +- key->ip.proto = nexthdr;
4626 ++ key->ip.proto = NEXTHDR_FRAGMENT;
4627 + return 0;
4628 + }
4629 + key->ip.frag = OVS_FRAG_TYPE_FIRST;
4630 +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
4631 +index 0c345e43a09a3..adc5407fd5d58 100644
4632 +--- a/net/sched/sch_netem.c
4633 ++++ b/net/sched/sch_netem.c
4634 +@@ -1146,9 +1146,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
4635 + struct tc_netem_rate rate;
4636 + struct tc_netem_slot slot;
4637 +
4638 +- qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
4639 ++ qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
4640 + UINT_MAX);
4641 +- qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
4642 ++ qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
4643 + UINT_MAX);
4644 + qopt.limit = q->limit;
4645 + qopt.loss = q->loss;
4646 +diff --git a/net/tipc/core.c b/net/tipc/core.c
4647 +index 3f4542e0f0650..434e70eabe081 100644
4648 +--- a/net/tipc/core.c
4649 ++++ b/net/tipc/core.c
4650 +@@ -109,10 +109,9 @@ static void __net_exit tipc_exit_net(struct net *net)
4651 + struct tipc_net *tn = tipc_net(net);
4652 +
4653 + tipc_detach_loopback(net);
4654 ++ tipc_net_stop(net);
4655 + /* Make sure the tipc_net_finalize_work() finished */
4656 + cancel_work_sync(&tn->work);
4657 +- tipc_net_stop(net);
4658 +-
4659 + tipc_bcast_stop(net);
4660 + tipc_nametbl_stop(net);
4661 + tipc_sk_rht_destroy(net);
4662 +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
4663 +index 9aac9c60d786d..62b1c5e32bbd7 100644
4664 +--- a/net/tls/tls_main.c
4665 ++++ b/net/tls/tls_main.c
4666 +@@ -790,6 +790,8 @@ static void tls_update(struct sock *sk, struct proto *p,
4667 + {
4668 + struct tls_context *ctx;
4669 +
4670 ++ WARN_ON_ONCE(sk->sk_prot == p);
4671 ++
4672 + ctx = tls_get_ctx(sk);
4673 + if (likely(ctx)) {
4674 + ctx->sk_write_space = write_space;
4675 +diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
4676 +index 16cc38e51f14d..9b55ca27cccf2 100644
4677 +--- a/net/xdp/xsk.c
4678 ++++ b/net/xdp/xsk.c
4679 +@@ -553,12 +553,6 @@ static int xsk_generic_xmit(struct sock *sk)
4680 + goto out;
4681 + }
4682 +
4683 +- skb = xsk_build_skb(xs, &desc);
4684 +- if (IS_ERR(skb)) {
4685 +- err = PTR_ERR(skb);
4686 +- goto out;
4687 +- }
4688 +-
4689 + /* This is the backpressure mechanism for the Tx path.
4690 + * Reserve space in the completion queue and only proceed
4691 + * if there is space in it. This avoids having to implement
4692 +@@ -567,11 +561,19 @@ static int xsk_generic_xmit(struct sock *sk)
4693 + spin_lock_irqsave(&xs->pool->cq_lock, flags);
4694 + if (xskq_prod_reserve(xs->pool->cq)) {
4695 + spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
4696 +- kfree_skb(skb);
4697 + goto out;
4698 + }
4699 + spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
4700 +
4701 ++ skb = xsk_build_skb(xs, &desc);
4702 ++ if (IS_ERR(skb)) {
4703 ++ err = PTR_ERR(skb);
4704 ++ spin_lock_irqsave(&xs->pool->cq_lock, flags);
4705 ++ xskq_prod_cancel(xs->pool->cq);
4706 ++ spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
4707 ++ goto out;
4708 ++ }
4709 ++
4710 + err = __dev_direct_xmit(skb, xs->queue_id);
4711 + if (err == NETDEV_TX_BUSY) {
4712 + /* Tell user-space to retry the send */
4713 +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
4714 +index 94041ee32798f..b284ee01fdebb 100644
4715 +--- a/scripts/mod/modpost.c
4716 ++++ b/scripts/mod/modpost.c
4717 +@@ -1108,7 +1108,7 @@ static const struct sectioncheck sectioncheck[] = {
4718 + },
4719 + /* Do not export init/exit functions or data */
4720 + {
4721 +- .fromsec = { "__ksymtab*", NULL },
4722 ++ .fromsec = { "___ksymtab*", NULL },
4723 + .bad_tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
4724 + .mismatch = EXPORT_TO_INIT_EXIT,
4725 + .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
4726 +diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
4727 +index 500d0d474d27b..9cd0d61ab26d5 100644
4728 +--- a/sound/pci/hda/hda_auto_parser.c
4729 ++++ b/sound/pci/hda/hda_auto_parser.c
4730 +@@ -823,7 +823,7 @@ static void set_pin_targets(struct hda_codec *codec,
4731 + snd_hda_set_pin_ctl_cache(codec, cfg->nid, cfg->val);
4732 + }
4733 +
4734 +-static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
4735 ++void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth)
4736 + {
4737 + const char *modelname = codec->fixup_name;
4738 +
4739 +@@ -833,7 +833,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
4740 + if (++depth > 10)
4741 + break;
4742 + if (fix->chained_before)
4743 +- apply_fixup(codec, fix->chain_id, action, depth + 1);
4744 ++ __snd_hda_apply_fixup(codec, fix->chain_id, action, depth + 1);
4745 +
4746 + switch (fix->type) {
4747 + case HDA_FIXUP_PINS:
4748 +@@ -874,6 +874,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
4749 + id = fix->chain_id;
4750 + }
4751 + }
4752 ++EXPORT_SYMBOL_GPL(__snd_hda_apply_fixup);
4753 +
4754 + /**
4755 + * snd_hda_apply_fixup - Apply the fixup chain with the given action
4756 +@@ -883,7 +884,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
4757 + void snd_hda_apply_fixup(struct hda_codec *codec, int action)
4758 + {
4759 + if (codec->fixup_list)
4760 +- apply_fixup(codec, codec->fixup_id, action, 0);
4761 ++ __snd_hda_apply_fixup(codec, codec->fixup_id, action, 0);
4762 + }
4763 + EXPORT_SYMBOL_GPL(snd_hda_apply_fixup);
4764 +
4765 +diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
4766 +index 8621f576446b8..63c00363acad9 100644
4767 +--- a/sound/pci/hda/hda_local.h
4768 ++++ b/sound/pci/hda/hda_local.h
4769 +@@ -350,6 +350,7 @@ void snd_hda_apply_verbs(struct hda_codec *codec);
4770 + void snd_hda_apply_pincfgs(struct hda_codec *codec,
4771 + const struct hda_pintbl *cfg);
4772 + void snd_hda_apply_fixup(struct hda_codec *codec, int action);
4773 ++void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth);
4774 + void snd_hda_pick_fixup(struct hda_codec *codec,
4775 + const struct hda_model_fixup *models,
4776 + const struct snd_pci_quirk *quirk,
4777 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
4778 +index bce2cef80000b..0b7d500249f6e 100644
4779 +--- a/sound/pci/hda/patch_conexant.c
4780 ++++ b/sound/pci/hda/patch_conexant.c
4781 +@@ -1079,11 +1079,11 @@ static int patch_conexant_auto(struct hda_codec *codec)
4782 + if (err < 0)
4783 + goto error;
4784 +
4785 +- err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
4786 ++ err = cx_auto_parse_beep(codec);
4787 + if (err < 0)
4788 + goto error;
4789 +
4790 +- err = cx_auto_parse_beep(codec);
4791 ++ err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
4792 + if (err < 0)
4793 + goto error;
4794 +
4795 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4796 +index 4903a857f8f64..70664c9832d6f 100644
4797 +--- a/sound/pci/hda/patch_realtek.c
4798 ++++ b/sound/pci/hda/patch_realtek.c
4799 +@@ -2629,6 +2629,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
4800 + SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
4801 + SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
4802 + SND_PCI_QUIRK(0x1558, 0x67f1, "Clevo PC70H[PRS]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
4803 ++ SND_PCI_QUIRK(0x1558, 0x67f5, "Clevo PD70PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
4804 + SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
4805 + SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170SM", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
4806 + SND_PCI_QUIRK(0x1558, 0x7715, "Clevo X170KM-G", ALC1220_FIXUP_CLEVO_PB51ED),
4807 +@@ -6883,6 +6884,7 @@ enum {
4808 + ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS,
4809 + ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
4810 + ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
4811 ++ ALC298_FIXUP_LENOVO_C940_DUET7,
4812 + ALC287_FIXUP_13S_GEN2_SPEAKERS,
4813 + ALC256_FIXUP_SET_COEF_DEFAULTS,
4814 + ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
4815 +@@ -6892,6 +6894,23 @@ enum {
4816 + ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE,
4817 + };
4818 +
4819 ++/* A special fixup for Lenovo C940 and Yoga Duet 7;
4820 ++ * both have the very same PCI SSID, and we need to apply different fixups
4821 ++ * depending on the codec ID
4822 ++ */
4823 ++static void alc298_fixup_lenovo_c940_duet7(struct hda_codec *codec,
4824 ++ const struct hda_fixup *fix,
4825 ++ int action)
4826 ++{
4827 ++ int id;
4828 ++
4829 ++ if (codec->core.vendor_id == 0x10ec0298)
4830 ++ id = ALC298_FIXUP_LENOVO_SPK_VOLUME; /* C940 */
4831 ++ else
4832 ++ id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* Duet 7 */
4833 ++ __snd_hda_apply_fixup(codec, id, action, 0);
4834 ++}
4835 ++
4836 + static const struct hda_fixup alc269_fixups[] = {
4837 + [ALC269_FIXUP_GPIO2] = {
4838 + .type = HDA_FIXUP_FUNC,
4839 +@@ -8591,6 +8610,10 @@ static const struct hda_fixup alc269_fixups[] = {
4840 + .chained = true,
4841 + .chain_id = ALC269_FIXUP_HEADSET_MODE,
4842 + },
4843 ++ [ALC298_FIXUP_LENOVO_C940_DUET7] = {
4844 ++ .type = HDA_FIXUP_FUNC,
4845 ++ .v.func = alc298_fixup_lenovo_c940_duet7,
4846 ++ },
4847 + [ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
4848 + .type = HDA_FIXUP_VERBS,
4849 + .v.verbs = (const struct hda_verb[]) {
4850 +@@ -8830,6 +8853,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4851 + ALC285_FIXUP_HP_GPIO_AMP_INIT),
4852 + SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation",
4853 + ALC285_FIXUP_HP_GPIO_AMP_INIT),
4854 ++ SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
4855 + SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
4856 + SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
4857 + SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
4858 +@@ -8976,6 +9000,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4859 + SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4860 + SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4861 + SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4862 ++ SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4863 + SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4864 + SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4865 + SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4866 +@@ -9059,7 +9084,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4867 + SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
4868 + SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
4869 + SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
4870 +- SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
4871 ++ SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7),
4872 + SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
4873 + SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
4874 + SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
4875 +@@ -10521,6 +10546,7 @@ enum {
4876 + ALC668_FIXUP_MIC_DET_COEF,
4877 + ALC897_FIXUP_LENOVO_HEADSET_MIC,
4878 + ALC897_FIXUP_HEADSET_MIC_PIN,
4879 ++ ALC897_FIXUP_HP_HSMIC_VERB,
4880 + };
4881 +
4882 + static const struct hda_fixup alc662_fixups[] = {
4883 +@@ -10940,6 +10966,13 @@ static const struct hda_fixup alc662_fixups[] = {
4884 + .chained = true,
4885 + .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC
4886 + },
4887 ++ [ALC897_FIXUP_HP_HSMIC_VERB] = {
4888 ++ .type = HDA_FIXUP_PINS,
4889 ++ .v.pins = (const struct hda_pintbl[]) {
4890 ++ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
4891 ++ { }
4892 ++ },
4893 ++ },
4894 + };
4895 +
4896 + static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4897 +@@ -10965,6 +10998,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4898 + SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4899 + SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4900 + SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
4901 ++ SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
4902 + SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
4903 + SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),
4904 + SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
4905 +diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
4906 +index 773a136161f11..a188901a83bbe 100644
4907 +--- a/sound/pci/hda/patch_via.c
4908 ++++ b/sound/pci/hda/patch_via.c
4909 +@@ -520,11 +520,11 @@ static int via_parse_auto_config(struct hda_codec *codec)
4910 + if (err < 0)
4911 + return err;
4912 +
4913 +- err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
4914 ++ err = auto_parse_beep(codec);
4915 + if (err < 0)
4916 + return err;
4917 +
4918 +- err = auto_parse_beep(codec);
4919 ++ err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
4920 + if (err < 0)
4921 + return err;
4922 +
4923 +diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
4924 +index 235549bb28b94..569e1b8ad0abc 100644
4925 +--- a/tools/perf/util/arm-spe.c
4926 ++++ b/tools/perf/util/arm-spe.c
4927 +@@ -312,26 +312,16 @@ static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq,
4928 + return arm_spe_deliver_synth_event(spe, speq, event, &sample);
4929 + }
4930 +
4931 +-#define SPE_MEM_TYPE (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS | \
4932 +- ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS | \
4933 +- ARM_SPE_REMOTE_ACCESS)
4934 +-
4935 +-static bool arm_spe__is_memory_event(enum arm_spe_sample_type type)
4936 +-{
4937 +- if (type & SPE_MEM_TYPE)
4938 +- return true;
4939 +-
4940 +- return false;
4941 +-}
4942 +-
4943 + static u64 arm_spe__synth_data_source(const struct arm_spe_record *record)
4944 + {
4945 + union perf_mem_data_src data_src = { 0 };
4946 +
4947 + if (record->op == ARM_SPE_LD)
4948 + data_src.mem_op = PERF_MEM_OP_LOAD;
4949 +- else
4950 ++ else if (record->op == ARM_SPE_ST)
4951 + data_src.mem_op = PERF_MEM_OP_STORE;
4952 ++ else
4953 ++ return 0;
4954 +
4955 + if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
4956 + data_src.mem_lvl = PERF_MEM_LVL_L3;
4957 +@@ -435,7 +425,11 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
4958 + return err;
4959 + }
4960 +
4961 +- if (spe->sample_memory && arm_spe__is_memory_event(record->type)) {
4962 ++ /*
4963 + * When data_src is zero, the record is not a memory operation;
4964 + * skip synthesizing a memory sample in this case.
4965 ++ */
4966 ++ if (spe->sample_memory && data_src) {
4967 + err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
4968 + if (err)
4969 + return err;
4970 +diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
4971 +index e32e8f2ff3bd7..1d7c53873dd2d 100644
4972 +--- a/tools/perf/util/build-id.c
4973 ++++ b/tools/perf/util/build-id.c
4974 +@@ -872,6 +872,30 @@ out_free:
4975 + return err;
4976 + }
4977 +
4978 ++static int filename__read_build_id_ns(const char *filename,
4979 ++ struct build_id *bid,
4980 ++ struct nsinfo *nsi)
4981 ++{
4982 ++ struct nscookie nsc;
4983 ++ int ret;
4984 ++
4985 ++ nsinfo__mountns_enter(nsi, &nsc);
4986 ++ ret = filename__read_build_id(filename, bid);
4987 ++ nsinfo__mountns_exit(&nsc);
4988 ++
4989 ++ return ret;
4990 ++}
4991 ++
4992 ++static bool dso__build_id_mismatch(struct dso *dso, const char *name)
4993 ++{
4994 ++ struct build_id bid;
4995 ++
4996 ++ if (filename__read_build_id_ns(name, &bid, dso->nsinfo) < 0)
4997 ++ return false;
4998 ++
4999 ++ return !dso__build_id_equal(dso, &bid);
5000 ++}
5001 ++
5002 + static int dso__cache_build_id(struct dso *dso, struct machine *machine,
5003 + void *priv __maybe_unused)
5004 + {
5005 +@@ -886,6 +910,10 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
5006 + is_kallsyms = true;
5007 + name = machine->mmap_name;
5008 + }
5009 ++
5010 ++ if (!is_kallsyms && dso__build_id_mismatch(dso, name))
5011 ++ return 0;
5012 ++
5013 + return build_id_cache__add_b(&dso->bid, name, dso->nsinfo,
5014 + is_kallsyms, is_vdso);
5015 + }
5016 +diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
5017 +index b5eef5ffb58e5..af3461cb5c409 100755
5018 +--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
5019 ++++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
5020 +@@ -31,7 +31,7 @@ BUGS="flush_remove_add reload"
5021 +
5022 + # List of possible paths to pktgen script from kernel tree for performance tests
5023 + PKTGEN_SCRIPT_PATHS="
5024 +- ../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
5025 ++ ../../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
5026 + pktgen/pktgen_bench_xmit_mode_netif_receive.sh"
5027 +
5028 + # Definition of set types: