From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.7 commit in: /
Date: Wed, 01 Jul 2020 12:24:34
Message-Id: 1593606259.16fbe10b9bcf30d335432166d62c2fb674105770.mpagano@gentoo
commit:     16fbe10b9bcf30d335432166d62c2fb674105770
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Jul 1 12:24:19 2020 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Jul 1 12:24:19 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=16fbe10b

Linux patch 5.7.7

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README            |    4 +
 1006_linux-5.7.7.patch | 8627 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8631 insertions(+)

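The diff below does two things: it registers the new patch in 0000_README and adds 1006_linux-5.7.7.patch, the incremental diff that takes a vanilla 5.7.6 tree to 5.7.7 (note the first hunk, which bumps SUBLEVEL in the top-level Makefile from 6 to 7). For reference, a minimal sketch of applying such a patch by hand (the directory paths are assumptions; in practice the gentoo-sources ebuild applies the whole series automatically):

  # Hypothetical manual application; the gentoo-sources ebuild normally does this.
  cd /usr/src/linux-5.7.6                        # a clean vanilla 5.7.6 tree (assumed path)
  patch -p1 --dry-run < 1006_linux-5.7.7.patch   # check that every hunk applies cleanly
  patch -p1 < 1006_linux-5.7.7.patch             # apply for real; the tree is now 5.7.7

The -p1 strip level matches the a/ and b/ path prefixes used throughout the diff.
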
16 diff --git a/0000_README b/0000_README
17 index 916b8cc..4fdfe73 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -67,6 +67,10 @@ Patch: 1005_linux-5.7.6.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.7.6
23
24 +Patch: 1006_linux-5.7.7.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.7.7
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1006_linux-5.7.7.patch b/1006_linux-5.7.7.patch
33 new file mode 100644
34 index 0000000..ec7b58c
35 --- /dev/null
36 +++ b/1006_linux-5.7.7.patch
37 @@ -0,0 +1,8627 @@
38 +diff --git a/Makefile b/Makefile
39 +index f928cd1dfdc1..5a5e329d9241 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 7
46 +-SUBLEVEL = 6
47 ++SUBLEVEL = 7
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/arm/boot/dts/am335x-pocketbeagle.dts b/arch/arm/boot/dts/am335x-pocketbeagle.dts
52 +index 4da719098028..f0b222201b86 100644
53 +--- a/arch/arm/boot/dts/am335x-pocketbeagle.dts
54 ++++ b/arch/arm/boot/dts/am335x-pocketbeagle.dts
55 +@@ -88,7 +88,6 @@
56 + AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
57 + AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
58 + AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
59 +- AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4) /* (B12) mcasp0_aclkr.mmc0_sdwp */
60 + >;
61 + };
62 +
63 +diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
64 +index a35f5052d76f..ed6634d34c3c 100644
65 +--- a/arch/arm/boot/dts/am33xx.dtsi
66 ++++ b/arch/arm/boot/dts/am33xx.dtsi
67 +@@ -335,7 +335,7 @@
68 + <0x47400010 0x4>;
69 + reg-names = "rev", "sysc";
70 + ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
71 +- SYSC_OMAP2_SOFTRESET)>;
72 ++ SYSC_OMAP4_SOFTRESET)>;
73 + ti,sysc-midle = <SYSC_IDLE_FORCE>,
74 + <SYSC_IDLE_NO>,
75 + <SYSC_IDLE_SMART>;
76 +@@ -347,7 +347,7 @@
77 + clock-names = "fck";
78 + #address-cells = <1>;
79 + #size-cells = <1>;
80 +- ranges = <0x0 0x47400000 0x5000>;
81 ++ ranges = <0x0 0x47400000 0x8000>;
82 +
83 + usb0_phy: usb-phy@1300 {
84 + compatible = "ti,am335x-usb-phy";
85 +diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
86 +index da6d70f09ef1..3175266ede64 100644
87 +--- a/arch/arm/boot/dts/bcm-nsp.dtsi
88 ++++ b/arch/arm/boot/dts/bcm-nsp.dtsi
89 +@@ -200,7 +200,7 @@
90 + status = "disabled";
91 + };
92 +
93 +- dma@20000 {
94 ++ dma: dma@20000 {
95 + compatible = "arm,pl330", "arm,primecell";
96 + reg = <0x20000 0x1000>;
97 + interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
98 +@@ -215,6 +215,8 @@
99 + clocks = <&iprocslow>;
100 + clock-names = "apb_pclk";
101 + #dma-cells = <1>;
102 ++ dma-coherent;
103 ++ status = "disabled";
104 + };
105 +
106 + sdio: sdhci@21000 {
107 +@@ -257,10 +259,10 @@
108 + status = "disabled";
109 + };
110 +
111 +- mailbox: mailbox@25000 {
112 ++ mailbox: mailbox@25c00 {
113 + compatible = "brcm,iproc-fa2-mbox";
114 +- reg = <0x25000 0x445>;
115 +- interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
116 ++ reg = <0x25c00 0x400>;
117 ++ interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
118 + #mbox-cells = <1>;
119 + brcm,rx-status-len = <32>;
120 + brcm,use-bcm-hdr;
121 +diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
122 +index 334325390aed..29bbecd36f65 100644
123 +--- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
124 ++++ b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
125 +@@ -17,6 +17,7 @@
126 + };
127 +
128 + memory {
129 ++ device_type = "memory";
130 + reg = <0x00000000 0x08000000
131 + 0x88000000 0x18000000>;
132 + };
133 +diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
134 +index 8c388eb8a08f..7be4c4e628e0 100644
135 +--- a/arch/arm/boot/dts/bcm958522er.dts
136 ++++ b/arch/arm/boot/dts/bcm958522er.dts
137 +@@ -58,6 +58,10 @@
138 +
139 + /* USB 3 support needed to be complete */
140 +
141 ++&dma {
142 ++ status = "okay";
143 ++};
144 ++
145 + &amac0 {
146 + status = "okay";
147 + };
148 +diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
149 +index c339771bb22e..e58ed7e95346 100644
150 +--- a/arch/arm/boot/dts/bcm958525er.dts
151 ++++ b/arch/arm/boot/dts/bcm958525er.dts
152 +@@ -58,6 +58,10 @@
153 +
154 + /* USB 3 support needed to be complete */
155 +
156 ++&dma {
157 ++ status = "okay";
158 ++};
159 ++
160 + &amac0 {
161 + status = "okay";
162 + };
163 +diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
164 +index 1c72ec8288de..716da62f5788 100644
165 +--- a/arch/arm/boot/dts/bcm958525xmc.dts
166 ++++ b/arch/arm/boot/dts/bcm958525xmc.dts
167 +@@ -58,6 +58,10 @@
168 +
169 + /* XHCI support needed to be complete */
170 +
171 ++&dma {
172 ++ status = "okay";
173 ++};
174 ++
175 + &amac0 {
176 + status = "okay";
177 + };
178 +diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
179 +index 96a021cebd97..a49c2fd21f4a 100644
180 +--- a/arch/arm/boot/dts/bcm958622hr.dts
181 ++++ b/arch/arm/boot/dts/bcm958622hr.dts
182 +@@ -58,6 +58,10 @@
183 +
184 + /* USB 3 and SLIC support needed to be complete */
185 +
186 ++&dma {
187 ++ status = "okay";
188 ++};
189 ++
190 + &amac0 {
191 + status = "okay";
192 + };
193 +diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
194 +index b2c7f21d471e..dd6dff6452b8 100644
195 +--- a/arch/arm/boot/dts/bcm958623hr.dts
196 ++++ b/arch/arm/boot/dts/bcm958623hr.dts
197 +@@ -58,6 +58,10 @@
198 +
199 + /* USB 3 and SLIC support needed to be complete */
200 +
201 ++&dma {
202 ++ status = "okay";
203 ++};
204 ++
205 + &amac0 {
206 + status = "okay";
207 + };
208 +diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
209 +index 536fb24f38bb..a71371b4065e 100644
210 +--- a/arch/arm/boot/dts/bcm958625hr.dts
211 ++++ b/arch/arm/boot/dts/bcm958625hr.dts
212 +@@ -69,6 +69,10 @@
213 + status = "okay";
214 + };
215 +
216 ++&dma {
217 ++ status = "okay";
218 ++};
219 ++
220 + &amac0 {
221 + status = "okay";
222 + };
223 +diff --git a/arch/arm/boot/dts/bcm958625k.dts b/arch/arm/boot/dts/bcm958625k.dts
224 +index 3fcca12d83c2..7b84b54436ed 100644
225 +--- a/arch/arm/boot/dts/bcm958625k.dts
226 ++++ b/arch/arm/boot/dts/bcm958625k.dts
227 +@@ -48,6 +48,10 @@
228 + };
229 + };
230 +
231 ++&dma {
232 ++ status = "okay";
233 ++};
234 ++
235 + &amac0 {
236 + status = "okay";
237 + };
238 +diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi
239 +index f05e91841202..53a25fba34f6 100644
240 +--- a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi
241 ++++ b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi
242 +@@ -232,13 +232,6 @@
243 + status = "okay";
244 + };
245 +
246 +-&wdog1 {
247 +- pinctrl-names = "default";
248 +- pinctrl-0 = <&pinctrl_wdog>;
249 +- fsl,ext-reset-output;
250 +- status = "okay";
251 +-};
252 +-
253 + &iomuxc {
254 + pinctrl-0 = <&pinctrl_reset_out &pinctrl_gpio>;
255 +
256 +@@ -409,10 +402,4 @@
257 + MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x170f9
258 + >;
259 + };
260 +-
261 +- pinctrl_wdog: wdoggrp {
262 +- fsl,pins = <
263 +- MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY 0x30b0
264 +- >;
265 +- };
266 + };
267 +diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi
268 +index a17af4d9bfdf..61ba21a605a8 100644
269 +--- a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi
270 ++++ b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi
271 +@@ -57,6 +57,13 @@
272 + status = "okay";
273 + };
274 +
275 ++&wdog1 {
276 ++ pinctrl-names = "default";
277 ++ pinctrl-0 = <&pinctrl_wdog>;
278 ++ fsl,ext-reset-output;
279 ++ status = "okay";
280 ++};
281 ++
282 + &iomuxc {
283 + pinctrl-names = "default";
284 + pinctrl-0 = <&pinctrl_reset_out>;
285 +@@ -106,4 +113,10 @@
286 + MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09 0x1b0b0
287 + >;
288 + };
289 ++
290 ++ pinctrl_wdog: wdoggrp {
291 ++ fsl,pins = <
292 ++ MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY 0x18b0
293 ++ >;
294 ++ };
295 + };
296 +diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts
297 +index 8047e8cdb3af..4548d87534e3 100644
298 +--- a/arch/arm/boot/dts/omap4-duovero-parlor.dts
299 ++++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts
300 +@@ -139,7 +139,7 @@
301 + ethernet@gpmc {
302 + reg = <5 0 0xff>;
303 + interrupt-parent = <&gpio2>;
304 +- interrupts = <12 IRQ_TYPE_EDGE_FALLING>; /* gpio_44 */
305 ++ interrupts = <12 IRQ_TYPE_LEVEL_LOW>; /* gpio_44 */
306 +
307 + phy-mode = "mii";
308 +
309 +diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
310 +index 6aa938b949db..1df0ee01ee02 100644
311 +--- a/arch/arm/mach-bcm/Kconfig
312 ++++ b/arch/arm/mach-bcm/Kconfig
313 +@@ -53,6 +53,7 @@ config ARCH_BCM_NSP
314 + select ARM_ERRATA_754322
315 + select ARM_ERRATA_775420
316 + select ARM_ERRATA_764369 if SMP
317 ++ select ARM_TIMER_SP804
318 + select THERMAL
319 + select THERMAL_OF
320 + help
321 +diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
322 +index f057df813f83..e9962b48e30c 100644
323 +--- a/arch/arm/mach-imx/pm-imx5.c
324 ++++ b/arch/arm/mach-imx/pm-imx5.c
325 +@@ -295,14 +295,14 @@ static int __init imx_suspend_alloc_ocram(
326 + if (!ocram_pool) {
327 + pr_warn("%s: ocram pool unavailable!\n", __func__);
328 + ret = -ENODEV;
329 +- goto put_node;
330 ++ goto put_device;
331 + }
332 +
333 + ocram_base = gen_pool_alloc(ocram_pool, size);
334 + if (!ocram_base) {
335 + pr_warn("%s: unable to alloc ocram!\n", __func__);
336 + ret = -ENOMEM;
337 +- goto put_node;
338 ++ goto put_device;
339 + }
340 +
341 + phys = gen_pool_virt_to_phys(ocram_pool, ocram_base);
342 +@@ -312,6 +312,8 @@ static int __init imx_suspend_alloc_ocram(
343 + if (virt_out)
344 + *virt_out = virt;
345 +
346 ++put_device:
347 ++ put_device(&pdev->dev);
348 + put_node:
349 + of_node_put(node);
350 +
351 +diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
352 +index 82706af307de..c630457bb228 100644
353 +--- a/arch/arm/mach-omap2/omap_hwmod.c
354 ++++ b/arch/arm/mach-omap2/omap_hwmod.c
355 +@@ -3489,7 +3489,7 @@ static const struct omap_hwmod_reset dra7_reset_quirks[] = {
356 + };
357 +
358 + static const struct omap_hwmod_reset omap_reset_quirks[] = {
359 +- { .match = "dss", .len = 3, .reset = omap_dss_reset, },
360 ++ { .match = "dss_core", .len = 8, .reset = omap_dss_reset, },
361 + { .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, },
362 + { .match = "i2c", .len = 3, .reset = omap_i2c_reset, },
363 + { .match = "wd_timer", .len = 8, .reset = omap2_wd_timer_reset, },
364 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
365 +index 951e14a3de0e..22aed2806fda 100644
366 +--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
367 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
368 +@@ -196,7 +196,7 @@
369 +
370 + ldo1_reg: LDO1 {
371 + regulator-name = "LDO1";
372 +- regulator-min-microvolt = <3000000>;
373 ++ regulator-min-microvolt = <1600000>;
374 + regulator-max-microvolt = <3300000>;
375 + regulator-boot-on;
376 + regulator-always-on;
377 +@@ -204,7 +204,7 @@
378 +
379 + ldo2_reg: LDO2 {
380 + regulator-name = "LDO2";
381 +- regulator-min-microvolt = <900000>;
382 ++ regulator-min-microvolt = <800000>;
383 + regulator-max-microvolt = <900000>;
384 + regulator-boot-on;
385 + regulator-always-on;
386 +diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
387 +index 2497eebb5739..fe49dbc535e1 100644
388 +--- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
389 ++++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
390 +@@ -101,7 +101,7 @@
391 +
392 + ldo1_reg: LDO1 {
393 + regulator-name = "LDO1";
394 +- regulator-min-microvolt = <3000000>;
395 ++ regulator-min-microvolt = <1600000>;
396 + regulator-max-microvolt = <3300000>;
397 + regulator-boot-on;
398 + regulator-always-on;
399 +@@ -109,7 +109,7 @@
400 +
401 + ldo2_reg: LDO2 {
402 + regulator-name = "LDO2";
403 +- regulator-min-microvolt = <900000>;
404 ++ regulator-min-microvolt = <800000>;
405 + regulator-max-microvolt = <900000>;
406 + regulator-boot-on;
407 + regulator-always-on;
408 +diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
409 +index 94289d126993..c12186f8ab7a 100644
410 +--- a/arch/arm64/kernel/fpsimd.c
411 ++++ b/arch/arm64/kernel/fpsimd.c
412 +@@ -338,7 +338,7 @@ static unsigned int find_supported_vector_length(unsigned int vl)
413 + return sve_vl_from_vq(__bit_to_vq(bit));
414 + }
415 +
416 +-#ifdef CONFIG_SYSCTL
417 ++#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
418 +
419 + static int sve_proc_do_default_vl(struct ctl_table *table, int write,
420 + void __user *buffer, size_t *lenp,
421 +@@ -384,9 +384,9 @@ static int __init sve_sysctl_init(void)
422 + return 0;
423 + }
424 +
425 +-#else /* ! CONFIG_SYSCTL */
426 ++#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
427 + static int __init sve_sysctl_init(void) { return 0; }
428 +-#endif /* ! CONFIG_SYSCTL */
429 ++#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
430 +
431 + #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
432 + (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
433 +diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
434 +index 0bbac612146e..666b225aeb3a 100644
435 +--- a/arch/arm64/kernel/perf_regs.c
436 ++++ b/arch/arm64/kernel/perf_regs.c
437 +@@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
438 + return 0;
439 +
440 + /*
441 +- * Compat (i.e. 32 bit) mode:
442 +- * - PC has been set in the pt_regs struct in kernel_entry,
443 +- * - Handle SP and LR here.
444 ++ * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
445 ++ * we're stuck with it for ABI compatability reasons.
446 ++ *
447 ++ * For a 32-bit consumer inspecting a 32-bit task, then it will look at
448 ++ * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
449 ++ * These correspond directly to a prefix of the registers saved in our
450 ++ * 'struct pt_regs', with the exception of the PC, so we copy that down
451 ++ * (x15 corresponds to SP_hyp in the architecture).
452 ++ *
453 ++ * So far, so good.
454 ++ *
455 ++ * The oddity arises when a 64-bit consumer looks at a 32-bit task and
456 ++ * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
457 ++ * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
458 ++ * PC registers would normally live. The initial idea was to allow a
459 ++ * 64-bit unwinder to unwind a 32-bit task and, although it's not clear
460 ++ * how well that works in practice, somebody might be relying on it.
461 ++ *
462 ++ * At the time we make a sample, we don't know whether the consumer is
463 ++ * 32-bit or 64-bit, so we have to cater for both possibilities.
464 + */
465 + if (compat_user_mode(regs)) {
466 + if ((u32)idx == PERF_REG_ARM64_SP)
467 + return regs->compat_sp;
468 + if ((u32)idx == PERF_REG_ARM64_LR)
469 + return regs->compat_lr;
470 ++ if (idx == 15)
471 ++ return regs->pc;
472 + }
473 +
474 + if ((u32)idx == PERF_REG_ARM64_SP)
475 +diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
476 +index 4a75f2d9bf0e..bce0e5349978 100644
477 +--- a/arch/powerpc/mm/nohash/kaslr_booke.c
478 ++++ b/arch/powerpc/mm/nohash/kaslr_booke.c
479 +@@ -14,6 +14,7 @@
480 + #include <linux/memblock.h>
481 + #include <linux/libfdt.h>
482 + #include <linux/crash_core.h>
483 ++#include <asm/cacheflush.h>
484 + #include <asm/pgalloc.h>
485 + #include <asm/prom.h>
486 + #include <asm/kdump.h>
487 +diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
488 +index d969bab4a26b..262e5bbb2776 100644
489 +--- a/arch/riscv/include/asm/cmpxchg.h
490 ++++ b/arch/riscv/include/asm/cmpxchg.h
491 +@@ -179,7 +179,7 @@
492 + " bnez %1, 0b\n" \
493 + "1:\n" \
494 + : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
495 +- : "rJ" (__old), "rJ" (__new) \
496 ++ : "rJ" ((long)__old), "rJ" (__new) \
497 + : "memory"); \
498 + break; \
499 + case 8: \
500 +@@ -224,7 +224,7 @@
501 + RISCV_ACQUIRE_BARRIER \
502 + "1:\n" \
503 + : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
504 +- : "rJ" (__old), "rJ" (__new) \
505 ++ : "rJ" ((long)__old), "rJ" (__new) \
506 + : "memory"); \
507 + break; \
508 + case 8: \
509 +@@ -270,7 +270,7 @@
510 + " bnez %1, 0b\n" \
511 + "1:\n" \
512 + : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
513 +- : "rJ" (__old), "rJ" (__new) \
514 ++ : "rJ" ((long)__old), "rJ" (__new) \
515 + : "memory"); \
516 + break; \
517 + case 8: \
518 +@@ -316,7 +316,7 @@
519 + " fence rw, rw\n" \
520 + "1:\n" \
521 + : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
522 +- : "rJ" (__old), "rJ" (__new) \
523 ++ : "rJ" ((long)__old), "rJ" (__new) \
524 + : "memory"); \
525 + break; \
526 + case 8: \
527 +diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
528 +index f3619f59d85c..12f8a7fce78b 100644
529 +--- a/arch/riscv/kernel/sys_riscv.c
530 ++++ b/arch/riscv/kernel/sys_riscv.c
531 +@@ -8,6 +8,7 @@
532 + #include <linux/syscalls.h>
533 + #include <asm/unistd.h>
534 + #include <asm/cacheflush.h>
535 ++#include <asm-generic/mman-common.h>
536 +
537 + static long riscv_sys_mmap(unsigned long addr, unsigned long len,
538 + unsigned long prot, unsigned long flags,
539 +@@ -16,6 +17,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
540 + {
541 + if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
542 + return -EINVAL;
543 ++
544 ++ if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
545 ++ if (unlikely(!(prot & PROT_READ)))
546 ++ return -EINVAL;
547 ++
548 + return ksys_mmap_pgoff(addr, len, prot, flags, fd,
549 + offset >> (PAGE_SHIFT - page_shift_offset));
550 + }
551 +diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
552 +index 3bcfdeb01395..0cd085cdeb4f 100644
553 +--- a/arch/s390/include/asm/vdso.h
554 ++++ b/arch/s390/include/asm/vdso.h
555 +@@ -36,6 +36,7 @@ struct vdso_data {
556 + __u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
557 + __u32 ts_dir; /* TOD steering direction 0x64 */
558 + __u64 ts_end; /* TOD steering end 0x68 */
559 ++ __u32 hrtimer_res; /* hrtimer resolution 0x70 */
560 + };
561 +
562 + struct vdso_per_cpu_data {
563 +diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
564 +index e80f0e6f5972..46f84cb0d552 100644
565 +--- a/arch/s390/kernel/asm-offsets.c
566 ++++ b/arch/s390/kernel/asm-offsets.c
567 +@@ -76,6 +76,7 @@ int main(void)
568 + OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
569 + OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
570 + OFFSET(__VDSO_TS_END, vdso_data, ts_end);
571 ++ OFFSET(__VDSO_CLOCK_REALTIME_RES, vdso_data, hrtimer_res);
572 + OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
573 + OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
574 + OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val);
575 +@@ -86,7 +87,6 @@ int main(void)
576 + DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
577 + DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
578 + DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
579 +- DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
580 + DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
581 + BLANK();
582 + /* idle data offsets */
583 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
584 +index 3ae64914bd14..9584e743102b 100644
585 +--- a/arch/s390/kernel/entry.S
586 ++++ b/arch/s390/kernel/entry.S
587 +@@ -368,9 +368,9 @@ ENTRY(system_call)
588 + jnz .Lsysc_nr_ok
589 + # svc 0: system call number in %r1
590 + llgfr %r1,%r1 # clear high word in r1
591 ++ sth %r1,__PT_INT_CODE+2(%r11)
592 + cghi %r1,NR_syscalls
593 + jnl .Lsysc_nr_ok
594 +- sth %r1,__PT_INT_CODE+2(%r11)
595 + slag %r8,%r1,3
596 + .Lsysc_nr_ok:
597 + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
598 +diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
599 +index 58faa12542a1..e007224b65bb 100644
600 +--- a/arch/s390/kernel/ptrace.c
601 ++++ b/arch/s390/kernel/ptrace.c
602 +@@ -324,6 +324,25 @@ static inline void __poke_user_per(struct task_struct *child,
603 + child->thread.per_user.end = data;
604 + }
605 +
606 ++static void fixup_int_code(struct task_struct *child, addr_t data)
607 ++{
608 ++ struct pt_regs *regs = task_pt_regs(child);
609 ++ int ilc = regs->int_code >> 16;
610 ++ u16 insn;
611 ++
612 ++ if (ilc > 6)
613 ++ return;
614 ++
615 ++ if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
616 ++ &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
617 ++ return;
618 ++
619 ++ /* double check that tracee stopped on svc instruction */
620 ++ if ((insn >> 8) != 0xa)
621 ++ return;
622 ++
623 ++ regs->int_code = 0x20000 | (data & 0xffff);
624 ++}
625 + /*
626 + * Write a word to the user area of a process at location addr. This
627 + * operation does have an additional problem compared to peek_user.
628 +@@ -335,7 +354,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
629 + struct user *dummy = NULL;
630 + addr_t offset;
631 +
632 ++
633 + if (addr < (addr_t) &dummy->regs.acrs) {
634 ++ struct pt_regs *regs = task_pt_regs(child);
635 + /*
636 + * psw and gprs are stored on the stack
637 + */
638 +@@ -353,7 +374,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
639 + /* Invalid addressing mode bits */
640 + return -EINVAL;
641 + }
642 +- *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
643 ++
644 ++ if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
645 ++ addr == offsetof(struct user, regs.gprs[2]))
646 ++ fixup_int_code(child, data);
647 ++ *(addr_t *)((addr_t) &regs->psw + addr) = data;
648 +
649 + } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
650 + /*
651 +@@ -719,6 +744,10 @@ static int __poke_user_compat(struct task_struct *child,
652 + regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
653 + (__u64)(tmp & PSW32_ADDR_AMODE);
654 + } else {
655 ++
656 ++ if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
657 ++ addr == offsetof(struct compat_user, regs.gprs[2]))
658 ++ fixup_int_code(child, data);
659 + /* gpr 0-15 */
660 + *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
661 + }
662 +@@ -838,40 +867,66 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
663 + asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
664 + {
665 + unsigned long mask = -1UL;
666 ++ long ret = -1;
667 ++
668 ++ if (is_compat_task())
669 ++ mask = 0xffffffff;
670 +
671 + /*
672 + * The sysc_tracesys code in entry.S stored the system
673 + * call number to gprs[2].
674 + */
675 + if (test_thread_flag(TIF_SYSCALL_TRACE) &&
676 +- (tracehook_report_syscall_entry(regs) ||
677 +- regs->gprs[2] >= NR_syscalls)) {
678 ++ tracehook_report_syscall_entry(regs)) {
679 + /*
680 +- * Tracing decided this syscall should not happen or the
681 +- * debugger stored an invalid system call number. Skip
682 ++ * Tracing decided this syscall should not happen. Skip
683 + * the system call and the system call restart handling.
684 + */
685 +- clear_pt_regs_flag(regs, PIF_SYSCALL);
686 +- return -1;
687 ++ goto skip;
688 + }
689 +
690 ++#ifdef CONFIG_SECCOMP
691 + /* Do the secure computing check after ptrace. */
692 +- if (secure_computing()) {
693 +- /* seccomp failures shouldn't expose any additional code. */
694 +- return -1;
695 ++ if (unlikely(test_thread_flag(TIF_SECCOMP))) {
696 ++ struct seccomp_data sd;
697 ++
698 ++ if (is_compat_task()) {
699 ++ sd.instruction_pointer = regs->psw.addr & 0x7fffffff;
700 ++ sd.arch = AUDIT_ARCH_S390;
701 ++ } else {
702 ++ sd.instruction_pointer = regs->psw.addr;
703 ++ sd.arch = AUDIT_ARCH_S390X;
704 ++ }
705 ++
706 ++ sd.nr = regs->int_code & 0xffff;
707 ++ sd.args[0] = regs->orig_gpr2 & mask;
708 ++ sd.args[1] = regs->gprs[3] & mask;
709 ++ sd.args[2] = regs->gprs[4] & mask;
710 ++ sd.args[3] = regs->gprs[5] & mask;
711 ++ sd.args[4] = regs->gprs[6] & mask;
712 ++ sd.args[5] = regs->gprs[7] & mask;
713 ++
714 ++ if (__secure_computing(&sd) == -1)
715 ++ goto skip;
716 + }
717 ++#endif /* CONFIG_SECCOMP */
718 +
719 + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
720 +- trace_sys_enter(regs, regs->gprs[2]);
721 ++ trace_sys_enter(regs, regs->int_code & 0xffff);
722 +
723 +- if (is_compat_task())
724 +- mask = 0xffffffff;
725 +
726 +- audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
727 ++ audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask,
728 + regs->gprs[3] &mask, regs->gprs[4] &mask,
729 + regs->gprs[5] &mask);
730 +
731 ++ if ((signed long)regs->gprs[2] >= NR_syscalls) {
732 ++ regs->gprs[2] = -ENOSYS;
733 ++ ret = -ENOSYS;
734 ++ }
735 + return regs->gprs[2];
736 ++skip:
737 ++ clear_pt_regs_flag(regs, PIF_SYSCALL);
738 ++ return ret;
739 + }
740 +
741 + asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
742 +diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
743 +index f9d070d016e3..b1113b519432 100644
744 +--- a/arch/s390/kernel/time.c
745 ++++ b/arch/s390/kernel/time.c
746 +@@ -301,6 +301,7 @@ void update_vsyscall(struct timekeeper *tk)
747 +
748 + vdso_data->tk_mult = tk->tkr_mono.mult;
749 + vdso_data->tk_shift = tk->tkr_mono.shift;
750 ++ vdso_data->hrtimer_res = hrtimer_resolution;
751 + smp_wmb();
752 + ++vdso_data->tb_update_count;
753 + }
754 +diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
755 +index bec19e7e6e1c..4a66a1cb919b 100644
756 +--- a/arch/s390/kernel/vdso64/Makefile
757 ++++ b/arch/s390/kernel/vdso64/Makefile
758 +@@ -18,8 +18,8 @@ KBUILD_AFLAGS_64 += -m64 -s
759 +
760 + KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
761 + KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
762 +-KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
763 +- -Wl,--hash-style=both
764 ++ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \
765 ++ --hash-style=both --build-id -T
766 +
767 + $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
768 + $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
769 +@@ -37,8 +37,8 @@ KASAN_SANITIZE := n
770 + $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
771 +
772 + # link rule for the .so file, .lds has to be first
773 +-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
774 +- $(call if_changed,vdso64ld)
775 ++$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) FORCE
776 ++ $(call if_changed,ld)
777 +
778 + # strip rule for the .so file
779 + $(obj)/%.so: OBJCOPYFLAGS := -S
780 +@@ -50,8 +50,6 @@ $(obj-vdso64): %.o: %.S FORCE
781 + $(call if_changed_dep,vdso64as)
782 +
783 + # actual build commands
784 +-quiet_cmd_vdso64ld = VDSO64L $@
785 +- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
786 + quiet_cmd_vdso64as = VDSO64A $@
787 + cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
788 +
789 +diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
790 +index 081435398e0a..0c79caa32b59 100644
791 +--- a/arch/s390/kernel/vdso64/clock_getres.S
792 ++++ b/arch/s390/kernel/vdso64/clock_getres.S
793 +@@ -17,12 +17,14 @@
794 + .type __kernel_clock_getres,@function
795 + __kernel_clock_getres:
796 + CFI_STARTPROC
797 +- larl %r1,4f
798 ++ larl %r1,3f
799 ++ lg %r0,0(%r1)
800 + cghi %r2,__CLOCK_REALTIME_COARSE
801 + je 0f
802 + cghi %r2,__CLOCK_MONOTONIC_COARSE
803 + je 0f
804 +- larl %r1,3f
805 ++ larl %r1,_vdso_data
806 ++ llgf %r0,__VDSO_CLOCK_REALTIME_RES(%r1)
807 + cghi %r2,__CLOCK_REALTIME
808 + je 0f
809 + cghi %r2,__CLOCK_MONOTONIC
810 +@@ -36,7 +38,6 @@ __kernel_clock_getres:
811 + jz 2f
812 + 0: ltgr %r3,%r3
813 + jz 1f /* res == NULL */
814 +- lg %r0,0(%r1)
815 + xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
816 + stg %r0,8(%r3) /* store tp->tv_usec */
817 + 1: lghi %r2,0
818 +@@ -45,6 +46,5 @@ __kernel_clock_getres:
819 + svc 0
820 + br %r14
821 + CFI_ENDPROC
822 +-3: .quad __CLOCK_REALTIME_RES
823 +-4: .quad __CLOCK_COARSE_RES
824 ++3: .quad __CLOCK_COARSE_RES
825 + .size __kernel_clock_getres,.-__kernel_clock_getres
826 +diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
827 +index 60f7205ebe40..646dd58169ec 100644
828 +--- a/arch/sparc/kernel/ptrace_32.c
829 ++++ b/arch/sparc/kernel/ptrace_32.c
830 +@@ -168,12 +168,17 @@ static int genregs32_set(struct task_struct *target,
831 + if (ret || !count)
832 + return ret;
833 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
834 +- &regs->y,
835 ++ &regs->npc,
836 + 34 * sizeof(u32), 35 * sizeof(u32));
837 + if (ret || !count)
838 + return ret;
839 ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
840 ++ &regs->y,
841 ++ 35 * sizeof(u32), 36 * sizeof(u32));
842 ++ if (ret || !count)
843 ++ return ret;
844 + return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
845 +- 35 * sizeof(u32), 38 * sizeof(u32));
846 ++ 36 * sizeof(u32), 38 * sizeof(u32));
847 + }
848 +
849 + static int fpregs32_get(struct task_struct *target,
850 +diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
851 +index 76d1d64d51e3..41f792208622 100644
852 +--- a/arch/x86/boot/compressed/head_64.S
853 ++++ b/arch/x86/boot/compressed/head_64.S
854 +@@ -213,7 +213,6 @@ SYM_FUNC_START(startup_32)
855 + * We place all of the values on our mini stack so lret can
856 + * used to perform that far jump.
857 + */
858 +- pushl $__KERNEL_CS
859 + leal startup_64(%ebp), %eax
860 + #ifdef CONFIG_EFI_MIXED
861 + movl efi32_boot_args(%ebp), %edi
862 +@@ -224,11 +223,20 @@ SYM_FUNC_START(startup_32)
863 + movl efi32_boot_args+8(%ebp), %edx // saved bootparams pointer
864 + cmpl $0, %edx
865 + jnz 1f
866 ++ /*
867 ++ * efi_pe_entry uses MS calling convention, which requires 32 bytes of
868 ++ * shadow space on the stack even if all arguments are passed in
869 ++ * registers. We also need an additional 8 bytes for the space that
870 ++ * would be occupied by the return address, and this also results in
871 ++ * the correct stack alignment for entry.
872 ++ */
873 ++ subl $40, %esp
874 + leal efi_pe_entry(%ebp), %eax
875 + movl %edi, %ecx // MS calling convention
876 + movl %esi, %edx
877 + 1:
878 + #endif
879 ++ pushl $__KERNEL_CS
880 + pushl %eax
881 +
882 + /* Enter paged protected Mode, activating Long Mode */
883 +@@ -776,6 +784,7 @@ SYM_DATA_LOCAL(boot_heap, .fill BOOT_HEAP_SIZE, 1, 0)
884 +
885 + SYM_DATA_START_LOCAL(boot_stack)
886 + .fill BOOT_STACK_SIZE, 1, 0
887 ++ .balign 16
888 + SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)
889 +
890 + /*
891 +diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
892 +index dd17c2da1af5..da78ccbd493b 100644
893 +--- a/arch/x86/include/asm/cpu.h
894 ++++ b/arch/x86/include/asm/cpu.h
895 +@@ -58,4 +58,9 @@ static inline bool handle_guest_split_lock(unsigned long ip)
896 + return false;
897 + }
898 + #endif
899 ++#ifdef CONFIG_IA32_FEAT_CTL
900 ++void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
901 ++#else
902 ++static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
903 ++#endif
904 + #endif /* _ASM_X86_CPU_H */
905 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
906 +index 0a6b35353fc7..86e2e0272c57 100644
907 +--- a/arch/x86/include/asm/kvm_host.h
908 ++++ b/arch/x86/include/asm/kvm_host.h
909 +@@ -1195,7 +1195,7 @@ struct kvm_x86_ops {
910 + void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
911 + struct kvm_memory_slot *slot,
912 + gfn_t offset, unsigned long mask);
913 +- int (*write_log_dirty)(struct kvm_vcpu *vcpu);
914 ++ int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
915 +
916 + /* pmu operations of sub-arch */
917 + const struct kvm_pmu_ops *pmu_ops;
918 +diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
919 +index b809f117f3f4..9d5252c9685c 100644
920 +--- a/arch/x86/include/asm/mwait.h
921 ++++ b/arch/x86/include/asm/mwait.h
922 +@@ -23,8 +23,6 @@
923 + #define MWAITX_MAX_LOOPS ((u32)-1)
924 + #define MWAITX_DISABLE_CSTATES 0xf0
925 +
926 +-u32 get_umwait_control_msr(void);
927 +-
928 + static inline void __monitor(const void *eax, unsigned long ecx,
929 + unsigned long edx)
930 + {
931 +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
932 +index 3bcf27caf6c9..c4e8fd709cf6 100644
933 +--- a/arch/x86/include/asm/processor.h
934 ++++ b/arch/x86/include/asm/processor.h
935 +@@ -113,9 +113,10 @@ struct cpuinfo_x86 {
936 + /* in KB - valid for CPUS which support this call: */
937 + unsigned int x86_cache_size;
938 + int x86_cache_alignment; /* In bytes */
939 +- /* Cache QoS architectural values: */
940 ++ /* Cache QoS architectural values, valid only on the BSP: */
941 + int x86_cache_max_rmid; /* max index */
942 + int x86_cache_occ_scale; /* scale to bytes */
943 ++ int x86_cache_mbm_width_offset;
944 + int x86_power;
945 + unsigned long loops_per_jiffy;
946 + /* cpuid returned max cores value: */
947 +diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h
948 +index f6b7fe2833cc..c8a27cbbdae2 100644
949 +--- a/arch/x86/include/asm/resctrl_sched.h
950 ++++ b/arch/x86/include/asm/resctrl_sched.h
951 +@@ -84,9 +84,12 @@ static inline void resctrl_sched_in(void)
952 + __resctrl_sched_in();
953 + }
954 +
955 ++void resctrl_cpu_detect(struct cpuinfo_x86 *c);
956 ++
957 + #else
958 +
959 + static inline void resctrl_sched_in(void) {}
960 ++static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}
961 +
962 + #endif /* CONFIG_X86_CPU_RESCTRL */
963 +
964 +diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
965 +index 426792565d86..c5cf336e5077 100644
966 +--- a/arch/x86/kernel/cpu/centaur.c
967 ++++ b/arch/x86/kernel/cpu/centaur.c
968 +@@ -3,6 +3,7 @@
969 + #include <linux/sched.h>
970 + #include <linux/sched/clock.h>
971 +
972 ++#include <asm/cpu.h>
973 + #include <asm/cpufeature.h>
974 + #include <asm/e820/api.h>
975 + #include <asm/mtrr.h>
976 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
977 +index 8293ee514975..c669a5756bdf 100644
978 +--- a/arch/x86/kernel/cpu/common.c
979 ++++ b/arch/x86/kernel/cpu/common.c
980 +@@ -56,6 +56,7 @@
981 + #include <asm/intel-family.h>
982 + #include <asm/cpu_device_id.h>
983 + #include <asm/uv/uv.h>
984 ++#include <asm/resctrl_sched.h>
985 +
986 + #include "cpu.h"
987 +
988 +@@ -347,6 +348,9 @@ out:
989 + cr4_clear_bits(X86_CR4_UMIP);
990 + }
991 +
992 ++/* These bits should not change their value after CPU init is finished. */
993 ++static const unsigned long cr4_pinned_mask =
994 ++ X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
995 + static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
996 + static unsigned long cr4_pinned_bits __ro_after_init;
997 +
998 +@@ -371,20 +375,20 @@ EXPORT_SYMBOL(native_write_cr0);
999 +
1000 + void native_write_cr4(unsigned long val)
1001 + {
1002 +- unsigned long bits_missing = 0;
1003 ++ unsigned long bits_changed = 0;
1004 +
1005 + set_register:
1006 + asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
1007 +
1008 + if (static_branch_likely(&cr_pinning)) {
1009 +- if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
1010 +- bits_missing = ~val & cr4_pinned_bits;
1011 +- val |= bits_missing;
1012 ++ if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
1013 ++ bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
1014 ++ val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
1015 + goto set_register;
1016 + }
1017 +- /* Warn after we've set the missing bits. */
1018 +- WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
1019 +- bits_missing);
1020 ++ /* Warn after we've corrected the changed bits. */
1021 ++ WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
1022 ++ bits_changed);
1023 + }
1024 + }
1025 + EXPORT_SYMBOL(native_write_cr4);
1026 +@@ -396,7 +400,7 @@ void cr4_init(void)
1027 + if (boot_cpu_has(X86_FEATURE_PCID))
1028 + cr4 |= X86_CR4_PCIDE;
1029 + if (static_branch_likely(&cr_pinning))
1030 +- cr4 |= cr4_pinned_bits;
1031 ++ cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
1032 +
1033 + __write_cr4(cr4);
1034 +
1035 +@@ -411,10 +415,7 @@ void cr4_init(void)
1036 + */
1037 + static void __init setup_cr_pinning(void)
1038 + {
1039 +- unsigned long mask;
1040 +-
1041 +- mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP);
1042 +- cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
1043 ++ cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
1044 + static_key_enable(&cr_pinning.key);
1045 + }
1046 +
1047 +@@ -854,30 +855,6 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
1048 + }
1049 + }
1050 +
1051 +-static void init_cqm(struct cpuinfo_x86 *c)
1052 +-{
1053 +- if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
1054 +- c->x86_cache_max_rmid = -1;
1055 +- c->x86_cache_occ_scale = -1;
1056 +- return;
1057 +- }
1058 +-
1059 +- /* will be overridden if occupancy monitoring exists */
1060 +- c->x86_cache_max_rmid = cpuid_ebx(0xf);
1061 +-
1062 +- if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
1063 +- cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
1064 +- cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
1065 +- u32 eax, ebx, ecx, edx;
1066 +-
1067 +- /* QoS sub-leaf, EAX=0Fh, ECX=1 */
1068 +- cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
1069 +-
1070 +- c->x86_cache_max_rmid = ecx;
1071 +- c->x86_cache_occ_scale = ebx;
1072 +- }
1073 +-}
1074 +-
1075 + void get_cpu_cap(struct cpuinfo_x86 *c)
1076 + {
1077 + u32 eax, ebx, ecx, edx;
1078 +@@ -945,7 +922,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
1079 +
1080 + init_scattered_cpuid_features(c);
1081 + init_speculation_control(c);
1082 +- init_cqm(c);
1083 ++ resctrl_cpu_detect(c);
1084 +
1085 + /*
1086 + * Clear/Set all flags overridden by options, after probe.
1087 +diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
1088 +index fb538fccd24c..9d033693519a 100644
1089 +--- a/arch/x86/kernel/cpu/cpu.h
1090 ++++ b/arch/x86/kernel/cpu/cpu.h
1091 +@@ -81,8 +81,4 @@ extern void update_srbds_msr(void);
1092 +
1093 + extern u64 x86_read_arch_cap_msr(void);
1094 +
1095 +-#ifdef CONFIG_IA32_FEAT_CTL
1096 +-void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
1097 +-#endif
1098 +-
1099 + #endif /* ARCH_X86_CPU_H */
1100 +diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
1101 +index d8cc5223b7ce..c1551541c7a5 100644
1102 +--- a/arch/x86/kernel/cpu/resctrl/core.c
1103 ++++ b/arch/x86/kernel/cpu/resctrl/core.c
1104 +@@ -958,6 +958,35 @@ static __init void rdt_init_res_defs(void)
1105 +
1106 + static enum cpuhp_state rdt_online;
1107 +
1108 ++void resctrl_cpu_detect(struct cpuinfo_x86 *c)
1109 ++{
1110 ++ if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
1111 ++ c->x86_cache_max_rmid = -1;
1112 ++ c->x86_cache_occ_scale = -1;
1113 ++ c->x86_cache_mbm_width_offset = -1;
1114 ++ return;
1115 ++ }
1116 ++
1117 ++ /* will be overridden if occupancy monitoring exists */
1118 ++ c->x86_cache_max_rmid = cpuid_ebx(0xf);
1119 ++
1120 ++ if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
1121 ++ cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
1122 ++ cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
1123 ++ u32 eax, ebx, ecx, edx;
1124 ++
1125 ++ /* QoS sub-leaf, EAX=0Fh, ECX=1 */
1126 ++ cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
1127 ++
1128 ++ c->x86_cache_max_rmid = ecx;
1129 ++ c->x86_cache_occ_scale = ebx;
1130 ++ c->x86_cache_mbm_width_offset = eax & 0xff;
1131 ++
1132 ++ if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
1133 ++ c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
1134 ++ }
1135 ++}
1136 ++
1137 + static int __init resctrl_late_init(void)
1138 + {
1139 + struct rdt_resource *r;
1140 +diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
1141 +index 3dd13f3a8b23..096386475714 100644
1142 +--- a/arch/x86/kernel/cpu/resctrl/internal.h
1143 ++++ b/arch/x86/kernel/cpu/resctrl/internal.h
1144 +@@ -37,6 +37,7 @@
1145 + #define MBA_IS_LINEAR 0x4
1146 + #define MBA_MAX_MBPS U32_MAX
1147 + #define MAX_MBA_BW_AMD 0x800
1148 ++#define MBM_CNTR_WIDTH_OFFSET_AMD 20
1149 +
1150 + #define RMID_VAL_ERROR BIT_ULL(63)
1151 + #define RMID_VAL_UNAVAIL BIT_ULL(62)
1152 +diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
1153 +index 5a359d9fcc05..29a3878ab3c0 100644
1154 +--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
1155 ++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
1156 +@@ -1117,6 +1117,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
1157 + _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
1158 + if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
1159 + _r_cdp = NULL;
1160 ++ _d_cdp = NULL;
1161 + ret = -EINVAL;
1162 + }
1163 +
1164 +diff --git a/arch/x86/kernel/cpu/umwait.c b/arch/x86/kernel/cpu/umwait.c
1165 +index 300e3fd5ade3..ec8064c0ae03 100644
1166 +--- a/arch/x86/kernel/cpu/umwait.c
1167 ++++ b/arch/x86/kernel/cpu/umwait.c
1168 +@@ -18,12 +18,6 @@
1169 + */
1170 + static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
1171 +
1172 +-u32 get_umwait_control_msr(void)
1173 +-{
1174 +- return umwait_control_cached;
1175 +-}
1176 +-EXPORT_SYMBOL_GPL(get_umwait_control_msr);
1177 +-
1178 + /*
1179 + * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
1180 + * hardware or BIOS before kernel boot.
1181 +diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c
1182 +index df1358ba622b..05fa4ef63490 100644
1183 +--- a/arch/x86/kernel/cpu/zhaoxin.c
1184 ++++ b/arch/x86/kernel/cpu/zhaoxin.c
1185 +@@ -2,6 +2,7 @@
1186 + #include <linux/sched.h>
1187 + #include <linux/sched/clock.h>
1188 +
1189 ++#include <asm/cpu.h>
1190 + #include <asm/cpufeature.h>
1191 +
1192 + #include "cpu.h"
1193 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
1194 +index 9af25c97612a..8967e320a978 100644
1195 +--- a/arch/x86/kvm/lapic.c
1196 ++++ b/arch/x86/kvm/lapic.c
1197 +@@ -2512,6 +2512,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
1198 + }
1199 + memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
1200 +
1201 ++ apic->vcpu->kvm->arch.apic_map_dirty = true;
1202 + kvm_recalculate_apic_map(vcpu->kvm);
1203 + kvm_apic_set_version(vcpu);
1204 +
1205 +diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
1206 +index 8a3b1bce722a..d0e3b1b6845b 100644
1207 +--- a/arch/x86/kvm/mmu.h
1208 ++++ b/arch/x86/kvm/mmu.h
1209 +@@ -222,7 +222,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
1210 + void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
1211 + bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1212 + struct kvm_memory_slot *slot, u64 gfn);
1213 +-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
1214 ++int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
1215 +
1216 + int kvm_mmu_post_init_vm(struct kvm *kvm);
1217 + void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
1218 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
1219 +index 92d056954194..eb27ab47d607 100644
1220 +--- a/arch/x86/kvm/mmu/mmu.c
1221 ++++ b/arch/x86/kvm/mmu/mmu.c
1222 +@@ -1746,10 +1746,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1223 + * Emulate arch specific page modification logging for the
1224 + * nested hypervisor
1225 + */
1226 +-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
1227 ++int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
1228 + {
1229 + if (kvm_x86_ops.write_log_dirty)
1230 +- return kvm_x86_ops.write_log_dirty(vcpu);
1231 ++ return kvm_x86_ops.write_log_dirty(vcpu, l2_gpa);
1232 +
1233 + return 0;
1234 + }
1235 +diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
1236 +index 9bdf9b7d9a96..7098f843eabd 100644
1237 +--- a/arch/x86/kvm/mmu/paging_tmpl.h
1238 ++++ b/arch/x86/kvm/mmu/paging_tmpl.h
1239 +@@ -235,7 +235,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte)
1240 + static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
1241 + struct kvm_mmu *mmu,
1242 + struct guest_walker *walker,
1243 +- int write_fault)
1244 ++ gpa_t addr, int write_fault)
1245 + {
1246 + unsigned level, index;
1247 + pt_element_t pte, orig_pte;
1248 +@@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
1249 + !(pte & PT_GUEST_DIRTY_MASK)) {
1250 + trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
1251 + #if PTTYPE == PTTYPE_EPT
1252 +- if (kvm_arch_write_log_dirty(vcpu))
1253 ++ if (kvm_arch_write_log_dirty(vcpu, addr))
1254 + return -EINVAL;
1255 + #endif
1256 + pte |= PT_GUEST_DIRTY_MASK;
1257 +@@ -457,7 +457,8 @@ retry_walk:
1258 + (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
1259 +
1260 + if (unlikely(!accessed_dirty)) {
1261 +- ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
1262 ++ ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
1263 ++ addr, write_fault);
1264 + if (unlikely(ret < 0))
1265 + goto error;
1266 + else if (ret)
1267 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
1268 +index d7aa0dfab8bb..390ec34e4b4f 100644
1269 +--- a/arch/x86/kvm/vmx/vmx.c
1270 ++++ b/arch/x86/kvm/vmx/vmx.c
1271 +@@ -6467,23 +6467,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
1272 + msrs[i].host, false);
1273 + }
1274 +
1275 +-static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
1276 +-{
1277 +- u32 host_umwait_control;
1278 +-
1279 +- if (!vmx_has_waitpkg(vmx))
1280 +- return;
1281 +-
1282 +- host_umwait_control = get_umwait_control_msr();
1283 +-
1284 +- if (vmx->msr_ia32_umwait_control != host_umwait_control)
1285 +- add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
1286 +- vmx->msr_ia32_umwait_control,
1287 +- host_umwait_control, false);
1288 +- else
1289 +- clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
1290 +-}
1291 +-
1292 + static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
1293 + {
1294 + struct vcpu_vmx *vmx = to_vmx(vcpu);
1295 +@@ -6575,9 +6558,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
1296 +
1297 + pt_guest_enter(vmx);
1298 +
1299 +- if (vcpu_to_pmu(vcpu)->version)
1300 +- atomic_switch_perf_msrs(vmx);
1301 +- atomic_switch_umwait_control_msr(vmx);
1302 ++ atomic_switch_perf_msrs(vmx);
1303 +
1304 + if (enable_preemption_timer)
1305 + vmx_update_hv_timer(vcpu);
1306 +@@ -7334,11 +7315,11 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
1307 + kvm_flush_pml_buffers(kvm);
1308 + }
1309 +
1310 +-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
1311 ++static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
1312 + {
1313 + struct vmcs12 *vmcs12;
1314 + struct vcpu_vmx *vmx = to_vmx(vcpu);
1315 +- gpa_t gpa, dst;
1316 ++ gpa_t dst;
1317 +
1318 + if (is_guest_mode(vcpu)) {
1319 + WARN_ON_ONCE(vmx->nested.pml_full);
1320 +@@ -7357,7 +7338,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
1321 + return 1;
1322 + }
1323 +
1324 +- gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
1325 ++ gpa &= ~0xFFFull;
1326 + dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
1327 +
1328 + if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
1329 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1330 +index 97c5a92146f9..5f08eeac16c8 100644
1331 +--- a/arch/x86/kvm/x86.c
1332 ++++ b/arch/x86/kvm/x86.c
1333 +@@ -2784,7 +2784,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1334 + return kvm_mtrr_set_msr(vcpu, msr, data);
1335 + case MSR_IA32_APICBASE:
1336 + return kvm_set_apic_base(vcpu, msr_info);
1337 +- case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1338 ++ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
1339 + return kvm_x2apic_msr_write(vcpu, msr, data);
1340 + case MSR_IA32_TSCDEADLINE:
1341 + kvm_set_lapic_tscdeadline_msr(vcpu, data);
1342 +@@ -3112,7 +3112,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1343 + case MSR_IA32_APICBASE:
1344 + msr_info->data = kvm_get_apic_base(vcpu);
1345 + break;
1346 +- case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1347 ++ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
1348 + return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
1349 + case MSR_IA32_TSCDEADLINE:
1350 + msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
1351 +diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
1352 +index fff28c6f73a2..b0dfac3d3df7 100644
1353 +--- a/arch/x86/lib/usercopy_64.c
1354 ++++ b/arch/x86/lib/usercopy_64.c
1355 +@@ -24,6 +24,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
1356 + asm volatile(
1357 + " testq %[size8],%[size8]\n"
1358 + " jz 4f\n"
1359 ++ " .align 16\n"
1360 + "0: movq $0,(%[dst])\n"
1361 + " addq $8,%[dst]\n"
1362 + " decl %%ecx ; jnz 0b\n"
1363 +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
1364 +index aaff9ed7ff45..b0d3c5ca6d80 100644
1365 +--- a/arch/x86/power/cpu.c
1366 ++++ b/arch/x86/power/cpu.c
1367 +@@ -193,6 +193,8 @@ static void fix_processor_context(void)
1368 + */
1369 + static void notrace __restore_processor_state(struct saved_context *ctxt)
1370 + {
1371 ++ struct cpuinfo_x86 *c;
1372 ++
1373 + if (ctxt->misc_enable_saved)
1374 + wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
1375 + /*
1376 +@@ -263,6 +265,10 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
1377 + mtrr_bp_restore();
1378 + perf_restore_debug_store();
1379 + msr_restore_context(ctxt);
1380 ++
1381 ++ c = &cpu_data(smp_processor_id());
1382 ++ if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
1383 ++ init_ia32_feat_ctl(c);
1384 + }
1385 +
1386 + /* Needed by apm.c */
1387 +diff --git a/block/bio-integrity.c b/block/bio-integrity.c
1388 +index bf62c25cde8f..ae07dd78e951 100644
1389 +--- a/block/bio-integrity.c
1390 ++++ b/block/bio-integrity.c
1391 +@@ -278,7 +278,6 @@ bool bio_integrity_prep(struct bio *bio)
1392 +
1393 + if (ret == 0) {
1394 + printk(KERN_ERR "could not attach integrity payload\n");
1395 +- kfree(buf);
1396 + status = BLK_STS_RESOURCE;
1397 + goto err_end_io;
1398 + }
1399 +diff --git a/block/blk-mq.c b/block/blk-mq.c
1400 +index 98a702761e2c..8f580e66691b 100644
1401 +--- a/block/blk-mq.c
1402 ++++ b/block/blk-mq.c
1403 +@@ -3328,7 +3328,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
1404 +
1405 + if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
1406 + nr_hw_queues = nr_cpu_ids;
1407 +- if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
1408 ++ if (nr_hw_queues < 1)
1409 ++ return;
1410 ++ if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
1411 + return;
1412 +
1413 + list_for_each_entry(q, &set->tag_list, tag_set_list)
1414 +diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
1415 +index ece8c1a921cc..88c8af455ea3 100644
1416 +--- a/drivers/acpi/acpi_configfs.c
1417 ++++ b/drivers/acpi/acpi_configfs.c
1418 +@@ -11,6 +11,7 @@
1419 + #include <linux/module.h>
1420 + #include <linux/configfs.h>
1421 + #include <linux/acpi.h>
1422 ++#include <linux/security.h>
1423 +
1424 + #include "acpica/accommon.h"
1425 + #include "acpica/actables.h"
1426 +@@ -28,7 +29,10 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg,
1427 + {
1428 + const struct acpi_table_header *header = data;
1429 + struct acpi_table *table;
1430 +- int ret;
1431 ++ int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
1432 ++
1433 ++ if (ret)
1434 ++ return ret;
1435 +
1436 + table = container_of(cfg, struct acpi_table, cfg);
1437 +
1438 +diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
1439 +index 3a89909b50a6..76c668c05fa0 100644
1440 +--- a/drivers/acpi/sysfs.c
1441 ++++ b/drivers/acpi/sysfs.c
1442 +@@ -938,13 +938,13 @@ static void __exit interrupt_stats_exit(void)
1443 + }
1444 +
1445 + static ssize_t
1446 +-acpi_show_profile(struct device *dev, struct device_attribute *attr,
1447 ++acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
1448 + char *buf)
1449 + {
1450 + return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
1451 + }
1452 +
1453 +-static const struct device_attribute pm_profile_attr =
1454 ++static const struct kobj_attribute pm_profile_attr =
1455 + __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
1456 +
1457 + static ssize_t hotplug_enabled_show(struct kobject *kobj,
1458 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
1459 +index e47c8a4c83db..f50c5f182bb5 100644
1460 +--- a/drivers/android/binder.c
1461 ++++ b/drivers/android/binder.c
1462 +@@ -4686,8 +4686,15 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
1463 +
1464 + static void binder_free_proc(struct binder_proc *proc)
1465 + {
1466 ++ struct binder_device *device;
1467 ++
1468 + BUG_ON(!list_empty(&proc->todo));
1469 + BUG_ON(!list_empty(&proc->delivered_death));
1470 ++ device = container_of(proc->context, struct binder_device, context);
1471 ++ if (refcount_dec_and_test(&device->ref)) {
1472 ++ kfree(proc->context->name);
1473 ++ kfree(device);
1474 ++ }
1475 + binder_alloc_deferred_release(&proc->alloc);
1476 + put_task_struct(proc->tsk);
1477 + binder_stats_deleted(BINDER_STAT_PROC);
1478 +@@ -5406,7 +5413,6 @@ static int binder_node_release(struct binder_node *node, int refs)
1479 + static void binder_deferred_release(struct binder_proc *proc)
1480 + {
1481 + struct binder_context *context = proc->context;
1482 +- struct binder_device *device;
1483 + struct rb_node *n;
1484 + int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
1485 +
1486 +@@ -5423,12 +5429,6 @@ static void binder_deferred_release(struct binder_proc *proc)
1487 + context->binder_context_mgr_node = NULL;
1488 + }
1489 + mutex_unlock(&context->context_mgr_node_lock);
1490 +- device = container_of(proc->context, struct binder_device, context);
1491 +- if (refcount_dec_and_test(&device->ref)) {
1492 +- kfree(context->name);
1493 +- kfree(device);
1494 +- }
1495 +- proc->context = NULL;
1496 + binder_inner_proc_lock(proc);
1497 + /*
1498 + * Make sure proc stays alive after we
1499 +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
1500 +index 36e588d88b95..c10deb87015b 100644
1501 +--- a/drivers/ata/libata-scsi.c
1502 ++++ b/drivers/ata/libata-scsi.c
1503 +@@ -3692,12 +3692,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
1504 + {
1505 + struct scsi_cmnd *scmd = qc->scsicmd;
1506 + const u8 *cdb = scmd->cmnd;
1507 +- const u8 *p;
1508 + u8 pg, spg;
1509 + unsigned six_byte, pg_len, hdr_len, bd_len;
1510 + int len;
1511 + u16 fp = (u16)-1;
1512 + u8 bp = 0xff;
1513 ++ u8 buffer[64];
1514 ++ const u8 *p = buffer;
1515 +
1516 + VPRINTK("ENTER\n");
1517 +
1518 +@@ -3731,12 +3732,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
1519 + if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
1520 + goto invalid_param_len;
1521 +
1522 +- p = page_address(sg_page(scsi_sglist(scmd)));
1523 +-
1524 + /* Move past header and block descriptors. */
1525 + if (len < hdr_len)
1526 + goto invalid_param_len;
1527 +
1528 ++ if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd),
1529 ++ buffer, sizeof(buffer)))
1530 ++ goto invalid_param_len;
1531 ++
1532 + if (six_byte)
1533 + bd_len = p[3];
1534 + else
1535 +diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
1536 +index 980aacdbcf3b..141ac600b64c 100644
1537 +--- a/drivers/ata/sata_rcar.c
1538 ++++ b/drivers/ata/sata_rcar.c
1539 +@@ -907,7 +907,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
1540 + pm_runtime_enable(dev);
1541 + ret = pm_runtime_get_sync(dev);
1542 + if (ret < 0)
1543 +- goto err_pm_disable;
1544 ++ goto err_pm_put;
1545 +
1546 + host = ata_host_alloc(dev, 1);
1547 + if (!host) {
1548 +@@ -937,7 +937,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
1549 +
1550 + err_pm_put:
1551 + pm_runtime_put(dev);
1552 +-err_pm_disable:
1553 + pm_runtime_disable(dev);
1554 + return ret;
1555 + }
1556 +@@ -991,8 +990,10 @@ static int sata_rcar_resume(struct device *dev)
1557 + int ret;
1558 +
1559 + ret = pm_runtime_get_sync(dev);
1560 +- if (ret < 0)
1561 ++ if (ret < 0) {
1562 ++ pm_runtime_put(dev);
1563 + return ret;
1564 ++ }
1565 +
1566 + if (priv->type == RCAR_GEN3_SATA) {
1567 + sata_rcar_init_module(priv);
1568 +@@ -1017,8 +1018,10 @@ static int sata_rcar_restore(struct device *dev)
1569 + int ret;
1570 +
1571 + ret = pm_runtime_get_sync(dev);
1572 +- if (ret < 0)
1573 ++ if (ret < 0) {
1574 ++ pm_runtime_put(dev);
1575 + return ret;
1576 ++ }
1577 +
1578 + sata_rcar_setup_port(host);
1579 +
1580 +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
1581 +index 59f911e57719..508bbd6ea439 100644
1582 +--- a/drivers/base/regmap/regmap.c
1583 ++++ b/drivers/base/regmap/regmap.c
1584 +@@ -1356,6 +1356,7 @@ void regmap_exit(struct regmap *map)
1585 + if (map->hwlock)
1586 + hwspin_lock_free(map->hwlock);
1587 + kfree_const(map->name);
1588 ++ kfree(map->patch);
1589 + kfree(map);
1590 + }
1591 + EXPORT_SYMBOL_GPL(regmap_exit);
1592 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
1593 +index da693e6a834e..418bb4621255 100644
1594 +--- a/drivers/block/loop.c
1595 ++++ b/drivers/block/loop.c
1596 +@@ -1289,7 +1289,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1597 + if (lo->lo_offset != info->lo_offset ||
1598 + lo->lo_sizelimit != info->lo_sizelimit) {
1599 + sync_blockdev(lo->lo_device);
1600 +- kill_bdev(lo->lo_device);
1601 ++ invalidate_bdev(lo->lo_device);
1602 + }
1603 +
1604 + /* I/O need to be drained during transfer transition */
1605 +@@ -1320,7 +1320,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1606 +
1607 + if (lo->lo_offset != info->lo_offset ||
1608 + lo->lo_sizelimit != info->lo_sizelimit) {
1609 +- /* kill_bdev should have truncated all the pages */
1610 ++ /* invalidate_bdev should have truncated all the pages */
1611 + if (lo->lo_device->bd_inode->i_mapping->nrpages) {
1612 + err = -EAGAIN;
1613 + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
1614 +@@ -1565,11 +1565,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
1615 + return 0;
1616 +
1617 + sync_blockdev(lo->lo_device);
1618 +- kill_bdev(lo->lo_device);
1619 ++ invalidate_bdev(lo->lo_device);
1620 +
1621 + blk_mq_freeze_queue(lo->lo_queue);
1622 +
1623 +- /* kill_bdev should have truncated all the pages */
1624 ++ /* invalidate_bdev should have truncated all the pages */
1625 + if (lo->lo_device->bd_inode->i_mapping->nrpages) {
1626 + err = -EAGAIN;
1627 + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
1628 +diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
1629 +index e5f5f48d69d2..db9541f38505 100644
1630 +--- a/drivers/bus/ti-sysc.c
1631 ++++ b/drivers/bus/ti-sysc.c
1632 +@@ -221,6 +221,35 @@ static u32 sysc_read_sysstatus(struct sysc *ddata)
1633 + return sysc_read(ddata, offset);
1634 + }
1635 +
1636 ++/* Poll on reset status */
1637 ++static int sysc_wait_softreset(struct sysc *ddata)
1638 ++{
1639 ++ u32 sysc_mask, syss_done, rstval;
1640 ++ int syss_offset, error = 0;
1641 ++
1642 ++ syss_offset = ddata->offsets[SYSC_SYSSTATUS];
1643 ++ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
1644 ++
1645 ++ if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
1646 ++ syss_done = 0;
1647 ++ else
1648 ++ syss_done = ddata->cfg.syss_mask;
1649 ++
1650 ++ if (syss_offset >= 0) {
1651 ++ error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
1652 ++ (rstval & ddata->cfg.syss_mask) ==
1653 ++ syss_done,
1654 ++ 100, MAX_MODULE_SOFTRESET_WAIT);
1655 ++
1656 ++ } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
1657 ++ error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
1658 ++ !(rstval & sysc_mask),
1659 ++ 100, MAX_MODULE_SOFTRESET_WAIT);
1660 ++ }
1661 ++
1662 ++ return error;
1663 ++}
1664 ++
1665 + static int sysc_add_named_clock_from_child(struct sysc *ddata,
1666 + const char *name,
1667 + const char *optfck_name)
1668 +@@ -925,18 +954,47 @@ static int sysc_enable_module(struct device *dev)
1669 + struct sysc *ddata;
1670 + const struct sysc_regbits *regbits;
1671 + u32 reg, idlemodes, best_mode;
1672 ++ int error;
1673 +
1674 + ddata = dev_get_drvdata(dev);
1675 ++
1676 ++ /*
1677 ++ * Some modules like DSS reset automatically on idle. Enable optional
1678 ++ * reset clocks and wait for OCP softreset to complete.
1679 ++ */
1680 ++ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
1681 ++ error = sysc_enable_opt_clocks(ddata);
1682 ++ if (error) {
1683 ++ dev_err(ddata->dev,
1684 ++ "Optional clocks failed for enable: %i\n",
1685 ++ error);
1686 ++ return error;
1687 ++ }
1688 ++ }
1689 ++ error = sysc_wait_softreset(ddata);
1690 ++ if (error)
1691 ++ dev_warn(ddata->dev, "OCP softreset timed out\n");
1692 ++ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
1693 ++ sysc_disable_opt_clocks(ddata);
1694 ++
1695 ++ /*
1696 ++ * Some subsystem private interconnects, like DSS top level module,
1697 ++ * need only the automatic OCP softreset handling with no sysconfig
1698 ++ * register bits to configure.
1699 ++ */
1700 + if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
1701 + return 0;
1702 +
1703 + regbits = ddata->cap->regbits;
1704 + reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1705 +
1706 +- /* Set CLOCKACTIVITY, we only use it for ick */
1707 ++ /*
1708 ++ * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
1709 ++ * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
1710 ++ * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
1711 ++ */
1712 + if (regbits->clkact_shift >= 0 &&
1713 +- (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT ||
1714 +- ddata->cfg.sysc_val & BIT(regbits->clkact_shift)))
1715 ++ (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
1716 + reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
1717 +
1718 + /* Set SIDLE mode */
1719 +@@ -991,6 +1049,9 @@ set_autoidle:
1720 + sysc_write_sysconfig(ddata, reg);
1721 + }
1722 +
1723 ++ /* Flush posted write */
1724 ++ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1725 ++
1726 + if (ddata->module_enable_quirk)
1727 + ddata->module_enable_quirk(ddata);
1728 +
1729 +@@ -1071,6 +1132,9 @@ set_sidle:
1730 + reg |= 1 << regbits->autoidle_shift;
1731 + sysc_write_sysconfig(ddata, reg);
1732 +
1733 ++ /* Flush posted write */
1734 ++ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1735 ++
1736 + return 0;
1737 + }
1738 +
1739 +@@ -1488,7 +1552,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
1740 + bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
1741 + const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
1742 + int manager_count;
1743 +- bool framedonetv_irq;
1744 ++ bool framedonetv_irq = true;
1745 + u32 val, irq_mask = 0;
1746 +
1747 + switch (sysc_soc->soc) {
1748 +@@ -1505,6 +1569,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
1749 + break;
1750 + case SOC_AM4:
1751 + manager_count = 1;
1752 ++ framedonetv_irq = false;
1753 + break;
1754 + case SOC_UNKNOWN:
1755 + default:
1756 +@@ -1822,11 +1887,10 @@ static int sysc_legacy_init(struct sysc *ddata)
1757 + */
1758 + static int sysc_reset(struct sysc *ddata)
1759 + {
1760 +- int sysc_offset, syss_offset, sysc_val, rstval, error = 0;
1761 +- u32 sysc_mask, syss_done;
1762 ++ int sysc_offset, sysc_val, error;
1763 ++ u32 sysc_mask;
1764 +
1765 + sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
1766 +- syss_offset = ddata->offsets[SYSC_SYSSTATUS];
1767 +
1768 + if (ddata->legacy_mode ||
1769 + ddata->cap->regbits->srst_shift < 0 ||
1770 +@@ -1835,11 +1899,6 @@ static int sysc_reset(struct sysc *ddata)
1771 +
1772 + sysc_mask = BIT(ddata->cap->regbits->srst_shift);
1773 +
1774 +- if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
1775 +- syss_done = 0;
1776 +- else
1777 +- syss_done = ddata->cfg.syss_mask;
1778 +-
1779 + if (ddata->pre_reset_quirk)
1780 + ddata->pre_reset_quirk(ddata);
1781 +
1782 +@@ -1856,18 +1915,9 @@ static int sysc_reset(struct sysc *ddata)
1783 + if (ddata->post_reset_quirk)
1784 + ddata->post_reset_quirk(ddata);
1785 +
1786 +- /* Poll on reset status */
1787 +- if (syss_offset >= 0) {
1788 +- error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
1789 +- (rstval & ddata->cfg.syss_mask) ==
1790 +- syss_done,
1791 +- 100, MAX_MODULE_SOFTRESET_WAIT);
1792 +-
1793 +- } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
1794 +- error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
1795 +- !(rstval & sysc_mask),
1796 +- 100, MAX_MODULE_SOFTRESET_WAIT);
1797 +- }
1798 ++ error = sysc_wait_softreset(ddata);
1799 ++ if (error)
1800 ++ dev_warn(ddata->dev, "OCP softreset timed out\n");
1801 +
1802 + if (ddata->reset_done_quirk)
1803 + ddata->reset_done_quirk(ddata);
1804 +diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
1805 +index e2330e757f1f..001617033d6a 100644
1806 +--- a/drivers/char/hw_random/ks-sa-rng.c
1807 ++++ b/drivers/char/hw_random/ks-sa-rng.c
1808 +@@ -244,6 +244,7 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
1809 + ret = pm_runtime_get_sync(dev);
1810 + if (ret < 0) {
1811 + dev_err(dev, "Failed to enable SA power-domain\n");
1812 ++ pm_runtime_put_noidle(dev);
1813 + pm_runtime_disable(dev);
1814 + return ret;
1815 + }
1816 +diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
1817 +index 6282ee2f361c..a8901f90a61a 100644
1818 +--- a/drivers/clk/sifive/fu540-prci.c
1819 ++++ b/drivers/clk/sifive/fu540-prci.c
1820 +@@ -586,7 +586,10 @@ static int sifive_fu540_prci_probe(struct platform_device *pdev)
1821 + struct __prci_data *pd;
1822 + int r;
1823 +
1824 +- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
1825 ++ pd = devm_kzalloc(dev,
1826 ++ struct_size(pd, hw_clks.hws,
1827 ++ ARRAY_SIZE(__prci_init_clocks)),
1828 ++ GFP_KERNEL);
1829 + if (!pd)
1830 + return -ENOMEM;
1831 +
1832 +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
1833 +index 4e9994de0b90..0d89c3e473bd 100644
1834 +--- a/drivers/edac/amd64_edac.c
1835 ++++ b/drivers/edac/amd64_edac.c
1836 +@@ -272,6 +272,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
1837 +
1838 + if (pvt->model == 0x60)
1839 + amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
1840 ++ else
1841 ++ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
1842 + } else {
1843 + amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
1844 + }
1845 +diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
1846 +index e3d692696583..d5915272141f 100644
1847 +--- a/drivers/firmware/efi/esrt.c
1848 ++++ b/drivers/firmware/efi/esrt.c
1849 +@@ -181,7 +181,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
1850 + rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
1851 + "entry%d", entry_num);
1852 + if (rc) {
1853 +- kfree(entry);
1854 ++ kobject_put(&entry->kobj);
1855 + return rc;
1856 + }
1857 + }
1858 +diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
1859 +index ea66b1f16a79..f1c4faf58c76 100644
1860 +--- a/drivers/firmware/efi/libstub/file.c
1861 ++++ b/drivers/firmware/efi/libstub/file.c
1862 +@@ -104,12 +104,20 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
1863 + if (!found)
1864 + return 0;
1865 +
1866 ++ /* Skip any leading slashes */
1867 ++ while (cmdline[i] == L'/' || cmdline[i] == L'\\')
1868 ++ i++;
1869 ++
1870 + while (--result_len > 0 && i < cmdline_len) {
1871 +- if (cmdline[i] == L'\0' ||
1872 +- cmdline[i] == L'\n' ||
1873 +- cmdline[i] == L' ')
1874 ++ efi_char16_t c = cmdline[i++];
1875 ++
1876 ++ if (c == L'\0' || c == L'\n' || c == L' ')
1877 + break;
1878 +- *result++ = cmdline[i++];
1879 ++ else if (c == L'/')
1880 ++ /* Replace UNIX dir separators with EFI standard ones */
1881 ++ *result++ = L'\\';
1882 ++ else
1883 ++ *result++ = c;
1884 + }
1885 + *result = L'\0';
1886 + return i;
1887 +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
1888 +index d2840c2f6286..1dc57079933c 100644
1889 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
1890 ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
1891 +@@ -1261,8 +1261,12 @@ static int sdma_v5_0_sw_fini(void *handle)
1892 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1893 + int i;
1894 +
1895 +- for (i = 0; i < adev->sdma.num_instances; i++)
1896 ++ for (i = 0; i < adev->sdma.num_instances; i++) {
1897 ++ if (adev->sdma.instance[i].fw != NULL)
1898 ++ release_firmware(adev->sdma.instance[i].fw);
1899 ++
1900 + amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1901 ++ }
1902 +
1903 + return 0;
1904 + }
1905 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1906 +index fe0cd49d4ea7..d8c74aa4e565 100644
1907 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1908 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1909 +@@ -396,6 +396,7 @@ struct kfd_process *kfd_create_process(struct file *filep)
1910 + (int)process->lead_thread->pid);
1911 + if (ret) {
1912 + pr_warn("Creating procfs pid directory failed");
1913 ++ kobject_put(process->kobj);
1914 + goto out;
1915 + }
1916 +
1917 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
1918 +index 0461fecd68db..11491ae1effc 100644
1919 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
1920 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
1921 +@@ -1017,7 +1017,6 @@ static const struct {
1922 + {"link_settings", &dp_link_settings_debugfs_fops},
1923 + {"phy_settings", &dp_phy_settings_debugfs_fop},
1924 + {"test_pattern", &dp_phy_test_pattern_fops},
1925 +- {"output_bpc", &output_bpc_fops},
1926 + {"vrr_range", &vrr_range_fops},
1927 + {"sdp_message", &sdp_message_fops},
1928 + {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
1929 +@@ -1090,6 +1089,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
1930 + debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
1931 + &force_yuv420_output_fops);
1932 +
1933 ++ debugfs_create_file("output_bpc", 0644, dir, connector,
1934 ++ &output_bpc_fops);
1935 ++
1936 + connector->debugfs_dpcd_address = 0;
1937 + connector->debugfs_dpcd_size = 0;
1938 +
1939 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
1940 +index dcf84a61de37..949d10ef8304 100644
1941 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
1942 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
1943 +@@ -510,8 +510,10 @@ static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin
1944 +
1945 + srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
1946 +
1947 +- if (!srm)
1948 +- return -EINVAL;
1949 ++ if (!srm) {
1950 ++ ret = -EINVAL;
1951 ++ goto ret;
1952 ++ }
1953 +
1954 + if (pos >= srm_size)
1955 + ret = 0;
1956 +diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
1957 +index e89694eb90b4..700f0039df7b 100644
1958 +--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
1959 ++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
1960 +@@ -1777,7 +1777,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
1961 +
1962 + kfree(rgb_regamma);
1963 + rgb_regamma_alloc_fail:
1964 +- kvfree(rgb_user);
1965 ++ kfree(rgb_user);
1966 + rgb_user_alloc_fail:
1967 + return ret;
1968 + }
1969 +diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
1970 +index a9771de4d17e..c7be39a00d43 100644
1971 +--- a/drivers/gpu/drm/drm_fb_helper.c
1972 ++++ b/drivers/gpu/drm/drm_fb_helper.c
1973 +@@ -227,18 +227,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
1974 + }
1975 + EXPORT_SYMBOL(drm_fb_helper_debug_leave);
1976 +
1977 +-/**
1978 +- * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
1979 +- * @fb_helper: driver-allocated fbdev helper, can be NULL
1980 +- *
1981 +- * This should be called from driver's drm &drm_driver.lastclose callback
1982 +- * when implementing an fbcon on top of kms using this helper. This ensures that
1983 +- * the user isn't greeted with a black screen when e.g. X dies.
1984 +- *
1985 +- * RETURNS:
1986 +- * Zero if everything went ok, negative error code otherwise.
1987 +- */
1988 +-int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
1989 ++static int
1990 ++__drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
1991 ++ bool force)
1992 + {
1993 + bool do_delayed;
1994 + int ret;
1995 +@@ -250,7 +241,16 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
1996 + return 0;
1997 +
1998 + mutex_lock(&fb_helper->lock);
1999 +- ret = drm_client_modeset_commit(&fb_helper->client);
2000 ++ if (force) {
2001 ++ /*
2002 ++ * Yes this is the _locked version which expects the master lock
2003 ++ * to be held. But for forced restores we're intentionally
2004 ++ * racing here, see drm_fb_helper_set_par().
2005 ++ */
2006 ++ ret = drm_client_modeset_commit_locked(&fb_helper->client);
2007 ++ } else {
2008 ++ ret = drm_client_modeset_commit(&fb_helper->client);
2009 ++ }
2010 +
2011 + do_delayed = fb_helper->delayed_hotplug;
2012 + if (do_delayed)
2013 +@@ -262,6 +262,22 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
2014 +
2015 + return ret;
2016 + }
2017 ++
2018 ++/**
2019 ++ * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
2020 ++ * @fb_helper: driver-allocated fbdev helper, can be NULL
2021 ++ *
2022 ++ * This should be called from driver's drm &drm_driver.lastclose callback
2023 ++ * when implementing an fbcon on top of kms using this helper. This ensures that
2024 ++ * the user isn't greeted with a black screen when e.g. X dies.
2025 ++ *
2026 ++ * RETURNS:
2027 ++ * Zero if everything went ok, negative error code otherwise.
2028 ++ */
2029 ++int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
2030 ++{
2031 ++ return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false);
2032 ++}
2033 + EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
2034 +
2035 + #ifdef CONFIG_MAGIC_SYSRQ
2036 +@@ -1310,6 +1326,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
2037 + {
2038 + struct drm_fb_helper *fb_helper = info->par;
2039 + struct fb_var_screeninfo *var = &info->var;
2040 ++ bool force;
2041 +
2042 + if (oops_in_progress)
2043 + return -EBUSY;
2044 +@@ -1319,7 +1336,25 @@ int drm_fb_helper_set_par(struct fb_info *info)
2045 + return -EINVAL;
2046 + }
2047 +
2048 +- drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
2049 ++ /*
2050 ++ * Normally we want to make sure that a kms master takes precedence over
2051 ++ * fbdev, to avoid fbdev flickering and occasionally stealing the
2052 ++ * display status. But Xorg first sets the vt back to text mode using
2053 ++ * the KDSET IOCTL with KD_TEXT, and only after that drops the master
2054 ++ * status when exiting.
2055 ++ *
2056 ++ * In the past this was caught by drm_fb_helper_lastclose(), but on
2057 ++ * modern systems where logind always keeps a drm fd open to orchestrate
2058 ++ * the vt switching, this doesn't work.
2059 ++ *
2060 ++ * To not break the userspace ABI we have this special case here, which
2061 ++ * is only used for the above case. Everything else uses the normal
2062 ++ * commit function, which ensures that we never steal the display from
2063 ++ * an active drm master.
2064 ++ */
2065 ++ force = var->activate & FB_ACTIVATE_KD_TEXT;
2066 ++
2067 ++ __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
2068 +
2069 + return 0;
2070 + }
2071 +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
2072 +index 3ad828eaefe1..db91b3c031a1 100644
2073 +--- a/drivers/gpu/drm/panel/panel-simple.c
2074 ++++ b/drivers/gpu/drm/panel/panel-simple.c
2075 +@@ -2297,6 +2297,7 @@ static const struct panel_desc logicpd_type_28 = {
2076 + .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
2077 + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
2078 + DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
2079 ++ .connector_type = DRM_MODE_CONNECTOR_DPI,
2080 + };
2081 +
2082 + static const struct panel_desc mitsubishi_aa070mc01 = {
2083 +@@ -2465,6 +2466,7 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
2084 + .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
2085 + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
2086 + DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
2087 ++ .connector_type = DRM_MODE_CONNECTOR_DPI,
2088 + };
2089 +
2090 + static const struct display_timing nlt_nl192108ac18_02d_timing = {
2091 +diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
2092 +index b57c37ddd164..c7fbb7932f37 100644
2093 +--- a/drivers/gpu/drm/radeon/ni_dpm.c
2094 ++++ b/drivers/gpu/drm/radeon/ni_dpm.c
2095 +@@ -2127,7 +2127,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev)
2096 + if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
2097 + ret = -EINVAL;
2098 +
2099 +- if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
2100 ++ if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
2101 + ret = -EINVAL;
2102 +
2103 + if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
2104 +diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
2105 +index 0919f1f159a4..f65d1489dc50 100644
2106 +--- a/drivers/gpu/drm/rcar-du/Kconfig
2107 ++++ b/drivers/gpu/drm/rcar-du/Kconfig
2108 +@@ -31,6 +31,7 @@ config DRM_RCAR_DW_HDMI
2109 + config DRM_RCAR_LVDS
2110 + tristate "R-Car DU LVDS Encoder Support"
2111 + depends on DRM && DRM_BRIDGE && OF
2112 ++ select DRM_KMS_HELPER
2113 + select DRM_PANEL
2114 + select OF_FLATTREE
2115 + select OF_OVERLAY
2116 +diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c
2117 +index e0c256922d4f..977d6f524649 100644
2118 +--- a/drivers/i2c/busses/i2c-fsi.c
2119 ++++ b/drivers/i2c/busses/i2c-fsi.c
2120 +@@ -98,7 +98,7 @@
2121 + #define I2C_STAT_DAT_REQ BIT(25)
2122 + #define I2C_STAT_CMD_COMP BIT(24)
2123 + #define I2C_STAT_STOP_ERR BIT(23)
2124 +-#define I2C_STAT_MAX_PORT GENMASK(19, 16)
2125 ++#define I2C_STAT_MAX_PORT GENMASK(22, 16)
2126 + #define I2C_STAT_ANY_INT BIT(15)
2127 + #define I2C_STAT_SCL_IN BIT(11)
2128 + #define I2C_STAT_SDA_IN BIT(10)
2129 +diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
2130 +index 4c4d17ddc96b..7c88611c732c 100644
2131 +--- a/drivers/i2c/busses/i2c-tegra.c
2132 ++++ b/drivers/i2c/busses/i2c-tegra.c
2133 +@@ -1769,14 +1769,9 @@ static int tegra_i2c_remove(struct platform_device *pdev)
2134 + static int __maybe_unused tegra_i2c_suspend(struct device *dev)
2135 + {
2136 + struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
2137 +- int err;
2138 +
2139 + i2c_mark_adapter_suspended(&i2c_dev->adapter);
2140 +
2141 +- err = pm_runtime_force_suspend(dev);
2142 +- if (err < 0)
2143 +- return err;
2144 +-
2145 + return 0;
2146 + }
2147 +
2148 +@@ -1797,10 +1792,6 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev)
2149 + if (err)
2150 + return err;
2151 +
2152 +- err = pm_runtime_force_resume(dev);
2153 +- if (err < 0)
2154 +- return err;
2155 +-
2156 + i2c_mark_adapter_resumed(&i2c_dev->adapter);
2157 +
2158 + return 0;
2159 +diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
2160 +index b34d2ff06931..bbb70a8a411e 100644
2161 +--- a/drivers/i2c/i2c-core-smbus.c
2162 ++++ b/drivers/i2c/i2c-core-smbus.c
2163 +@@ -495,6 +495,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
2164 + break;
2165 + case I2C_SMBUS_BLOCK_DATA:
2166 + case I2C_SMBUS_BLOCK_PROC_CALL:
2167 ++ if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) {
2168 ++ dev_err(&adapter->dev,
2169 ++ "Invalid block size returned: %d\n",
2170 ++ msg[1].buf[0]);
2171 ++ status = -EPROTO;
2172 ++ goto cleanup;
2173 ++ }
2174 + for (i = 0; i < msg[1].buf[0] + 1; i++)
2175 + data->block[i] = msg[1].buf[i];
2176 + break;
2177 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
2178 +index 26e6f7df247b..12ada58c96a9 100644
2179 +--- a/drivers/infiniband/core/cma.c
2180 ++++ b/drivers/infiniband/core/cma.c
2181 +@@ -1619,6 +1619,8 @@ static struct rdma_id_private *cma_find_listener(
2182 + {
2183 + struct rdma_id_private *id_priv, *id_priv_dev;
2184 +
2185 ++ lockdep_assert_held(&lock);
2186 ++
2187 + if (!bind_list)
2188 + return ERR_PTR(-EINVAL);
2189 +
2190 +@@ -1665,6 +1667,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
2191 + }
2192 + }
2193 +
2194 ++ mutex_lock(&lock);
2195 + /*
2196 + * Net namespace might be getting deleted while route lookup,
2197 + * cm_id lookup is in progress. Therefore, perform netdevice
2198 +@@ -1706,6 +1709,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
2199 + id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
2200 + err:
2201 + rcu_read_unlock();
2202 ++ mutex_unlock(&lock);
2203 + if (IS_ERR(id_priv) && *net_dev) {
2204 + dev_put(*net_dev);
2205 + *net_dev = NULL;
2206 +@@ -2481,6 +2485,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
2207 + struct net *net = id_priv->id.route.addr.dev_addr.net;
2208 + int ret;
2209 +
2210 ++ lockdep_assert_held(&lock);
2211 ++
2212 + if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
2213 + return;
2214 +
2215 +@@ -3308,6 +3314,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
2216 + u64 sid, mask;
2217 + __be16 port;
2218 +
2219 ++ lockdep_assert_held(&lock);
2220 ++
2221 + addr = cma_src_addr(id_priv);
2222 + port = htons(bind_list->port);
2223 +
2224 +@@ -3336,6 +3344,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
2225 + struct rdma_bind_list *bind_list;
2226 + int ret;
2227 +
2228 ++ lockdep_assert_held(&lock);
2229 ++
2230 + bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
2231 + if (!bind_list)
2232 + return -ENOMEM;
2233 +@@ -3362,6 +3372,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
2234 + struct sockaddr *saddr = cma_src_addr(id_priv);
2235 + __be16 dport = cma_port(daddr);
2236 +
2237 ++ lockdep_assert_held(&lock);
2238 ++
2239 + hlist_for_each_entry(cur_id, &bind_list->owners, node) {
2240 + struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
2241 + struct sockaddr *cur_saddr = cma_src_addr(cur_id);
2242 +@@ -3401,6 +3413,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
2243 + unsigned int rover;
2244 + struct net *net = id_priv->id.route.addr.dev_addr.net;
2245 +
2246 ++ lockdep_assert_held(&lock);
2247 ++
2248 + inet_get_local_port_range(net, &low, &high);
2249 + remaining = (high - low) + 1;
2250 + rover = prandom_u32() % remaining + low;
2251 +@@ -3448,6 +3462,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
2252 + struct rdma_id_private *cur_id;
2253 + struct sockaddr *addr, *cur_addr;
2254 +
2255 ++ lockdep_assert_held(&lock);
2256 ++
2257 + addr = cma_src_addr(id_priv);
2258 + hlist_for_each_entry(cur_id, &bind_list->owners, node) {
2259 + if (id_priv == cur_id)
2260 +@@ -3478,6 +3494,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
2261 + unsigned short snum;
2262 + int ret;
2263 +
2264 ++ lockdep_assert_held(&lock);
2265 ++
2266 + snum = ntohs(cma_port(cma_src_addr(id_priv)));
2267 + if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
2268 + return -EACCES;
2269 +diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
2270 +index c54db13fa9b0..049c9cdc10de 100644
2271 +--- a/drivers/infiniband/core/mad.c
2272 ++++ b/drivers/infiniband/core/mad.c
2273 +@@ -639,10 +639,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
2274 + xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
2275 +
2276 + flush_workqueue(port_priv->wq);
2277 +- ib_cancel_rmpp_recvs(mad_agent_priv);
2278 +
2279 + deref_mad_agent(mad_agent_priv);
2280 + wait_for_completion(&mad_agent_priv->comp);
2281 ++ ib_cancel_rmpp_recvs(mad_agent_priv);
2282 +
2283 + ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
2284 +
2285 +@@ -2941,6 +2941,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2286 + DMA_FROM_DEVICE);
2287 + if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2288 + sg_list.addr))) {
2289 ++ kfree(mad_priv);
2290 + ret = -ENOMEM;
2291 + break;
2292 + }
2293 +diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
2294 +index e0a5e897e4b1..75bcbc625616 100644
2295 +--- a/drivers/infiniband/core/rdma_core.c
2296 ++++ b/drivers/infiniband/core/rdma_core.c
2297 +@@ -459,40 +459,46 @@ static struct ib_uobject *
2298 + alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
2299 + struct uverbs_attr_bundle *attrs)
2300 + {
2301 +- const struct uverbs_obj_fd_type *fd_type =
2302 +- container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
2303 ++ const struct uverbs_obj_fd_type *fd_type;
2304 + int new_fd;
2305 +- struct ib_uobject *uobj;
2306 ++ struct ib_uobject *uobj, *ret;
2307 + struct file *filp;
2308 +
2309 ++ uobj = alloc_uobj(attrs, obj);
2310 ++ if (IS_ERR(uobj))
2311 ++ return uobj;
2312 ++
2313 ++ fd_type =
2314 ++ container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
2315 + if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
2316 +- fd_type->fops->release != &uverbs_async_event_release))
2317 +- return ERR_PTR(-EINVAL);
2318 ++ fd_type->fops->release != &uverbs_async_event_release)) {
2319 ++ ret = ERR_PTR(-EINVAL);
2320 ++ goto err_fd;
2321 ++ }
2322 +
2323 + new_fd = get_unused_fd_flags(O_CLOEXEC);
2324 +- if (new_fd < 0)
2325 +- return ERR_PTR(new_fd);
2326 +-
2327 +- uobj = alloc_uobj(attrs, obj);
2328 +- if (IS_ERR(uobj))
2329 ++ if (new_fd < 0) {
2330 ++ ret = ERR_PTR(new_fd);
2331 + goto err_fd;
2332 ++ }
2333 +
2334 + /* Note that uverbs_uobject_fd_release() is called during abort */
2335 + filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
2336 + fd_type->flags);
2337 + if (IS_ERR(filp)) {
2338 +- uverbs_uobject_put(uobj);
2339 +- uobj = ERR_CAST(filp);
2340 +- goto err_fd;
2341 ++ ret = ERR_CAST(filp);
2342 ++ goto err_getfile;
2343 + }
2344 + uobj->object = filp;
2345 +
2346 + uobj->id = new_fd;
2347 + return uobj;
2348 +
2349 +-err_fd:
2350 ++err_getfile:
2351 + put_unused_fd(new_fd);
2352 +- return uobj;
2353 ++err_fd:
2354 ++ uverbs_uobject_put(uobj);
2355 ++ return ret;
2356 + }
2357 +
2358 + struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
2359 +diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
2360 +index 5c57098a4aee..3420c7742486 100644
2361 +--- a/drivers/infiniband/hw/efa/efa_verbs.c
2362 ++++ b/drivers/infiniband/hw/efa/efa_verbs.c
2363 +@@ -209,6 +209,7 @@ int efa_query_device(struct ib_device *ibdev,
2364 + props->max_send_sge = dev_attr->max_sq_sge;
2365 + props->max_recv_sge = dev_attr->max_rq_sge;
2366 + props->max_sge_rd = dev_attr->max_wr_rdma_sge;
2367 ++ props->max_pkeys = 1;
2368 +
2369 + if (udata && udata->outlen) {
2370 + resp.max_sq_sge = dev_attr->max_sq_sge;
2371 +diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
2372 +index 4633a0ce1a8c..2ced236e1553 100644
2373 +--- a/drivers/infiniband/hw/hfi1/debugfs.c
2374 ++++ b/drivers/infiniband/hw/hfi1/debugfs.c
2375 +@@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
2376 + static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
2377 + {
2378 + struct hfi1_pportdata *ppd;
2379 +- int ret;
2380 +
2381 + ppd = private2ppd(fp);
2382 +
2383 +- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
2384 +- if (ret) /* failed - release the module */
2385 +- module_put(THIS_MODULE);
2386 +-
2387 +- return ret;
2388 ++ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
2389 + }
2390 +
2391 + static int i2c1_debugfs_open(struct inode *in, struct file *fp)
2392 +@@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
2393 + ppd = private2ppd(fp);
2394 +
2395 + release_chip_resource(ppd->dd, i2c_target(target));
2396 +- module_put(THIS_MODULE);
2397 +
2398 + return 0;
2399 + }
2400 +@@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp)
2401 + static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
2402 + {
2403 + struct hfi1_pportdata *ppd;
2404 +- int ret;
2405 +-
2406 +- if (!try_module_get(THIS_MODULE))
2407 +- return -ENODEV;
2408 +
2409 + ppd = private2ppd(fp);
2410 +
2411 +- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
2412 +- if (ret) /* failed - release the module */
2413 +- module_put(THIS_MODULE);
2414 +-
2415 +- return ret;
2416 ++ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
2417 + }
2418 +
2419 + static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
2420 +@@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
2421 + ppd = private2ppd(fp);
2422 +
2423 + release_chip_resource(ppd->dd, i2c_target(target));
2424 +- module_put(THIS_MODULE);
2425 +
2426 + return 0;
2427 + }
2428 +diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
2429 +index 792eecd206b6..97fc7dd353b0 100644
2430 +--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
2431 ++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
2432 +@@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context,
2433 + if (params->cm_info) {
2434 + event.ird = params->cm_info->ird;
2435 + event.ord = params->cm_info->ord;
2436 +- event.private_data_len = params->cm_info->private_data_len;
2437 +- event.private_data = (void *)params->cm_info->private_data;
2438 ++ /* Only connect_request and reply have valid private data
2439 ++ * the rest of the events this may be left overs from
2440 ++ * connection establishment. CONNECT_REQUEST is issued via
2441 ++ * qedr_iw_mpa_request
2442 ++ */
2443 ++ if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
2444 ++ event.private_data_len =
2445 ++ params->cm_info->private_data_len;
2446 ++ event.private_data =
2447 ++ (void *)params->cm_info->private_data;
2448 ++ }
2449 + }
2450 +
2451 + if (ep->cm_id)
2452 +diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
2453 +index 500a7ee04c44..ca29954a54ac 100644
2454 +--- a/drivers/infiniband/sw/rdmavt/qp.c
2455 ++++ b/drivers/infiniband/sw/rdmavt/qp.c
2456 +@@ -1196,7 +1196,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
2457 + err = alloc_ud_wq_attr(qp, rdi->dparms.node);
2458 + if (err) {
2459 + ret = (ERR_PTR(err));
2460 +- goto bail_driver_priv;
2461 ++ goto bail_rq_rvt;
2462 + }
2463 +
2464 + err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
2465 +@@ -1300,9 +1300,11 @@ bail_qpn:
2466 + rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
2467 +
2468 + bail_rq_wq:
2469 +- rvt_free_rq(&qp->r_rq);
2470 + free_ud_wq_attr(qp);
2471 +
2472 ++bail_rq_rvt:
2473 ++ rvt_free_rq(&qp->r_rq);
2474 ++
2475 + bail_driver_priv:
2476 + rdi->driver_f.qp_priv_free(rdi, qp);
2477 +
2478 +diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
2479 +index 650520244ed0..7271d705f4b0 100644
2480 +--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
2481 ++++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
2482 +@@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
2483 + break;
2484 +
2485 + bytes = min(bytes, len);
2486 +- if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) {
2487 ++ if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
2488 ++ bytes) {
2489 + copied += bytes;
2490 + offset += bytes;
2491 + len -= bytes;
2492 +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
2493 +index f77dae7ba7d4..7df5621bba8d 100644
2494 +--- a/drivers/iommu/dmar.c
2495 ++++ b/drivers/iommu/dmar.c
2496 +@@ -898,7 +898,8 @@ int __init detect_intel_iommu(void)
2497 + if (!ret)
2498 + ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
2499 + &validate_drhd_cb);
2500 +- if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
2501 ++ if (!ret && !no_iommu && !iommu_detected &&
2502 ++ (!dmar_disabled || dmar_platform_optin())) {
2503 + iommu_detected = 1;
2504 + /* Make sure ACS will be enabled */
2505 + pci_request_acs();
2506 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2507 +index fde7aba49b74..34b2ed91cf4d 100644
2508 +--- a/drivers/iommu/intel-iommu.c
2509 ++++ b/drivers/iommu/intel-iommu.c
2510 +@@ -634,6 +634,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
2511 + return g_iommus[iommu_id];
2512 + }
2513 +
2514 ++static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
2515 ++{
2516 ++ return sm_supported(iommu) ?
2517 ++ ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
2518 ++}
2519 ++
2520 + static void domain_update_iommu_coherency(struct dmar_domain *domain)
2521 + {
2522 + struct dmar_drhd_unit *drhd;
2523 +@@ -645,7 +651,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
2524 +
2525 + for_each_domain_iommu(i, domain) {
2526 + found = true;
2527 +- if (!ecap_coherent(g_iommus[i]->ecap)) {
2528 ++ if (!iommu_paging_structure_coherency(g_iommus[i])) {
2529 + domain->iommu_coherency = 0;
2530 + break;
2531 + }
2532 +@@ -656,7 +662,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
2533 + /* No hardware attached; use lowest common denominator */
2534 + rcu_read_lock();
2535 + for_each_active_iommu(iommu, drhd) {
2536 +- if (!ecap_coherent(iommu->ecap)) {
2537 ++ if (!iommu_paging_structure_coherency(iommu)) {
2538 + domain->iommu_coherency = 0;
2539 + break;
2540 + }
2541 +@@ -943,7 +949,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
2542 + domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
2543 + pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
2544 + if (domain_use_first_level(domain))
2545 +- pteval |= DMA_FL_PTE_XD;
2546 ++ pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
2547 + if (cmpxchg64(&pte->val, 0ULL, pteval))
2548 + /* Someone else set it while we were thinking; use theirs. */
2549 + free_pgtable_page(tmp_page);
2550 +@@ -2034,7 +2040,6 @@ static inline void
2551 + context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
2552 + {
2553 + context->hi |= pasid & ((1 << 20) - 1);
2554 +- context->hi |= (1 << 20);
2555 + }
2556 +
2557 + /*
2558 +@@ -2178,7 +2183,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
2559 +
2560 + context_set_fault_enable(context);
2561 + context_set_present(context);
2562 +- domain_flush_cache(domain, context, sizeof(*context));
2563 ++ if (!ecap_coherent(iommu->ecap))
2564 ++ clflush_cache_range(context, sizeof(*context));
2565 +
2566 + /*
2567 + * It's a non-present to present mapping. If hardware doesn't cache
2568 +@@ -2326,7 +2332,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2569 +
2570 + attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
2571 + if (domain_use_first_level(domain))
2572 +- attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
2573 ++ attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
2574 +
2575 + if (!sg) {
2576 + sg_res = nr_pages;
2577 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
2578 +index 4d8bf731b118..a2e5a0fcd7d5 100644
2579 +--- a/drivers/md/bcache/super.c
2580 ++++ b/drivers/md/bcache/super.c
2581 +@@ -819,7 +819,8 @@ static void bcache_device_free(struct bcache_device *d)
2582 + }
2583 +
2584 + static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
2585 +- sector_t sectors, make_request_fn make_request_fn)
2586 ++ sector_t sectors, make_request_fn make_request_fn,
2587 ++ struct block_device *cached_bdev)
2588 + {
2589 + struct request_queue *q;
2590 + const size_t max_stripes = min_t(size_t, INT_MAX,
2591 +@@ -885,6 +886,21 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
2592 + q->limits.io_min = block_size;
2593 + q->limits.logical_block_size = block_size;
2594 + q->limits.physical_block_size = block_size;
2595 ++
2596 ++ if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) {
2597 ++ /*
2598 ++ * This should only happen with BCACHE_SB_VERSION_BDEV.
2599 ++ * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
2600 ++ */
2601 ++ pr_info("%s: sb/logical block size (%u) greater than page size "
2602 ++ "(%lu) falling back to device logical block size (%u)",
2603 ++ d->disk->disk_name, q->limits.logical_block_size,
2604 ++ PAGE_SIZE, bdev_logical_block_size(cached_bdev));
2605 ++
2606 ++ /* This also adjusts physical block size/min io size if needed */
2607 ++ blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
2608 ++ }
2609 ++
2610 + blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
2611 + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
2612 + blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);
2613 +@@ -1342,7 +1358,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
2614 +
2615 + ret = bcache_device_init(&dc->disk, block_size,
2616 + dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
2617 +- cached_dev_make_request);
2618 ++ cached_dev_make_request, dc->bdev);
2619 + if (ret)
2620 + return ret;
2621 +
2622 +@@ -1455,7 +1471,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
2623 + kobject_init(&d->kobj, &bch_flash_dev_ktype);
2624 +
2625 + if (bcache_device_init(d, block_bytes(c), u->sectors,
2626 +- flash_dev_make_request))
2627 ++ flash_dev_make_request, NULL))
2628 + goto err;
2629 +
2630 + bcache_device_attach(d, c, u - c->uuids);
2631 +diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
2632 +index 613c171b1b6d..5cc94f57421c 100644
2633 +--- a/drivers/md/dm-writecache.c
2634 ++++ b/drivers/md/dm-writecache.c
2635 +@@ -286,6 +286,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
2636 + while (daa-- && i < p) {
2637 + pages[i++] = pfn_t_to_page(pfn);
2638 + pfn.val++;
2639 ++ if (!(i & 15))
2640 ++ cond_resched();
2641 + }
2642 + } while (i < p);
2643 + wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
2644 +@@ -857,6 +859,8 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
2645 + writecache_wait_for_ios(wc, WRITE);
2646 + discarded_something = true;
2647 + }
2648 ++ if (!writecache_entry_is_committed(wc, e))
2649 ++ wc->uncommitted_blocks--;
2650 + writecache_free_entry(wc, e);
2651 + }
2652 +
2653 +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
2654 +index 9392934e3a06..7becfc768bbc 100644
2655 +--- a/drivers/misc/mei/hw-me-regs.h
2656 ++++ b/drivers/misc/mei/hw-me-regs.h
2657 +@@ -94,6 +94,7 @@
2658 + #define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */
2659 +
2660 + #define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
2661 ++#define MEI_DEV_ID_TGP_H 0x43E0 /* Tiger Lake Point H */
2662 +
2663 + #define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
2664 + #define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
2665 +@@ -107,6 +108,8 @@
2666 + # define PCI_CFG_HFS_1_D0I3_MSK 0x80000000
2667 + #define PCI_CFG_HFS_2 0x48
2668 + #define PCI_CFG_HFS_3 0x60
2669 ++# define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070
2670 ++# define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060
2671 + #define PCI_CFG_HFS_4 0x64
2672 + #define PCI_CFG_HFS_5 0x68
2673 + #define PCI_CFG_HFS_6 0x6C
2674 +diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
2675 +index f620442addf5..7649710a2ab9 100644
2676 +--- a/drivers/misc/mei/hw-me.c
2677 ++++ b/drivers/misc/mei/hw-me.c
2678 +@@ -1366,7 +1366,7 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
2679 + #define MEI_CFG_FW_NM \
2680 + .quirk_probe = mei_me_fw_type_nm
2681 +
2682 +-static bool mei_me_fw_type_sps(struct pci_dev *pdev)
2683 ++static bool mei_me_fw_type_sps_4(struct pci_dev *pdev)
2684 + {
2685 + u32 reg;
2686 + unsigned int devfn;
2687 +@@ -1382,7 +1382,36 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
2688 + return (reg & 0xf0000) == 0xf0000;
2689 + }
2690 +
2691 +-#define MEI_CFG_FW_SPS \
2692 ++#define MEI_CFG_FW_SPS_4 \
2693 ++ .quirk_probe = mei_me_fw_type_sps_4
2694 ++
2695 ++/**
2696 ++ * mei_me_fw_sku_sps() - check for sps sku
2697 ++ *
2698 ++ * Read ME FW Status register to check for SPS Firmware.
2699 ++ * The SPS FW is only signaled in pci function 0
2700 ++ *
2701 ++ * @pdev: pci device
2702 ++ *
2703 ++ * Return: true in case of SPS firmware
2704 ++ */
2705 ++static bool mei_me_fw_type_sps(struct pci_dev *pdev)
2706 ++{
2707 ++ u32 reg;
2708 ++ u32 fw_type;
2709 ++ unsigned int devfn;
2710 ++
2711 ++ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
2712 ++ pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
2713 ++ trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
2714 ++ fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
2715 ++
2716 ++ dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
2717 ++
2718 ++ return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
2719 ++}
2720 ++
2721 ++#define MEI_CFG_FW_SPS \
2722 + .quirk_probe = mei_me_fw_type_sps
2723 +
2724 + #define MEI_CFG_FW_VER_SUPP \
2725 +@@ -1452,10 +1481,17 @@ static const struct mei_cfg mei_me_pch8_cfg = {
2726 + };
2727 +
2728 + /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
2729 +-static const struct mei_cfg mei_me_pch8_sps_cfg = {
2730 ++static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
2731 + MEI_CFG_PCH8_HFS,
2732 + MEI_CFG_FW_VER_SUPP,
2733 +- MEI_CFG_FW_SPS,
2734 ++ MEI_CFG_FW_SPS_4,
2735 ++};
2736 ++
2737 ++/* LBG with quirk for SPS (4.0) Firmware exclusion */
2738 ++static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
2739 ++ MEI_CFG_PCH8_HFS,
2740 ++ MEI_CFG_FW_VER_SUPP,
2741 ++ MEI_CFG_FW_SPS_4,
2742 + };
2743 +
2744 + /* Cannon Lake and newer devices */
2745 +@@ -1465,8 +1501,18 @@ static const struct mei_cfg mei_me_pch12_cfg = {
2746 + MEI_CFG_DMA_128,
2747 + };
2748 +
2749 +-/* LBG with quirk for SPS Firmware exclusion */
2750 ++/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
2751 + static const struct mei_cfg mei_me_pch12_sps_cfg = {
2752 ++ MEI_CFG_PCH8_HFS,
2753 ++ MEI_CFG_FW_VER_SUPP,
2754 ++ MEI_CFG_DMA_128,
2755 ++ MEI_CFG_FW_SPS,
2756 ++};
2757 ++
2758 ++/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion
2759 ++ * w/o DMA support
2760 ++ */
2761 ++static const struct mei_cfg mei_me_pch12_nodma_sps_cfg = {
2762 + MEI_CFG_PCH8_HFS,
2763 + MEI_CFG_FW_VER_SUPP,
2764 + MEI_CFG_FW_SPS,
2765 +@@ -1480,6 +1526,15 @@ static const struct mei_cfg mei_me_pch15_cfg = {
2766 + MEI_CFG_TRC,
2767 + };
2768 +
2769 ++/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
2770 ++static const struct mei_cfg mei_me_pch15_sps_cfg = {
2771 ++ MEI_CFG_PCH8_HFS,
2772 ++ MEI_CFG_FW_VER_SUPP,
2773 ++ MEI_CFG_DMA_128,
2774 ++ MEI_CFG_TRC,
2775 ++ MEI_CFG_FW_SPS,
2776 ++};
2777 ++
2778 + /*
2779 + * mei_cfg_list - A list of platform platform specific configurations.
2780 + * Note: has to be synchronized with enum mei_cfg_idx.
2781 +@@ -1492,10 +1547,13 @@ static const struct mei_cfg *const mei_cfg_list[] = {
2782 + [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
2783 + [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
2784 + [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
2785 +- [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
2786 ++ [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
2787 + [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
2788 ++ [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
2789 + [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
2790 ++ [MEI_ME_PCH12_SPS_NODMA_CFG] = &mei_me_pch12_nodma_sps_cfg,
2791 + [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
2792 ++ [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
2793 + };
2794 +
2795 + const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
2796 +diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
2797 +index b6b94e211464..6a8973649c49 100644
2798 +--- a/drivers/misc/mei/hw-me.h
2799 ++++ b/drivers/misc/mei/hw-me.h
2800 +@@ -1,6 +1,6 @@
2801 + /* SPDX-License-Identifier: GPL-2.0 */
2802 + /*
2803 +- * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
2804 ++ * Copyright (c) 2012-2020, Intel Corporation. All rights reserved.
2805 + * Intel Management Engine Interface (Intel MEI) Linux driver
2806 + */
2807 +
2808 +@@ -76,14 +76,20 @@ struct mei_me_hw {
2809 + * with quirk for Node Manager exclusion.
2810 + * @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer
2811 + * client platforms.
2812 +- * @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer
2813 ++ * @MEI_ME_PCH8_SPS_4_CFG: Platform Controller Hub Gen8 and newer
2814 + * servers platforms with quirk for
2815 + * SPS firmware exclusion.
2816 + * @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
2817 +- * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 and newer
2818 ++ * @MEI_ME_PCH12_SPS_4_CFG:Platform Controller Hub Gen12 up to 4.0
2819 ++ * servers platforms with quirk for
2820 ++ * SPS firmware exclusion.
2821 ++ * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 5.0 and newer
2822 + * servers platforms with quirk for
2823 + * SPS firmware exclusion.
2824 + * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer
2825 ++ * @MEI_ME_PCH15_SPS_CFG: Platform Controller Hub Gen15 and newer
2826 ++ * servers platforms with quirk for
2827 ++ * SPS firmware exclusion.
2828 + * @MEI_ME_NUM_CFG: Upper Sentinel.
2829 + */
2830 + enum mei_cfg_idx {
2831 +@@ -94,10 +100,13 @@ enum mei_cfg_idx {
2832 + MEI_ME_PCH7_CFG,
2833 + MEI_ME_PCH_CPT_PBG_CFG,
2834 + MEI_ME_PCH8_CFG,
2835 +- MEI_ME_PCH8_SPS_CFG,
2836 ++ MEI_ME_PCH8_SPS_4_CFG,
2837 + MEI_ME_PCH12_CFG,
2838 ++ MEI_ME_PCH12_SPS_4_CFG,
2839 + MEI_ME_PCH12_SPS_CFG,
2840 ++ MEI_ME_PCH12_SPS_NODMA_CFG,
2841 + MEI_ME_PCH15_CFG,
2842 ++ MEI_ME_PCH15_SPS_CFG,
2843 + MEI_ME_NUM_CFG,
2844 + };
2845 +
2846 +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
2847 +index a1ed375fed37..81e759674c1b 100644
2848 +--- a/drivers/misc/mei/pci-me.c
2849 ++++ b/drivers/misc/mei/pci-me.c
2850 +@@ -59,18 +59,18 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
2851 + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
2852 + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
2853 + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
2854 +- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
2855 +- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
2856 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)},
2857 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)},
2858 + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
2859 +- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
2860 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)},
2861 + {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
2862 + {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},
2863 +
2864 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
2865 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
2866 +- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
2867 +- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
2868 +- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)},
2869 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
2870 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
2871 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},
2872 +
2873 + {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
2874 + {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
2875 +@@ -84,8 +84,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
2876 +
2877 + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
2878 + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)},
2879 +- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
2880 +- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH8_CFG)},
2881 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
2882 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_NODMA_CFG)},
2883 +
2884 + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
2885 + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
2886 +@@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
2887 + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
2888 +
2889 + {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
2890 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
2891 +
2892 + {MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},
2893 +
2894 +diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
2895 +index 5d3c691a1c66..3dd46cd55114 100644
2896 +--- a/drivers/net/bareudp.c
2897 ++++ b/drivers/net/bareudp.c
2898 +@@ -572,6 +572,9 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
2899 + if (data[IFLA_BAREUDP_SRCPORT_MIN])
2900 + conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
2901 +
2902 ++ if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
2903 ++ conf->multi_proto_mode = true;
2904 ++
2905 + return 0;
2906 + }
2907 +
2908 +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
2909 +index c7ac63f41918..946e41f020a5 100644
2910 +--- a/drivers/net/dsa/bcm_sf2.c
2911 ++++ b/drivers/net/dsa/bcm_sf2.c
2912 +@@ -1147,6 +1147,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
2913 + set_bit(0, priv->cfp.used);
2914 + set_bit(0, priv->cfp.unique);
2915 +
2916 ++ /* Balance of_node_put() done by of_find_node_by_name() */
2917 ++ of_node_get(dn);
2918 + ports = of_find_node_by_name(dn, "ports");
2919 + if (ports) {
2920 + bcm_sf2_identify_ports(priv, ports);
2921 +diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
2922 +index b9b4edb913c1..9b7f1af5f574 100644
2923 +--- a/drivers/net/ethernet/atheros/alx/main.c
2924 ++++ b/drivers/net/ethernet/atheros/alx/main.c
2925 +@@ -1249,8 +1249,12 @@ out_disable_adv_intr:
2926 +
2927 + static void __alx_stop(struct alx_priv *alx)
2928 + {
2929 +- alx_halt(alx);
2930 + alx_free_irq(alx);
2931 ++
2932 ++ cancel_work_sync(&alx->link_check_wk);
2933 ++ cancel_work_sync(&alx->reset_wk);
2934 ++
2935 ++ alx_halt(alx);
2936 + alx_free_rings(alx);
2937 + alx_free_napis(alx);
2938 + }
2939 +@@ -1855,9 +1859,6 @@ static void alx_remove(struct pci_dev *pdev)
2940 + struct alx_priv *alx = pci_get_drvdata(pdev);
2941 + struct alx_hw *hw = &alx->hw;
2942 +
2943 +- cancel_work_sync(&alx->link_check_wk);
2944 +- cancel_work_sync(&alx->reset_wk);
2945 +-
2946 + /* restore permanent mac address */
2947 + alx_set_macaddr(hw, hw->perm_addr);
2948 +
2949 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2950 +index 19c4a0a5727a..b6fb5a1709c0 100644
2951 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2952 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2953 +@@ -6293,6 +6293,7 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
2954 +
2955 + static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
2956 + {
2957 ++ struct hwrm_stat_ctx_clr_stats_input req0 = {0};
2958 + struct hwrm_stat_ctx_free_input req = {0};
2959 + int i;
2960 +
2961 +@@ -6302,6 +6303,7 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
2962 + if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2963 + return;
2964 +
2965 ++ bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
2966 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
2967 +
2968 + mutex_lock(&bp->hwrm_cmd_lock);
2969 +@@ -6311,7 +6313,11 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
2970 +
2971 + if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
2972 + req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
2973 +-
2974 ++ if (BNXT_FW_MAJ(bp) <= 20) {
2975 ++ req0.stat_ctx_id = req.stat_ctx_id;
2976 ++ _hwrm_send_message(bp, &req0, sizeof(req0),
2977 ++ HWRM_CMD_TIMEOUT);
2978 ++ }
2979 + _hwrm_send_message(bp, &req, sizeof(req),
2980 + HWRM_CMD_TIMEOUT);
2981 +
2982 +@@ -6953,7 +6959,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
2983 + bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
2984 +
2985 + bp->tx_push_thresh = 0;
2986 +- if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
2987 ++ if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
2988 ++ BNXT_FW_MAJ(bp) > 217)
2989 + bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
2990 +
2991 + hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
2992 +@@ -7217,8 +7224,9 @@ static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
2993 + static int bnxt_hwrm_ver_get(struct bnxt *bp)
2994 + {
2995 + struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
2996 ++ u16 fw_maj, fw_min, fw_bld, fw_rsv;
2997 + u32 dev_caps_cfg, hwrm_ver;
2998 +- int rc;
2999 ++ int rc, len;
3000 +
3001 + bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
3002 + mutex_lock(&bp->hwrm_cmd_lock);
3003 +@@ -7250,9 +7258,22 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
3004 + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
3005 + resp->hwrm_intf_upd_8b);
3006 +
3007 +- snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
3008 +- resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
3009 +- resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
3010 ++ fw_maj = le16_to_cpu(resp->hwrm_fw_major);
3011 ++ if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
3012 ++ fw_min = le16_to_cpu(resp->hwrm_fw_minor);
3013 ++ fw_bld = le16_to_cpu(resp->hwrm_fw_build);
3014 ++ fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
3015 ++ len = FW_VER_STR_LEN;
3016 ++ } else {
3017 ++ fw_maj = resp->hwrm_fw_maj_8b;
3018 ++ fw_min = resp->hwrm_fw_min_8b;
3019 ++ fw_bld = resp->hwrm_fw_bld_8b;
3020 ++ fw_rsv = resp->hwrm_fw_rsvd_8b;
3021 ++ len = BC_HWRM_STR_LEN;
3022 ++ }
3023 ++ bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
3024 ++ snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
3025 ++ fw_rsv);
3026 +
3027 + if (strlen(resp->active_pkg_name)) {
3028 + int fw_ver_len = strlen(bp->fw_ver_str);
3029 +@@ -11863,7 +11884,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3030 + dev->ethtool_ops = &bnxt_ethtool_ops;
3031 + pci_set_drvdata(pdev, dev);
3032 +
3033 +- bnxt_vpd_read_info(bp);
3034 ++ if (BNXT_PF(bp))
3035 ++ bnxt_vpd_read_info(bp);
3036 +
3037 + rc = bnxt_alloc_hwrm_resources(bp);
3038 + if (rc)
3039 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
3040 +index 3d39638521d6..23ee433db864 100644
3041 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
3042 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
3043 +@@ -1729,6 +1729,11 @@ struct bnxt {
3044 + #define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
3045 + char fw_ver_str[FW_VER_STR_LEN];
3046 + char hwrm_ver_supp[FW_VER_STR_LEN];
3047 ++ u64 fw_ver_code;
3048 ++#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \
3049 ++ ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
3050 ++#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48)
3051 ++
3052 + __be16 vxlan_port;
3053 + u8 vxlan_port_cnt;
3054 + __le16 vxlan_fw_dst_port_id;
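The two macros above pack the four 16-bit firmware version fields into a single u64 so that version checks become plain integer comparisons. A worked example as a userspace adaptation (the real BNXT_FW_MAJ takes a struct bnxt pointer; FW_MAJ here takes the packed code directly):

    #include <stdint.h>
    #include <stdio.h>

    #define FW_VER_CODE(maj, min, bld, rsv) \
            ((uint64_t)(maj) << 48 | (uint64_t)(min) << 32 | \
             (uint64_t)(bld) << 16 | (rsv))
    #define FW_MAJ(code)    ((code) >> 48)

    int main(void)
    {
            uint64_t code = FW_VER_CODE(20, 6, 143, 0);

            printf("%llu\n", (unsigned long long)FW_MAJ(code)); /* 20 */
            return 0;
    }

This packing is what lets the bnxt.c hunks above gate workarounds on BNXT_FW_MAJ(bp) <= 20 and BNXT_FW_MAJ(bp) > 217.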
3055 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3056 +index 38bdfd4b46f0..dde1c23c8e39 100644
3057 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3058 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3059 +@@ -1520,11 +1520,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
3060 + goto out;
3061 + }
3062 +
3063 +- if (skb_padto(skb, ETH_ZLEN)) {
3064 +- ret = NETDEV_TX_OK;
3065 +- goto out;
3066 +- }
3067 +-
3068 + /* Retain how many bytes will be sent on the wire, without TSB inserted
3069 + * by transmit checksum offload
3070 + */
3071 +@@ -1571,6 +1566,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
3072 + len_stat = (size << DMA_BUFLENGTH_SHIFT) |
3073 + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
3074 +
3075 ++ /* Note: if we ever change from DMA_TX_APPEND_CRC below we
3076 ++ * will need to restore software padding of "runt" packets
3077 ++ */
3078 + if (!i) {
3079 + len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
3080 + if (skb->ip_summed == CHECKSUM_PARTIAL)
3081 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
3082 +index ff98a82b7bc4..d71ce7634ac1 100644
3083 +--- a/drivers/net/ethernet/broadcom/tg3.c
3084 ++++ b/drivers/net/ethernet/broadcom/tg3.c
3085 +@@ -18170,8 +18170,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
3086 +
3087 + rtnl_lock();
3088 +
3089 +- /* We probably don't have netdev yet */
3090 +- if (!netdev || !netif_running(netdev))
3091 ++ /* Could be second call or maybe we don't have netdev yet */
3092 ++ if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
3093 + goto done;
3094 +
3095 + /* We needn't recover from permanent error */
3096 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
3097 +index 67933079aeea..52582e8ed90e 100644
3098 +--- a/drivers/net/ethernet/cadence/macb_main.c
3099 ++++ b/drivers/net/ethernet/cadence/macb_main.c
3100 +@@ -2558,7 +2558,7 @@ static int macb_open(struct net_device *dev)
3101 +
3102 + err = macb_phylink_connect(bp);
3103 + if (err)
3104 +- goto napi_exit;
3105 ++ goto reset_hw;
3106 +
3107 + netif_tx_start_all_queues(dev);
3108 +
3109 +@@ -2567,9 +2567,11 @@ static int macb_open(struct net_device *dev)
3110 +
3111 + return 0;
3112 +
3113 +-napi_exit:
3114 ++reset_hw:
3115 ++ macb_reset_hw(bp);
3116 + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
3117 + napi_disable(&queue->napi);
3118 ++ macb_free_consistent(bp);
3119 + pm_exit:
3120 + pm_runtime_put_sync(&bp->pdev->dev);
3121 + return err;
3122 +@@ -3760,15 +3762,9 @@ static int macb_init(struct platform_device *pdev)
3123 +
3124 + static struct sifive_fu540_macb_mgmt *mgmt;
3125 +
3126 +-/* Initialize and start the Receiver and Transmit subsystems */
3127 +-static int at91ether_start(struct net_device *dev)
3128 ++static int at91ether_alloc_coherent(struct macb *lp)
3129 + {
3130 +- struct macb *lp = netdev_priv(dev);
3131 + struct macb_queue *q = &lp->queues[0];
3132 +- struct macb_dma_desc *desc;
3133 +- dma_addr_t addr;
3134 +- u32 ctl;
3135 +- int i;
3136 +
3137 + q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
3138 + (AT91ETHER_MAX_RX_DESCR *
3139 +@@ -3790,6 +3786,43 @@ static int at91ether_start(struct net_device *dev)
3140 + return -ENOMEM;
3141 + }
3142 +
3143 ++ return 0;
3144 ++}
3145 ++
3146 ++static void at91ether_free_coherent(struct macb *lp)
3147 ++{
3148 ++ struct macb_queue *q = &lp->queues[0];
3149 ++
3150 ++ if (q->rx_ring) {
3151 ++ dma_free_coherent(&lp->pdev->dev,
3152 ++ AT91ETHER_MAX_RX_DESCR *
3153 ++ macb_dma_desc_get_size(lp),
3154 ++ q->rx_ring, q->rx_ring_dma);
3155 ++ q->rx_ring = NULL;
3156 ++ }
3157 ++
3158 ++ if (q->rx_buffers) {
3159 ++ dma_free_coherent(&lp->pdev->dev,
3160 ++ AT91ETHER_MAX_RX_DESCR *
3161 ++ AT91ETHER_MAX_RBUFF_SZ,
3162 ++ q->rx_buffers, q->rx_buffers_dma);
3163 ++ q->rx_buffers = NULL;
3164 ++ }
3165 ++}
3166 ++
3167 ++/* Initialize and start the Receiver and Transmit subsystems */
3168 ++static int at91ether_start(struct macb *lp)
3169 ++{
3170 ++ struct macb_queue *q = &lp->queues[0];
3171 ++ struct macb_dma_desc *desc;
3172 ++ dma_addr_t addr;
3173 ++ u32 ctl;
3174 ++ int i, ret;
3175 ++
3176 ++ ret = at91ether_alloc_coherent(lp);
3177 ++ if (ret)
3178 ++ return ret;
3179 ++
3180 + addr = q->rx_buffers_dma;
3181 + for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
3182 + desc = macb_rx_desc(q, i);
3183 +@@ -3811,9 +3844,39 @@ static int at91ether_start(struct net_device *dev)
3184 + ctl = macb_readl(lp, NCR);
3185 + macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
3186 +
3187 ++ /* Enable MAC interrupts */
3188 ++ macb_writel(lp, IER, MACB_BIT(RCOMP) |
3189 ++ MACB_BIT(RXUBR) |
3190 ++ MACB_BIT(ISR_TUND) |
3191 ++ MACB_BIT(ISR_RLE) |
3192 ++ MACB_BIT(TCOMP) |
3193 ++ MACB_BIT(ISR_ROVR) |
3194 ++ MACB_BIT(HRESP));
3195 ++
3196 + return 0;
3197 + }
3198 +
3199 ++static void at91ether_stop(struct macb *lp)
3200 ++{
3201 ++ u32 ctl;
3202 ++
3203 ++ /* Disable MAC interrupts */
3204 ++ macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3205 ++ MACB_BIT(RXUBR) |
3206 ++ MACB_BIT(ISR_TUND) |
3207 ++ MACB_BIT(ISR_RLE) |
3208 ++ MACB_BIT(TCOMP) |
3209 ++ MACB_BIT(ISR_ROVR) |
3210 ++ MACB_BIT(HRESP));
3211 ++
3212 ++ /* Disable Receiver and Transmitter */
3213 ++ ctl = macb_readl(lp, NCR);
3214 ++ macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3215 ++
3216 ++ /* Free resources. */
3217 ++ at91ether_free_coherent(lp);
3218 ++}
3219 ++
3220 + /* Open the ethernet interface */
3221 + static int at91ether_open(struct net_device *dev)
3222 + {
3223 +@@ -3833,63 +3896,36 @@ static int at91ether_open(struct net_device *dev)
3224 +
3225 + macb_set_hwaddr(lp);
3226 +
3227 +- ret = at91ether_start(dev);
3228 ++ ret = at91ether_start(lp);
3229 + if (ret)
3230 +- return ret;
3231 +-
3232 +- /* Enable MAC interrupts */
3233 +- macb_writel(lp, IER, MACB_BIT(RCOMP) |
3234 +- MACB_BIT(RXUBR) |
3235 +- MACB_BIT(ISR_TUND) |
3236 +- MACB_BIT(ISR_RLE) |
3237 +- MACB_BIT(TCOMP) |
3238 +- MACB_BIT(ISR_ROVR) |
3239 +- MACB_BIT(HRESP));
3240 ++ goto pm_exit;
3241 +
3242 + ret = macb_phylink_connect(lp);
3243 + if (ret)
3244 +- return ret;
3245 ++ goto stop;
3246 +
3247 + netif_start_queue(dev);
3248 +
3249 + return 0;
3250 ++
3251 ++stop:
3252 ++ at91ether_stop(lp);
3253 ++pm_exit:
3254 ++ pm_runtime_put_sync(&lp->pdev->dev);
3255 ++ return ret;
3256 + }
3257 +
3258 + /* Close the interface */
3259 + static int at91ether_close(struct net_device *dev)
3260 + {
3261 + struct macb *lp = netdev_priv(dev);
3262 +- struct macb_queue *q = &lp->queues[0];
3263 +- u32 ctl;
3264 +-
3265 +- /* Disable Receiver and Transmitter */
3266 +- ctl = macb_readl(lp, NCR);
3267 +- macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3268 +-
3269 +- /* Disable MAC interrupts */
3270 +- macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3271 +- MACB_BIT(RXUBR) |
3272 +- MACB_BIT(ISR_TUND) |
3273 +- MACB_BIT(ISR_RLE) |
3274 +- MACB_BIT(TCOMP) |
3275 +- MACB_BIT(ISR_ROVR) |
3276 +- MACB_BIT(HRESP));
3277 +
3278 + netif_stop_queue(dev);
3279 +
3280 + phylink_stop(lp->phylink);
3281 + phylink_disconnect_phy(lp->phylink);
3282 +
3283 +- dma_free_coherent(&lp->pdev->dev,
3284 +- AT91ETHER_MAX_RX_DESCR *
3285 +- macb_dma_desc_get_size(lp),
3286 +- q->rx_ring, q->rx_ring_dma);
3287 +- q->rx_ring = NULL;
3288 +-
3289 +- dma_free_coherent(&lp->pdev->dev,
3290 +- AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
3291 +- q->rx_buffers, q->rx_buffers_dma);
3292 +- q->rx_buffers = NULL;
3293 ++ at91ether_stop(lp);
3294 +
3295 + return pm_runtime_put(&lp->pdev->dev);
3296 + }
3297 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
3298 +index 72b37a66c7d8..0ed20a9cca14 100644
3299 +--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
3300 ++++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
3301 +@@ -502,41 +502,20 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
3302 + }
3303 + EXPORT_SYMBOL(cxgb4_select_ntuple);
3304 +
3305 +-/*
3306 +- * Called when address resolution fails for an L2T entry to handle packets
3307 +- * on the arpq head. If a packet specifies a failure handler it is invoked,
3308 +- * otherwise the packet is sent to the device.
3309 +- */
3310 +-static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
3311 +-{
3312 +- struct sk_buff *skb;
3313 +-
3314 +- while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
3315 +- const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
3316 +-
3317 +- spin_unlock(&e->lock);
3318 +- if (cb->arp_err_handler)
3319 +- cb->arp_err_handler(cb->handle, skb);
3320 +- else
3321 +- t4_ofld_send(adap, skb);
3322 +- spin_lock(&e->lock);
3323 +- }
3324 +-}
3325 +-
3326 + /*
3327 + * Called when the host's neighbor layer makes a change to some entry that is
3328 + * loaded into the HW L2 table.
3329 + */
3330 + void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
3331 + {
3332 +- struct l2t_entry *e;
3333 +- struct sk_buff_head *arpq = NULL;
3334 +- struct l2t_data *d = adap->l2t;
3335 + unsigned int addr_len = neigh->tbl->key_len;
3336 + u32 *addr = (u32 *) neigh->primary_key;
3337 +- int ifidx = neigh->dev->ifindex;
3338 +- int hash = addr_hash(d, addr, addr_len, ifidx);
3339 ++ int hash, ifidx = neigh->dev->ifindex;
3340 ++ struct sk_buff_head *arpq = NULL;
3341 ++ struct l2t_data *d = adap->l2t;
3342 ++ struct l2t_entry *e;
3343 +
3344 ++ hash = addr_hash(d, addr, addr_len, ifidx);
3345 + read_lock_bh(&d->lock);
3346 + for (e = d->l2tab[hash].first; e; e = e->next)
3347 + if (!addreq(e, addr) && e->ifindex == ifidx) {
3348 +@@ -569,8 +548,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
3349 + write_l2e(adap, e, 0);
3350 + }
3351 +
3352 +- if (arpq)
3353 +- handle_failed_resolution(adap, e);
3354 ++ if (arpq) {
3355 ++ struct sk_buff *skb;
3356 ++
3357 ++ /* Called when address resolution fails for an L2T
3358 ++ * entry to handle packets on the arpq head. If a
3359 ++ * packet specifies a failure handler it is invoked,
3360 ++ * otherwise the packet is sent to the device.
3361 ++ */
3362 ++ while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
3363 ++ const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
3364 ++
3365 ++ spin_unlock(&e->lock);
3366 ++ if (cb->arp_err_handler)
3367 ++ cb->arp_err_handler(cb->handle, skb);
3368 ++ else
3369 ++ t4_ofld_send(adap, skb);
3370 ++ spin_lock(&e->lock);
3371 ++ }
3372 ++ }
3373 + spin_unlock_bh(&e->lock);
3374 + }
3375 +
3376 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
3377 +index 6516c45864b3..db8106d9d6ed 100644
3378 +--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
3379 ++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
3380 +@@ -1425,12 +1425,10 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
3381 +
3382 + qidx = skb_get_queue_mapping(skb);
3383 + if (ptp_enabled) {
3384 +- spin_lock(&adap->ptp_lock);
3385 + if (!(adap->ptp_tx_skb)) {
3386 + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3387 + adap->ptp_tx_skb = skb_get(skb);
3388 + } else {
3389 +- spin_unlock(&adap->ptp_lock);
3390 + goto out_free;
3391 + }
3392 + q = &adap->sge.ptptxq;
3393 +@@ -1444,11 +1442,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
3394 +
3395 + #ifdef CONFIG_CHELSIO_T4_FCOE
3396 + ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
3397 +- if (unlikely(ret == -ENOTSUPP)) {
3398 +- if (ptp_enabled)
3399 +- spin_unlock(&adap->ptp_lock);
3400 ++ if (unlikely(ret == -EOPNOTSUPP))
3401 + goto out_free;
3402 +- }
3403 + #endif /* CONFIG_CHELSIO_T4_FCOE */
3404 +
3405 + chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
3406 +@@ -1461,8 +1456,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
3407 + dev_err(adap->pdev_dev,
3408 + "%s: Tx ring %u full while queue awake!\n",
3409 + dev->name, qidx);
3410 +- if (ptp_enabled)
3411 +- spin_unlock(&adap->ptp_lock);
3412 + return NETDEV_TX_BUSY;
3413 + }
3414 +
3415 +@@ -1481,8 +1474,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
3416 + unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
3417 + memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
3418 + q->mapping_err++;
3419 +- if (ptp_enabled)
3420 +- spin_unlock(&adap->ptp_lock);
3421 + goto out_free;
3422 + }
3423 +
3424 +@@ -1630,8 +1621,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
3425 + txq_advance(&q->q, ndesc);
3426 +
3427 + cxgb4_ring_tx_db(adap, &q->q, ndesc);
3428 +- if (ptp_enabled)
3429 +- spin_unlock(&adap->ptp_lock);
3430 + return NETDEV_TX_OK;
3431 +
3432 + out_free:
3433 +@@ -2365,6 +2354,16 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
3434 + if (unlikely(qid >= pi->nqsets))
3435 + return cxgb4_ethofld_xmit(skb, dev);
3436 +
3437 ++ if (is_ptp_enabled(skb, dev)) {
3438 ++ struct adapter *adap = netdev2adap(dev);
3439 ++ netdev_tx_t ret;
3440 ++
3441 ++ spin_lock(&adap->ptp_lock);
3442 ++ ret = cxgb4_eth_xmit(skb, dev);
3443 ++ spin_unlock(&adap->ptp_lock);
3444 ++ return ret;
3445 ++ }
3446 ++
3447 + return cxgb4_eth_xmit(skb, dev);
3448 + }
3449 +
3450 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
3451 +index ccf2611f4a20..4486a0db8ef0 100644
3452 +--- a/drivers/net/ethernet/freescale/enetc/enetc.c
3453 ++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
3454 +@@ -266,7 +266,7 @@ static irqreturn_t enetc_msix(int irq, void *data)
3455 + /* disable interrupts */
3456 + enetc_wr_reg(v->rbier, 0);
3457 +
3458 +- for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
3459 ++ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
3460 + enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
3461 +
3462 + napi_schedule_irqoff(&v->napi);
3463 +@@ -302,7 +302,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
3464 + /* enable interrupts */
3465 + enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
3466 +
3467 +- for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
3468 ++ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
3469 + enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
3470 + ENETC_TBIER_TXTIE);
3471 +
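The new bound matters because the third argument of for_each_set_bit() is the width of the bitmap in bits, not the number of set bits: scanning only the first count_tx_rings positions can miss rings whose bits sit higher in tx_rings_map. An illustrative kernel-style fragment (hypothetical values, not driver code):

    #include <linux/bitops.h>

    unsigned long tx_rings_map = BIT(0) | BIT(5);   /* two rings, sparse map */
    int i;

    /* Bounding the scan by the popcount (2) would visit only bit 0. */
    for_each_set_bit(i, &tx_rings_map, BITS_PER_LONG)
            handle_ring(i);         /* hypothetical helper; sees 0 and 5 */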
3472 +diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
3473 +index 96d36ae5049e..c5c732601e35 100644
3474 +--- a/drivers/net/ethernet/ibm/ibmveth.c
3475 ++++ b/drivers/net/ethernet/ibm/ibmveth.c
3476 +@@ -1715,7 +1715,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
3477 + }
3478 +
3479 + netdev->min_mtu = IBMVETH_MIN_MTU;
3480 +- netdev->max_mtu = ETH_MAX_MTU;
3481 ++ netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
3482 +
3483 + memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
3484 +
3485 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
3486 +index 1b4d04e4474b..2baf7b3ff4cb 100644
3487 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
3488 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
3489 +@@ -842,12 +842,13 @@ static int ibmvnic_login(struct net_device *netdev)
3490 + struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3491 + unsigned long timeout = msecs_to_jiffies(30000);
3492 + int retry_count = 0;
3493 ++ int retries = 10;
3494 + bool retry;
3495 + int rc;
3496 +
3497 + do {
3498 + retry = false;
3499 +- if (retry_count > IBMVNIC_MAX_QUEUES) {
3500 ++ if (retry_count > retries) {
3501 + netdev_warn(netdev, "Login attempts exceeded\n");
3502 + return -1;
3503 + }
3504 +@@ -862,11 +863,23 @@ static int ibmvnic_login(struct net_device *netdev)
3505 +
3506 + if (!wait_for_completion_timeout(&adapter->init_done,
3507 + timeout)) {
3508 +- netdev_warn(netdev, "Login timed out\n");
3509 +- return -1;
3510 ++ netdev_warn(netdev, "Login timed out, retrying...\n");
3511 ++ retry = true;
3512 ++ adapter->init_done_rc = 0;
3513 ++ retry_count++;
3514 ++ continue;
3515 + }
3516 +
3517 +- if (adapter->init_done_rc == PARTIALSUCCESS) {
3518 ++ if (adapter->init_done_rc == ABORTED) {
3519 ++ netdev_warn(netdev, "Login aborted, retrying...\n");
3520 ++ retry = true;
3521 ++ adapter->init_done_rc = 0;
3522 ++ retry_count++;
3523 ++ /* FW or device may be busy, so
3524 ++ * wait a bit before retrying login
3525 ++ */
3526 ++ msleep(500);
3527 ++ } else if (adapter->init_done_rc == PARTIALSUCCESS) {
3528 + retry_count++;
3529 + release_sub_crqs(adapter, 1);
3530 +
3531 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3532 +index b7b553602ea9..24f4d8e0da98 100644
3533 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3534 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3535 +@@ -1544,7 +1544,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port)
3536 + for (q = 0; q < port->ntxqs; q++)
3537 + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
3538 + *pstats++ += mvpp2_read_index(port->priv,
3539 +- MVPP22_CTRS_TX_CTR(port->id, i),
3540 ++ MVPP22_CTRS_TX_CTR(port->id, q),
3541 + mvpp2_ethtool_txq_regs[i].offset);
3542 +
3543 + /* Rxqs are numbered from 0 from the user standpoint, but not from the
3544 +@@ -1553,7 +1553,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port)
3545 + for (q = 0; q < port->nrxqs; q++)
3546 + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
3547 + *pstats++ += mvpp2_read_index(port->priv,
3548 +- port->first_rxq + i,
3549 ++ port->first_rxq + q,
3550 + mvpp2_ethtool_rxq_regs[i].offset);
3551 + }
3552 +
3553 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3554 +index 3e4199246a18..d9a2267aeaea 100644
3555 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3556 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3557 +@@ -990,10 +990,10 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
3558 +
3559 + lossy = !(pfc || pause_en);
3560 + thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
3561 +- mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &thres_cells);
3562 ++ thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
3563 + delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
3564 + pfc, pause_en);
3565 +- mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &delay_cells);
3566 ++ delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
3567 + total_cells = thres_cells + delay_cells;
3568 +
3569 + taken_headroom_cells += total_cells;
3570 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
3571 +index e28ecb84b816..6b2e4e730b18 100644
3572 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
3573 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
3574 +@@ -395,17 +395,15 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
3575 + return NULL;
3576 + }
3577 +
3578 +-static inline void
3579 ++static inline u32
3580 + mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
3581 +- u16 *p_size)
3582 ++ u32 size_cells)
3583 + {
3584 + /* Ports with eight lanes use two headroom buffers between which the
3585 + * configured headroom size is split. Therefore, multiply the calculated
3586 + * headroom size by two.
3587 + */
3588 +- if (mlxsw_sp_port->mapping.width != 8)
3589 +- return;
3590 +- *p_size *= 2;
3591 ++ return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
3592 + }
3593 +
3594 + enum mlxsw_sp_flood_type {
3595 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
3596 +index 19bf0768ed78..2fb2cbd4f229 100644
3597 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
3598 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
3599 +@@ -312,7 +312,7 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
3600 +
3601 + if (i == MLXSW_SP_PB_UNUSED)
3602 + continue;
3603 +- mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &size);
3604 ++ size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size);
3605 + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
3606 + }
3607 + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
3608 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
3609 +index 7c5032f9c8ff..76242c70d41a 100644
3610 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
3611 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
3612 +@@ -776,7 +776,7 @@ mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
3613 + speed = 0;
3614 +
3615 + buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
3616 +- mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, (u16 *) &buffsize);
3617 ++ buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
3618 + mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
3619 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
3620 + }
3621 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
3622 +index 7aa037c3fe02..790d4854b8ef 100644
3623 +--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
3624 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
3625 +@@ -1653,6 +1653,14 @@ int ionic_open(struct net_device *netdev)
3626 + if (err)
3627 + goto err_out;
3628 +
3629 ++ err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
3630 ++ if (err)
3631 ++ goto err_txrx_deinit;
3632 ++
3633 ++ err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
3634 ++ if (err)
3635 ++ goto err_txrx_deinit;
3636 ++
3637 + /* don't start the queues until we have link */
3638 + if (netif_carrier_ok(netdev)) {
3639 + err = ionic_start_queues(lif);
3640 +@@ -1674,8 +1682,8 @@ static void ionic_stop_queues(struct ionic_lif *lif)
3641 + if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
3642 + return;
3643 +
3644 +- ionic_txrx_disable(lif);
3645 + netif_tx_disable(lif->netdev);
3646 ++ ionic_txrx_disable(lif);
3647 + }
3648 +
3649 + int ionic_stop(struct net_device *netdev)
3650 +@@ -1941,18 +1949,19 @@ int ionic_reset_queues(struct ionic_lif *lif)
3651 + bool running;
3652 + int err = 0;
3653 +
3654 +- /* Put off the next watchdog timeout */
3655 +- netif_trans_update(lif->netdev);
3656 +-
3657 + err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
3658 + if (err)
3659 + return err;
3660 +
3661 + running = netif_running(lif->netdev);
3662 +- if (running)
3663 ++ if (running) {
3664 ++ netif_device_detach(lif->netdev);
3665 + err = ionic_stop(lif->netdev);
3666 +- if (!err && running)
3667 ++ }
3668 ++ if (!err && running) {
3669 + ionic_open(lif->netdev);
3670 ++ netif_device_attach(lif->netdev);
3671 ++ }
3672 +
3673 + clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
3674 +
3675 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
3676 +index 1a636bad717d..aeed8939f410 100644
3677 +--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
3678 ++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
3679 +@@ -270,7 +270,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
3680 + vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
3681 + }
3682 +
3683 +- iids->vf_cids += vf_cids * p_mngr->vf_count;
3684 ++ iids->vf_cids = vf_cids;
3685 + iids->tids += vf_tids * p_mngr->vf_count;
3686 +
3687 + DP_VERBOSE(p_hwfn, QED_MSG_ILT,
3688 +@@ -442,6 +442,20 @@ static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
3689 + return p_blk;
3690 + }
3691 +
3692 ++static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
3693 ++{
3694 ++ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
3695 ++ u32 cli_idx, blk_idx;
3696 ++
3697 ++ for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
3698 ++ for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
3699 ++ clients[cli_idx].pf_blks[blk_idx].total_size = 0;
3700 ++
3701 ++ for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
3702 ++ clients[cli_idx].vf_blks[blk_idx].total_size = 0;
3703 ++ }
3704 ++}
3705 ++
3706 + int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
3707 + {
3708 + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
3709 +@@ -461,6 +475,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
3710 +
3711 + p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
3712 +
3713 ++ /* Reset all ILT blocks at the beginning of ILT computing in order
3714 ++ * to prevent memory allocation for irrelevant blocks afterwards.
3715 ++ */
3716 ++ qed_cxt_ilt_blk_reset(p_hwfn);
3717 ++
3718 + DP_VERBOSE(p_hwfn, QED_MSG_ILT,
3719 + "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
3720 + p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
3721 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
3722 +index f4eebaabb6d0..3e56b6056b47 100644
3723 +--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
3724 ++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
3725 +@@ -5568,7 +5568,8 @@ static const char * const s_status_str[] = {
3726 +
3727 + /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
3728 + "The filter/trigger constraint dword offsets are not enabled for recording",
3729 +-
3730 ++ /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
3731 ++ "No matching framing mode",
3732 +
3733 + /* DBG_STATUS_VFC_READ_ERROR */
3734 + "Error reading from VFC",
3735 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
3736 +index 38a65b984e47..9b00988fb77e 100644
3737 +--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
3738 ++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
3739 +@@ -1368,6 +1368,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
3740 +
3741 + void qed_resc_free(struct qed_dev *cdev)
3742 + {
3743 ++ struct qed_rdma_info *rdma_info;
3744 ++ struct qed_hwfn *p_hwfn;
3745 + int i;
3746 +
3747 + if (IS_VF(cdev)) {
3748 +@@ -1385,7 +1387,8 @@ void qed_resc_free(struct qed_dev *cdev)
3749 + qed_llh_free(cdev);
3750 +
3751 + for_each_hwfn(cdev, i) {
3752 +- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3753 ++ p_hwfn = cdev->hwfns + i;
3754 ++ rdma_info = p_hwfn->p_rdma_info;
3755 +
3756 + qed_cxt_mngr_free(p_hwfn);
3757 + qed_qm_info_free(p_hwfn);
3758 +@@ -1404,8 +1407,10 @@ void qed_resc_free(struct qed_dev *cdev)
3759 + qed_ooo_free(p_hwfn);
3760 + }
3761 +
3762 +- if (QED_IS_RDMA_PERSONALITY(p_hwfn))
3763 ++ if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
3764 ++ qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
3765 + qed_rdma_info_free(p_hwfn);
3766 ++ }
3767 +
3768 + qed_iov_free(p_hwfn);
3769 + qed_l2_free(p_hwfn);
3770 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
3771 +index d2fe61a5cf56..5409a2da6106 100644
3772 +--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
3773 ++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
3774 +@@ -2836,8 +2836,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
3775 + if (rc)
3776 + return rc;
3777 +
3778 +- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
3779 +-
3780 + return qed_iwarp_ll2_stop(p_hwfn);
3781 + }
3782 +
3783 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
3784 +index 37e70562a964..f15c26ef8870 100644
3785 +--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
3786 ++++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
3787 +@@ -113,7 +113,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
3788 + break;
3789 + }
3790 + }
3791 +- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
3792 + }
3793 +
3794 + static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
3795 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
3796 +index 856051f50eb7..adc2c8f3d48e 100644
3797 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
3798 ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
3799 +@@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
3800 + mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
3801 + }
3802 +
3803 ++#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90
3804 ++#define QED_VF_CHANNEL_USLEEP_DELAY 100
3805 ++#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10
3806 ++#define QED_VF_CHANNEL_MSLEEP_DELAY 25
3807 ++
3808 + static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
3809 + {
3810 + union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
3811 + struct ustorm_trigger_vf_zone trigger;
3812 + struct ustorm_vf_zone *zone_data;
3813 +- int rc = 0, time = 100;
3814 ++ int iter, rc = 0;
3815 +
3816 + zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
3817 +
3818 +@@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
3819 + REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
3820 +
3821 + /* When PF would be done with the response, it would write back to the
3822 +- * `done' address. Poll until then.
3823 ++ * `done' address from a coherent DMA zone. Poll until then.
3824 + */
3825 +- while ((!*done) && time) {
3826 +- msleep(25);
3827 +- time--;
3828 ++
3829 ++ iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
3830 ++ while (!*done && iter--) {
3831 ++ udelay(QED_VF_CHANNEL_USLEEP_DELAY);
3832 ++ dma_rmb();
3833 ++ }
3834 ++
3835 ++ iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
3836 ++ while (!*done && iter--) {
3837 ++ msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
3838 ++ dma_rmb();
3839 + }
3840 +
3841 + if (!*done) {
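With the constants defined above, the worst case before declaring failure is 90 * 100 us = 9 ms of busy polling followed by 10 * 25 ms = 250 ms of sleeping, roughly 260 ms in total, and every re-read of the DMA-written *done flag follows a dma_rmb() so the CPU cannot keep testing a stale value.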
3842 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
3843 +index 1a83d1fd8ccd..26eb58e7e076 100644
3844 +--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
3845 ++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
3846 +@@ -1158,7 +1158,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
3847 +
3848 + /* PTP not supported on VFs */
3849 + if (!is_vf)
3850 +- qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
3851 ++ qede_ptp_enable(edev);
3852 +
3853 + edev->ops->register_ops(cdev, &qede_ll_ops, edev);
3854 +
3855 +@@ -1247,6 +1247,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
3856 + if (system_state == SYSTEM_POWER_OFF)
3857 + return;
3858 + qed_ops->common->remove(cdev);
3859 ++ edev->cdev = NULL;
3860 +
3861 + /* Since this can happen out-of-sync with other flows,
3862 + * don't release the netdevice until after slowpath stop
3863 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
3864 +index 4c7f7a7fc151..cd5841a9415e 100644
3865 +--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
3866 ++++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
3867 +@@ -412,6 +412,7 @@ void qede_ptp_disable(struct qede_dev *edev)
3868 + if (ptp->tx_skb) {
3869 + dev_kfree_skb_any(ptp->tx_skb);
3870 + ptp->tx_skb = NULL;
3871 ++ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
3872 + }
3873 +
3874 + /* Disable PTP in HW */
3875 +@@ -423,7 +424,7 @@ void qede_ptp_disable(struct qede_dev *edev)
3876 + edev->ptp = NULL;
3877 + }
3878 +
3879 +-static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
3880 ++static int qede_ptp_init(struct qede_dev *edev)
3881 + {
3882 + struct qede_ptp *ptp;
3883 + int rc;
3884 +@@ -444,25 +445,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
3885 + /* Init work queue for Tx timestamping */
3886 + INIT_WORK(&ptp->work, qede_ptp_task);
3887 +
3888 +- /* Init cyclecounter and timecounter. This is done only in the first
3889 +- * load. If done in every load, PTP application will fail when doing
3890 +- * unload / load (e.g. MTU change) while it is running.
3891 +- */
3892 +- if (init_tc) {
3893 +- memset(&ptp->cc, 0, sizeof(ptp->cc));
3894 +- ptp->cc.read = qede_ptp_read_cc;
3895 +- ptp->cc.mask = CYCLECOUNTER_MASK(64);
3896 +- ptp->cc.shift = 0;
3897 +- ptp->cc.mult = 1;
3898 +-
3899 +- timecounter_init(&ptp->tc, &ptp->cc,
3900 +- ktime_to_ns(ktime_get_real()));
3901 +- }
3902 ++ /* Init cyclecounter and timecounter */
3903 ++ memset(&ptp->cc, 0, sizeof(ptp->cc));
3904 ++ ptp->cc.read = qede_ptp_read_cc;
3905 ++ ptp->cc.mask = CYCLECOUNTER_MASK(64);
3906 ++ ptp->cc.shift = 0;
3907 ++ ptp->cc.mult = 1;
3908 +
3909 +- return rc;
3910 ++ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
3911 ++
3912 ++ return 0;
3913 + }
3914 +
3915 +-int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
3916 ++int qede_ptp_enable(struct qede_dev *edev)
3917 + {
3918 + struct qede_ptp *ptp;
3919 + int rc;
3920 +@@ -483,7 +478,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
3921 +
3922 + edev->ptp = ptp;
3923 +
3924 +- rc = qede_ptp_init(edev, init_tc);
3925 ++ rc = qede_ptp_init(edev);
3926 + if (rc)
3927 + goto err1;
3928 +
3929 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
3930 +index 691a14c4b2c5..89c7f3cf3ee2 100644
3931 +--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
3932 ++++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
3933 +@@ -41,7 +41,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
3934 + void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
3935 + int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
3936 + void qede_ptp_disable(struct qede_dev *edev);
3937 +-int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
3938 ++int qede_ptp_enable(struct qede_dev *edev);
3939 + int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
3940 +
3941 + static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
3942 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
3943 +index 2d873ae8a234..668ccc9d49f8 100644
3944 +--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
3945 ++++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
3946 +@@ -105,6 +105,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev)
3947 +
3948 + qede_rdma_cleanup_event(edev);
3949 + destroy_workqueue(edev->rdma_info.rdma_wq);
3950 ++ edev->rdma_info.rdma_wq = NULL;
3951 + }
3952 +
3953 + int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
3954 +@@ -325,7 +326,7 @@ static void qede_rdma_add_event(struct qede_dev *edev,
3955 + if (edev->rdma_info.exp_recovery)
3956 + return;
3957 +
3958 +- if (!edev->rdma_info.qedr_dev)
3959 ++ if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
3960 + return;
3961 +
3962 + /* We don't want the cleanup flow to start while we're allocating and
3963 +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
3964 +index c51b48dc3639..7bda2671bd5b 100644
3965 +--- a/drivers/net/ethernet/realtek/r8169_main.c
3966 ++++ b/drivers/net/ethernet/realtek/r8169_main.c
3967 +@@ -2192,8 +2192,11 @@ static void rtl_release_firmware(struct rtl8169_private *tp)
3968 + void r8169_apply_firmware(struct rtl8169_private *tp)
3969 + {
3970 + /* TODO: release firmware if rtl_fw_write_firmware signals failure. */
3971 +- if (tp->rtl_fw)
3972 ++ if (tp->rtl_fw) {
3973 + rtl_fw_write_firmware(tp, tp->rtl_fw);
3974 ++ /* At least one firmware doesn't reset tp->ocp_base. */
3975 ++ tp->ocp_base = OCP_STD_PHY_BASE;
3976 ++ }
3977 + }
3978 +
3979 + static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
3980 +diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
3981 +index 7585cd2270ba..fc99e7118e49 100644
3982 +--- a/drivers/net/ethernet/rocker/rocker_main.c
3983 ++++ b/drivers/net/ethernet/rocker/rocker_main.c
3984 +@@ -647,10 +647,10 @@ static int rocker_dma_rings_init(struct rocker *rocker)
3985 + err_dma_event_ring_bufs_alloc:
3986 + rocker_dma_ring_destroy(rocker, &rocker->event_ring);
3987 + err_dma_event_ring_create:
3988 ++ rocker_dma_cmd_ring_waits_free(rocker);
3989 ++err_dma_cmd_ring_waits_alloc:
3990 + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
3991 + PCI_DMA_BIDIRECTIONAL);
3992 +-err_dma_cmd_ring_waits_alloc:
3993 +- rocker_dma_cmd_ring_waits_free(rocker);
3994 + err_dma_cmd_ring_bufs_alloc:
3995 + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
3996 + return err;
3997 +diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
3998 +index a5a0fb60193a..5a70c49bf454 100644
3999 +--- a/drivers/net/ethernet/socionext/netsec.c
4000 ++++ b/drivers/net/ethernet/socionext/netsec.c
4001 +@@ -1038,8 +1038,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
4002 + skb->ip_summed = CHECKSUM_UNNECESSARY;
4003 +
4004 + next:
4005 +- if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
4006 +- xdp_result) {
4007 ++ if (skb)
4008 ++ napi_gro_receive(&priv->napi, skb);
4009 ++ if (skb || xdp_result) {
4010 + ndev->stats.rx_packets++;
4011 + ndev->stats.rx_bytes += xdp.data_end - xdp.data;
4012 + }
4013 +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
4014 +index 75266580b586..4661ef865807 100644
4015 +--- a/drivers/net/geneve.c
4016 ++++ b/drivers/net/geneve.c
4017 +@@ -1649,6 +1649,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
4018 + geneve->collect_md = metadata;
4019 + geneve->use_udp6_rx_checksums = use_udp6_rx_checksums;
4020 + geneve->ttl_inherit = ttl_inherit;
4021 ++ geneve->df = df;
4022 + geneve_unquiesce(geneve, gs4, gs6);
4023 +
4024 + return 0;
4025 +diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
4026 +index 3fa33d27eeba..d140e3c93fe3 100644
4027 +--- a/drivers/net/phy/Kconfig
4028 ++++ b/drivers/net/phy/Kconfig
4029 +@@ -461,8 +461,7 @@ config MICROCHIP_T1_PHY
4030 + config MICROSEMI_PHY
4031 + tristate "Microsemi PHYs"
4032 + depends on MACSEC || MACSEC=n
4033 +- select CRYPTO_AES
4034 +- select CRYPTO_ECB
4035 ++ select CRYPTO_LIB_AES if MACSEC
4036 + ---help---
4037 + Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
4038 +
4039 +diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
4040 +index b4d3dc4068e2..d53ca884b5c9 100644
4041 +--- a/drivers/net/phy/mscc/mscc_macsec.c
4042 ++++ b/drivers/net/phy/mscc/mscc_macsec.c
4043 +@@ -10,7 +10,7 @@
4044 + #include <linux/phy.h>
4045 + #include <dt-bindings/net/mscc-phy-vsc8531.h>
4046 +
4047 +-#include <crypto/skcipher.h>
4048 ++#include <crypto/aes.h>
4049 +
4050 + #include <net/macsec.h>
4051 +
4052 +@@ -500,39 +500,17 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
4053 + static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
4054 + u16 key_len, u8 hkey[16])
4055 + {
4056 +- struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
4057 +- struct skcipher_request *req = NULL;
4058 +- struct scatterlist src, dst;
4059 +- DECLARE_CRYPTO_WAIT(wait);
4060 +- u32 input[4] = {0};
4061 ++ const u8 input[AES_BLOCK_SIZE] = {0};
4062 ++ struct crypto_aes_ctx ctx;
4063 + int ret;
4064 +
4065 +- if (IS_ERR(tfm))
4066 +- return PTR_ERR(tfm);
4067 +-
4068 +- req = skcipher_request_alloc(tfm, GFP_KERNEL);
4069 +- if (!req) {
4070 +- ret = -ENOMEM;
4071 +- goto out;
4072 +- }
4073 +-
4074 +- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
4075 +- CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
4076 +- &wait);
4077 +- ret = crypto_skcipher_setkey(tfm, key, key_len);
4078 +- if (ret < 0)
4079 +- goto out;
4080 +-
4081 +- sg_init_one(&src, input, 16);
4082 +- sg_init_one(&dst, hkey, 16);
4083 +- skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
4084 +-
4085 +- ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
4086 ++ ret = aes_expandkey(&ctx, key, key_len);
4087 ++ if (ret)
4088 ++ return ret;
4089 +
4090 +-out:
4091 +- skcipher_request_free(req);
4092 +- crypto_free_skcipher(tfm);
4093 +- return ret;
4094 ++ aes_encrypt(&ctx, hkey, input);
4095 ++ memzero_explicit(&ctx, sizeof(ctx));
4096 ++ return 0;
4097 + }
4098 +
4099 + static int vsc8584_macsec_transformation(struct phy_device *phydev,
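The rewrite above swaps a one-shot skcipher request for the synchronous AES library: the derived hkey is just the AES encryption of an all-zero 16-byte block under the MACsec key, hkey = E_K(0^128), the same construction AES-GCM uses to derive its GHASH hash subkey. aes_expandkey()/aes_encrypt() need no tfm allocation or scatterlists for a single block, and memzero_explicit() scrubs the expanded key schedule from the stack afterwards.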
4100 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
4101 +index 697c74deb222..0881b4b92363 100644
4102 +--- a/drivers/net/phy/phy_device.c
4103 ++++ b/drivers/net/phy/phy_device.c
4104 +@@ -798,8 +798,10 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
4105 +
4106 + /* Grab the bits from PHYIR2, and put them in the lower half */
4107 + phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
4108 +- if (phy_reg < 0)
4109 +- return -EIO;
4110 ++ if (phy_reg < 0) {
4111 ++ /* returning -ENODEV doesn't stop bus scanning */
4112 ++ return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO;
4113 ++ }
4114 +
4115 + *phy_id |= phy_reg;
4116 +
4117 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
4118 +index 34ca12aec61b..ac38bead1cd2 100644
4119 +--- a/drivers/net/phy/phylink.c
4120 ++++ b/drivers/net/phy/phylink.c
4121 +@@ -1480,6 +1480,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
4122 + struct ethtool_pauseparam *pause)
4123 + {
4124 + struct phylink_link_state *config = &pl->link_config;
4125 ++ bool manual_changed;
4126 ++ int pause_state;
4127 +
4128 + ASSERT_RTNL();
4129 +
4130 +@@ -1494,15 +1496,15 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
4131 + !pause->autoneg && pause->rx_pause != pause->tx_pause)
4132 + return -EINVAL;
4133 +
4134 +- mutex_lock(&pl->state_mutex);
4135 +- config->pause = 0;
4136 ++ pause_state = 0;
4137 + if (pause->autoneg)
4138 +- config->pause |= MLO_PAUSE_AN;
4139 ++ pause_state |= MLO_PAUSE_AN;
4140 + if (pause->rx_pause)
4141 +- config->pause |= MLO_PAUSE_RX;
4142 ++ pause_state |= MLO_PAUSE_RX;
4143 + if (pause->tx_pause)
4144 +- config->pause |= MLO_PAUSE_TX;
4145 ++ pause_state |= MLO_PAUSE_TX;
4146 +
4147 ++ mutex_lock(&pl->state_mutex);
4148 + /*
4149 + * See the comments for linkmode_set_pause(), wrt the deficiencies
4150 + * with the current implementation. A solution to this issue would
4151 +@@ -1519,18 +1521,35 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
4152 + linkmode_set_pause(config->advertising, pause->tx_pause,
4153 + pause->rx_pause);
4154 +
4155 +- /* If we have a PHY, phylib will call our link state function if the
4156 +- * mode has changed, which will trigger a resolve and update the MAC
4157 +- * configuration.
4158 ++ manual_changed = (config->pause ^ pause_state) & MLO_PAUSE_AN ||
4159 ++ (!(pause_state & MLO_PAUSE_AN) &&
4160 ++ (config->pause ^ pause_state) & MLO_PAUSE_TXRX_MASK);
4161 ++
4162 ++ config->pause = pause_state;
4163 ++
4164 ++ if (!pl->phydev && !test_bit(PHYLINK_DISABLE_STOPPED,
4165 ++ &pl->phylink_disable_state))
4166 ++ phylink_pcs_config(pl, true, &pl->link_config);
4167 ++
4168 ++ mutex_unlock(&pl->state_mutex);
4169 ++
4170 ++ /* If we have a PHY, a change of the pause frame advertisement will
4171 ++ * cause phylib to renegotiate (if AN is enabled) which will in turn
4172 ++ * call our phylink_phy_change() and trigger a resolve. Note that
4173 ++ * we can't hold our state mutex while calling phy_set_asym_pause().
4174 + */
4175 +- if (pl->phydev) {
4176 ++ if (pl->phydev)
4177 + phy_set_asym_pause(pl->phydev, pause->rx_pause,
4178 + pause->tx_pause);
4179 +- } else if (!test_bit(PHYLINK_DISABLE_STOPPED,
4180 +- &pl->phylink_disable_state)) {
4181 +- phylink_pcs_config(pl, true, &pl->link_config);
4182 ++
4183 ++ /* If the manual pause settings changed, make sure we trigger a
4184 ++ * resolve to update their state; we can not guarantee that the
4185 ++ * link will cycle.
4186 ++ */
4187 ++ if (manual_changed) {
4188 ++ pl->mac_link_dropped = true;
4189 ++ phylink_run_resolve(pl);
4190 + }
4191 +- mutex_unlock(&pl->state_mutex);
4192 +
4193 + return 0;
4194 + }
4195 +diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
4196 +index 93da7d3d0954..74568ae16125 100644
4197 +--- a/drivers/net/phy/smsc.c
4198 ++++ b/drivers/net/phy/smsc.c
4199 +@@ -122,10 +122,13 @@ static int lan87xx_read_status(struct phy_device *phydev)
4200 + if (rc < 0)
4201 + return rc;
4202 +
4203 +- /* Wait max 640 ms to detect energy */
4204 +- phy_read_poll_timeout(phydev, MII_LAN83C185_CTRL_STATUS, rc,
4205 +- rc & MII_LAN83C185_ENERGYON, 10000,
4206 +- 640000, true);
4207 ++ /* Wait max 640 ms to detect energy and the timeout is not
4208 ++ * an actual error.
4209 ++ */
4210 ++ read_poll_timeout(phy_read, rc,
4211 ++ rc & MII_LAN83C185_ENERGYON || rc < 0,
4212 ++ 10000, 640000, true, phydev,
4213 ++ MII_LAN83C185_CTRL_STATUS);
4214 + if (rc < 0)
4215 + return rc;
4216 +
4217 +diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
4218 +index 93044cf1417a..1fe4cc28d154 100644
4219 +--- a/drivers/net/usb/ax88179_178a.c
4220 ++++ b/drivers/net/usb/ax88179_178a.c
4221 +@@ -1414,10 +1414,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
4222 + }
4223 +
4224 + if (pkt_cnt == 0) {
4225 +- /* Skip IP alignment psudo header */
4226 +- skb_pull(skb, 2);
4227 + skb->len = pkt_len;
4228 +- skb_set_tail_pointer(skb, pkt_len);
4229 ++ /* Skip IP alignment pseudo header */
4230 ++ skb_pull(skb, 2);
4231 ++ skb_set_tail_pointer(skb, skb->len);
4232 + skb->truesize = pkt_len + sizeof(struct sk_buff);
4233 + ax88179_rx_checksum(skb, pkt_hdr);
4234 + return 1;
4235 +@@ -1426,8 +1426,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
4236 + ax_skb = skb_clone(skb, GFP_ATOMIC);
4237 + if (ax_skb) {
4238 + ax_skb->len = pkt_len;
4239 +- ax_skb->data = skb->data + 2;
4240 +- skb_set_tail_pointer(ax_skb, pkt_len);
4241 ++ /* Skip IP alignment pseudo header */
4242 ++ skb_pull(ax_skb, 2);
4243 ++ skb_set_tail_pointer(ax_skb, ax_skb->len);
4244 + ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
4245 + ax88179_rx_checksum(ax_skb, pkt_hdr);
4246 + usbnet_skb_return(dev, ax_skb);
4247 +diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
4248 +index 3ac3f8570ca1..a8f151b1b5fa 100644
4249 +--- a/drivers/net/wireguard/device.c
4250 ++++ b/drivers/net/wireguard/device.c
4251 +@@ -45,17 +45,18 @@ static int wg_open(struct net_device *dev)
4252 + if (dev_v6)
4253 + dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;
4254 +
4255 ++ mutex_lock(&wg->device_update_lock);
4256 + ret = wg_socket_init(wg, wg->incoming_port);
4257 + if (ret < 0)
4258 +- return ret;
4259 +- mutex_lock(&wg->device_update_lock);
4260 ++ goto out;
4261 + list_for_each_entry(peer, &wg->peer_list, peer_list) {
4262 + wg_packet_send_staged_packets(peer);
4263 + if (peer->persistent_keepalive_interval)
4264 + wg_packet_send_keepalive(peer);
4265 + }
4266 ++out:
4267 + mutex_unlock(&wg->device_update_lock);
4268 +- return 0;
4269 ++ return ret;
4270 + }
4271 +
4272 + #ifdef CONFIG_PM_SLEEP
4273 +@@ -225,6 +226,7 @@ static void wg_destruct(struct net_device *dev)
4274 + list_del(&wg->device_list);
4275 + rtnl_unlock();
4276 + mutex_lock(&wg->device_update_lock);
4277 ++ rcu_assign_pointer(wg->creating_net, NULL);
4278 + wg->incoming_port = 0;
4279 + wg_socket_reinit(wg, NULL, NULL);
4280 + /* The final references are cleared in the below calls to destroy_workqueue. */
4281 +@@ -240,13 +242,11 @@ static void wg_destruct(struct net_device *dev)
4282 + skb_queue_purge(&wg->incoming_handshakes);
4283 + free_percpu(dev->tstats);
4284 + free_percpu(wg->incoming_handshakes_worker);
4285 +- if (wg->have_creating_net_ref)
4286 +- put_net(wg->creating_net);
4287 + kvfree(wg->index_hashtable);
4288 + kvfree(wg->peer_hashtable);
4289 + mutex_unlock(&wg->device_update_lock);
4290 +
4291 +- pr_debug("%s: Interface deleted\n", dev->name);
4292 ++ pr_debug("%s: Interface destroyed\n", dev->name);
4293 + free_netdev(dev);
4294 + }
4295 +
4296 +@@ -292,7 +292,7 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
4297 + struct wg_device *wg = netdev_priv(dev);
4298 + int ret = -ENOMEM;
4299 +
4300 +- wg->creating_net = src_net;
4301 ++ rcu_assign_pointer(wg->creating_net, src_net);
4302 + init_rwsem(&wg->static_identity.lock);
4303 + mutex_init(&wg->socket_update_lock);
4304 + mutex_init(&wg->device_update_lock);
4305 +@@ -393,30 +393,26 @@ static struct rtnl_link_ops link_ops __read_mostly = {
4306 + .newlink = wg_newlink,
4307 + };
4308 +
4309 +-static int wg_netdevice_notification(struct notifier_block *nb,
4310 +- unsigned long action, void *data)
4311 ++static void wg_netns_pre_exit(struct net *net)
4312 + {
4313 +- struct net_device *dev = ((struct netdev_notifier_info *)data)->dev;
4314 +- struct wg_device *wg = netdev_priv(dev);
4315 +-
4316 +- ASSERT_RTNL();
4317 +-
4318 +- if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops)
4319 +- return 0;
4320 ++ struct wg_device *wg;
4321 +
4322 +- if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) {
4323 +- put_net(wg->creating_net);
4324 +- wg->have_creating_net_ref = false;
4325 +- } else if (dev_net(dev) != wg->creating_net &&
4326 +- !wg->have_creating_net_ref) {
4327 +- wg->have_creating_net_ref = true;
4328 +- get_net(wg->creating_net);
4329 ++ rtnl_lock();
4330 ++ list_for_each_entry(wg, &device_list, device_list) {
4331 ++ if (rcu_access_pointer(wg->creating_net) == net) {
4332 ++ pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
4333 ++ netif_carrier_off(wg->dev);
4334 ++ mutex_lock(&wg->device_update_lock);
4335 ++ rcu_assign_pointer(wg->creating_net, NULL);
4336 ++ wg_socket_reinit(wg, NULL, NULL);
4337 ++ mutex_unlock(&wg->device_update_lock);
4338 ++ }
4339 + }
4340 +- return 0;
4341 ++ rtnl_unlock();
4342 + }
4343 +
4344 +-static struct notifier_block netdevice_notifier = {
4345 +- .notifier_call = wg_netdevice_notification
4346 ++static struct pernet_operations pernet_ops = {
4347 ++ .pre_exit = wg_netns_pre_exit
4348 + };
4349 +
4350 + int __init wg_device_init(void)
4351 +@@ -429,18 +425,18 @@ int __init wg_device_init(void)
4352 + return ret;
4353 + #endif
4354 +
4355 +- ret = register_netdevice_notifier(&netdevice_notifier);
4356 ++ ret = register_pernet_device(&pernet_ops);
4357 + if (ret)
4358 + goto error_pm;
4359 +
4360 + ret = rtnl_link_register(&link_ops);
4361 + if (ret)
4362 +- goto error_netdevice;
4363 ++ goto error_pernet;
4364 +
4365 + return 0;
4366 +
4367 +-error_netdevice:
4368 +- unregister_netdevice_notifier(&netdevice_notifier);
4369 ++error_pernet:
4370 ++ unregister_pernet_device(&pernet_ops);
4371 + error_pm:
4372 + #ifdef CONFIG_PM_SLEEP
4373 + unregister_pm_notifier(&pm_notifier);
4374 +@@ -451,7 +447,7 @@ error_pm:
4375 + void wg_device_uninit(void)
4376 + {
4377 + rtnl_link_unregister(&link_ops);
4378 +- unregister_netdevice_notifier(&netdevice_notifier);
4379 ++ unregister_pernet_device(&pernet_ops);
4380 + #ifdef CONFIG_PM_SLEEP
4381 + unregister_pm_notifier(&pm_notifier);
4382 + #endif
4383 +diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h
4384 +index b15a8be9d816..4d0144e16947 100644
4385 +--- a/drivers/net/wireguard/device.h
4386 ++++ b/drivers/net/wireguard/device.h
4387 +@@ -40,7 +40,7 @@ struct wg_device {
4388 + struct net_device *dev;
4389 + struct crypt_queue encrypt_queue, decrypt_queue;
4390 + struct sock __rcu *sock4, *sock6;
4391 +- struct net *creating_net;
4392 ++ struct net __rcu *creating_net;
4393 + struct noise_static_identity static_identity;
4394 + struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
4395 + struct workqueue_struct *packet_crypt_wq;
4396 +@@ -56,7 +56,6 @@ struct wg_device {
4397 + unsigned int num_peers, device_update_gen;
4398 + u32 fwmark;
4399 + u16 incoming_port;
4400 +- bool have_creating_net_ref;
4401 + };
4402 +
4403 + int wg_device_init(void);
4404 +diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
4405 +index 802099c8828a..20a4f3c0a0a1 100644
4406 +--- a/drivers/net/wireguard/netlink.c
4407 ++++ b/drivers/net/wireguard/netlink.c
4408 +@@ -511,11 +511,15 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
4409 + if (flags & ~__WGDEVICE_F_ALL)
4410 + goto out;
4411 +
4412 +- ret = -EPERM;
4413 +- if ((info->attrs[WGDEVICE_A_LISTEN_PORT] ||
4414 +- info->attrs[WGDEVICE_A_FWMARK]) &&
4415 +- !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN))
4416 +- goto out;
4417 ++ if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) {
4418 ++ struct net *net;
4419 ++ rcu_read_lock();
4420 ++ net = rcu_dereference(wg->creating_net);
4421 ++ ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0;
4422 ++ rcu_read_unlock();
4423 ++ if (ret)
4424 ++ goto out;
4425 ++ }
4426 +
4427 + ++wg->device_update_gen;
4428 +
4429 +diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
4430 +index 91438144e4f7..9b2ab6fc91cd 100644
4431 +--- a/drivers/net/wireguard/receive.c
4432 ++++ b/drivers/net/wireguard/receive.c
4433 +@@ -414,14 +414,8 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
4434 + if (unlikely(routed_peer != peer))
4435 + goto dishonest_packet_peer;
4436 +
4437 +- if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) {
4438 +- ++dev->stats.rx_dropped;
4439 +- net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n",
4440 +- dev->name, peer->internal_id,
4441 +- &peer->endpoint.addr);
4442 +- } else {
4443 +- update_rx_stats(peer, message_data_len(len_before_trim));
4444 +- }
4445 ++ napi_gro_receive(&peer->napi, skb);
4446 ++ update_rx_stats(peer, message_data_len(len_before_trim));
4447 + return;
4448 +
4449 + dishonest_packet_peer:
4450 +diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
4451 +index f9018027fc13..c33e2c81635f 100644
4452 +--- a/drivers/net/wireguard/socket.c
4453 ++++ b/drivers/net/wireguard/socket.c
4454 +@@ -347,6 +347,7 @@ static void set_sock_opts(struct socket *sock)
4455 +
4456 + int wg_socket_init(struct wg_device *wg, u16 port)
4457 + {
4458 ++ struct net *net;
4459 + int ret;
4460 + struct udp_tunnel_sock_cfg cfg = {
4461 + .sk_user_data = wg,
4462 +@@ -371,37 +372,47 @@ int wg_socket_init(struct wg_device *wg, u16 port)
4463 + };
4464 + #endif
4465 +
4466 ++ rcu_read_lock();
4467 ++ net = rcu_dereference(wg->creating_net);
4468 ++ net = net ? maybe_get_net(net) : NULL;
4469 ++ rcu_read_unlock();
4470 ++ if (unlikely(!net))
4471 ++ return -ENONET;
4472 ++
4473 + #if IS_ENABLED(CONFIG_IPV6)
4474 + retry:
4475 + #endif
4476 +
4477 +- ret = udp_sock_create(wg->creating_net, &port4, &new4);
4478 ++ ret = udp_sock_create(net, &port4, &new4);
4479 + if (ret < 0) {
4480 + pr_err("%s: Could not create IPv4 socket\n", wg->dev->name);
4481 +- return ret;
4482 ++ goto out;
4483 + }
4484 + set_sock_opts(new4);
4485 +- setup_udp_tunnel_sock(wg->creating_net, new4, &cfg);
4486 ++ setup_udp_tunnel_sock(net, new4, &cfg);
4487 +
4488 + #if IS_ENABLED(CONFIG_IPV6)
4489 + if (ipv6_mod_enabled()) {
4490 + port6.local_udp_port = inet_sk(new4->sk)->inet_sport;
4491 +- ret = udp_sock_create(wg->creating_net, &port6, &new6);
4492 ++ ret = udp_sock_create(net, &port6, &new6);
4493 + if (ret < 0) {
4494 + udp_tunnel_sock_release(new4);
4495 + if (ret == -EADDRINUSE && !port && retries++ < 100)
4496 + goto retry;
4497 + pr_err("%s: Could not create IPv6 socket\n",
4498 + wg->dev->name);
4499 +- return ret;
4500 ++ goto out;
4501 + }
4502 + set_sock_opts(new6);
4503 +- setup_udp_tunnel_sock(wg->creating_net, new6, &cfg);
4504 ++ setup_udp_tunnel_sock(net, new6, &cfg);
4505 + }
4506 + #endif
4507 +
4508 + wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL);
4509 +- return 0;
4510 ++ ret = 0;
4511 ++out:
4512 ++ put_net(net);
4513 ++ return ret;
4514 + }
4515 +
4516 + void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
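The netlink.c and socket.c hunks above share one fix: wg->creating_net becomes an RCU-managed pointer that the pernet exit path can clear, so every user must dereference it under rcu_read_lock() and, when it needs the netns beyond the critical section, pin it with maybe_get_net(). A minimal kernel-style sketch of that pattern; the obj/obj_get_net names are illustrative, not from the patch:

	static struct net *obj_get_net(struct obj *o)
	{
		struct net *net;

		rcu_read_lock();
		net = rcu_dereference(o->net);	/* struct net __rcu *net member */
		/* maybe_get_net() refuses a netns whose refcount already hit zero */
		net = net ? maybe_get_net(net) : NULL;
		rcu_read_unlock();

		return net;	/* a non-NULL result must be released with put_net() */
	}

wg_socket_init() above follows this shape, returning -ENONET when the pin fails and dropping the reference on every exit path.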
4517 +diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
4518 +index bc8c15fb609d..080e5aa60bea 100644
4519 +--- a/drivers/net/wireless/ath/wil6210/txrx.c
4520 ++++ b/drivers/net/wireless/ath/wil6210/txrx.c
4521 +@@ -897,7 +897,6 @@ static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
4522 + void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
4523 + struct wil_net_stats *stats, bool gro)
4524 + {
4525 +- gro_result_t rc = GRO_NORMAL;
4526 + struct wil6210_vif *vif = ndev_to_vif(ndev);
4527 + struct wil6210_priv *wil = ndev_to_wil(ndev);
4528 + struct wireless_dev *wdev = vif_to_wdev(vif);
4529 +@@ -908,22 +907,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
4530 + */
4531 + int mcast = is_multicast_ether_addr(da);
4532 + struct sk_buff *xmit_skb = NULL;
4533 +- static const char * const gro_res_str[] = {
4534 +- [GRO_MERGED] = "GRO_MERGED",
4535 +- [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
4536 +- [GRO_HELD] = "GRO_HELD",
4537 +- [GRO_NORMAL] = "GRO_NORMAL",
4538 +- [GRO_DROP] = "GRO_DROP",
4539 +- [GRO_CONSUMED] = "GRO_CONSUMED",
4540 +- };
4541 +
4542 + if (wdev->iftype == NL80211_IFTYPE_STATION) {
4543 + sa = wil_skb_get_sa(skb);
4544 + if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
4545 + /* mcast packet looped back to us */
4546 +- rc = GRO_DROP;
4547 + dev_kfree_skb(skb);
4548 +- goto stats;
4549 ++ ndev->stats.rx_dropped++;
4550 ++ stats->rx_dropped++;
4551 ++ wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
4552 ++ return;
4553 + }
4554 + } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
4555 + if (mcast) {
4556 +@@ -967,26 +960,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
4557 + wil_rx_handle_eapol(vif, skb);
4558 +
4559 + if (gro)
4560 +- rc = napi_gro_receive(&wil->napi_rx, skb);
4561 ++ napi_gro_receive(&wil->napi_rx, skb);
4562 + else
4563 + netif_rx_ni(skb);
4564 +- wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
4565 +- len, gro_res_str[rc]);
4566 +- }
4567 +-stats:
4568 +- /* statistics. rc set to GRO_NORMAL for AP bridging */
4569 +- if (unlikely(rc == GRO_DROP)) {
4570 +- ndev->stats.rx_dropped++;
4571 +- stats->rx_dropped++;
4572 +- wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
4573 +- } else {
4574 +- ndev->stats.rx_packets++;
4575 +- stats->rx_packets++;
4576 +- ndev->stats.rx_bytes += len;
4577 +- stats->rx_bytes += len;
4578 +- if (mcast)
4579 +- ndev->stats.multicast++;
4580 + }
4581 ++ ndev->stats.rx_packets++;
4582 ++ stats->rx_packets++;
4583 ++ ndev->stats.rx_bytes += len;
4584 ++ stats->rx_bytes += len;
4585 ++ if (mcast)
4586 ++ ndev->stats.multicast++;
4587 + }
4588 +
4589 + void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
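This wil6210 hunk and the earlier WireGuard receive.c hunk make the same adjustment: napi_gro_receive() no longer reports GRO_DROP (the core stopped dropping packets on that path), so per-driver GRO_DROP accounting is dead code and the rx counters can be bumped unconditionally. A short sketch of the resulting caller shape, with illustrative names:

	static void rx_complete(struct napi_struct *napi, struct sk_buff *skb,
				struct net_device *ndev, unsigned int len)
	{
		napi_gro_receive(napi, skb);	/* return value intentionally ignored */
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
	}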
4590 +diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
4591 +index ccbb5b43b8b2..4502f9c4708d 100644
4592 +--- a/drivers/nvdimm/region_devs.c
4593 ++++ b/drivers/nvdimm/region_devs.c
4594 +@@ -679,18 +679,8 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
4595 + return a->mode;
4596 + }
4597 +
4598 +- if (a == &dev_attr_align.attr) {
4599 +- int i;
4600 +-
4601 +- for (i = 0; i < nd_region->ndr_mappings; i++) {
4602 +- struct nd_mapping *nd_mapping = &nd_region->mapping[i];
4603 +- struct nvdimm *nvdimm = nd_mapping->nvdimm;
4604 +-
4605 +- if (test_bit(NDD_LABELING, &nvdimm->flags))
4606 +- return a->mode;
4607 +- }
4608 +- return 0;
4609 +- }
4610 ++ if (a == &dev_attr_align.attr)
4611 ++ return a->mode;
4612 +
4613 + if (a != &dev_attr_set_cookie.attr
4614 + && a != &dev_attr_available_size.attr)
4615 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
4616 +index 54603bd3e02d..17f172cf456a 100644
4617 +--- a/drivers/nvme/host/multipath.c
4618 ++++ b/drivers/nvme/host/multipath.c
4619 +@@ -409,11 +409,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
4620 + {
4621 + struct nvme_ns_head *head = ns->head;
4622 +
4623 +- lockdep_assert_held(&ns->head->lock);
4624 +-
4625 + if (!head->disk)
4626 + return;
4627 +
4628 ++ mutex_lock(&head->lock);
4629 + if (!(head->disk->flags & GENHD_FL_UP))
4630 + device_add_disk(&head->subsys->dev, head->disk,
4631 + nvme_ns_id_attr_groups);
4632 +@@ -426,9 +425,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
4633 + __nvme_find_path(head, node);
4634 + srcu_read_unlock(&head->srcu, srcu_idx);
4635 + }
4636 ++ mutex_unlock(&head->lock);
4637 +
4638 +- synchronize_srcu(&ns->head->srcu);
4639 +- kblockd_schedule_work(&ns->head->requeue_work);
4640 ++ synchronize_srcu(&head->srcu);
4641 ++ kblockd_schedule_work(&head->requeue_work);
4642 + }
4643 +
4644 + static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
4645 +@@ -483,14 +483,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
4646 + static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
4647 + struct nvme_ns *ns)
4648 + {
4649 +- mutex_lock(&ns->head->lock);
4650 + ns->ana_grpid = le32_to_cpu(desc->grpid);
4651 + ns->ana_state = desc->state;
4652 + clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
4653 +
4654 + if (nvme_state_is_live(ns->ana_state))
4655 + nvme_mpath_set_live(ns);
4656 +- mutex_unlock(&ns->head->lock);
4657 + }
4658 +
4659 + static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
4660 +@@ -661,10 +659,8 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
4661 + nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
4662 + mutex_unlock(&ns->ctrl->ana_lock);
4663 + } else {
4664 +- mutex_lock(&ns->head->lock);
4665 + ns->ana_state = NVME_ANA_OPTIMIZED;
4666 + nvme_mpath_set_live(ns);
4667 +- mutex_unlock(&ns->head->lock);
4668 + }
4669 + }
4670 +
4671 +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
4672 +index aa5ca222c6f5..96deaf348466 100644
4673 +--- a/drivers/nvme/target/core.c
4674 ++++ b/drivers/nvme/target/core.c
4675 +@@ -129,30 +129,41 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
4676 + return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
4677 + }
4678 +
4679 +-static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
4680 ++static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
4681 + {
4682 +- struct nvmet_async_event *aen;
4683 ++ u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
4684 + struct nvmet_req *req;
4685 +
4686 +- while (1) {
4687 ++ mutex_lock(&ctrl->lock);
4688 ++ while (ctrl->nr_async_event_cmds) {
4689 ++ req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
4690 ++ mutex_unlock(&ctrl->lock);
4691 ++ nvmet_req_complete(req, status);
4692 + mutex_lock(&ctrl->lock);
4693 +- aen = list_first_entry_or_null(&ctrl->async_events,
4694 +- struct nvmet_async_event, entry);
4695 +- if (!aen || !ctrl->nr_async_event_cmds) {
4696 +- mutex_unlock(&ctrl->lock);
4697 +- break;
4698 +- }
4699 ++ }
4700 ++ mutex_unlock(&ctrl->lock);
4701 ++}
4702 ++
4703 ++static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
4704 ++{
4705 ++ struct nvmet_async_event *aen;
4706 ++ struct nvmet_req *req;
4707 +
4708 ++ mutex_lock(&ctrl->lock);
4709 ++ while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
4710 ++ aen = list_first_entry(&ctrl->async_events,
4711 ++ struct nvmet_async_event, entry);
4712 + req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
4713 +- if (status == 0)
4714 +- nvmet_set_result(req, nvmet_async_event_result(aen));
4715 ++ nvmet_set_result(req, nvmet_async_event_result(aen));
4716 +
4717 + list_del(&aen->entry);
4718 + kfree(aen);
4719 +
4720 + mutex_unlock(&ctrl->lock);
4721 +- nvmet_req_complete(req, status);
4722 ++ nvmet_req_complete(req, 0);
4723 ++ mutex_lock(&ctrl->lock);
4724 + }
4725 ++ mutex_unlock(&ctrl->lock);
4726 + }
4727 +
4728 + static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
4729 +@@ -172,7 +183,7 @@ static void nvmet_async_event_work(struct work_struct *work)
4730 + struct nvmet_ctrl *ctrl =
4731 + container_of(work, struct nvmet_ctrl, async_event_work);
4732 +
4733 +- nvmet_async_events_process(ctrl, 0);
4734 ++ nvmet_async_events_process(ctrl);
4735 + }
4736 +
4737 + void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
4738 +@@ -755,7 +766,6 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
4739 +
4740 + void nvmet_sq_destroy(struct nvmet_sq *sq)
4741 + {
4742 +- u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
4743 + struct nvmet_ctrl *ctrl = sq->ctrl;
4744 +
4745 + /*
4746 +@@ -763,7 +773,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
4747 + * queue doesn't have outstanding requests on it.
4748 + */
4749 + if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
4750 +- nvmet_async_events_process(ctrl, status);
4751 ++ nvmet_async_events_failall(ctrl);
4752 + percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
4753 + wait_for_completion(&sq->confirm_done);
4754 + wait_for_completion(&sq->free_done);
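Both new nvmet loops are instances of the standard drain pattern: pop one entry under the lock, drop the lock to complete it (completion may take other locks or requeue work), then retake the lock and re-test the condition. A self-contained userspace analog, assuming a pthread-protected stack and illustrative names:

	#include <pthread.h>

	struct ctrl {
		pthread_mutex_t lock;
		int nr_cmds;
		void *cmds[16];
	};

	void complete_cmd(void *cmd);	/* must run unlocked: may block or re-arm */

	static void drain(struct ctrl *c)
	{
		pthread_mutex_lock(&c->lock);
		while (c->nr_cmds) {
			void *cmd = c->cmds[--c->nr_cmds];

			pthread_mutex_unlock(&c->lock);
			complete_cmd(cmd);	/* lock dropped across the callback */
			pthread_mutex_lock(&c->lock);
		}
		pthread_mutex_unlock(&c->lock);
	}

Re-reading nr_cmds after every relock is what makes the loop safe against entries added or consumed while the lock was dropped.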
4755 +diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
4756 +index 9f982c0627a0..95a3bb2e5eab 100644
4757 +--- a/drivers/of/of_mdio.c
4758 ++++ b/drivers/of/of_mdio.c
4759 +@@ -303,10 +303,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
4760 + child, addr);
4761 +
4762 + if (of_mdiobus_child_is_phy(child)) {
4763 ++ /* -ENODEV is the return code that PHYLIB has
4764 ++ * standardized on to indicate that bus
4765 ++ * scanning should continue.
4766 ++ */
4767 + rc = of_mdiobus_register_phy(mdio, child, addr);
4768 +- if (rc && rc != -ENODEV)
4769 ++ if (!rc)
4770 ++ break;
4771 ++ if (rc != -ENODEV)
4772 + goto unregister;
4773 +- break;
4774 + }
4775 + }
4776 + }
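The comment added in this hunk spells out the convention the reordered control flow depends on: 0 means a PHY was registered at this address (stop), -ENODEV means nothing was found (keep scanning), and any other error aborts the scan. A compact sketch of that three-way split; register_one() is an illustrative stand-in for the real registration call:

	#include <errno.h>

	int register_one(int addr);	/* returns 0, -ENODEV, or a fatal error */

	static int scan_bus(void)
	{
		for (int addr = 0; addr < 32; addr++) {
			int rc = register_one(addr);

			if (!rc)
				break;		/* found and registered: done */
			if (rc != -ENODEV)
				return rc;	/* real failure: unwind */
			/* -ENODEV: keep scanning the next address */
		}
		return 0;
	}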
4777 +diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
4778 +index fe0be8a6ebb7..092a48e4dff5 100644
4779 +--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
4780 ++++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
4781 +@@ -170,6 +170,7 @@ struct pmic_gpio_state {
4782 + struct regmap *map;
4783 + struct pinctrl_dev *ctrl;
4784 + struct gpio_chip chip;
4785 ++ struct irq_chip irq;
4786 + };
4787 +
4788 + static const struct pinconf_generic_params pmic_gpio_bindings[] = {
4789 +@@ -917,16 +918,6 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state,
4790 + return 0;
4791 + }
4792 +
4793 +-static struct irq_chip pmic_gpio_irq_chip = {
4794 +- .name = "spmi-gpio",
4795 +- .irq_ack = irq_chip_ack_parent,
4796 +- .irq_mask = irq_chip_mask_parent,
4797 +- .irq_unmask = irq_chip_unmask_parent,
4798 +- .irq_set_type = irq_chip_set_type_parent,
4799 +- .irq_set_wake = irq_chip_set_wake_parent,
4800 +- .flags = IRQCHIP_MASK_ON_SUSPEND,
4801 +-};
4802 +-
4803 + static int pmic_gpio_domain_translate(struct irq_domain *domain,
4804 + struct irq_fwspec *fwspec,
4805 + unsigned long *hwirq,
4806 +@@ -1053,8 +1044,16 @@ static int pmic_gpio_probe(struct platform_device *pdev)
4807 + if (!parent_domain)
4808 + return -ENXIO;
4809 +
4810 ++ state->irq.name = "spmi-gpio",
4811 ++ state->irq.irq_ack = irq_chip_ack_parent,
4812 ++ state->irq.irq_mask = irq_chip_mask_parent,
4813 ++ state->irq.irq_unmask = irq_chip_unmask_parent,
4814 ++ state->irq.irq_set_type = irq_chip_set_type_parent,
4815 ++ state->irq.irq_set_wake = irq_chip_set_wake_parent,
4816 ++ state->irq.flags = IRQCHIP_MASK_ON_SUSPEND,
4817 ++
4818 + girq = &state->chip.irq;
4819 +- girq->chip = &pmic_gpio_irq_chip;
4820 ++ girq->chip = &state->irq;
4821 + girq->default_type = IRQ_TYPE_NONE;
4822 + girq->handler = handle_level_irq;
4823 + girq->fwnode = of_node_to_fwnode(state->dev->of_node);
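The move above replaces a file-scope static irq_chip with a copy embedded in each device's state, since the irq core treats the chip as per-instance data and a shared static object breaks (and triggers warnings) as soon as a second PMIC probes. A minimal sketch of the same refactor, with hypothetical names:

	struct chip_ops { const char *name; };

	struct dev_state {
		struct chip_ops ops;	/* one private copy per probed device */
	};

	static void dev_probe(struct dev_state *st)
	{
		/* fill in this instance's copy instead of pointing every
		 * device at one mutable static structure */
		st->ops.name = "spmi-gpio";
	}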
4824 +diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
4825 +index 21661f6490d6..195cfe557511 100644
4826 +--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
4827 ++++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
4828 +@@ -731,8 +731,8 @@ static int tegra_pinctrl_resume(struct device *dev)
4829 + }
4830 +
4831 + const struct dev_pm_ops tegra_pinctrl_pm = {
4832 +- .suspend = &tegra_pinctrl_suspend,
4833 +- .resume = &tegra_pinctrl_resume
4834 ++ .suspend_noirq = &tegra_pinctrl_suspend,
4835 ++ .resume_noirq = &tegra_pinctrl_resume
4836 + };
4837 +
4838 + static bool tegra_pinctrl_gpio_node_has_range(struct tegra_pmx *pmx)
4839 +diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
4840 +index e1d6c8f6d40b..fe65b5acaf28 100644
4841 +--- a/drivers/regulator/da9063-regulator.c
4842 ++++ b/drivers/regulator/da9063-regulator.c
4843 +@@ -512,7 +512,6 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
4844 + },
4845 + {
4846 + DA9063_LDO(DA9063, LDO9, 950, 50, 3600),
4847 +- .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL),
4848 + },
4849 + {
4850 + DA9063_LDO(DA9063, LDO11, 900, 50, 3600),
4851 +diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
4852 +index 689537927f6f..4c8e8b472287 100644
4853 +--- a/drivers/regulator/pfuze100-regulator.c
4854 ++++ b/drivers/regulator/pfuze100-regulator.c
4855 +@@ -209,6 +209,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
4856 +
4857 + };
4858 +
4859 ++static const struct regulator_ops pfuze3000_sw_regulator_ops = {
4860 ++ .enable = regulator_enable_regmap,
4861 ++ .disable = regulator_disable_regmap,
4862 ++ .is_enabled = regulator_is_enabled_regmap,
4863 ++ .list_voltage = regulator_list_voltage_table,
4864 ++ .map_voltage = regulator_map_voltage_ascend,
4865 ++ .set_voltage_sel = regulator_set_voltage_sel_regmap,
4866 ++ .get_voltage_sel = regulator_get_voltage_sel_regmap,
4867 ++ .set_voltage_time_sel = regulator_set_voltage_time_sel,
4868 ++ .set_ramp_delay = pfuze100_set_ramp_delay,
4869 ++
4870 ++};
4871 ++
4872 + #define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \
4873 + [_chip ## _ ## _name] = { \
4874 + .desc = { \
4875 +@@ -318,23 +331,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
4876 + .stby_mask = 0x20, \
4877 + }
4878 +
4879 +-
4880 +-#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \
4881 +- .desc = { \
4882 +- .name = #_name,\
4883 +- .n_voltages = ((max) - (min)) / (step) + 1, \
4884 +- .ops = &pfuze100_sw_regulator_ops, \
4885 +- .type = REGULATOR_VOLTAGE, \
4886 +- .id = _chip ## _ ## _name, \
4887 +- .owner = THIS_MODULE, \
4888 +- .min_uV = (min), \
4889 +- .uV_step = (step), \
4890 +- .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
4891 +- .vsel_mask = 0x7, \
4892 +- }, \
4893 +- .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
4894 +- .stby_mask = 0x7, \
4895 +-}
4896 ++/* No linear case for some of the PFUZE3000 switches */
4897 ++#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages) \
4898 ++ [_chip ## _ ## _name] = { \
4899 ++ .desc = { \
4900 ++ .name = #_name, \
4901 ++ .n_voltages = ARRAY_SIZE(voltages), \
4902 ++ .ops = &pfuze3000_sw_regulator_ops, \
4903 ++ .type = REGULATOR_VOLTAGE, \
4904 ++ .id = _chip ## _ ## _name, \
4905 ++ .owner = THIS_MODULE, \
4906 ++ .volt_table = voltages, \
4907 ++ .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
4908 ++ .vsel_mask = (mask), \
4909 ++ .enable_reg = (base) + PFUZE100_MODE_OFFSET, \
4910 ++ .enable_mask = 0xf, \
4911 ++ .enable_val = 0x8, \
4912 ++ .enable_time = 500, \
4913 ++ }, \
4914 ++ .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
4915 ++ .stby_mask = (mask), \
4916 ++ .sw_reg = true, \
4917 ++ }
4918 +
4919 + #define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \
4920 + .desc = { \
4921 +@@ -391,9 +409,9 @@ static struct pfuze_regulator pfuze200_regulators[] = {
4922 + };
4923 +
4924 + static struct pfuze_regulator pfuze3000_regulators[] = {
4925 +- PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
4926 ++ PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
4927 + PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
4928 +- PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
4929 ++ PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
4930 + PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
4931 + PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
4932 + PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
4933 +@@ -407,8 +425,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
4934 + };
4935 +
4936 + static struct pfuze_regulator pfuze3001_regulators[] = {
4937 +- PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
4938 +- PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
4939 ++ PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
4940 ++ PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
4941 + PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
4942 + PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
4943 + PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
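PFUZE3000_SW_REG describes these switches with an explicit voltage table (volt_table plus regulator_list_voltage_table/regulator_map_voltage_ascend) instead of a min/step formula, because their selector-to-voltage mapping is not linear. A tiny sketch of the difference; the table values below are made up, not the real pfuze3000_sw1a contents:

	/* linear: only works when every selector step is evenly spaced */
	static int list_voltage_linear(unsigned int sel)
	{
		return 700000 + sel * 25000;
	}

	/* table: each selector indexes an arbitrary voltage */
	static const int sw_table[] = {
		700000, 725000, 750000, 800000, 850000, 900000, 1000000, 1100000,
	};

	static int list_voltage_table(unsigned int sel)
	{
		return sel < 8 ? sw_table[sel] : -1;
	}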
4944 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
4945 +index 569966bdc513..60d675fefac7 100644
4946 +--- a/drivers/s390/net/qeth_core_main.c
4947 ++++ b/drivers/s390/net/qeth_core_main.c
4948 +@@ -4265,9 +4265,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4949 + int fallback = *(int *)reply->param;
4950 +
4951 + QETH_CARD_TEXT(card, 4, "setaccb");
4952 +- if (cmd->hdr.return_code)
4953 +- return -EIO;
4954 +- qeth_setadpparms_inspect_rc(cmd);
4955 +
4956 + access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4957 + QETH_CARD_TEXT_(card, 2, "rc=%d",
4958 +@@ -4277,7 +4274,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4959 + QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4960 + access_ctrl_req->subcmd_code, CARD_DEVID(card),
4961 + cmd->data.setadapterparms.hdr.return_code);
4962 +- switch (cmd->data.setadapterparms.hdr.return_code) {
4963 ++ switch (qeth_setadpparms_inspect_rc(cmd)) {
4964 + case SET_ACCESS_CTRL_RC_SUCCESS:
4965 + if (card->options.isolation == ISOLATION_MODE_NONE) {
4966 + dev_info(&card->gdev->dev,
4967 +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
4968 +index 3d0bc000f500..c621e8f0897f 100644
4969 +--- a/drivers/s390/scsi/zfcp_erp.c
4970 ++++ b/drivers/s390/scsi/zfcp_erp.c
4971 +@@ -576,7 +576,10 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
4972 + ZFCP_STATUS_ERP_TIMEDOUT)) {
4973 + req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
4974 + zfcp_dbf_rec_run("erscf_1", act);
4975 +- req->erp_action = NULL;
4976 ++ /* lock-free concurrent access with
4977 ++ * zfcp_erp_timeout_handler()
4978 ++ */
4979 ++ WRITE_ONCE(req->erp_action, NULL);
4980 + }
4981 + if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
4982 + zfcp_dbf_rec_run("erscf_2", act);
4983 +@@ -612,8 +615,14 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
4984 + void zfcp_erp_timeout_handler(struct timer_list *t)
4985 + {
4986 + struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
4987 +- struct zfcp_erp_action *act = fsf_req->erp_action;
4988 ++ struct zfcp_erp_action *act;
4989 +
4990 ++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
4991 ++ return;
4992 ++ /* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */
4993 ++ act = READ_ONCE(fsf_req->erp_action);
4994 ++ if (!act)
4995 ++ return;
4996 + zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
4997 + }
4998 +
4999 +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
5000 +index 4104bdcdbb6f..70be1f5de873 100644
5001 +--- a/drivers/scsi/lpfc/lpfc_init.c
5002 ++++ b/drivers/scsi/lpfc/lpfc_init.c
5003 +@@ -11895,7 +11895,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
5004 + lpfc_sli4_xri_exchange_busy_wait(phba);
5005 +
5006 + /* per-phba callback de-registration for hotplug event */
5007 +- lpfc_cpuhp_remove(phba);
5008 ++ if (phba->pport)
5009 ++ lpfc_cpuhp_remove(phba);
5010 +
5011 + /* Disable PCI subsystem interrupt */
5012 + lpfc_sli4_disable_intr(phba);
5013 +diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
5014 +index 42c3ad27f1cb..df670fba2ab8 100644
5015 +--- a/drivers/scsi/qla2xxx/qla_gs.c
5016 ++++ b/drivers/scsi/qla2xxx/qla_gs.c
5017 +@@ -3496,7 +3496,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
5018 + qla2x00_clear_loop_id(fcport);
5019 + fcport->flags |= FCF_FABRIC_DEVICE;
5020 + } else if (fcport->d_id.b24 != rp->id.b24 ||
5021 +- fcport->scan_needed) {
5022 ++ (fcport->scan_needed &&
5023 ++ fcport->port_type != FCT_INITIATOR &&
5024 ++ fcport->port_type != FCT_NVME_INITIATOR)) {
5025 + qlt_schedule_sess_for_deletion(fcport);
5026 + }
5027 + fcport->d_id.b24 = rp->id.b24;
5028 +diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
5029 +index 719e1f189ebf..ada0d8804d84 100644
5030 +--- a/drivers/soc/imx/soc-imx8m.c
5031 ++++ b/drivers/soc/imx/soc-imx8m.c
5032 +@@ -22,6 +22,8 @@
5033 + #define OCOTP_UID_LOW 0x410
5034 + #define OCOTP_UID_HIGH 0x420
5035 +
5036 ++#define IMX8MP_OCOTP_UID_OFFSET 0x10
5037 ++
5038 + /* Same as ANADIG_DIGPROG_IMX7D */
5039 + #define ANADIG_DIGPROG_IMX8MM 0x800
5040 +
5041 +@@ -88,6 +90,8 @@ static void __init imx8mm_soc_uid(void)
5042 + {
5043 + void __iomem *ocotp_base;
5044 + struct device_node *np;
5045 ++ u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
5046 ++ IMX8MP_OCOTP_UID_OFFSET : 0;
5047 +
5048 + np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
5049 + if (!np)
5050 +@@ -96,9 +100,9 @@ static void __init imx8mm_soc_uid(void)
5051 + ocotp_base = of_iomap(np, 0);
5052 + WARN_ON(!ocotp_base);
5053 +
5054 +- soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
5055 ++ soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
5056 + soc_uid <<= 32;
5057 +- soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
5058 ++ soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
5059 +
5060 + iounmap(ocotp_base);
5061 + of_node_put(np);
5062 +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
5063 +index 2e9f9adc5900..88176eaca448 100644
5064 +--- a/drivers/spi/spi-fsl-dspi.c
5065 ++++ b/drivers/spi/spi-fsl-dspi.c
5066 +@@ -584,14 +584,14 @@ static void dspi_release_dma(struct fsl_dspi *dspi)
5067 + return;
5068 +
5069 + if (dma->chan_tx) {
5070 +- dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys,
5071 +- dma_bufsize, DMA_TO_DEVICE);
5072 ++ dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
5073 ++ dma->tx_dma_buf, dma->tx_dma_phys);
5074 + dma_release_channel(dma->chan_tx);
5075 + }
5076 +
5077 + if (dma->chan_rx) {
5078 +- dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys,
5079 +- dma_bufsize, DMA_FROM_DEVICE);
5080 ++ dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
5081 ++ dma->rx_dma_buf, dma->rx_dma_phys);
5082 + dma_release_channel(dma->chan_rx);
5083 + }
5084 + }
5085 +diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
5086 +index 110338dbe372..cc60f6a28d70 100644
5087 +--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
5088 ++++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
5089 +@@ -1830,12 +1830,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
5090 + pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
5091 + if (!pIE)
5092 + return _FAIL;
5093 ++ if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates))
5094 ++ return _FAIL;
5095 +
5096 + memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
5097 + supportRateNum = ie_len;
5098 +
5099 + pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
5100 +- if (pIE)
5101 ++ if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum))
5102 + memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
5103 +
5104 + return _SUCCESS;
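The two added checks bound attacker-controlled IE lengths before they reach memcpy() into the fixed SupportedRates array; the second check must also subtract the bytes already consumed by the first copy. A self-contained sketch of that bounded two-stage append:

	#include <string.h>

	#define RATES_MAX 16

	struct sta { unsigned char rates[RATES_MAX]; };

	/* caller guarantees used <= sizeof(s->rates) */
	static int append_rates(struct sta *s, size_t used,
				const unsigned char *ie, size_t len)
	{
		if (len > sizeof(s->rates) - used)
			return -1;	/* would overflow the fixed buffer: reject */
		memcpy(s->rates + used, ie, len);
		return 0;
	}

Calling it once with used == 0 for the basic rates and again with the first copy's length for the extended rates mirrors the two checks in the hunk.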
5105 +diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
5106 +index f8e43a6faea9..cdcc64ea2554 100644
5107 +--- a/drivers/tty/hvc/hvc_console.c
5108 ++++ b/drivers/tty/hvc/hvc_console.c
5109 +@@ -75,8 +75,6 @@ static LIST_HEAD(hvc_structs);
5110 + */
5111 + static DEFINE_MUTEX(hvc_structs_mutex);
5112 +
5113 +-/* Mutex to serialize hvc_open */
5114 +-static DEFINE_MUTEX(hvc_open_mutex);
5115 + /*
5116 + * This value is used to assign a tty->index value to a hvc_struct based
5117 + * upon order of exposure via hvc_probe(), when we can not match it to
5118 +@@ -348,24 +346,16 @@ static int hvc_install(struct tty_driver *driver, struct tty_struct *tty)
5119 + */
5120 + static int hvc_open(struct tty_struct *tty, struct file * filp)
5121 + {
5122 +- struct hvc_struct *hp;
5123 ++ struct hvc_struct *hp = tty->driver_data;
5124 + unsigned long flags;
5125 + int rc = 0;
5126 +
5127 +- mutex_lock(&hvc_open_mutex);
5128 +-
5129 +- hp = tty->driver_data;
5130 +- if (!hp) {
5131 +- rc = -EIO;
5132 +- goto out;
5133 +- }
5134 +-
5135 + spin_lock_irqsave(&hp->port.lock, flags);
5136 + /* Check and then increment for fast path open. */
5137 + if (hp->port.count++ > 0) {
5138 + spin_unlock_irqrestore(&hp->port.lock, flags);
5139 + hvc_kick();
5140 +- goto out;
5141 ++ return 0;
5142 + } /* else count == 0 */
5143 + spin_unlock_irqrestore(&hp->port.lock, flags);
5144 +
5145 +@@ -393,8 +383,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
5146 + /* Force wakeup of the polling thread */
5147 + hvc_kick();
5148 +
5149 +-out:
5150 +- mutex_unlock(&hvc_open_mutex);
5151 + return rc;
5152 + }
5153 +
5154 +diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c
5155 +index e71240b386b4..da4c5eb03d7e 100644
5156 +--- a/drivers/usb/cdns3/ep0.c
5157 ++++ b/drivers/usb/cdns3/ep0.c
5158 +@@ -327,7 +327,8 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
5159 + if (!set || (tmode & 0xff) != 0)
5160 + return -EINVAL;
5161 +
5162 +- switch (tmode >> 8) {
5163 ++ tmode >>= 8;
5164 ++ switch (tmode) {
5165 + case TEST_J:
5166 + case TEST_K:
5167 + case TEST_SE0_NAK:
5168 +@@ -711,15 +712,17 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
5169 + int ret = 0;
5170 + u8 zlp = 0;
5171 +
5172 ++ spin_lock_irqsave(&priv_dev->lock, flags);
5173 + trace_cdns3_ep0_queue(priv_dev, request);
5174 +
5175 + /* cancel the request if controller receive new SETUP packet. */
5176 +- if (cdns3_check_new_setup(priv_dev))
5177 ++ if (cdns3_check_new_setup(priv_dev)) {
5178 ++ spin_unlock_irqrestore(&priv_dev->lock, flags);
5179 + return -ECONNRESET;
5180 ++ }
5181 +
5182 + /* send STATUS stage. Should be called only for SET_CONFIGURATION */
5183 + if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) {
5184 +- spin_lock_irqsave(&priv_dev->lock, flags);
5185 + cdns3_select_ep(priv_dev, 0x00);
5186 +
5187 + erdy_sent = !priv_dev->hw_configured_flag;
5188 +@@ -744,7 +747,6 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
5189 + return 0;
5190 + }
5191 +
5192 +- spin_lock_irqsave(&priv_dev->lock, flags);
5193 + if (!list_empty(&priv_ep->pending_req_list)) {
5194 + dev_err(priv_dev->dev,
5195 + "can't handle multiple requests for ep0\n");
5196 +diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h
5197 +index 8d121e207fd8..755c56582257 100644
5198 +--- a/drivers/usb/cdns3/trace.h
5199 ++++ b/drivers/usb/cdns3/trace.h
5200 +@@ -156,7 +156,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep0_irq,
5201 + __dynamic_array(char, str, CDNS3_MSG_MAX)
5202 + ),
5203 + TP_fast_assign(
5204 +- __entry->ep_dir = priv_dev->ep0_data_dir;
5205 ++ __entry->ep_dir = priv_dev->selected_ep;
5206 + __entry->ep_sts = ep_sts;
5207 + ),
5208 + TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str),
5209 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
5210 +index f67088bb8218..d5187b50fc82 100644
5211 +--- a/drivers/usb/class/cdc-acm.c
5212 ++++ b/drivers/usb/class/cdc-acm.c
5213 +@@ -1689,6 +1689,8 @@ static int acm_pre_reset(struct usb_interface *intf)
5214 +
5215 + static const struct usb_device_id acm_ids[] = {
5216 + /* quirky and broken devices */
5217 ++ { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. (formerly SMSC) */
5218 ++ .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */
5219 + { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
5220 + .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
5221 + { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
5222 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
5223 +index 3e8efe759c3e..e0b77674869c 100644
5224 +--- a/drivers/usb/core/quirks.c
5225 ++++ b/drivers/usb/core/quirks.c
5226 +@@ -218,11 +218,12 @@ static const struct usb_device_id usb_quirk_list[] = {
5227 + /* Logitech HD Webcam C270 */
5228 + { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
5229 +
5230 +- /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
5231 ++ /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */
5232 + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
5233 + { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
5234 + { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
5235 + { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
5236 ++ { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT },
5237 +
5238 + /* Logitech ConferenceCam CC3000e */
5239 + { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
5240 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
5241 +index 12b98b466287..7faf5f8c056d 100644
5242 +--- a/drivers/usb/dwc2/gadget.c
5243 ++++ b/drivers/usb/dwc2/gadget.c
5244 +@@ -4920,12 +4920,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
5245 + epnum, 0);
5246 + }
5247 +
5248 +- ret = usb_add_gadget_udc(dev, &hsotg->gadget);
5249 +- if (ret) {
5250 +- dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
5251 +- hsotg->ctrl_req);
5252 +- return ret;
5253 +- }
5254 + dwc2_hsotg_dump(hsotg);
5255 +
5256 + return 0;
5257 +diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
5258 +index 69972750e161..5684c4781af9 100644
5259 +--- a/drivers/usb/dwc2/platform.c
5260 ++++ b/drivers/usb/dwc2/platform.c
5261 +@@ -536,6 +536,17 @@ static int dwc2_driver_probe(struct platform_device *dev)
5262 + if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
5263 + dwc2_lowlevel_hw_disable(hsotg);
5264 +
5265 ++#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
5266 ++ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
5267 ++ /* Postpone adding the new gadget to the udc class driver list */
5268 ++ if (hsotg->gadget_enabled) {
5269 ++ retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
5270 ++ if (retval) {
5271 ++ dwc2_hsotg_remove(hsotg);
5272 ++ goto error_init;
5273 ++ }
5274 ++ }
5275 ++#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
5276 + return 0;
5277 +
5278 + error_init:
5279 +diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
5280 +index 48b68b6f0dc8..90bb022737da 100644
5281 +--- a/drivers/usb/dwc3/dwc3-exynos.c
5282 ++++ b/drivers/usb/dwc3/dwc3-exynos.c
5283 +@@ -162,12 +162,6 @@ static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
5284 + .suspend_clk_idx = -1,
5285 + };
5286 +
5287 +-static const struct dwc3_exynos_driverdata exynos5420_drvdata = {
5288 +- .clk_names = { "usbdrd30", "usbdrd30_susp_clk"},
5289 +- .num_clks = 2,
5290 +- .suspend_clk_idx = 1,
5291 +-};
5292 +-
5293 + static const struct dwc3_exynos_driverdata exynos5433_drvdata = {
5294 + .clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" },
5295 + .num_clks = 4,
5296 +@@ -184,9 +178,6 @@ static const struct of_device_id exynos_dwc3_match[] = {
5297 + {
5298 + .compatible = "samsung,exynos5250-dwusb3",
5299 + .data = &exynos5250_drvdata,
5300 +- }, {
5301 +- .compatible = "samsung,exynos5420-dwusb3",
5302 +- .data = &exynos5420_drvdata,
5303 + }, {
5304 + .compatible = "samsung,exynos5433-dwusb3",
5305 + .data = &exynos5433_drvdata,
5306 +diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
5307 +index cafde053788b..80a1b52c656e 100644
5308 +--- a/drivers/usb/gadget/udc/mv_udc_core.c
5309 ++++ b/drivers/usb/gadget/udc/mv_udc_core.c
5310 +@@ -2313,7 +2313,8 @@ static int mv_udc_probe(struct platform_device *pdev)
5311 + return 0;
5312 +
5313 + err_create_workqueue:
5314 +- destroy_workqueue(udc->qwork);
5315 ++ if (udc->qwork)
5316 ++ destroy_workqueue(udc->qwork);
5317 + err_destroy_dma:
5318 + dma_pool_destroy(udc->dtd_pool);
5319 + err_free_dma:
5320 +diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
5321 +index a4e9abcbdc4f..1a9b7572e17f 100644
5322 +--- a/drivers/usb/host/ehci-exynos.c
5323 ++++ b/drivers/usb/host/ehci-exynos.c
5324 +@@ -203,9 +203,8 @@ static int exynos_ehci_probe(struct platform_device *pdev)
5325 + hcd->rsrc_len = resource_size(res);
5326 +
5327 + irq = platform_get_irq(pdev, 0);
5328 +- if (!irq) {
5329 +- dev_err(&pdev->dev, "Failed to get IRQ\n");
5330 +- err = -ENODEV;
5331 ++ if (irq < 0) {
5332 ++ err = irq;
5333 + goto fail_io;
5334 + }
5335 +
5336 +diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
5337 +index 1a48ab1bd3b2..7ff2cbdcd0b2 100644
5338 +--- a/drivers/usb/host/ehci-pci.c
5339 ++++ b/drivers/usb/host/ehci-pci.c
5340 +@@ -216,6 +216,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
5341 + ehci_info(ehci, "applying MosChip frame-index workaround\n");
5342 + ehci->frame_index_bug = 1;
5343 + break;
5344 ++ case PCI_VENDOR_ID_HUAWEI:
5345 ++ /* Synopsys HC bug */
5346 ++ if (pdev->device == 0xa239) {
5347 ++ ehci_info(ehci, "applying Synopsys HC workaround\n");
5348 ++ ehci->has_synopsys_hc_bug = 1;
5349 ++ }
5350 ++ break;
5351 + }
5352 +
5353 + /* optional debug port, normally in the first BAR */
5354 +diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
5355 +index cff965240327..b91d50da6127 100644
5356 +--- a/drivers/usb/host/ohci-sm501.c
5357 ++++ b/drivers/usb/host/ohci-sm501.c
5358 +@@ -191,6 +191,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
5359 + struct resource *mem;
5360 +
5361 + usb_remove_hcd(hcd);
5362 ++ iounmap(hcd->regs);
5363 + release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
5364 + usb_put_hcd(hcd);
5365 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
5366 +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
5367 +index bfbdb3ceed29..4311d4c9b68d 100644
5368 +--- a/drivers/usb/host/xhci-mtk.c
5369 ++++ b/drivers/usb/host/xhci-mtk.c
5370 +@@ -587,6 +587,9 @@ static int xhci_mtk_remove(struct platform_device *dev)
5371 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5372 + struct usb_hcd *shared_hcd = xhci->shared_hcd;
5373 +
5374 ++ pm_runtime_put_noidle(&dev->dev);
5375 ++ pm_runtime_disable(&dev->dev);
5376 ++
5377 + usb_remove_hcd(shared_hcd);
5378 + xhci->shared_hcd = NULL;
5379 + device_init_wakeup(&dev->dev, false);
5380 +@@ -597,8 +600,6 @@ static int xhci_mtk_remove(struct platform_device *dev)
5381 + xhci_mtk_sch_exit(mtk);
5382 + xhci_mtk_clks_disable(mtk);
5383 + xhci_mtk_ldos_disable(mtk);
5384 +- pm_runtime_put_sync(&dev->dev);
5385 +- pm_runtime_disable(&dev->dev);
5386 +
5387 + return 0;
5388 + }
5389 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
5390 +index bee5deccc83d..ed468eed299c 100644
5391 +--- a/drivers/usb/host/xhci.c
5392 ++++ b/drivers/usb/host/xhci.c
5393 +@@ -1430,6 +1430,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
5394 + xhci->devs[slot_id]->out_ctx, ep_index);
5395 +
5396 + ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
5397 ++ ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
5398 + ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
5399 + ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
5400 +
5401 +@@ -4390,6 +4391,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
5402 + int hird, exit_latency;
5403 + int ret;
5404 +
5405 ++ if (xhci->quirks & XHCI_HW_LPM_DISABLE)
5406 ++ return -EPERM;
5407 ++
5408 + if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
5409 + !udev->lpm_capable)
5410 + return -EPERM;
5411 +@@ -4412,7 +4416,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
5412 + xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
5413 + enable ? "enable" : "disable", port_num + 1);
5414 +
5415 +- if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
5416 ++ if (enable) {
5417 + /* Host supports BESL timeout instead of HIRD */
5418 + if (udev->usb2_hw_lpm_besl_capable) {
5419 + /* if device doesn't have a preferred BESL value use a
5420 +@@ -4471,6 +4475,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
5421 + mutex_lock(hcd->bandwidth_mutex);
5422 + xhci_change_max_exit_latency(xhci, udev, 0);
5423 + mutex_unlock(hcd->bandwidth_mutex);
5424 ++ readl_poll_timeout(ports[port_num]->addr, pm_val,
5425 ++ (pm_val & PORT_PLS_MASK) == XDEV_U0,
5426 ++ 100, 10000);
5427 + return 0;
5428 + }
5429 + }
5430 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
5431 +index 86cfefdd6632..c80710e47476 100644
5432 +--- a/drivers/usb/host/xhci.h
5433 ++++ b/drivers/usb/host/xhci.h
5434 +@@ -716,7 +716,7 @@ struct xhci_ep_ctx {
5435 + * 4 - TRB error
5436 + * 5-7 - reserved
5437 + */
5438 +-#define EP_STATE_MASK (0xf)
5439 ++#define EP_STATE_MASK (0x7)
5440 + #define EP_STATE_DISABLED 0
5441 + #define EP_STATE_RUNNING 1
5442 + #define EP_STATE_HALTED 2
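EP_STATE_MASK shrinks from 0xf to 0x7 because, per the comment block, the endpoint state only ever holds the values 0-7 and therefore occupies three bits; masking with 0xf also touched the neighbouring reserved bit, which matters now that xhci_check_maxpacket() clears the field with ~EP_STATE_MASK. A standalone check of the arithmetic:

	#include <assert.h>

	int main(void)
	{
		unsigned int ep_info = (1u << 3) | 2;	/* reserved bit 3 set, state 2 */

		assert((ep_info & 0x7) == 2);		/* 3-bit field reads correctly */
		assert((ep_info & ~0x7u) == (1u << 3));	/* clearing keeps bit 3 intact */
		return 0;
	}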
5443 +diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
5444 +index 01c6a48c41bc..ac9a81ae8216 100644
5445 +--- a/drivers/usb/renesas_usbhs/fifo.c
5446 ++++ b/drivers/usb/renesas_usbhs/fifo.c
5447 +@@ -803,7 +803,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
5448 + return info->dma_map_ctrl(chan->device->dev, pkt, map);
5449 + }
5450 +
5451 +-static void usbhsf_dma_complete(void *arg);
5452 ++static void usbhsf_dma_complete(void *arg,
5453 ++ const struct dmaengine_result *result);
5454 + static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
5455 + {
5456 + struct usbhs_pipe *pipe = pkt->pipe;
5457 +@@ -813,6 +814,7 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
5458 + struct dma_chan *chan;
5459 + struct device *dev = usbhs_priv_to_dev(priv);
5460 + enum dma_transfer_direction dir;
5461 ++ dma_cookie_t cookie;
5462 +
5463 + fifo = usbhs_pipe_to_fifo(pipe);
5464 + if (!fifo)
5465 +@@ -827,11 +829,11 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
5466 + if (!desc)
5467 + return;
5468 +
5469 +- desc->callback = usbhsf_dma_complete;
5470 +- desc->callback_param = pipe;
5471 ++ desc->callback_result = usbhsf_dma_complete;
5472 ++ desc->callback_param = pkt;
5473 +
5474 +- pkt->cookie = dmaengine_submit(desc);
5475 +- if (pkt->cookie < 0) {
5476 ++ cookie = dmaengine_submit(desc);
5477 ++ if (cookie < 0) {
5478 + dev_err(dev, "Failed to submit dma descriptor\n");
5479 + return;
5480 + }
5481 +@@ -1152,12 +1154,10 @@ static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
5482 + struct dma_chan *chan, int dtln)
5483 + {
5484 + struct usbhs_pipe *pipe = pkt->pipe;
5485 +- struct dma_tx_state state;
5486 + size_t received_size;
5487 + int maxp = usbhs_pipe_get_maxpacket(pipe);
5488 +
5489 +- dmaengine_tx_status(chan, pkt->cookie, &state);
5490 +- received_size = pkt->length - state.residue;
5491 ++ received_size = pkt->length - pkt->dma_result->residue;
5492 +
5493 + if (dtln) {
5494 + received_size -= USBHS_USB_DMAC_XFER_SIZE;
5495 +@@ -1363,13 +1363,16 @@ static int usbhsf_irq_ready(struct usbhs_priv *priv,
5496 + return 0;
5497 + }
5498 +
5499 +-static void usbhsf_dma_complete(void *arg)
5500 ++static void usbhsf_dma_complete(void *arg,
5501 ++ const struct dmaengine_result *result)
5502 + {
5503 +- struct usbhs_pipe *pipe = arg;
5504 ++ struct usbhs_pkt *pkt = arg;
5505 ++ struct usbhs_pipe *pipe = pkt->pipe;
5506 + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
5507 + struct device *dev = usbhs_priv_to_dev(priv);
5508 + int ret;
5509 +
5510 ++ pkt->dma_result = result;
5511 + ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
5512 + if (ret < 0)
5513 + dev_err(dev, "dma_complete run_error %d : %d\n",
5514 +diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
5515 +index c3d3cc35cee0..4a7dc23ce3d3 100644
5516 +--- a/drivers/usb/renesas_usbhs/fifo.h
5517 ++++ b/drivers/usb/renesas_usbhs/fifo.h
5518 +@@ -50,7 +50,7 @@ struct usbhs_pkt {
5519 + struct usbhs_pkt *pkt);
5520 + struct work_struct work;
5521 + dma_addr_t dma;
5522 +- dma_cookie_t cookie;
5523 ++ const struct dmaengine_result *dma_result;
5524 + void *buf;
5525 + int length;
5526 + int trans;
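These fifo.c/fifo.h hunks swap the plain completion callback plus a later dmaengine_tx_status() query for the callback_result variant, which delivers a struct dmaengine_result (including the residue) directly to the handler, so the received size no longer depends on querying a possibly-reused cookie. A hedged kernel-style sketch of the wiring; my_pkt/my_complete are illustrative:

	struct my_pkt {
		size_t length;
		const struct dmaengine_result *dma_result;
	};

	static void my_complete(void *arg, const struct dmaengine_result *result)
	{
		struct my_pkt *pkt = arg;

		pkt->dma_result = result;
		/* bytes actually transferred == pkt->length - result->residue */
	}

	/* at submit time:
	 *	desc->callback_result = my_complete;
	 *	desc->callback_param  = pkt;
	 *	cookie = dmaengine_submit(desc);
	 */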
5527 +diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
5528 +index c22e5c4bbf1a..e35508f5e128 100644
5529 +--- a/drivers/usb/typec/mux/intel_pmc_mux.c
5530 ++++ b/drivers/usb/typec/mux/intel_pmc_mux.c
5531 +@@ -129,7 +129,8 @@ pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_mux_state *state)
5532 + msg[0] = PMC_USB_DP_HPD;
5533 + msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
5534 +
5535 +- msg[1] = PMC_USB_DP_HPD_IRQ;
5536 ++ if (data->status & DP_STATUS_IRQ_HPD)
5537 ++ msg[1] = PMC_USB_DP_HPD_IRQ;
5538 +
5539 + if (data->status & DP_STATUS_HPD_STATE)
5540 + msg[1] |= PMC_USB_DP_HPD_LVL;
5541 +@@ -142,6 +143,7 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
5542 + {
5543 + struct typec_displayport_data *data = state->data;
5544 + struct altmode_req req = { };
5545 ++ int ret;
5546 +
5547 + if (data->status & DP_STATUS_IRQ_HPD)
5548 + return pmc_usb_mux_dp_hpd(port, state);
5549 +@@ -161,7 +163,14 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
5550 + if (data->status & DP_STATUS_HPD_STATE)
5551 + req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH;
5552 +
5553 +- return pmc_usb_command(port, (void *)&req, sizeof(req));
5554 ++ ret = pmc_usb_command(port, (void *)&req, sizeof(req));
5555 ++ if (ret)
5556 ++ return ret;
5557 ++
5558 ++ if (data->status & DP_STATUS_HPD_STATE)
5559 ++ return pmc_usb_mux_dp_hpd(port, state);
5560 ++
5561 ++ return 0;
5562 + }
5563 +
5564 + static int
5565 +diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
5566 +index 017389021b96..b56a0880a044 100644
5567 +--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
5568 ++++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
5569 +@@ -179,26 +179,6 @@ out:
5570 + return tcpci_irq(chip->tcpci);
5571 + }
5572 +
5573 +-static int rt1711h_init_alert(struct rt1711h_chip *chip,
5574 +- struct i2c_client *client)
5575 +-{
5576 +- int ret;
5577 +-
5578 +- /* Disable chip interrupts before requesting irq */
5579 +- ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
5580 +- if (ret < 0)
5581 +- return ret;
5582 +-
5583 +- ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
5584 +- rt1711h_irq,
5585 +- IRQF_ONESHOT | IRQF_TRIGGER_LOW,
5586 +- dev_name(chip->dev), chip);
5587 +- if (ret < 0)
5588 +- return ret;
5589 +- enable_irq_wake(client->irq);
5590 +- return 0;
5591 +-}
5592 +-
5593 + static int rt1711h_sw_reset(struct rt1711h_chip *chip)
5594 + {
5595 + int ret;
5596 +@@ -260,7 +240,8 @@ static int rt1711h_probe(struct i2c_client *client,
5597 + if (ret < 0)
5598 + return ret;
5599 +
5600 +- ret = rt1711h_init_alert(chip, client);
5601 ++ /* Disable chip interrupts before requesting irq */
5602 ++ ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
5603 + if (ret < 0)
5604 + return ret;
5605 +
5606 +@@ -271,6 +252,14 @@ static int rt1711h_probe(struct i2c_client *client,
5607 + if (IS_ERR_OR_NULL(chip->tcpci))
5608 + return PTR_ERR(chip->tcpci);
5609 +
5610 ++ ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
5611 ++ rt1711h_irq,
5612 ++ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
5613 ++ dev_name(chip->dev), chip);
5614 ++ if (ret < 0)
5615 ++ return ret;
5616 ++ enable_irq_wake(client->irq);
5617 ++
5618 + return 0;
5619 + }
5620 +
5621 +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
5622 +index 9d28a8e3328f..e2a490c5ae08 100644
5623 +--- a/drivers/video/fbdev/core/fbcon.c
5624 ++++ b/drivers/video/fbdev/core/fbcon.c
5625 +@@ -2402,7 +2402,8 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
5626 + ops->graphics = 1;
5627 +
5628 + if (!blank) {
5629 +- var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
5630 ++ var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE |
5631 ++ FB_ACTIVATE_KD_TEXT;
5632 + fb_set_var(info, &var);
5633 + ops->graphics = 0;
5634 + ops->var = info->var;
5635 +diff --git a/fs/afs/cell.c b/fs/afs/cell.c
5636 +index 78ba5f932287..296b489861a9 100644
5637 +--- a/fs/afs/cell.c
5638 ++++ b/fs/afs/cell.c
5639 +@@ -154,10 +154,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
5640 + return ERR_PTR(-ENOMEM);
5641 + }
5642 +
5643 ++ cell->name = kmalloc(namelen + 1, GFP_KERNEL);
5644 ++ if (!cell->name) {
5645 ++ kfree(cell);
5646 ++ return ERR_PTR(-ENOMEM);
5647 ++ }
5648 ++
5649 + cell->net = net;
5650 + cell->name_len = namelen;
5651 + for (i = 0; i < namelen; i++)
5652 + cell->name[i] = tolower(name[i]);
5653 ++ cell->name[i] = 0;
5654 +
5655 + atomic_set(&cell->usage, 2);
5656 + INIT_WORK(&cell->manager, afs_manage_cell);
5657 +@@ -203,6 +210,7 @@ parse_failed:
5658 + if (ret == -EINVAL)
5659 + printk(KERN_ERR "kAFS: bad VL server IP address\n");
5660 + error:
5661 ++ kfree(cell->name);
5662 + kfree(cell);
5663 + _leave(" = %d", ret);
5664 + return ERR_PTR(ret);
5665 +@@ -483,6 +491,7 @@ static void afs_cell_destroy(struct rcu_head *rcu)
5666 +
5667 + afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
5668 + key_put(cell->anonymous_key);
5669 ++ kfree(cell->name);
5670 + kfree(cell);
5671 +
5672 + _leave(" [destroyed]");
5673 +diff --git a/fs/afs/internal.h b/fs/afs/internal.h
5674 +index 98e0cebd5e5e..c67a9767397d 100644
5675 +--- a/fs/afs/internal.h
5676 ++++ b/fs/afs/internal.h
5677 +@@ -397,7 +397,7 @@ struct afs_cell {
5678 + struct afs_vlserver_list __rcu *vl_servers;
5679 +
5680 + u8 name_len; /* Length of name */
5681 +- char name[64 + 1]; /* Cell name, case-flattened and NUL-padded */
5682 ++ char *name; /* Cell name, case-flattened and NUL-padded */
5683 + };
5684 +
5685 + /*
5686 +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
5687 +index 233c5663f233..0c17f18b4794 100644
5688 +--- a/fs/btrfs/block-group.c
5689 ++++ b/fs/btrfs/block-group.c
5690 +@@ -916,7 +916,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5691 + path = btrfs_alloc_path();
5692 + if (!path) {
5693 + ret = -ENOMEM;
5694 +- goto out_put_group;
5695 ++ goto out;
5696 + }
5697 +
5698 + /*
5699 +@@ -954,7 +954,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5700 + ret = btrfs_orphan_add(trans, BTRFS_I(inode));
5701 + if (ret) {
5702 + btrfs_add_delayed_iput(inode);
5703 +- goto out_put_group;
5704 ++ goto out;
5705 + }
5706 + clear_nlink(inode);
5707 + /* One for the block groups ref */
5708 +@@ -977,13 +977,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5709 +
5710 + ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
5711 + if (ret < 0)
5712 +- goto out_put_group;
5713 ++ goto out;
5714 + if (ret > 0)
5715 + btrfs_release_path(path);
5716 + if (ret == 0) {
5717 + ret = btrfs_del_item(trans, tree_root, path);
5718 + if (ret)
5719 +- goto out_put_group;
5720 ++ goto out;
5721 + btrfs_release_path(path);
5722 + }
5723 +
5724 +@@ -992,6 +992,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5725 + &fs_info->block_group_cache_tree);
5726 + RB_CLEAR_NODE(&block_group->cache_node);
5727 +
5728 ++ /* Once for the block groups rbtree */
5729 ++ btrfs_put_block_group(block_group);
5730 ++
5731 + if (fs_info->first_logical_byte == block_group->start)
5732 + fs_info->first_logical_byte = (u64)-1;
5733 + spin_unlock(&fs_info->block_group_cache_lock);
5734 +@@ -1102,10 +1105,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5735 +
5736 + ret = remove_block_group_free_space(trans, block_group);
5737 + if (ret)
5738 +- goto out_put_group;
5739 +-
5740 +- /* Once for the block groups rbtree */
5741 +- btrfs_put_block_group(block_group);
5742 ++ goto out;
5743 +
5744 + ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
5745 + if (ret > 0)
5746 +@@ -1128,10 +1128,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5747 + free_extent_map(em);
5748 + }
5749 +
5750 +-out_put_group:
5751 ++out:
5752 + /* Once for the lookup reference */
5753 + btrfs_put_block_group(block_group);
5754 +-out:
5755 + if (remove_rsv)
5756 + btrfs_delayed_refs_rsv_release(fs_info, 1);
5757 + btrfs_free_path(path);
5758 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
5759 +index 196d4511f812..09e6dff8a8f8 100644
5760 +--- a/fs/btrfs/ctree.h
5761 ++++ b/fs/btrfs/ctree.h
5762 +@@ -988,6 +988,8 @@ enum {
5763 + BTRFS_ROOT_DEAD_RELOC_TREE,
5764 + /* Mark dead root stored on device whose cleanup needs to be resumed */
5765 + BTRFS_ROOT_DEAD_TREE,
5766 ++ /* The root has a log tree. Used only for subvolume roots. */
5767 ++ BTRFS_ROOT_HAS_LOG_TREE,
5768 + };
5769 +
5770 + /*
5771 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
5772 +index 719e68ab552c..52d565ff66e2 100644
5773 +--- a/fs/btrfs/file.c
5774 ++++ b/fs/btrfs/file.c
5775 +@@ -1912,13 +1912,26 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
5776 + pos = iocb->ki_pos;
5777 + count = iov_iter_count(from);
5778 + if (iocb->ki_flags & IOCB_NOWAIT) {
5779 ++ size_t nocow_bytes = count;
5780 ++
5781 + /*
5782 + * We will allocate space in case nodatacow is not set,
5783 + * so bail
5784 + */
5785 + if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
5786 + BTRFS_INODE_PREALLOC)) ||
5787 +- check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
5788 ++ check_can_nocow(BTRFS_I(inode), pos, &nocow_bytes) <= 0) {
5789 ++ inode_unlock(inode);
5790 ++ return -EAGAIN;
5791 ++ }
5792 ++ /* check_can_nocow() locks the snapshot lock on success */
5793 ++ btrfs_drew_write_unlock(&root->snapshot_lock);
5794 ++ /*
5795 ++ * There are holes in the range or parts of the range that must
5796 ++ * be COWed (shared extents, RO block groups, etc), so just bail
5797 ++ * out.
5798 ++ */
5799 ++ if (nocow_bytes < count) {
5800 + inode_unlock(inode);
5801 + return -EAGAIN;
5802 + }
5803 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
5804 +index 66dd919fc723..6aa200e373c8 100644
5805 +--- a/fs/btrfs/inode.c
5806 ++++ b/fs/btrfs/inode.c
5807 +@@ -985,6 +985,7 @@ static noinline int cow_file_range(struct inode *inode,
5808 + u64 num_bytes;
5809 + unsigned long ram_size;
5810 + u64 cur_alloc_size = 0;
5811 ++ u64 min_alloc_size;
5812 + u64 blocksize = fs_info->sectorsize;
5813 + struct btrfs_key ins;
5814 + struct extent_map *em;
5815 +@@ -1035,10 +1036,26 @@ static noinline int cow_file_range(struct inode *inode,
5816 + btrfs_drop_extent_cache(BTRFS_I(inode), start,
5817 + start + num_bytes - 1, 0);
5818 +
5819 ++ /*
5820 ++ * Relocation relies on the relocated extents to have exactly the same
5821 ++ * size as the original extents. Normally writeback for relocation data
5822 ++ * extents follows a NOCOW path because relocation preallocates the
5823 ++ * extents. However, due to an operation such as scrub turning a block
5824 ++ * group to RO mode, it may fall back to COW mode, so we must make sure
5825 ++ * an extent allocated during COW has exactly the requested size and can
5826 ++ * not be split into smaller extents, otherwise relocation breaks and
5827 ++ * fails during the stage where it updates the bytenr of file extent
5828 ++ * items.
5829 ++ */
5830 ++ if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
5831 ++ min_alloc_size = num_bytes;
5832 ++ else
5833 ++ min_alloc_size = fs_info->sectorsize;
5834 ++
5835 + while (num_bytes > 0) {
5836 + cur_alloc_size = num_bytes;
5837 + ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
5838 +- fs_info->sectorsize, 0, alloc_hint,
5839 ++ min_alloc_size, 0, alloc_hint,
5840 + &ins, 1, 1);
5841 + if (ret < 0)
5842 + goto out_unlock;
5843 +@@ -1361,6 +1378,8 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
5844 + int *page_started, unsigned long *nr_written)
5845 + {
5846 + const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode));
5847 ++ const bool is_reloc_ino = (BTRFS_I(inode)->root->root_key.objectid ==
5848 ++ BTRFS_DATA_RELOC_TREE_OBJECTID);
5849 + const u64 range_bytes = end + 1 - start;
5850 + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5851 + u64 range_start = start;
5852 +@@ -1391,18 +1410,23 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
5853 + * data space info, which we incremented in the step above.
5854 + *
5855 + * If we need to fallback to cow and the inode corresponds to a free
5856 +- * space cache inode, we must also increment bytes_may_use of the data
5857 +- * space_info for the same reason. Space caches always get a prealloc
5858 ++ * space cache inode or an inode of the data relocation tree, we must
5859 ++ * also increment bytes_may_use of the data space_info for the same
5860 ++ * reason. Space caches and relocated data extents always get a prealloc
5861 + * extent for them, however scrub or balance may have set the block
5862 +- * group that contains that extent to RO mode.
5863 ++ * group that contains that extent to RO mode and therefore force COW
5864 ++ * when starting writeback.
5865 + */
5866 + count = count_range_bits(io_tree, &range_start, end, range_bytes,
5867 + EXTENT_NORESERVE, 0);
5868 +- if (count > 0 || is_space_ino) {
5869 +- const u64 bytes = is_space_ino ? range_bytes : count;
5870 ++ if (count > 0 || is_space_ino || is_reloc_ino) {
5871 ++ u64 bytes = count;
5872 + struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
5873 + struct btrfs_space_info *sinfo = fs_info->data_sinfo;
5874 +
5875 ++ if (is_space_ino || is_reloc_ino)
5876 ++ bytes = range_bytes;
5877 ++
5878 + spin_lock(&sinfo->lock);
5879 + btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
5880 + spin_unlock(&sinfo->lock);
5881 +@@ -8238,9 +8262,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
5882 + dio_data.overwrite = 1;
5883 + inode_unlock(inode);
5884 + relock = true;
5885 +- } else if (iocb->ki_flags & IOCB_NOWAIT) {
5886 +- ret = -EAGAIN;
5887 +- goto out;
5888 + }
5889 + ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
5890 + offset, count);
5891 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
5892 +index ea72b9d54ec8..bdfc42149448 100644
5893 +--- a/fs/btrfs/tree-log.c
5894 ++++ b/fs/btrfs/tree-log.c
5895 +@@ -169,6 +169,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
5896 + if (ret)
5897 + goto out;
5898 +
5899 ++ set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
5900 + clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
5901 + root->log_start_pid = current->pid;
5902 + }
5903 +@@ -195,6 +196,9 @@ static int join_running_log_trans(struct btrfs_root *root)
5904 + {
5905 + int ret = -ENOENT;
5906 +
5907 ++ if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
5908 ++ return ret;
5909 ++
5910 + mutex_lock(&root->log_mutex);
5911 + if (root->log_root) {
5912 + ret = 0;
5913 +@@ -3312,6 +3316,7 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
5914 + if (root->log_root) {
5915 + free_log_tree(trans, root->log_root);
5916 + root->log_root = NULL;
5917 ++ clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
5918 + }
5919 + return 0;
5920 + }
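
The three tree-log.c hunks above maintain a BTRFS_ROOT_HAS_LOG_TREE bit so that
join_running_log_trans() can return -ENOENT without ever taking log_mutex when
no log tree was started. A minimal user-space sketch of that lock-free fast
path in front of a mutex, with pthread and C11 atomics standing in for the
kernel's set_bit/test_bit and mutex (names are illustrative, not the kernel's):

    #include <pthread.h>
    #include <stdatomic.h>

    struct root {
    	atomic_bool has_log_tree;
    	pthread_mutex_t log_mutex;
    	void *log_root;
    };

    /* Fast path: if the flag was never set, skip the mutex entirely. */
    static int join_running_log(struct root *r)
    {
    	int ret = -1; /* stand-in for -ENOENT */

    	if (!atomic_load(&r->has_log_tree))
    		return ret;

    	pthread_mutex_lock(&r->log_mutex);
    	if (r->log_root)
    		ret = 0;
    	pthread_mutex_unlock(&r->log_mutex);
    	return ret;
    }

    int main(void)
    {
    	struct root r = { .log_mutex = PTHREAD_MUTEX_INITIALIZER };

    	return join_running_log(&r) == -1 ? 0 : 1;
    }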
5921 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
5922 +index f829f4165d38..6fc69c3b2749 100644
5923 +--- a/fs/cifs/smb2ops.c
5924 ++++ b/fs/cifs/smb2ops.c
5925 +@@ -759,6 +759,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
5926 + /* close extra handle outside of crit sec */
5927 + SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
5928 + }
5929 ++ rc = 0;
5930 + goto oshr_free;
5931 + }
5932 +
5933 +@@ -3144,6 +3145,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
5934 + trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
5935 + ses->Suid, offset, len);
5936 +
5937 ++ /*
5938 ++	 * We zero the range through an ioctl, so we need to remove the page
5939 ++	 * caches first; otherwise the data may be inconsistent with the server.
5940 ++ */
5941 ++ truncate_pagecache_range(inode, offset, offset + len - 1);
5942 +
5943 + /* if file not oplocked can't be sure whether asking to extend size */
5944 + if (!CIFS_CACHE_READ(cifsi))
5945 +@@ -3210,6 +3216,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
5946 + return rc;
5947 + }
5948 +
5949 ++ /*
5950 ++	 * We implement hole punching through an ioctl, so we need to remove the
5951 ++	 * page caches first; otherwise the data may be inconsistent with the server.
5952 ++ */
5953 ++ truncate_pagecache_range(inode, offset, offset + len - 1);
5954 ++
5955 + cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
5956 +
5957 + fsctl_buf.FileOffset = cpu_to_le64(offset);
5958 +diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
5959 +index 7824f5563a55..9b66c28b3ae9 100644
5960 +--- a/fs/erofs/zdata.h
5961 ++++ b/fs/erofs/zdata.h
5962 +@@ -144,22 +144,22 @@ static inline void z_erofs_onlinepage_init(struct page *page)
5963 + static inline void z_erofs_onlinepage_fixup(struct page *page,
5964 + uintptr_t index, bool down)
5965 + {
5966 +- unsigned long *p, o, v, id;
5967 +-repeat:
5968 +- p = &page_private(page);
5969 +- o = READ_ONCE(*p);
5970 ++ union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
5971 ++ int orig, orig_index, val;
5972 +
5973 +- id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
5974 +- if (id) {
5975 ++repeat:
5976 ++ orig = atomic_read(u.o);
5977 ++ orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
5978 ++ if (orig_index) {
5979 + if (!index)
5980 + return;
5981 +
5982 +- DBG_BUGON(id != index);
5983 ++ DBG_BUGON(orig_index != index);
5984 + }
5985 +
5986 +- v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
5987 +- ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
5988 +- if (cmpxchg(p, o, v) != o)
5989 ++ val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
5990 ++ ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
5991 ++ if (atomic_cmpxchg(u.o, orig, val) != orig)
5992 + goto repeat;
5993 + }
5994 +
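
The erofs change above replaces an open-coded cmpxchg on page_private() with
atomic_cmpxchg() through a converter union; the underlying pattern is a
compare-and-swap retry loop that updates a packed index/count word atomically.
A minimal user-space sketch of that pattern, using C11 atomics rather than the
kernel primitives (shift, mask, and names are illustrative only):

    #include <stdatomic.h>
    #include <stdio.h>

    #define INDEX_SHIFT 16
    #define COUNT_MASK  0xffffu

    /* Pack an index and a reference count into one atomic word and update
     * both in a single compare-and-swap retry loop. */
    static void fixup(atomic_uint *word, unsigned int index, unsigned int down)
    {
    	unsigned int orig = atomic_load(word);
    	unsigned int val;

    	do {
    		/* recompute from the latest observed value */
    		val = (index << INDEX_SHIFT) |
    		      ((orig & COUNT_MASK) + down);
    	} while (!atomic_compare_exchange_weak(word, &orig, val));
    }

    int main(void)
    {
    	atomic_uint w = 0;

    	fixup(&w, 3, 1);
    	printf("index=%u count=%u\n", w >> INDEX_SHIFT, w & COUNT_MASK);
    	return 0;
    }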
5995 +diff --git a/fs/io_uring.c b/fs/io_uring.c
5996 +index 1829be7f63a3..4ab1728de247 100644
5997 +--- a/fs/io_uring.c
5998 ++++ b/fs/io_uring.c
5999 +@@ -1942,10 +1942,8 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
6000 +
6001 + WRITE_ONCE(req->result, res);
6002 + /* order with io_poll_complete() checking ->result */
6003 +- if (res != -EAGAIN) {
6004 +- smp_wmb();
6005 +- WRITE_ONCE(req->iopoll_completed, 1);
6006 +- }
6007 ++ smp_wmb();
6008 ++ WRITE_ONCE(req->iopoll_completed, 1);
6009 + }
6010 +
6011 + /*
6012 +@@ -5425,9 +5423,6 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6013 + if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
6014 + const bool in_async = io_wq_current_is_worker();
6015 +
6016 +- if (req->result == -EAGAIN)
6017 +- return -EAGAIN;
6018 +-
6019 + /* workqueue context doesn't hold uring_lock, grab it now */
6020 + if (in_async)
6021 + mutex_lock(&ctx->uring_lock);
6022 +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
6023 +index d49b1d197908..f0c3f0123131 100644
6024 +--- a/fs/nfs/direct.c
6025 ++++ b/fs/nfs/direct.c
6026 +@@ -267,8 +267,6 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
6027 + {
6028 + struct inode *inode = dreq->inode;
6029 +
6030 +- inode_dio_end(inode);
6031 +-
6032 + if (dreq->iocb) {
6033 + long res = (long) dreq->error;
6034 + if (dreq->count != 0) {
6035 +@@ -280,7 +278,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
6036 +
6037 + complete(&dreq->completion);
6038 +
6039 ++ igrab(inode);
6040 + nfs_direct_req_release(dreq);
6041 ++ inode_dio_end(inode);
6042 ++ iput(inode);
6043 + }
6044 +
6045 + static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
6046 +@@ -410,8 +411,10 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
6047 + * generic layer handle the completion.
6048 + */
6049 + if (requested_bytes == 0) {
6050 +- inode_dio_end(inode);
6051 ++ igrab(inode);
6052 + nfs_direct_req_release(dreq);
6053 ++ inode_dio_end(inode);
6054 ++ iput(inode);
6055 + return result < 0 ? result : -EIO;
6056 + }
6057 +
6058 +@@ -864,8 +867,10 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
6059 + * generic layer handle the completion.
6060 + */
6061 + if (requested_bytes == 0) {
6062 +- inode_dio_end(inode);
6063 ++ igrab(inode);
6064 + nfs_direct_req_release(dreq);
6065 ++ inode_dio_end(inode);
6066 ++ iput(inode);
6067 + return result < 0 ? result : -EIO;
6068 + }
6069 +
6070 +diff --git a/fs/nfs/file.c b/fs/nfs/file.c
6071 +index f96367a2463e..ccd6c1637b27 100644
6072 +--- a/fs/nfs/file.c
6073 ++++ b/fs/nfs/file.c
6074 +@@ -83,6 +83,7 @@ nfs_file_release(struct inode *inode, struct file *filp)
6075 + dprintk("NFS: release(%pD2)\n", filp);
6076 +
6077 + nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
6078 ++ inode_dio_wait(inode);
6079 + nfs_file_clear_open_context(filp);
6080 + return 0;
6081 + }
6082 +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
6083 +index 7d399f72ebbb..de03e440b7ee 100644
6084 +--- a/fs/nfs/flexfilelayout/flexfilelayout.c
6085 ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
6086 +@@ -907,9 +907,8 @@ retry:
6087 + goto out_mds;
6088 +
6089 + /* Use a direct mapping of ds_idx to pgio mirror_idx */
6090 +- if (WARN_ON_ONCE(pgio->pg_mirror_count !=
6091 +- FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
6092 +- goto out_mds;
6093 ++ if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
6094 ++ goto out_eagain;
6095 +
6096 + for (i = 0; i < pgio->pg_mirror_count; i++) {
6097 + mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
6098 +@@ -931,7 +930,10 @@ retry:
6099 + (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
6100 + pgio->pg_maxretrans = io_maxretrans;
6101 + return;
6102 +-
6103 ++out_eagain:
6104 ++ pnfs_generic_pg_cleanup(pgio);
6105 ++ pgio->pg_error = -EAGAIN;
6106 ++ return;
6107 + out_mds:
6108 + trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
6109 + 0, NFS4_MAX_UINT64, IOMODE_RW,
6110 +@@ -941,6 +943,7 @@ out_mds:
6111 + pgio->pg_lseg = NULL;
6112 + pgio->pg_maxretrans = 0;
6113 + nfs_pageio_reset_write_mds(pgio);
6114 ++ pgio->pg_error = -EAGAIN;
6115 + }
6116 +
6117 + static unsigned int
6118 +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
6119 +index 152a0fc4e905..751bc4dc7466 100644
6120 +--- a/fs/ocfs2/dlmglue.c
6121 ++++ b/fs/ocfs2/dlmglue.c
6122 +@@ -689,6 +689,12 @@ static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
6123 + &ocfs2_nfs_sync_lops, osb);
6124 + }
6125 +
6126 ++static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
6127 ++{
6128 ++ ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
6129 ++ init_rwsem(&osb->nfs_sync_rwlock);
6130 ++}
6131 ++
6132 + void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
6133 + {
6134 + struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
6135 +@@ -2855,6 +2861,11 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
6136 + if (ocfs2_is_hard_readonly(osb))
6137 + return -EROFS;
6138 +
6139 ++ if (ex)
6140 ++ down_write(&osb->nfs_sync_rwlock);
6141 ++ else
6142 ++ down_read(&osb->nfs_sync_rwlock);
6143 ++
6144 + if (ocfs2_mount_local(osb))
6145 + return 0;
6146 +
6147 +@@ -2873,6 +2884,10 @@ void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
6148 + if (!ocfs2_mount_local(osb))
6149 + ocfs2_cluster_unlock(osb, lockres,
6150 + ex ? LKM_EXMODE : LKM_PRMODE);
6151 ++ if (ex)
6152 ++ up_write(&osb->nfs_sync_rwlock);
6153 ++ else
6154 ++ up_read(&osb->nfs_sync_rwlock);
6155 + }
6156 +
6157 + int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
6158 +@@ -3340,7 +3355,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
6159 + local:
6160 + ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
6161 + ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
6162 +- ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
6163 ++ ocfs2_nfs_sync_lock_init(osb);
6164 + ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
6165 +
6166 + osb->cconn = conn;
6167 +diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
6168 +index 9150cfa4df7d..9461bd3e1c0c 100644
6169 +--- a/fs/ocfs2/ocfs2.h
6170 ++++ b/fs/ocfs2/ocfs2.h
6171 +@@ -394,6 +394,7 @@ struct ocfs2_super
6172 + struct ocfs2_lock_res osb_super_lockres;
6173 + struct ocfs2_lock_res osb_rename_lockres;
6174 + struct ocfs2_lock_res osb_nfs_sync_lockres;
6175 ++ struct rw_semaphore nfs_sync_rwlock;
6176 + struct ocfs2_lock_res osb_trim_fs_lockres;
6177 + struct mutex obs_trim_fs_mutex;
6178 + struct ocfs2_dlm_debug *osb_dlm_debug;
6179 +diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
6180 +index 0dd8c41bafd4..19137c6d087b 100644
6181 +--- a/fs/ocfs2/ocfs2_fs.h
6182 ++++ b/fs/ocfs2/ocfs2_fs.h
6183 +@@ -290,7 +290,7 @@
6184 + #define OCFS2_MAX_SLOTS 255
6185 +
6186 + /* Slot map indicator for an empty slot */
6187 +-#define OCFS2_INVALID_SLOT -1
6188 ++#define OCFS2_INVALID_SLOT ((u16)-1)
6189 +
6190 + #define OCFS2_VOL_UUID_LEN 16
6191 + #define OCFS2_MAX_VOL_LABEL_LEN 64
6192 +@@ -326,8 +326,8 @@ struct ocfs2_system_inode_info {
6193 + enum {
6194 + BAD_BLOCK_SYSTEM_INODE = 0,
6195 + GLOBAL_INODE_ALLOC_SYSTEM_INODE,
6196 ++#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE
6197 + SLOT_MAP_SYSTEM_INODE,
6198 +-#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE
6199 + HEARTBEAT_SYSTEM_INODE,
6200 + GLOBAL_BITMAP_SYSTEM_INODE,
6201 + USER_QUOTA_SYSTEM_INODE,
6202 +diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
6203 +index 4836becb7578..45745cc3408a 100644
6204 +--- a/fs/ocfs2/suballoc.c
6205 ++++ b/fs/ocfs2/suballoc.c
6206 +@@ -2825,9 +2825,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
6207 + goto bail;
6208 + }
6209 +
6210 +- inode_alloc_inode =
6211 +- ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
6212 +- suballoc_slot);
6213 ++ if (suballoc_slot == (u16)OCFS2_INVALID_SLOT)
6214 ++ inode_alloc_inode = ocfs2_get_system_file_inode(osb,
6215 ++ GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
6216 ++ else
6217 ++ inode_alloc_inode = ocfs2_get_system_file_inode(osb,
6218 ++ INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
6219 + if (!inode_alloc_inode) {
6220 + /* the error code could be inaccurate, but we are not able to
6221 + * get the correct one. */
6222 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
6223 +index de23fb95fe91..64a5335046b0 100644
6224 +--- a/include/linux/intel-iommu.h
6225 ++++ b/include/linux/intel-iommu.h
6226 +@@ -40,6 +40,7 @@
6227 + #define DMA_PTE_SNP BIT_ULL(11)
6228 +
6229 + #define DMA_FL_PTE_PRESENT BIT_ULL(0)
6230 ++#define DMA_FL_PTE_US BIT_ULL(2)
6231 + #define DMA_FL_PTE_XD BIT_ULL(63)
6232 +
6233 + #define CONTEXT_TT_MULTI_LEVEL 0
6234 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
6235 +index 130a668049ab..36c7ad24d54d 100644
6236 +--- a/include/linux/netdevice.h
6237 ++++ b/include/linux/netdevice.h
6238 +@@ -3125,7 +3125,7 @@ static inline int dev_recursion_level(void)
6239 + return this_cpu_read(softnet_data.xmit.recursion);
6240 + }
6241 +
6242 +-#define XMIT_RECURSION_LIMIT 10
6243 ++#define XMIT_RECURSION_LIMIT 8
6244 + static inline bool dev_xmit_recursion(void)
6245 + {
6246 + return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
6247 +diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
6248 +index 733fad7dfbed..6d15040c642c 100644
6249 +--- a/include/linux/qed/qed_chain.h
6250 ++++ b/include/linux/qed/qed_chain.h
6251 +@@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
6252 +
6253 + static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
6254 + {
6255 ++ u16 elem_per_page = p_chain->elem_per_page;
6256 ++ u32 prod = p_chain->u.chain16.prod_idx;
6257 ++ u32 cons = p_chain->u.chain16.cons_idx;
6258 + u16 used;
6259 +
6260 +- used = (u16) (((u32)0x10000 +
6261 +- (u32)p_chain->u.chain16.prod_idx) -
6262 +- (u32)p_chain->u.chain16.cons_idx);
6263 ++ if (prod < cons)
6264 ++ prod += (u32)U16_MAX + 1;
6265 ++
6266 ++ used = (u16)(prod - cons);
6267 + if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
6268 +- used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
6269 +- p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
6270 ++ used -= prod / elem_per_page - cons / elem_per_page;
6271 +
6272 + return (u16)(p_chain->capacity - used);
6273 + }
6274 +
6275 + static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
6276 + {
6277 ++ u16 elem_per_page = p_chain->elem_per_page;
6278 ++ u64 prod = p_chain->u.chain32.prod_idx;
6279 ++ u64 cons = p_chain->u.chain32.cons_idx;
6280 + u32 used;
6281 +
6282 +- used = (u32) (((u64)0x100000000ULL +
6283 +- (u64)p_chain->u.chain32.prod_idx) -
6284 +- (u64)p_chain->u.chain32.cons_idx);
6285 ++ if (prod < cons)
6286 ++ prod += (u64)U32_MAX + 1;
6287 ++
6288 ++ used = (u32)(prod - cons);
6289 + if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
6290 +- used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
6291 +- p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
6292 ++ used -= (u32)(prod / elem_per_page - cons / elem_per_page);
6293 +
6294 + return p_chain->capacity - used;
6295 + }
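
Both helpers above compute "elements in use" from free-running producer and
consumer indices that wrap at the type boundary. The rewritten form widens the
indices and adds one full wrap period only when the producer has lapped the
consumer, instead of unconditionally adding a bias constant. A standalone
sketch of the 16-bit case (the arithmetic only, not the driver structures):

    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe distance between free-running 16-bit ring indices: widen
     * to 32 bits and add 2^16 only when the producer has wrapped past the
     * consumer, so the subtraction never underflows. */
    static uint16_t ring_used(uint16_t prod_idx, uint16_t cons_idx)
    {
    	uint32_t prod = prod_idx;
    	uint32_t cons = cons_idx;

    	if (prod < cons)
    		prod += (uint32_t)UINT16_MAX + 1;

    	return (uint16_t)(prod - cons);
    }

    int main(void)
    {
    	printf("%u\n", ring_used(10, 7));     /* 3, no wrap */
    	printf("%u\n", ring_used(2, 65533));  /* 5, producer wrapped */
    	return 0;
    }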
6296 +diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
6297 +index 1815065d52f3..1b0c7813197b 100644
6298 +--- a/include/linux/syscalls.h
6299 ++++ b/include/linux/syscalls.h
6300 +@@ -1358,7 +1358,7 @@ static inline long ksys_lchown(const char __user *filename, uid_t user,
6301 +
6302 + extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
6303 +
6304 +-static inline long ksys_ftruncate(unsigned int fd, unsigned long length)
6305 ++static inline long ksys_ftruncate(unsigned int fd, loff_t length)
6306 + {
6307 + return do_sys_ftruncate(fd, length, 1);
6308 + }
6309 +diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
6310 +index c253461b1c4e..96d36b7a1344 100644
6311 +--- a/include/linux/tpm_eventlog.h
6312 ++++ b/include/linux/tpm_eventlog.h
6313 +@@ -81,6 +81,8 @@ struct tcg_efi_specid_event_algs {
6314 + u16 digest_size;
6315 + } __packed;
6316 +
6317 ++#define TCG_SPECID_SIG "Spec ID Event03"
6318 ++
6319 + struct tcg_efi_specid_event_head {
6320 + u8 signature[16];
6321 + u32 platform_class;
6322 +@@ -171,6 +173,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
6323 + int i;
6324 + int j;
6325 + u32 count, event_type;
6326 ++ const u8 zero_digest[sizeof(event_header->digest)] = {0};
6327 +
6328 + marker = event;
6329 + marker_start = marker;
6330 +@@ -198,10 +201,19 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
6331 + count = READ_ONCE(event->count);
6332 + event_type = READ_ONCE(event->event_type);
6333 +
6334 ++ /* Verify that it's the log header */
6335 ++ if (event_header->pcr_idx != 0 ||
6336 ++ event_header->event_type != NO_ACTION ||
6337 ++ memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) {
6338 ++ size = 0;
6339 ++ goto out;
6340 ++ }
6341 ++
6342 + efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
6343 +
6344 + /* Check if event is malformed. */
6345 +- if (count > efispecid->num_algs) {
6346 ++ if (memcmp(efispecid->signature, TCG_SPECID_SIG,
6347 ++ sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) {
6348 + size = 0;
6349 + goto out;
6350 + }
6351 +diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
6352 +index 15b4d9aec7ff..122d9e2d8dfd 100644
6353 +--- a/include/net/sctp/constants.h
6354 ++++ b/include/net/sctp/constants.h
6355 +@@ -353,11 +353,13 @@ enum {
6356 + ipv4_is_anycast_6to4(a))
6357 +
6358 + /* Flags used for the bind address copy functions. */
6359 +-#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by
6360 ++#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by
6361 + local sock family */
6362 +-#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by
6363 ++#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by
6364 ++ local sock family */
6365 ++#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by
6366 + peer */
6367 +-#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by
6368 ++#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by
6369 + peer */
6370 +
6371 + /* Reasons to retransmit. */
6372 +diff --git a/include/net/sock.h b/include/net/sock.h
6373 +index 3e8c6d4b4b59..46423e86dba5 100644
6374 +--- a/include/net/sock.h
6375 ++++ b/include/net/sock.h
6376 +@@ -1846,7 +1846,6 @@ static inline int sk_rx_queue_get(const struct sock *sk)
6377 +
6378 + static inline void sk_set_socket(struct sock *sk, struct socket *sock)
6379 + {
6380 +- sk_tx_queue_clear(sk);
6381 + sk->sk_socket = sock;
6382 + }
6383 +
6384 +diff --git a/include/net/xfrm.h b/include/net/xfrm.h
6385 +index 8f71c111e65a..03024701c79f 100644
6386 +--- a/include/net/xfrm.h
6387 ++++ b/include/net/xfrm.h
6388 +@@ -1013,6 +1013,7 @@ struct xfrm_offload {
6389 + #define XFRM_GRO 32
6390 + #define XFRM_ESP_NO_TRAILER 64
6391 + #define XFRM_DEV_RESUME 128
6392 ++#define XFRM_XMIT 256
6393 +
6394 + __u32 status;
6395 + #define CRYPTO_SUCCESS 1
6396 +diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
6397 +index b6aac7ee1f67..4c14e8be7267 100644
6398 +--- a/include/uapi/linux/fb.h
6399 ++++ b/include/uapi/linux/fb.h
6400 +@@ -205,6 +205,7 @@ struct fb_bitfield {
6401 + #define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */
6402 + #define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/
6403 + #define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */
6404 ++#define FB_ACTIVATE_KD_TEXT 512 /* for KDSET vt ioctl */
6405 +
6406 + #define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */
6407 +
6408 +diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
6409 +index cb305e71e7de..25aebd21c15b 100644
6410 +--- a/kernel/bpf/cgroup.c
6411 ++++ b/kernel/bpf/cgroup.c
6412 +@@ -1240,16 +1240,23 @@ static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
6413 +
6414 + static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
6415 + {
6416 +- if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
6417 ++ if (unlikely(max_optlen < 0))
6418 + return -EINVAL;
6419 +
6420 ++ if (unlikely(max_optlen > PAGE_SIZE)) {
6421 ++ /* We don't expose optvals that are greater than PAGE_SIZE
6422 ++ * to the BPF program.
6423 ++ */
6424 ++ max_optlen = PAGE_SIZE;
6425 ++ }
6426 ++
6427 + ctx->optval = kzalloc(max_optlen, GFP_USER);
6428 + if (!ctx->optval)
6429 + return -ENOMEM;
6430 +
6431 + ctx->optval_end = ctx->optval + max_optlen;
6432 +
6433 +- return 0;
6434 ++ return max_optlen;
6435 + }
6436 +
6437 + static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
6438 +@@ -1283,13 +1290,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
6439 + */
6440 + max_optlen = max_t(int, 16, *optlen);
6441 +
6442 +- ret = sockopt_alloc_buf(&ctx, max_optlen);
6443 +- if (ret)
6444 +- return ret;
6445 ++ max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
6446 ++ if (max_optlen < 0)
6447 ++ return max_optlen;
6448 +
6449 + ctx.optlen = *optlen;
6450 +
6451 +- if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
6452 ++ if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
6453 + ret = -EFAULT;
6454 + goto out;
6455 + }
6456 +@@ -1317,8 +1324,14 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
6457 + /* export any potential modifications */
6458 + *level = ctx.level;
6459 + *optname = ctx.optname;
6460 +- *optlen = ctx.optlen;
6461 +- *kernel_optval = ctx.optval;
6462 ++
6463 ++ /* optlen == 0 from BPF indicates that we should
6464 ++ * use the original userspace data.
6465 ++ */
6466 ++ if (ctx.optlen != 0) {
6467 ++ *optlen = ctx.optlen;
6468 ++ *kernel_optval = ctx.optval;
6469 ++ }
6470 + }
6471 +
6472 + out:
6473 +@@ -1350,12 +1363,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
6474 + __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
6475 + return retval;
6476 +
6477 +- ret = sockopt_alloc_buf(&ctx, max_optlen);
6478 +- if (ret)
6479 +- return ret;
6480 +-
6481 + ctx.optlen = max_optlen;
6482 +
6483 ++ max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
6484 ++ if (max_optlen < 0)
6485 ++ return max_optlen;
6486 ++
6487 + if (!retval) {
6488 + /* If kernel getsockopt finished successfully,
6489 + * copy whatever was returned to the user back
6490 +@@ -1369,10 +1382,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
6491 + goto out;
6492 + }
6493 +
6494 +- if (ctx.optlen > max_optlen)
6495 +- ctx.optlen = max_optlen;
6496 +-
6497 +- if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
6498 ++ if (copy_from_user(ctx.optval, optval,
6499 ++ min(ctx.optlen, max_optlen)) != 0) {
6500 + ret = -EFAULT;
6501 + goto out;
6502 + }
6503 +@@ -1401,10 +1412,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
6504 + goto out;
6505 + }
6506 +
6507 +- if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
6508 +- put_user(ctx.optlen, optlen)) {
6509 +- ret = -EFAULT;
6510 +- goto out;
6511 ++ if (ctx.optlen != 0) {
6512 ++ if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
6513 ++ put_user(ctx.optlen, optlen)) {
6514 ++ ret = -EFAULT;
6515 ++ goto out;
6516 ++ }
6517 + }
6518 +
6519 + ret = ctx.retval;
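
The allocator above now clamps the requested size to PAGE_SIZE and returns the
usable length, and both callers copy at most min(optlen, max_optlen) bytes. A
hedged user-space sketch of that clamp-and-report contract (BUF_CAP and the
helper name are stand-ins, not kernel APIs):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    #define BUF_CAP 4096 /* stand-in for PAGE_SIZE */

    /* Clamp an untrusted length instead of rejecting it, and return the
     * usable size; the caller must copy at most that many bytes. */
    static int alloc_capped(char **out, int want)
    {
    	if (want < 0)
    		return -EINVAL;
    	if (want > BUF_CAP)
    		want = BUF_CAP; /* never expose more than one page */

    	*out = calloc(1, want ? want : 1);
    	if (!*out)
    		return -ENOMEM;
    	return want;
    }

    int main(void)
    {
    	char src[8192] = {0};
    	char *buf;
    	int optlen = sizeof(src);
    	int cap = alloc_capped(&buf, optlen);

    	if (cap < 0)
    		return 1;
    	/* the copy is bounded by the clamped size, not the request */
    	memcpy(buf, src, optlen < cap ? optlen : cap);
    	free(buf);
    	return 0;
    }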
6520 +diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
6521 +index 58bdca5d978a..badf382bbd36 100644
6522 +--- a/kernel/bpf/devmap.c
6523 ++++ b/kernel/bpf/devmap.c
6524 +@@ -85,12 +85,13 @@ static DEFINE_PER_CPU(struct list_head, dev_flush_list);
6525 + static DEFINE_SPINLOCK(dev_map_lock);
6526 + static LIST_HEAD(dev_map_list);
6527 +
6528 +-static struct hlist_head *dev_map_create_hash(unsigned int entries)
6529 ++static struct hlist_head *dev_map_create_hash(unsigned int entries,
6530 ++ int numa_node)
6531 + {
6532 + int i;
6533 + struct hlist_head *hash;
6534 +
6535 +- hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
6536 ++ hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
6537 + if (hash != NULL)
6538 + for (i = 0; i < entries; i++)
6539 + INIT_HLIST_HEAD(&hash[i]);
6540 +@@ -138,7 +139,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
6541 + return -EINVAL;
6542 +
6543 + if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
6544 +- dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
6545 ++ dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
6546 ++ dtab->map.numa_node);
6547 + if (!dtab->dev_index_head)
6548 + goto free_charge;
6549 +
6550 +@@ -223,7 +225,7 @@ static void dev_map_free(struct bpf_map *map)
6551 + }
6552 + }
6553 +
6554 +- kfree(dtab->dev_index_head);
6555 ++ bpf_map_area_free(dtab->dev_index_head);
6556 + } else {
6557 + for (i = 0; i < dtab->map.max_entries; i++) {
6558 + struct bpf_dtab_netdev *dev;
6559 +diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
6560 +index 8f4bbdaf965e..2270930f36f8 100644
6561 +--- a/kernel/dma/direct.c
6562 ++++ b/kernel/dma/direct.c
6563 +@@ -124,6 +124,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
6564 + {
6565 + struct page *page;
6566 + void *ret;
6567 ++ int err;
6568 +
6569 + if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
6570 + dma_alloc_need_uncached(dev, attrs) &&
6571 +@@ -160,6 +161,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
6572 + __builtin_return_address(0));
6573 + if (!ret)
6574 + goto out_free_pages;
6575 ++ if (force_dma_unencrypted(dev)) {
6576 ++ err = set_memory_decrypted((unsigned long)ret,
6577 ++ 1 << get_order(size));
6578 ++ if (err)
6579 ++ goto out_free_pages;
6580 ++ }
6581 + memset(ret, 0, size);
6582 + goto done;
6583 + }
6584 +@@ -176,8 +183,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
6585 + }
6586 +
6587 + ret = page_address(page);
6588 +- if (force_dma_unencrypted(dev))
6589 +- set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
6590 ++ if (force_dma_unencrypted(dev)) {
6591 ++ err = set_memory_decrypted((unsigned long)ret,
6592 ++ 1 << get_order(size));
6593 ++ if (err)
6594 ++ goto out_free_pages;
6595 ++ }
6596 +
6597 + memset(ret, 0, size);
6598 +
6599 +@@ -186,7 +197,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
6600 + arch_dma_prep_coherent(page, size);
6601 + ret = arch_dma_set_uncached(ret, size);
6602 + if (IS_ERR(ret))
6603 +- goto out_free_pages;
6604 ++ goto out_encrypt_pages;
6605 + }
6606 + done:
6607 + if (force_dma_unencrypted(dev))
6608 +@@ -194,6 +205,15 @@ done:
6609 + else
6610 + *dma_handle = phys_to_dma(dev, page_to_phys(page));
6611 + return ret;
6612 ++
6613 ++out_encrypt_pages:
6614 ++ if (force_dma_unencrypted(dev)) {
6615 ++ err = set_memory_encrypted((unsigned long)page_address(page),
6616 ++ 1 << get_order(size));
6617 ++ /* If memory cannot be re-encrypted, it must be leaked */
6618 ++ if (err)
6619 ++ return NULL;
6620 ++ }
6621 + out_free_pages:
6622 + dma_free_contiguous(dev, page, size);
6623 + return NULL;
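
The fix above inserts a dedicated out_encrypt_pages label so that a failure
occurring after the pages were decrypted re-encrypts them before the generic
free, and deliberately leaks the pages when re-encryption itself fails. A
minimal sketch of that layered goto unwind, with stub helpers standing in for
set_memory_decrypted()/set_memory_encrypted():

    #include <stdlib.h>

    static int decrypt_mem(void *p) { (void)p; return 0; }
    static int encrypt_mem(void *p) { (void)p; return 0; }
    static int late_setup(void *p)  { (void)p; return -1; } /* force unwind */

    /* Each setup step unwinds through the labels below it; a step that
     * cannot be undone (re-encryption fails) leaks instead of freeing. */
    static void *alloc_decrypted(size_t size)
    {
    	void *p = malloc(size);

    	if (!p)
    		return NULL;
    	if (decrypt_mem(p))
    		goto out_free;
    	if (late_setup(p))
    		goto out_encrypt;
    	return p;

    out_encrypt:
    	if (encrypt_mem(p))
    		return NULL; /* cannot re-encrypt: must leak */
    	/* fall through to the generic free */
    out_free:
    	free(p);
    	return NULL;
    }

    int main(void)
    {
    	void *p = alloc_decrypted(64);

    	free(p); /* NULL here, since late_setup() failed */
    	return 0;
    }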
6624 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
6625 +index 195ecb955fcc..950a5cfd262c 100644
6626 +--- a/kernel/kprobes.c
6627 ++++ b/kernel/kprobes.c
6628 +@@ -326,7 +326,8 @@ struct kprobe *get_kprobe(void *addr)
6629 + struct kprobe *p;
6630 +
6631 + head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
6632 +- hlist_for_each_entry_rcu(p, head, hlist) {
6633 ++ hlist_for_each_entry_rcu(p, head, hlist,
6634 ++ lockdep_is_held(&kprobe_mutex)) {
6635 + if (p->addr == addr)
6636 + return p;
6637 + }
6638 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
6639 +index 5eccfb816d23..f2618ade8047 100644
6640 +--- a/kernel/sched/core.c
6641 ++++ b/kernel/sched/core.c
6642 +@@ -4461,7 +4461,8 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
6643 + */
6644 + if (dl_prio(prio)) {
6645 + if (!dl_prio(p->normal_prio) ||
6646 +- (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
6647 ++ (pi_task && dl_prio(pi_task->prio) &&
6648 ++ dl_entity_preempt(&pi_task->dl, &p->dl))) {
6649 + p->dl.dl_boosted = 1;
6650 + queue_flag |= ENQUEUE_REPLENISH;
6651 + } else
6652 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
6653 +index 504d2f51b0d6..f63f337c7147 100644
6654 +--- a/kernel/sched/deadline.c
6655 ++++ b/kernel/sched/deadline.c
6656 +@@ -2692,6 +2692,7 @@ void __dl_clear_params(struct task_struct *p)
6657 + dl_se->dl_bw = 0;
6658 + dl_se->dl_density = 0;
6659 +
6660 ++ dl_se->dl_boosted = 0;
6661 + dl_se->dl_throttled = 0;
6662 + dl_se->dl_yielded = 0;
6663 + dl_se->dl_non_contending = 0;
6664 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
6665 +index 2ae7e30ccb33..5725199b32dc 100644
6666 +--- a/kernel/sched/fair.c
6667 ++++ b/kernel/sched/fair.c
6668 +@@ -807,7 +807,7 @@ void post_init_entity_util_avg(struct task_struct *p)
6669 + }
6670 + }
6671 +
6672 +- sa->runnable_avg = cpu_scale;
6673 ++ sa->runnable_avg = sa->util_avg;
6674 +
6675 + if (p->sched_class != &fair_sched_class) {
6676 + /*
6677 +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
6678 +index 35610a4be4a9..085fceca3377 100644
6679 +--- a/kernel/trace/blktrace.c
6680 ++++ b/kernel/trace/blktrace.c
6681 +@@ -3,6 +3,9 @@
6682 + * Copyright (C) 2006 Jens Axboe <axboe@××××××.dk>
6683 + *
6684 + */
6685 ++
6686 ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6687 ++
6688 + #include <linux/kernel.h>
6689 + #include <linux/blkdev.h>
6690 + #include <linux/blktrace_api.h>
6691 +@@ -494,6 +497,16 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
6692 + */
6693 + strreplace(buts->name, '/', '_');
6694 +
6695 ++ /*
6696 ++	 * bdev can be NULL, as with scsi-generic; here we are being as
6697 ++	 * helpful as we can be.
6698 ++ */
6699 ++ if (q->blk_trace) {
6700 ++ pr_warn("Concurrent blktraces are not allowed on %s\n",
6701 ++ buts->name);
6702 ++ return -EBUSY;
6703 ++ }
6704 ++
6705 + bt = kzalloc(sizeof(*bt), GFP_KERNEL);
6706 + if (!bt)
6707 + return -ENOMEM;
6708 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
6709 +index b8e1ca48be50..00867ff82412 100644
6710 +--- a/kernel/trace/ring_buffer.c
6711 ++++ b/kernel/trace/ring_buffer.c
6712 +@@ -2427,7 +2427,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
6713 + if (unlikely(info->add_timestamp)) {
6714 + bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
6715 +
6716 +- event = rb_add_time_stamp(event, info->delta, abs);
6717 ++ event = rb_add_time_stamp(event, abs ? info->delta : delta, abs);
6718 + length -= RB_LEN_TIME_EXTEND;
6719 + delta = 0;
6720 + }
6721 +diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
6722 +index 9de29bb45a27..fdc5abc00bf8 100644
6723 +--- a/kernel/trace/trace_boot.c
6724 ++++ b/kernel/trace/trace_boot.c
6725 +@@ -101,12 +101,16 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
6726 + kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
6727 +
6728 + ret = kprobe_event_gen_cmd_start(&cmd, event, val);
6729 +- if (ret)
6730 ++ if (ret) {
6731 ++ pr_err("Failed to generate probe: %s\n", buf);
6732 + break;
6733 ++ }
6734 +
6735 + ret = kprobe_event_gen_cmd_end(&cmd);
6736 +- if (ret)
6737 ++ if (ret) {
6738 + pr_err("Failed to add probe: %s\n", buf);
6739 ++ break;
6740 ++ }
6741 + }
6742 +
6743 + return ret;
6744 +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
6745 +index 3a74736da363..f725802160c0 100644
6746 +--- a/kernel/trace/trace_events_trigger.c
6747 ++++ b/kernel/trace/trace_events_trigger.c
6748 +@@ -216,11 +216,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
6749 +
6750 + int trigger_process_regex(struct trace_event_file *file, char *buff)
6751 + {
6752 +- char *command, *next = buff;
6753 ++ char *command, *next;
6754 + struct event_command *p;
6755 + int ret = -EINVAL;
6756 +
6757 ++ next = buff = skip_spaces(buff);
6758 + command = strsep(&next, ": \t");
6759 ++ if (next) {
6760 ++ next = skip_spaces(next);
6761 ++ if (!*next)
6762 ++ next = NULL;
6763 ++ }
6764 + command = (command[0] != '!') ? command : command + 1;
6765 +
6766 + mutex_lock(&trigger_cmd_mutex);
6767 +@@ -630,8 +636,14 @@ event_trigger_callback(struct event_command *cmd_ops,
6768 + int ret;
6769 +
6770 + /* separate the trigger from the filter (t:n [if filter]) */
6771 +- if (param && isdigit(param[0]))
6772 ++ if (param && isdigit(param[0])) {
6773 + trigger = strsep(&param, " \t");
6774 ++ if (param) {
6775 ++ param = skip_spaces(param);
6776 ++ if (!*param)
6777 ++ param = NULL;
6778 ++ }
6779 ++ }
6780 +
6781 + trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
6782 +
6783 +@@ -1368,6 +1380,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
6784 + trigger = strsep(&param, " \t");
6785 + if (!trigger)
6786 + return -EINVAL;
6787 ++ if (param) {
6788 ++ param = skip_spaces(param);
6789 ++ if (!*param)
6790 ++ param = NULL;
6791 ++ }
6792 +
6793 + system = strsep(&trigger, ":");
6794 + if (!trigger)
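
All three parsing sites above normalize input the same way: skip leading
whitespace before strsep(), and treat a remainder that is only whitespace as
absent. A small self-contained sketch of that normalization (skip_spaces() is
reimplemented here as skip_ws(); strsep() is a glibc/BSD extension):

    #define _DEFAULT_SOURCE /* for strsep() on glibc */
    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static char *skip_ws(char *s)
    {
    	while (isspace((unsigned char)*s))
    		s++;
    	return s;
    }

    /* Split "command[: param]": leading spaces skipped, and a remainder
     * that is only whitespace reported as absent (NULL). */
    static void split(char *buf, char **cmd, char **param)
    {
    	char *next = skip_ws(buf);

    	*cmd = strsep(&next, ": \t");
    	if (next) {
    		next = skip_ws(next);
    		if (!*next)
    			next = NULL;
    	}
    	*param = next;
    }

    int main(void)
    {
    	char line[] = "  traceon:  ";
    	char *cmd, *param;

    	split(line, &cmd, &param);
    	printf("cmd='%s' param=%s\n", cmd, param ? param : "(null)");
    	return 0;
    }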
6795 +diff --git a/lib/test_objagg.c b/lib/test_objagg.c
6796 +index 72c1abfa154d..da137939a410 100644
6797 +--- a/lib/test_objagg.c
6798 ++++ b/lib/test_objagg.c
6799 +@@ -979,10 +979,10 @@ err_check_expect_stats2:
6800 + err_world2_obj_get:
6801 + for (i--; i >= 0; i--)
6802 + world_obj_put(&world2, objagg, hints_case->key_ids[i]);
6803 +- objagg_hints_put(hints);
6804 +- objagg_destroy(objagg2);
6805 + i = hints_case->key_ids_count;
6806 ++ objagg_destroy(objagg2);
6807 + err_check_expect_hints_stats:
6808 ++ objagg_hints_put(hints);
6809 + err_hints_get:
6810 + err_check_expect_stats:
6811 + err_world_obj_get:
6812 +diff --git a/mm/compaction.c b/mm/compaction.c
6813 +index 46f0fcc93081..65b568e19582 100644
6814 +--- a/mm/compaction.c
6815 ++++ b/mm/compaction.c
6816 +@@ -2318,15 +2318,26 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
6817 + .page = NULL,
6818 + };
6819 +
6820 +- current->capture_control = &capc;
6821 ++ /*
6822 ++ * Make sure the structs are really initialized before we expose the
6823 ++ * capture control, in case we are interrupted and the interrupt handler
6824 ++ * frees a page.
6825 ++ */
6826 ++ barrier();
6827 ++ WRITE_ONCE(current->capture_control, &capc);
6828 +
6829 + ret = compact_zone(&cc, &capc);
6830 +
6831 + VM_BUG_ON(!list_empty(&cc.freepages));
6832 + VM_BUG_ON(!list_empty(&cc.migratepages));
6833 +
6834 +- *capture = capc.page;
6835 +- current->capture_control = NULL;
6836 ++ /*
6837 ++	 * Make sure we hide the capture control before we read the captured
6838 ++ * page pointer, otherwise an interrupt could free and capture a page
6839 ++ * and we would leak it.
6840 ++ */
6841 ++ WRITE_ONCE(current->capture_control, NULL);
6842 ++ *capture = READ_ONCE(capc.page);
6843 +
6844 + return ret;
6845 + }
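
The two WRITE_ONCE/READ_ONCE sites above enforce an ordering discipline around
a slot that an interrupt handler may fill: initialize the struct fully before
publishing the pointer, and unpublish before reading the result back. A
user-space sketch of the same discipline, with C11 atomics in place of
WRITE_ONCE()/READ_ONCE() and barrier() (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    struct capture {
    	void *_Atomic page;
    };

    static struct capture *_Atomic current_capture;

    /* Stand-in for the interrupt handler: capture into the slot only
     * while it is published. */
    static void maybe_capture(void *p)
    {
    	struct capture *c = atomic_load(&current_capture);

    	if (c)
    		atomic_store(&c->page, p);
    }

    int main(void)
    {
    	struct capture capc = { .page = NULL };
    	void *got;

    	/* publish only after capc is fully initialized */
    	atomic_store(&current_capture, &capc);

    	maybe_capture("a page");

    	/* unpublish first, then read, so a late capture cannot leak */
    	atomic_store(&current_capture, NULL);
    	got = atomic_load(&capc.page);

    	printf("captured: %s\n", got ? (char *)got : "(none)");
    	return 0;
    }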
6846 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
6847 +index a3b97f103966..ef0e291a8cf4 100644
6848 +--- a/mm/memcontrol.c
6849 ++++ b/mm/memcontrol.c
6850 +@@ -2790,8 +2790,10 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
6851 + return;
6852 +
6853 + cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
6854 +- if (!cw)
6855 ++ if (!cw) {
6856 ++ css_put(&memcg->css);
6857 + return;
6858 ++ }
6859 +
6860 + cw->memcg = memcg;
6861 + cw->cachep = cachep;
6862 +@@ -6349,11 +6351,16 @@ static unsigned long effective_protection(unsigned long usage,
6863 + * We're using unprotected memory for the weight so that if
6864 + * some cgroups DO claim explicit protection, we don't protect
6865 + * the same bytes twice.
6866 ++ *
6867 ++ * Check both usage and parent_usage against the respective
6868 ++ * protected values. One should imply the other, but they
6869 ++ * aren't read atomically - make sure the division is sane.
6870 + */
6871 + if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6872 + return ep;
6873 +-
6874 +- if (parent_effective > siblings_protected && usage > protected) {
6875 ++ if (parent_effective > siblings_protected &&
6876 ++ parent_usage > siblings_protected &&
6877 ++ usage > protected) {
6878 + unsigned long unclaimed;
6879 +
6880 + unclaimed = parent_effective - siblings_protected;
6881 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
6882 +index fc0aad0bc1f5..744a3ea284b7 100644
6883 +--- a/mm/memory_hotplug.c
6884 ++++ b/mm/memory_hotplug.c
6885 +@@ -468,11 +468,20 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
6886 + unsigned long start_pfn,
6887 + unsigned long nr_pages)
6888 + {
6889 ++ const unsigned long end_pfn = start_pfn + nr_pages;
6890 + struct pglist_data *pgdat = zone->zone_pgdat;
6891 +- unsigned long flags;
6892 ++ unsigned long pfn, cur_nr_pages, flags;
6893 +
6894 + /* Poison struct pages because they are now uninitialized again. */
6895 +- page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
6896 ++ for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
6897 ++ cond_resched();
6898 ++
6899 ++ /* Select all remaining pages up to the next section boundary */
6900 ++ cur_nr_pages =
6901 ++ min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
6902 ++ page_init_poison(pfn_to_page(pfn),
6903 ++ sizeof(struct page) * cur_nr_pages);
6904 ++ }
6905 +
6906 + #ifdef CONFIG_ZONE_DEVICE
6907 + /*
6908 +diff --git a/mm/slab.h b/mm/slab.h
6909 +index 207c83ef6e06..74f7e09a7cfd 100644
6910 +--- a/mm/slab.h
6911 ++++ b/mm/slab.h
6912 +@@ -348,7 +348,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
6913 + gfp_t gfp, int order,
6914 + struct kmem_cache *s)
6915 + {
6916 +- unsigned int nr_pages = 1 << order;
6917 ++ int nr_pages = 1 << order;
6918 + struct mem_cgroup *memcg;
6919 + struct lruvec *lruvec;
6920 + int ret;
6921 +@@ -388,7 +388,7 @@ out:
6922 + static __always_inline void memcg_uncharge_slab(struct page *page, int order,
6923 + struct kmem_cache *s)
6924 + {
6925 +- unsigned int nr_pages = 1 << order;
6926 ++ int nr_pages = 1 << order;
6927 + struct mem_cgroup *memcg;
6928 + struct lruvec *lruvec;
6929 +
6930 +diff --git a/mm/slab_common.c b/mm/slab_common.c
6931 +index 9e72ba224175..37d48a56431d 100644
6932 +--- a/mm/slab_common.c
6933 ++++ b/mm/slab_common.c
6934 +@@ -1726,7 +1726,7 @@ void kzfree(const void *p)
6935 + if (unlikely(ZERO_OR_NULL_PTR(mem)))
6936 + return;
6937 + ks = ksize(mem);
6938 +- memset(mem, 0, ks);
6939 ++ memzero_explicit(mem, ks);
6940 + kfree(mem);
6941 + }
6942 + EXPORT_SYMBOL(kzfree);
6943 +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
6944 +index 1f97703a52ff..18430f79ac37 100644
6945 +--- a/net/bridge/br_private.h
6946 ++++ b/net/bridge/br_private.h
6947 +@@ -217,8 +217,8 @@ struct net_bridge_port_group {
6948 + struct rcu_head rcu;
6949 + struct timer_list timer;
6950 + struct br_ip addr;
6951 ++ unsigned char eth_addr[ETH_ALEN] __aligned(2);
6952 + unsigned char flags;
6953 +- unsigned char eth_addr[ETH_ALEN];
6954 + };
6955 +
6956 + struct net_bridge_mdb_entry {
6957 +diff --git a/net/core/dev.c b/net/core/dev.c
6958 +index 93a279ab4e97..c9ee5d80d5ea 100644
6959 +--- a/net/core/dev.c
6960 ++++ b/net/core/dev.c
6961 +@@ -4109,10 +4109,12 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
6962 +
6963 + local_bh_disable();
6964 +
6965 ++ dev_xmit_recursion_inc();
6966 + HARD_TX_LOCK(dev, txq, smp_processor_id());
6967 + if (!netif_xmit_frozen_or_drv_stopped(txq))
6968 + ret = netdev_start_xmit(skb, dev, txq, false);
6969 + HARD_TX_UNLOCK(dev, txq);
6970 ++ dev_xmit_recursion_dec();
6971 +
6972 + local_bh_enable();
6973 +
6974 +@@ -9435,6 +9437,13 @@ int register_netdevice(struct net_device *dev)
6975 + rcu_barrier();
6976 +
6977 + dev->reg_state = NETREG_UNREGISTERED;
6978 ++ /* We should put the kobject taken in
6979 ++ * netdev_unregister_kobject(); otherwise
6980 ++ * the net device cannot be freed when
6981 ++ * the driver calls free_netdev(), because
6982 ++ * the kobject is still held.
6983 ++ */
6984 ++ kobject_put(&dev->dev.kobj);
6985 + }
6986 + /*
6987 + * Prevent userspace races by waiting until the network
6988 +diff --git a/net/core/sock.c b/net/core/sock.c
6989 +index b714162213ae..afe4a62adf8f 100644
6990 +--- a/net/core/sock.c
6991 ++++ b/net/core/sock.c
6992 +@@ -707,7 +707,7 @@ bool sk_mc_loop(struct sock *sk)
6993 + return inet6_sk(sk)->mc_loop;
6994 + #endif
6995 + }
6996 +- WARN_ON(1);
6997 ++ WARN_ON_ONCE(1);
6998 + return true;
6999 + }
7000 + EXPORT_SYMBOL(sk_mc_loop);
7001 +@@ -1678,6 +1678,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
7002 + cgroup_sk_alloc(&sk->sk_cgrp_data);
7003 + sock_update_classid(&sk->sk_cgrp_data);
7004 + sock_update_netprioidx(&sk->sk_cgrp_data);
7005 ++ sk_tx_queue_clear(sk);
7006 + }
7007 +
7008 + return sk;
7009 +@@ -1901,6 +1902,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
7010 + */
7011 + sk_refcnt_debug_inc(newsk);
7012 + sk_set_socket(newsk, NULL);
7013 ++ sk_tx_queue_clear(newsk);
7014 + RCU_INIT_POINTER(newsk->sk_wq, NULL);
7015 +
7016 + if (newsk->sk_prot->sockets_allocated)
7017 +diff --git a/net/ethtool/common.c b/net/ethtool/common.c
7018 +index 423e640e3876..aaecfc916a4d 100644
7019 +--- a/net/ethtool/common.c
7020 ++++ b/net/ethtool/common.c
7021 +@@ -40,9 +40,11 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
7022 + [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
7023 + [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
7024 + [NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial",
7025 ++ [NETIF_F_GSO_TUNNEL_REMCSUM_BIT] = "tx-tunnel-remcsum-segmentation",
7026 + [NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation",
7027 + [NETIF_F_GSO_ESP_BIT] = "tx-esp-segmentation",
7028 + [NETIF_F_GSO_UDP_L4_BIT] = "tx-udp-segmentation",
7029 ++ [NETIF_F_GSO_FRAGLIST_BIT] = "tx-gso-list",
7030 +
7031 + [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
7032 + [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp",
7033 +diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
7034 +index 89d0b1827aaf..d3eeeb26396c 100644
7035 +--- a/net/ethtool/ioctl.c
7036 ++++ b/net/ethtool/ioctl.c
7037 +@@ -2957,7 +2957,7 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
7038 + sizeof(match->mask.ipv6.dst));
7039 + }
7040 + if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
7041 +- memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
7042 ++ memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
7043 + match->dissector.used_keys |=
7044 + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
7045 + match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
7046 +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
7047 +index 55ca2e521828..871c035be31f 100644
7048 +--- a/net/ipv4/fib_semantics.c
7049 ++++ b/net/ipv4/fib_semantics.c
7050 +@@ -1109,7 +1109,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
7051 + if (fl4.flowi4_scope < RT_SCOPE_LINK)
7052 + fl4.flowi4_scope = RT_SCOPE_LINK;
7053 +
7054 +- if (table)
7055 ++ if (table && table != RT_TABLE_MAIN)
7056 + tbl = fib_get_table(net, table);
7057 +
7058 + if (tbl)
7059 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
7060 +index cd4b84310d92..a0b4dc54f8a6 100644
7061 +--- a/net/ipv4/ip_tunnel.c
7062 ++++ b/net/ipv4/ip_tunnel.c
7063 +@@ -85,9 +85,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
7064 + __be32 remote, __be32 local,
7065 + __be32 key)
7066 + {
7067 +- unsigned int hash;
7068 + struct ip_tunnel *t, *cand = NULL;
7069 + struct hlist_head *head;
7070 ++ struct net_device *ndev;
7071 ++ unsigned int hash;
7072 +
7073 + hash = ip_tunnel_hash(key, remote);
7074 + head = &itn->tunnels[hash];
7075 +@@ -162,8 +163,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
7076 + if (t && t->dev->flags & IFF_UP)
7077 + return t;
7078 +
7079 +- if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
7080 +- return netdev_priv(itn->fb_tunnel_dev);
7081 ++ ndev = READ_ONCE(itn->fb_tunnel_dev);
7082 ++ if (ndev && ndev->flags & IFF_UP)
7083 ++ return netdev_priv(ndev);
7084 +
7085 + return NULL;
7086 + }
7087 +@@ -1245,9 +1247,9 @@ void ip_tunnel_uninit(struct net_device *dev)
7088 + struct ip_tunnel_net *itn;
7089 +
7090 + itn = net_generic(net, tunnel->ip_tnl_net_id);
7091 +- /* fb_tunnel_dev will be unregisted in net-exit call. */
7092 +- if (itn->fb_tunnel_dev != dev)
7093 +- ip_tunnel_del(itn, netdev_priv(dev));
7094 ++ ip_tunnel_del(itn, netdev_priv(dev));
7095 ++ if (itn->fb_tunnel_dev == dev)
7096 ++ WRITE_ONCE(itn->fb_tunnel_dev, NULL);
7097 +
7098 + dst_cache_reset(&tunnel->dst_cache);
7099 + }
7100 +diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
7101 +index 8f8eefd3a3ce..c7bf5b26bf0c 100644
7102 +--- a/net/ipv4/tcp_cubic.c
7103 ++++ b/net/ipv4/tcp_cubic.c
7104 +@@ -432,10 +432,9 @@ static void hystart_update(struct sock *sk, u32 delay)
7105 +
7106 + if (hystart_detect & HYSTART_DELAY) {
7107 + /* obtain the minimum delay of more than sampling packets */
7108 ++ if (ca->curr_rtt > delay)
7109 ++ ca->curr_rtt = delay;
7110 + if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
7111 +- if (ca->curr_rtt > delay)
7112 +- ca->curr_rtt = delay;
7113 +-
7114 + ca->sample_cnt++;
7115 + } else {
7116 + if (ca->curr_rtt > ca->delay_min +
7117 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
7118 +index 29c6fc8c7716..1fa009999f57 100644
7119 +--- a/net/ipv4/tcp_input.c
7120 ++++ b/net/ipv4/tcp_input.c
7121 +@@ -261,7 +261,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
7122 + * cwnd may be very low (even just 1 packet), so we should ACK
7123 + * immediately.
7124 + */
7125 +- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
7126 ++ if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
7127 ++ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
7128 + }
7129 + }
7130 +
7131 +@@ -3683,6 +3684,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
7132 + tcp_in_ack_event(sk, ack_ev_flags);
7133 + }
7134 +
7135 ++ /* This is a deviation from RFC3168 since it states that:
7136 ++ * "When the TCP data sender is ready to set the CWR bit after reducing
7137 ++ * the congestion window, it SHOULD set the CWR bit only on the first
7138 ++ * new data packet that it transmits."
7139 ++ * We accept CWR on pure ACKs to be more robust
7140 ++ * with widely-deployed TCP implementations that do this.
7141 ++ */
7142 ++ tcp_ecn_accept_cwr(sk, skb);
7143 ++
7144 + /* We passed data and got it acked, remove any soft error
7145 + * log. Something worked...
7146 + */
7147 +@@ -4593,7 +4603,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
7148 + if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
7149 + skb, &fragstolen)) {
7150 + coalesce_done:
7151 +- tcp_grow_window(sk, skb);
7152 ++ /* For non sack flows, do not grow window to force DUPACK
7153 ++ * and trigger fast retransmit.
7154 ++ */
7155 ++ if (tcp_is_sack(tp))
7156 ++ tcp_grow_window(sk, skb);
7157 + kfree_skb_partial(skb, fragstolen);
7158 + skb = NULL;
7159 + goto add_sack;
7160 +@@ -4677,7 +4691,11 @@ add_sack:
7161 + tcp_sack_new_ofo_skb(sk, seq, end_seq);
7162 + end:
7163 + if (skb) {
7164 +- tcp_grow_window(sk, skb);
7165 ++ /* For non sack flows, do not grow window to force DUPACK
7166 ++ * and trigger fast retransmit.
7167 ++ */
7168 ++ if (tcp_is_sack(tp))
7169 ++ tcp_grow_window(sk, skb);
7170 + skb_condense(skb);
7171 + skb_set_owner_r(skb, sk);
7172 + }
7173 +@@ -4780,8 +4798,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
7174 + skb_dst_drop(skb);
7175 + __skb_pull(skb, tcp_hdr(skb)->doff * 4);
7176 +
7177 +- tcp_ecn_accept_cwr(sk, skb);
7178 +-
7179 + tp->rx_opt.dsack = 0;
7180 +
7181 + /* Queue data for delivery to the user.
7182 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
7183 +index 781ca8c07a0d..6532bde82b40 100644
7184 +--- a/net/ipv6/ip6_gre.c
7185 ++++ b/net/ipv6/ip6_gre.c
7186 +@@ -127,6 +127,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
7187 + gre_proto == htons(ETH_P_ERSPAN2)) ?
7188 + ARPHRD_ETHER : ARPHRD_IP6GRE;
7189 + int score, cand_score = 4;
7190 ++ struct net_device *ndev;
7191 +
7192 + for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
7193 + if (!ipv6_addr_equal(local, &t->parms.laddr) ||
7194 +@@ -238,9 +239,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
7195 + if (t && t->dev->flags & IFF_UP)
7196 + return t;
7197 +
7198 +- dev = ign->fb_tunnel_dev;
7199 +- if (dev && dev->flags & IFF_UP)
7200 +- return netdev_priv(dev);
7201 ++ ndev = READ_ONCE(ign->fb_tunnel_dev);
7202 ++ if (ndev && ndev->flags & IFF_UP)
7203 ++ return netdev_priv(ndev);
7204 +
7205 + return NULL;
7206 + }
7207 +@@ -413,6 +414,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
7208 +
7209 + ip6gre_tunnel_unlink_md(ign, t);
7210 + ip6gre_tunnel_unlink(ign, t);
7211 ++ if (ign->fb_tunnel_dev == dev)
7212 ++ WRITE_ONCE(ign->fb_tunnel_dev, NULL);
7213 + dst_cache_reset(&t->dst_cache);
7214 + dev_put(dev);
7215 + }
7216 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
7217 +index eaa4c2cc2fbb..c875c9b6edbe 100644
7218 +--- a/net/ipv6/mcast.c
7219 ++++ b/net/ipv6/mcast.c
7220 +@@ -2618,6 +2618,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
7221 + idev->mc_list = i->next;
7222 +
7223 + write_unlock_bh(&idev->lock);
7224 ++ ip6_mc_clear_src(i);
7225 + ma_put(i);
7226 + write_lock_bh(&idev->lock);
7227 + }
7228 +diff --git a/net/mptcp/options.c b/net/mptcp/options.c
7229 +index 1c20dd14b2aa..2430bbfa3405 100644
7230 +--- a/net/mptcp/options.c
7231 ++++ b/net/mptcp/options.c
7232 +@@ -336,9 +336,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
7233 + */
7234 + subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
7235 + if (subflow->request_mptcp) {
7236 +- pr_debug("local_key=%llu", subflow->local_key);
7237 + opts->suboptions = OPTION_MPTCP_MPC_SYN;
7238 +- opts->sndr_key = subflow->local_key;
7239 + *size = TCPOLEN_MPTCP_MPC_SYN;
7240 + return true;
7241 + } else if (subflow->request_join) {
7242 +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
7243 +index e6feb05a93dc..db3e4e74e785 100644
7244 +--- a/net/mptcp/subflow.c
7245 ++++ b/net/mptcp/subflow.c
7246 +@@ -1015,8 +1015,10 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
7247 + err = tcp_set_ulp(sf->sk, "mptcp");
7248 + release_sock(sf->sk);
7249 +
7250 +- if (err)
7251 ++ if (err) {
7252 ++ sock_release(sf);
7253 + return err;
7254 ++ }
7255 +
7256 + /* the newly created socket really belongs to the owning MPTCP master
7257 + * socket, even if for additional subflows the allocation is performed
7258 +diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
7259 +index 340cb955af25..56621d6bfd29 100644
7260 +--- a/net/netfilter/ipset/ip_set_core.c
7261 ++++ b/net/netfilter/ipset/ip_set_core.c
7262 +@@ -460,6 +460,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
7263 + for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
7264 + if (!add_extension(id, cadt_flags, tb))
7265 + continue;
7266 ++ if (align < ip_set_extensions[id].align)
7267 ++ align = ip_set_extensions[id].align;
7268 + len = ALIGN(len, ip_set_extensions[id].align);
7269 + set->offset[id] = len;
7270 + set->extensions |= ip_set_extensions[id].type;
7271 +diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
7272 +index bcbba0bef1c2..9c1c27f3a089 100644
7273 +--- a/net/netlink/genetlink.c
7274 ++++ b/net/netlink/genetlink.c
7275 +@@ -474,8 +474,7 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
7276 + struct netlink_ext_ack *extack,
7277 + const struct genl_ops *ops,
7278 + int hdrlen,
7279 +- enum genl_validate_flags no_strict_flag,
7280 +- bool parallel)
7281 ++ enum genl_validate_flags no_strict_flag)
7282 + {
7283 + enum netlink_validation validate = ops->validate & no_strict_flag ?
7284 + NL_VALIDATE_LIBERAL :
7285 +@@ -486,7 +485,7 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
7286 + if (!family->maxattr)
7287 + return NULL;
7288 +
7289 +- if (parallel) {
7290 ++ if (family->parallel_ops) {
7291 + attrbuf = kmalloc_array(family->maxattr + 1,
7292 + sizeof(struct nlattr *), GFP_KERNEL);
7293 + if (!attrbuf)
7294 +@@ -498,7 +497,7 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
7295 + err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
7296 + family->policy, validate, extack);
7297 + if (err) {
7298 +- if (parallel)
7299 ++ if (family->parallel_ops)
7300 + kfree(attrbuf);
7301 + return ERR_PTR(err);
7302 + }
7303 +@@ -506,10 +505,9 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
7304 + }
7305 +
7306 + static void genl_family_rcv_msg_attrs_free(const struct genl_family *family,
7307 +- struct nlattr **attrbuf,
7308 +- bool parallel)
7309 ++ struct nlattr **attrbuf)
7310 + {
7311 +- if (parallel)
7312 ++ if (family->parallel_ops)
7313 + kfree(attrbuf);
7314 + }
7315 +
7316 +@@ -537,15 +535,14 @@ static int genl_start(struct netlink_callback *cb)
7317 +
7318 + attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
7319 + ops, ctx->hdrlen,
7320 +- GENL_DONT_VALIDATE_DUMP_STRICT,
7321 +- true);
7322 ++ GENL_DONT_VALIDATE_DUMP_STRICT);
7323 + if (IS_ERR(attrs))
7324 + return PTR_ERR(attrs);
7325 +
7326 + no_attrs:
7327 + info = genl_dumpit_info_alloc();
7328 + if (!info) {
7329 +- kfree(attrs);
7330 ++ genl_family_rcv_msg_attrs_free(ctx->family, attrs);
7331 + return -ENOMEM;
7332 + }
7333 + info->family = ctx->family;
7334 +@@ -562,7 +559,7 @@ no_attrs:
7335 + }
7336 +
7337 + if (rc) {
7338 +- kfree(attrs);
7339 ++ genl_family_rcv_msg_attrs_free(info->family, info->attrs);
7340 + genl_dumpit_info_free(info);
7341 + cb->data = NULL;
7342 + }
7343 +@@ -591,7 +588,7 @@ static int genl_lock_done(struct netlink_callback *cb)
7344 + rc = ops->done(cb);
7345 + genl_unlock();
7346 + }
7347 +- genl_family_rcv_msg_attrs_free(info->family, info->attrs, false);
7348 ++ genl_family_rcv_msg_attrs_free(info->family, info->attrs);
7349 + genl_dumpit_info_free(info);
7350 + return rc;
7351 + }
7352 +@@ -604,7 +601,7 @@ static int genl_parallel_done(struct netlink_callback *cb)
7353 +
7354 + if (ops->done)
7355 + rc = ops->done(cb);
7356 +- genl_family_rcv_msg_attrs_free(info->family, info->attrs, true);
7357 ++ genl_family_rcv_msg_attrs_free(info->family, info->attrs);
7358 + genl_dumpit_info_free(info);
7359 + return rc;
7360 + }
7361 +@@ -671,8 +668,7 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family,
7362 +
7363 + attrbuf = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
7364 + ops, hdrlen,
7365 +- GENL_DONT_VALIDATE_STRICT,
7366 +- family->parallel_ops);
7367 ++ GENL_DONT_VALIDATE_STRICT);
7368 + if (IS_ERR(attrbuf))
7369 + return PTR_ERR(attrbuf);
7370 +
7371 +@@ -698,7 +694,7 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family,
7372 + family->post_doit(ops, skb, &info);
7373 +
7374 + out:
7375 +- genl_family_rcv_msg_attrs_free(family, attrbuf, family->parallel_ops);
7376 ++ genl_family_rcv_msg_attrs_free(family, attrbuf);
7377 +
7378 + return err;
7379 + }
7380 +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
7381 +index fc0efd8833c8..2611657f40ca 100644
7382 +--- a/net/openvswitch/actions.c
7383 ++++ b/net/openvswitch/actions.c
7384 +@@ -1169,9 +1169,10 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
7385 + struct sw_flow_key *key,
7386 + const struct nlattr *attr, bool last)
7387 + {
7388 ++ struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
7389 + const struct nlattr *actions, *cpl_arg;
7390 ++ int len, max_len, rem = nla_len(attr);
7391 + const struct check_pkt_len_arg *arg;
7392 +- int rem = nla_len(attr);
7393 + bool clone_flow_key;
7394 +
7395 + /* The first netlink attribute in 'attr' is always
7396 +@@ -1180,7 +1181,11 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
7397 + cpl_arg = nla_data(attr);
7398 + arg = nla_data(cpl_arg);
7399 +
7400 +- if (skb->len <= arg->pkt_len) {
7401 ++ len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
7402 ++ max_len = arg->pkt_len;
7403 ++
7404 ++ if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
7405 ++ len <= max_len) {
7406 + /* Second netlink attribute in 'attr' is always
7407 + * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
7408 + */
7409 +diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
7410 +index b7611cc159e5..032ed76c0166 100644
7411 +--- a/net/rxrpc/call_accept.c
7412 ++++ b/net/rxrpc/call_accept.c
7413 +@@ -22,6 +22,11 @@
7414 + #include <net/ip.h>
7415 + #include "ar-internal.h"
7416 +
7417 ++static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
7418 ++ unsigned long user_call_ID)
7419 ++{
7420 ++}
7421 ++
7422 + /*
7423 + * Preallocate a single service call, connection and peer and, if possible,
7424 + * give them a user ID and attach the user's side of the ID to them.
7425 +@@ -228,6 +233,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
7426 + if (rx->discard_new_call) {
7427 + _debug("discard %lx", call->user_call_ID);
7428 + rx->discard_new_call(call, call->user_call_ID);
7429 ++ if (call->notify_rx)
7430 ++ call->notify_rx = rxrpc_dummy_notify;
7431 + rxrpc_put_call(call, rxrpc_call_put_kernel);
7432 + }
7433 + rxrpc_call_completed(call);
7434 +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
7435 +index 3be4177baf70..22dec6049e1b 100644
7436 +--- a/net/rxrpc/input.c
7437 ++++ b/net/rxrpc/input.c
7438 +@@ -723,13 +723,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
7439 + ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
7440 + rwind, ntohl(ackinfo->jumbo_max));
7441 +
7442 ++ if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
7443 ++ rwind = RXRPC_RXTX_BUFF_SIZE - 1;
7444 + if (call->tx_winsize != rwind) {
7445 +- if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
7446 +- rwind = RXRPC_RXTX_BUFF_SIZE - 1;
7447 + if (rwind > call->tx_winsize)
7448 + wake = true;
7449 +- trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
7450 +- ntohl(ackinfo->rwind), wake);
7451 ++ trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
7452 + call->tx_winsize = rwind;
7453 + }
7454 +
7455 +diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
7456 +index 1496e87cd07b..9475fa81ea7f 100644
7457 +--- a/net/sched/sch_cake.c
7458 ++++ b/net/sched/sch_cake.c
7459 +@@ -1514,32 +1514,51 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
7460 + return idx + (tin << 16);
7461 + }
7462 +
7463 +-static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
7464 ++static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
7465 + {
7466 +- int wlen = skb_network_offset(skb);
7467 ++ const int offset = skb_network_offset(skb);
7468 ++ u16 *buf, buf_;
7469 + u8 dscp;
7470 +
7471 + switch (tc_skb_protocol(skb)) {
7472 + case htons(ETH_P_IP):
7473 +- wlen += sizeof(struct iphdr);
7474 +- if (!pskb_may_pull(skb, wlen) ||
7475 +- skb_try_make_writable(skb, wlen))
7476 ++ buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
7477 ++ if (unlikely(!buf))
7478 + return 0;
7479 +
7480 +- dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
7481 +- if (wash && dscp)
7482 ++ /* ToS is in the second byte of iphdr */
7483 ++ dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
7484 ++
7485 ++ if (wash && dscp) {
7486 ++ const int wlen = offset + sizeof(struct iphdr);
7487 ++
7488 ++ if (!pskb_may_pull(skb, wlen) ||
7489 ++ skb_try_make_writable(skb, wlen))
7490 ++ return 0;
7491 ++
7492 + ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
7493 ++ }
7494 ++
7495 + return dscp;
7496 +
7497 + case htons(ETH_P_IPV6):
7498 +- wlen += sizeof(struct ipv6hdr);
7499 +- if (!pskb_may_pull(skb, wlen) ||
7500 +- skb_try_make_writable(skb, wlen))
7501 ++ buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
7502 ++ if (unlikely(!buf))
7503 + return 0;
7504 +
7505 +- dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
7506 +- if (wash && dscp)
7507 ++ /* Traffic class is in the first and second bytes of ipv6hdr */
7508 ++ dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
7509 ++
7510 ++ if (wash && dscp) {
7511 ++ const int wlen = offset + sizeof(struct ipv6hdr);
7512 ++
7513 ++ if (!pskb_may_pull(skb, wlen) ||
7514 ++ skb_try_make_writable(skb, wlen))
7515 ++ return 0;
7516 ++
7517 + ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
7518 ++ }
7519 ++
7520 + return dscp;
7521 +
7522 + case htons(ETH_P_ARP):
7523 +@@ -1556,14 +1575,17 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
7524 + {
7525 + struct cake_sched_data *q = qdisc_priv(sch);
7526 + u32 tin, mark;
7527 ++ bool wash;
7528 + u8 dscp;
7529 +
7530 + /* Tin selection: Default to diffserv-based selection, allow overriding
7531 +- * using firewall marks or skb->priority.
7532 ++ * using firewall marks or skb->priority. Call DSCP parsing early if
7533 ++ * wash is enabled, otherwise defer to below to skip unneeded parsing.
7534 + */
7535 +- dscp = cake_handle_diffserv(skb,
7536 +- q->rate_flags & CAKE_FLAG_WASH);
7537 + mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
7538 ++ wash = !!(q->rate_flags & CAKE_FLAG_WASH);
7539 ++ if (wash)
7540 ++ dscp = cake_handle_diffserv(skb, wash);
7541 +
7542 + if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
7543 + tin = 0;
7544 +@@ -1577,6 +1599,8 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
7545 + tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
7546 +
7547 + else {
7548 ++ if (!wash)
7549 ++ dscp = cake_handle_diffserv(skb, wash);
7550 + tin = q->tin_index[dscp];
7551 +
7552 + if (unlikely(tin >= q->tin_cnt))
7553 +@@ -2654,7 +2678,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
7554 + qdisc_watchdog_init(&q->watchdog, sch);
7555 +
7556 + if (opt) {
7557 +- int err = cake_change(sch, opt, extack);
7558 ++ err = cake_change(sch, opt, extack);
7559 +
7560 + if (err)
7561 + return err;
7562 +@@ -2971,7 +2995,7 @@ static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
7563 + PUT_STAT_S32(BLUE_TIMER_US,
7564 + ktime_to_us(
7565 + ktime_sub(now,
7566 +- flow->cvars.blue_timer)));
7567 ++ flow->cvars.blue_timer)));
7568 + }
7569 + if (flow->cvars.dropping) {
7570 + PUT_STAT_S32(DROP_NEXT_US,
7571 +diff --git a/net/sctp/associola.c b/net/sctp/associola.c
7572 +index 437079a4883d..732bc9a45190 100644
7573 +--- a/net/sctp/associola.c
7574 ++++ b/net/sctp/associola.c
7575 +@@ -1565,12 +1565,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
7576 + int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
7577 + enum sctp_scope scope, gfp_t gfp)
7578 + {
7579 ++ struct sock *sk = asoc->base.sk;
7580 + int flags;
7581 +
7582 + /* Use scoping rules to determine the subset of addresses from
7583 + * the endpoint.
7584 + */
7585 +- flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
7586 ++ flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
7587 ++ if (!inet_v6_ipv6only(sk))
7588 ++ flags |= SCTP_ADDR4_ALLOWED;
7589 + if (asoc->peer.ipv4_address)
7590 + flags |= SCTP_ADDR4_PEERSUPP;
7591 + if (asoc->peer.ipv6_address)
7592 +diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
7593 +index 53bc61537f44..701c5a4e441d 100644
7594 +--- a/net/sctp/bind_addr.c
7595 ++++ b/net/sctp/bind_addr.c
7596 +@@ -461,6 +461,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
7597 + * well as the remote peer.
7598 + */
7599 + if ((((AF_INET == addr->sa.sa_family) &&
7600 ++ (flags & SCTP_ADDR4_ALLOWED) &&
7601 + (flags & SCTP_ADDR4_PEERSUPP))) ||
7602 + (((AF_INET6 == addr->sa.sa_family) &&
7603 + (flags & SCTP_ADDR6_ALLOWED) &&
7604 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
7605 +index 092d1afdee0d..cde29f3c7fb3 100644
7606 +--- a/net/sctp/protocol.c
7607 ++++ b/net/sctp/protocol.c
7608 +@@ -148,7 +148,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
7609 + * sock as well as the remote peer.
7610 + */
7611 + if (addr->a.sa.sa_family == AF_INET &&
7612 +- !(copy_flags & SCTP_ADDR4_PEERSUPP))
7613 ++ (!(copy_flags & SCTP_ADDR4_ALLOWED) ||
7614 ++ !(copy_flags & SCTP_ADDR4_PEERSUPP)))
7615 + continue;
7616 + if (addr->a.sa.sa_family == AF_INET6 &&
7617 + (!(copy_flags & SCTP_ADDR6_ALLOWED) ||
7618 +diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
7619 +index 39e14d5edaf1..e9d0953522f0 100644
7620 +--- a/net/sunrpc/rpc_pipe.c
7621 ++++ b/net/sunrpc/rpc_pipe.c
7622 +@@ -1317,6 +1317,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
7623 + q.len = strlen(gssd_dummy_clnt_dir[0].name);
7624 + clnt_dentry = d_hash_and_lookup(gssd_dentry, &q);
7625 + if (!clnt_dentry) {
7626 ++ __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
7627 + pipe_dentry = ERR_PTR(-ENOENT);
7628 + goto out;
7629 + }
7630 +diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
7631 +index 6f7d82fb1eb0..be11d672b5b9 100644
7632 +--- a/net/sunrpc/xdr.c
7633 ++++ b/net/sunrpc/xdr.c
7634 +@@ -1118,6 +1118,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
7635 + base = 0;
7636 + } else {
7637 + base -= buf->head[0].iov_len;
7638 ++ subbuf->head[0].iov_base = buf->head[0].iov_base;
7639 + subbuf->head[0].iov_len = 0;
7640 + }
7641 +
7642 +@@ -1130,6 +1131,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
7643 + base = 0;
7644 + } else {
7645 + base -= buf->page_len;
7646 ++ subbuf->pages = buf->pages;
7647 ++ subbuf->page_base = 0;
7648 + subbuf->page_len = 0;
7649 + }
7650 +
7651 +@@ -1141,6 +1144,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
7652 + base = 0;
7653 + } else {
7654 + base -= buf->tail[0].iov_len;
7655 ++ subbuf->tail[0].iov_base = buf->tail[0].iov_base;
7656 + subbuf->tail[0].iov_len = 0;
7657 + }
7658 +
7659 +diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
7660 +index 3c627dc685cc..57118e342c8e 100644
7661 +--- a/net/sunrpc/xprtrdma/rpc_rdma.c
7662 ++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
7663 +@@ -1349,8 +1349,7 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
7664 + be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
7665 + }
7666 +
7667 +- r_xprt->rx_stats.bad_reply_count++;
7668 +- return -EREMOTEIO;
7669 ++ return -EIO;
7670 + }
7671 +
7672 + /* Perform XID lookup, reconstruction of the RPC reply, and
7673 +@@ -1387,13 +1386,11 @@ out:
7674 + spin_unlock(&xprt->queue_lock);
7675 + return;
7676 +
7677 +-/* If the incoming reply terminated a pending RPC, the next
7678 +- * RPC call will post a replacement receive buffer as it is
7679 +- * being marshaled.
7680 +- */
7681 + out_badheader:
7682 + trace_xprtrdma_reply_hdr(rep);
7683 + r_xprt->rx_stats.bad_reply_count++;
7684 ++ rqst->rq_task->tk_status = status;
7685 ++ status = 0;
7686 + goto out;
7687 + }
7688 +
7689 +diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
7690 +index f50d1f97cf8e..626096bd0d29 100644
7691 +--- a/net/xfrm/xfrm_device.c
7692 ++++ b/net/xfrm/xfrm_device.c
7693 +@@ -108,7 +108,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
7694 + struct xfrm_offload *xo = xfrm_offload(skb);
7695 + struct sec_path *sp;
7696 +
7697 +- if (!xo)
7698 ++ if (!xo || (xo->flags & XFRM_XMIT))
7699 + return skb;
7700 +
7701 + if (!(features & NETIF_F_HW_ESP))
7702 +@@ -129,6 +129,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
7703 + return skb;
7704 + }
7705 +
7706 ++ xo->flags |= XFRM_XMIT;
7707 ++
7708 + if (skb_is_gso(skb)) {
7709 + struct net_device *dev = skb->dev;
7710 +
7711 +diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
7712 +index dd558cbb2309..ef53b93db573 100644
7713 +--- a/samples/bpf/xdp_monitor_user.c
7714 ++++ b/samples/bpf/xdp_monitor_user.c
7715 +@@ -509,11 +509,8 @@ static void *alloc_rec_per_cpu(int record_size)
7716 + {
7717 + unsigned int nr_cpus = bpf_num_possible_cpus();
7718 + void *array;
7719 +- size_t size;
7720 +
7721 +- size = record_size * nr_cpus;
7722 +- array = malloc(size);
7723 +- memset(array, 0, size);
7724 ++ array = calloc(nr_cpus, record_size);
7725 + if (!array) {
7726 + fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
7727 + exit(EXIT_FAIL_MEM);
7728 +@@ -528,8 +525,7 @@ static struct stats_record *alloc_stats_record(void)
7729 + int i;
7730 +
7731 + /* Alloc main stats_record structure */
7732 +- rec = malloc(sizeof(*rec));
7733 +- memset(rec, 0, sizeof(*rec));
7734 ++ rec = calloc(1, sizeof(*rec));
7735 + if (!rec) {
7736 + fprintf(stderr, "Mem alloc error\n");
7737 + exit(EXIT_FAIL_MEM);
7738 +diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
7739 +index 313a8fe6d125..2baf8db1f7e7 100644
7740 +--- a/samples/bpf/xdp_redirect_cpu_kern.c
7741 ++++ b/samples/bpf/xdp_redirect_cpu_kern.c
7742 +@@ -15,7 +15,7 @@
7743 + #include <bpf/bpf_helpers.h>
7744 + #include "hash_func01.h"
7745 +
7746 +-#define MAX_CPUS 64 /* WARNING - sync with _user.c */
7747 ++#define MAX_CPUS NR_CPUS
7748 +
7749 + /* Special map type that can XDP_REDIRECT frames to another CPU */
7750 + struct {
7751 +diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
7752 +index 15bdf047a222..e86fed5cdb92 100644
7753 +--- a/samples/bpf/xdp_redirect_cpu_user.c
7754 ++++ b/samples/bpf/xdp_redirect_cpu_user.c
7755 +@@ -13,6 +13,7 @@ static const char *__doc__ =
7756 + #include <unistd.h>
7757 + #include <locale.h>
7758 + #include <sys/resource.h>
7759 ++#include <sys/sysinfo.h>
7760 + #include <getopt.h>
7761 + #include <net/if.h>
7762 + #include <time.h>
7763 +@@ -24,8 +25,6 @@ static const char *__doc__ =
7764 + #include <arpa/inet.h>
7765 + #include <linux/if_link.h>
7766 +
7767 +-#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
7768 +-
7769 + /* How many xdp_progs are defined in _kern.c */
7770 + #define MAX_PROG 6
7771 +
7772 +@@ -40,6 +39,7 @@ static char *ifname;
7773 + static __u32 prog_id;
7774 +
7775 + static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
7776 ++static int n_cpus;
7777 + static int cpu_map_fd;
7778 + static int rx_cnt_map_fd;
7779 + static int redirect_err_cnt_map_fd;
7780 +@@ -170,7 +170,7 @@ struct stats_record {
7781 + struct record redir_err;
7782 + struct record kthread;
7783 + struct record exception;
7784 +- struct record enq[MAX_CPUS];
7785 ++ struct record enq[];
7786 + };
7787 +
7788 + static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
7789 +@@ -210,11 +210,8 @@ static struct datarec *alloc_record_per_cpu(void)
7790 + {
7791 + unsigned int nr_cpus = bpf_num_possible_cpus();
7792 + struct datarec *array;
7793 +- size_t size;
7794 +
7795 +- size = sizeof(struct datarec) * nr_cpus;
7796 +- array = malloc(size);
7797 +- memset(array, 0, size);
7798 ++ array = calloc(nr_cpus, sizeof(struct datarec));
7799 + if (!array) {
7800 + fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
7801 + exit(EXIT_FAIL_MEM);
7802 +@@ -225,19 +222,20 @@ static struct datarec *alloc_record_per_cpu(void)
7803 + static struct stats_record *alloc_stats_record(void)
7804 + {
7805 + struct stats_record *rec;
7806 +- int i;
7807 ++ int i, size;
7808 +
7809 +- rec = malloc(sizeof(*rec));
7810 +- memset(rec, 0, sizeof(*rec));
7811 ++ size = sizeof(*rec) + n_cpus * sizeof(struct record);
7812 ++ rec = malloc(size);
7813 + if (!rec) {
7814 + fprintf(stderr, "Mem alloc error\n");
7815 + exit(EXIT_FAIL_MEM);
7816 + }
7817 ++ memset(rec, 0, size);
7818 + rec->rx_cnt.cpu = alloc_record_per_cpu();
7819 + rec->redir_err.cpu = alloc_record_per_cpu();
7820 + rec->kthread.cpu = alloc_record_per_cpu();
7821 + rec->exception.cpu = alloc_record_per_cpu();
7822 +- for (i = 0; i < MAX_CPUS; i++)
7823 ++ for (i = 0; i < n_cpus; i++)
7824 + rec->enq[i].cpu = alloc_record_per_cpu();
7825 +
7826 + return rec;
7827 +@@ -247,7 +245,7 @@ static void free_stats_record(struct stats_record *r)
7828 + {
7829 + int i;
7830 +
7831 +- for (i = 0; i < MAX_CPUS; i++)
7832 ++ for (i = 0; i < n_cpus; i++)
7833 + free(r->enq[i].cpu);
7834 + free(r->exception.cpu);
7835 + free(r->kthread.cpu);
7836 +@@ -350,7 +348,7 @@ static void stats_print(struct stats_record *stats_rec,
7837 + }
7838 +
7839 + /* cpumap enqueue stats */
7840 +- for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) {
7841 ++ for (to_cpu = 0; to_cpu < n_cpus; to_cpu++) {
7842 + char *fmt = "%-15s %3d:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
7843 + char *fm2 = "%-15s %3s:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
7844 + char *errstr = "";
7845 +@@ -475,7 +473,7 @@ static void stats_collect(struct stats_record *rec)
7846 + map_collect_percpu(fd, 1, &rec->redir_err);
7847 +
7848 + fd = cpumap_enqueue_cnt_map_fd;
7849 +- for (i = 0; i < MAX_CPUS; i++)
7850 ++ for (i = 0; i < n_cpus; i++)
7851 + map_collect_percpu(fd, i, &rec->enq[i]);
7852 +
7853 + fd = cpumap_kthread_cnt_map_fd;
7854 +@@ -549,10 +547,10 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
7855 + */
7856 + static void mark_cpus_unavailable(void)
7857 + {
7858 +- __u32 invalid_cpu = MAX_CPUS;
7859 ++ __u32 invalid_cpu = n_cpus;
7860 + int ret, i;
7861 +
7862 +- for (i = 0; i < MAX_CPUS; i++) {
7863 ++ for (i = 0; i < n_cpus; i++) {
7864 + ret = bpf_map_update_elem(cpus_available_map_fd, &i,
7865 + &invalid_cpu, 0);
7866 + if (ret) {
7867 +@@ -688,6 +686,8 @@ int main(int argc, char **argv)
7868 + int prog_fd;
7869 + __u32 qsize;
7870 +
7871 ++ n_cpus = get_nprocs_conf();
7872 ++
7873 + /* Notice: choosing he queue size is very important with the
7874 + * ixgbe driver, because it's driver page recycling trick is
7875 + * dependend on pages being returned quickly. The number of
7876 +@@ -757,7 +757,7 @@ int main(int argc, char **argv)
7877 + case 'c':
7878 + /* Add multiple CPUs */
7879 + add_cpu = strtoul(optarg, NULL, 0);
7880 +- if (add_cpu >= MAX_CPUS) {
7881 ++ if (add_cpu >= n_cpus) {
7882 + fprintf(stderr,
7883 + "--cpu nr too large for cpumap err(%d):%s\n",
7884 + errno, strerror(errno));
7885 +diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
7886 +index 4fe47502ebed..caa4e7ffcfc7 100644
7887 +--- a/samples/bpf/xdp_rxq_info_user.c
7888 ++++ b/samples/bpf/xdp_rxq_info_user.c
7889 +@@ -198,11 +198,8 @@ static struct datarec *alloc_record_per_cpu(void)
7890 + {
7891 + unsigned int nr_cpus = bpf_num_possible_cpus();
7892 + struct datarec *array;
7893 +- size_t size;
7894 +
7895 +- size = sizeof(struct datarec) * nr_cpus;
7896 +- array = malloc(size);
7897 +- memset(array, 0, size);
7898 ++ array = calloc(nr_cpus, sizeof(struct datarec));
7899 + if (!array) {
7900 + fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
7901 + exit(EXIT_FAIL_MEM);
7902 +@@ -214,11 +211,8 @@ static struct record *alloc_record_per_rxq(void)
7903 + {
7904 + unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
7905 + struct record *array;
7906 +- size_t size;
7907 +
7908 +- size = sizeof(struct record) * nr_rxqs;
7909 +- array = malloc(size);
7910 +- memset(array, 0, size);
7911 ++ array = calloc(nr_rxqs, sizeof(struct record));
7912 + if (!array) {
7913 + fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
7914 + exit(EXIT_FAIL_MEM);
7915 +@@ -232,8 +226,7 @@ static struct stats_record *alloc_stats_record(void)
7916 + struct stats_record *rec;
7917 + int i;
7918 +
7919 +- rec = malloc(sizeof(*rec));
7920 +- memset(rec, 0, sizeof(*rec));
7921 ++ rec = calloc(1, sizeof(struct stats_record));
7922 + if (!rec) {
7923 + fprintf(stderr, "Mem alloc error\n");
7924 + exit(EXIT_FAIL_MEM);
7925 +diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
7926 +index 6cabf20ce66a..fe427f7fcfb3 100644
7927 +--- a/scripts/Kbuild.include
7928 ++++ b/scripts/Kbuild.include
7929 +@@ -86,20 +86,21 @@ cc-cross-prefix = $(firstword $(foreach c, $(1), \
7930 + $(if $(shell command -v -- $(c)gcc 2>/dev/null), $(c))))
7931 +
7932 + # output directory for tests below
7933 +-TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
7934 ++TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$
7935 +
7936 + # try-run
7937 + # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
7938 + # Exit code chooses option. "$$TMP" serves as a temporary file and is
7939 + # automatically cleaned up.
7940 + try-run = $(shell set -e; \
7941 +- TMP="$(TMPOUT).$$$$.tmp"; \
7942 +- TMPO="$(TMPOUT).$$$$.o"; \
7943 ++ TMP=$(TMPOUT)/tmp; \
7944 ++ TMPO=$(TMPOUT)/tmp.o; \
7945 ++ mkdir -p $(TMPOUT); \
7946 ++ trap "rm -rf $(TMPOUT)" EXIT; \
7947 + if ($(1)) >/dev/null 2>&1; \
7948 + then echo "$(2)"; \
7949 + else echo "$(3)"; \
7950 +- fi; \
7951 +- rm -f "$$TMP" "$$TMPO")
7952 ++ fi)
7953 +
7954 + # as-option
7955 + # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
7956 +diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
7957 +index 74eab03e31d4..f9b19524da11 100644
7958 +--- a/scripts/recordmcount.h
7959 ++++ b/scripts/recordmcount.h
7960 +@@ -29,6 +29,11 @@
7961 + #undef has_rel_mcount
7962 + #undef tot_relsize
7963 + #undef get_mcountsym
7964 ++#undef find_symtab
7965 ++#undef get_shnum
7966 ++#undef set_shnum
7967 ++#undef get_shstrndx
7968 ++#undef get_symindex
7969 + #undef get_sym_str_and_relp
7970 + #undef do_func
7971 + #undef Elf_Addr
7972 +@@ -58,6 +63,11 @@
7973 + # define __has_rel_mcount __has64_rel_mcount
7974 + # define has_rel_mcount has64_rel_mcount
7975 + # define tot_relsize tot64_relsize
7976 ++# define find_symtab find_symtab64
7977 ++# define get_shnum get_shnum64
7978 ++# define set_shnum set_shnum64
7979 ++# define get_shstrndx get_shstrndx64
7980 ++# define get_symindex get_symindex64
7981 + # define get_sym_str_and_relp get_sym_str_and_relp_64
7982 + # define do_func do64
7983 + # define get_mcountsym get_mcountsym_64
7984 +@@ -91,6 +101,11 @@
7985 + # define __has_rel_mcount __has32_rel_mcount
7986 + # define has_rel_mcount has32_rel_mcount
7987 + # define tot_relsize tot32_relsize
7988 ++# define find_symtab find_symtab32
7989 ++# define get_shnum get_shnum32
7990 ++# define set_shnum set_shnum32
7991 ++# define get_shstrndx get_shstrndx32
7992 ++# define get_symindex get_symindex32
7993 + # define get_sym_str_and_relp get_sym_str_and_relp_32
7994 + # define do_func do32
7995 + # define get_mcountsym get_mcountsym_32
7996 +@@ -173,6 +188,67 @@ static int MIPS_is_fake_mcount(Elf_Rel const *rp)
7997 + return is_fake;
7998 + }
7999 +
8000 ++static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
8001 ++ Elf32_Word const *symtab_shndx)
8002 ++{
8003 ++ unsigned long offset;
8004 ++ int index;
8005 ++
8006 ++ if (sym->st_shndx != SHN_XINDEX)
8007 ++ return w2(sym->st_shndx);
8008 ++
8009 ++ offset = (unsigned long)sym - (unsigned long)symtab;
8010 ++ index = offset / sizeof(*sym);
8011 ++
8012 ++ return w(symtab_shndx[index]);
8013 ++}
8014 ++
8015 ++static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
8016 ++{
8017 ++ if (shdr0 && !ehdr->e_shnum)
8018 ++ return w(shdr0->sh_size);
8019 ++
8020 ++ return w2(ehdr->e_shnum);
8021 ++}
8022 ++
8023 ++static void set_shnum(Elf_Ehdr *ehdr, Elf_Shdr *shdr0, unsigned int new_shnum)
8024 ++{
8025 ++ if (new_shnum >= SHN_LORESERVE) {
8026 ++ ehdr->e_shnum = 0;
8027 ++ shdr0->sh_size = w(new_shnum);
8028 ++ } else
8029 ++ ehdr->e_shnum = w2(new_shnum);
8030 ++}
8031 ++
8032 ++static int get_shstrndx(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
8033 ++{
8034 ++ if (ehdr->e_shstrndx != SHN_XINDEX)
8035 ++ return w2(ehdr->e_shstrndx);
8036 ++
8037 ++ return w(shdr0->sh_link);
8038 ++}
8039 ++
8040 ++static void find_symtab(Elf_Ehdr *const ehdr, Elf_Shdr const *shdr0,
8041 ++ unsigned const nhdr, Elf32_Word **symtab,
8042 ++ Elf32_Word **symtab_shndx)
8043 ++{
8044 ++ Elf_Shdr const *relhdr;
8045 ++ unsigned k;
8046 ++
8047 ++ *symtab = NULL;
8048 ++ *symtab_shndx = NULL;
8049 ++
8050 ++ for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
8051 ++ if (relhdr->sh_type == SHT_SYMTAB)
8052 ++ *symtab = (void *)ehdr + relhdr->sh_offset;
8053 ++ else if (relhdr->sh_type == SHT_SYMTAB_SHNDX)
8054 ++ *symtab_shndx = (void *)ehdr + relhdr->sh_offset;
8055 ++
8056 ++ if (*symtab && *symtab_shndx)
8057 ++ break;
8058 ++ }
8059 ++}
8060 ++
8061 + /* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */
8062 + static int append_func(Elf_Ehdr *const ehdr,
8063 + Elf_Shdr *const shstr,
8064 +@@ -188,10 +264,12 @@ static int append_func(Elf_Ehdr *const ehdr,
8065 + char const *mc_name = (sizeof(Elf_Rela) == rel_entsize)
8066 + ? ".rela__mcount_loc"
8067 + : ".rel__mcount_loc";
8068 +- unsigned const old_shnum = w2(ehdr->e_shnum);
8069 + uint_t const old_shoff = _w(ehdr->e_shoff);
8070 + uint_t const old_shstr_sh_size = _w(shstr->sh_size);
8071 + uint_t const old_shstr_sh_offset = _w(shstr->sh_offset);
8072 ++ Elf_Shdr *const shdr0 = (Elf_Shdr *)(old_shoff + (void *)ehdr);
8073 ++ unsigned int const old_shnum = get_shnum(ehdr, shdr0);
8074 ++ unsigned int const new_shnum = 2 + old_shnum; /* {.rel,}__mcount_loc */
8075 + uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size);
8076 + uint_t new_e_shoff;
8077 +
8078 +@@ -201,6 +279,8 @@ static int append_func(Elf_Ehdr *const ehdr,
8079 + t += (_align & -t); /* word-byte align */
8080 + new_e_shoff = t;
8081 +
8082 ++ set_shnum(ehdr, shdr0, new_shnum);
8083 ++
8084 + /* body for new shstrtab */
8085 + if (ulseek(sb.st_size, SEEK_SET) < 0)
8086 + return -1;
8087 +@@ -255,7 +335,6 @@ static int append_func(Elf_Ehdr *const ehdr,
8088 + return -1;
8089 +
8090 + ehdr->e_shoff = _w(new_e_shoff);
8091 +- ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum)); /* {.rel,}__mcount_loc */
8092 + if (ulseek(0, SEEK_SET) < 0)
8093 + return -1;
8094 + if (uwrite(ehdr, sizeof(*ehdr)) < 0)
8095 +@@ -434,6 +513,8 @@ static int find_secsym_ndx(unsigned const txtndx,
8096 + uint_t *const recvalp,
8097 + unsigned int *sym_index,
8098 + Elf_Shdr const *const symhdr,
8099 ++ Elf32_Word const *symtab,
8100 ++ Elf32_Word const *symtab_shndx,
8101 + Elf_Ehdr const *const ehdr)
8102 + {
8103 + Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset)
8104 +@@ -445,7 +526,7 @@ static int find_secsym_ndx(unsigned const txtndx,
8105 + for (symp = sym0, t = nsym; t; --t, ++symp) {
8106 + unsigned int const st_bind = ELF_ST_BIND(symp->st_info);
8107 +
8108 +- if (txtndx == w2(symp->st_shndx)
8109 ++ if (txtndx == get_symindex(symp, symtab, symtab_shndx)
8110 + /* avoid STB_WEAK */
8111 + && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
8112 + /* function symbols on ARM have quirks, avoid them */
8113 +@@ -516,21 +597,23 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0,
8114 + return totrelsz;
8115 + }
8116 +
8117 +-
8118 + /* Overall supervision for Elf32 ET_REL file. */
8119 + static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
8120 + unsigned const reltype)
8121 + {
8122 + Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff)
8123 + + (void *)ehdr);
8124 +- unsigned const nhdr = w2(ehdr->e_shnum);
8125 +- Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)];
8126 ++ unsigned const nhdr = get_shnum(ehdr, shdr0);
8127 ++ Elf_Shdr *const shstr = &shdr0[get_shstrndx(ehdr, shdr0)];
8128 + char const *const shstrtab = (char const *)(_w(shstr->sh_offset)
8129 + + (void *)ehdr);
8130 +
8131 + Elf_Shdr const *relhdr;
8132 + unsigned k;
8133 +
8134 ++ Elf32_Word *symtab;
8135 ++ Elf32_Word *symtab_shndx;
8136 ++
8137 + /* Upper bound on space: assume all relevant relocs are for mcount. */
8138 + unsigned totrelsz;
8139 +
8140 +@@ -561,6 +644,8 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
8141 + return -1;
8142 + }
8143 +
8144 ++ find_symtab(ehdr, shdr0, nhdr, &symtab, &symtab_shndx);
8145 ++
8146 + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
8147 + char const *const txtname = has_rel_mcount(relhdr, shdr0,
8148 + shstrtab, fname);
8149 +@@ -577,6 +662,7 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
8150 + result = find_secsym_ndx(w(relhdr->sh_info), txtname,
8151 + &recval, &recsym,
8152 + &shdr0[symsec_sh_link],
8153 ++ symtab, symtab_shndx,
8154 + ehdr);
8155 + if (result)
8156 + goto out;
8157 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
8158 +index 93760a3564cf..137d655fed8f 100644
8159 +--- a/sound/pci/hda/patch_hdmi.c
8160 ++++ b/sound/pci/hda/patch_hdmi.c
8161 +@@ -4145,6 +4145,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi),
8162 + HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi),
8163 + HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi),
8164 + HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi),
8165 ++HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi),
8166 ++HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi),
8167 ++HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi),
8168 ++HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi),
8169 ++HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi),
8170 + HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
8171 + HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch),
8172 + HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
8173 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
8174 +index e057ecb5a904..cb689878ba20 100644
8175 +--- a/sound/pci/hda/patch_realtek.c
8176 ++++ b/sound/pci/hda/patch_realtek.c
8177 +@@ -2460,6 +2460,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
8178 + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
8179 + SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
8180 + SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
8181 ++ SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
8182 + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
8183 + SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
8184 + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
8185 +@@ -7435,6 +7436,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
8186 + SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
8187 + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
8188 + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
8189 ++ SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
8190 ++ SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
8191 + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
8192 + SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
8193 + SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
8194 +diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
8195 +index bad89b0d129e..1a2fa7f18142 100644
8196 +--- a/sound/soc/fsl/fsl_ssi.c
8197 ++++ b/sound/soc/fsl/fsl_ssi.c
8198 +@@ -678,8 +678,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
8199 + struct regmap *regs = ssi->regs;
8200 + u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
8201 + unsigned long clkrate, baudrate, tmprate;
8202 +- unsigned int slots = params_channels(hw_params);
8203 +- unsigned int slot_width = 32;
8204 ++ unsigned int channels = params_channels(hw_params);
8205 ++ unsigned int slot_width = params_width(hw_params);
8206 ++ unsigned int slots = 2;
8207 + u64 sub, savesub = 100000;
8208 + unsigned int freq;
8209 + bool baudclk_is_used;
8210 +@@ -688,10 +689,14 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
8211 + /* Override slots and slot_width if being specifically set... */
8212 + if (ssi->slots)
8213 + slots = ssi->slots;
8214 +- /* ...but keep 32 bits if slots is 2 -- I2S Master mode */
8215 +- if (ssi->slot_width && slots != 2)
8216 ++ if (ssi->slot_width)
8217 + slot_width = ssi->slot_width;
8218 +
8219 ++ /* ...but force 32 bits for stereo audio using I2S Master Mode */
8220 ++ if (channels == 2 &&
8221 ++ (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) == SSI_SCR_I2S_MODE_MASTER)
8222 ++ slot_width = 32;
8223 ++
8224 + /* Generate bit clock based on the slot number and slot width */
8225 + freq = slots * slot_width * params_rate(hw_params);
8226 +
8227 +diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
8228 +index 6c20bdd850f3..8ada4ecba847 100644
8229 +--- a/sound/soc/qcom/common.c
8230 ++++ b/sound/soc/qcom/common.c
8231 +@@ -4,6 +4,7 @@
8232 +
8233 + #include <linux/module.h>
8234 + #include "common.h"
8235 ++#include "qdsp6/q6afe.h"
8236 +
8237 + int qcom_snd_parse_of(struct snd_soc_card *card)
8238 + {
8239 +@@ -101,6 +102,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
8240 + }
8241 + link->no_pcm = 1;
8242 + link->ignore_pmdown_time = 1;
8243 ++
8244 ++ if (q6afe_is_rx_port(link->id)) {
8245 ++ link->dpcm_playback = 1;
8246 ++ link->dpcm_capture = 0;
8247 ++ } else {
8248 ++ link->dpcm_playback = 0;
8249 ++ link->dpcm_capture = 1;
8250 ++ }
8251 ++
8252 + } else {
8253 + dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL);
8254 + if (!dlc)
8255 +@@ -113,12 +123,12 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
8256 + link->codecs->dai_name = "snd-soc-dummy-dai";
8257 + link->codecs->name = "snd-soc-dummy";
8258 + link->dynamic = 1;
8259 ++ link->dpcm_playback = 1;
8260 ++ link->dpcm_capture = 1;
8261 + }
8262 +
8263 + link->ignore_suspend = 1;
8264 + link->nonatomic = 1;
8265 +- link->dpcm_playback = 1;
8266 +- link->dpcm_capture = 1;
8267 + link->stream_name = link->name;
8268 + link++;
8269 +
8270 +diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
8271 +index e0945f7a58c8..0ce4eb60f984 100644
8272 +--- a/sound/soc/qcom/qdsp6/q6afe.c
8273 ++++ b/sound/soc/qcom/qdsp6/q6afe.c
8274 +@@ -800,6 +800,14 @@ int q6afe_get_port_id(int index)
8275 + }
8276 + EXPORT_SYMBOL_GPL(q6afe_get_port_id);
8277 +
8278 ++int q6afe_is_rx_port(int index)
8279 ++{
8280 ++ if (index < 0 || index >= AFE_PORT_MAX)
8281 ++ return -EINVAL;
8282 ++
8283 ++ return port_maps[index].is_rx;
8284 ++}
8285 ++EXPORT_SYMBOL_GPL(q6afe_is_rx_port);
8286 + static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt,
8287 + struct q6afe_port *port)
8288 + {
8289 +diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h
8290 +index c7ed5422baff..1a0f80a14afe 100644
8291 +--- a/sound/soc/qcom/qdsp6/q6afe.h
8292 ++++ b/sound/soc/qcom/qdsp6/q6afe.h
8293 +@@ -198,6 +198,7 @@ int q6afe_port_start(struct q6afe_port *port);
8294 + int q6afe_port_stop(struct q6afe_port *port);
8295 + void q6afe_port_put(struct q6afe_port *port);
8296 + int q6afe_get_port_id(int index);
8297 ++int q6afe_is_rx_port(int index);
8298 + void q6afe_hdmi_port_prepare(struct q6afe_port *port,
8299 + struct q6afe_hdmi_cfg *cfg);
8300 + void q6afe_slim_port_prepare(struct q6afe_port *port,
8301 +diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
8302 +index 0e0e8f7a460a..ae4b2cabdf2d 100644
8303 +--- a/sound/soc/qcom/qdsp6/q6asm.c
8304 ++++ b/sound/soc/qcom/qdsp6/q6asm.c
8305 +@@ -25,6 +25,7 @@
8306 + #define ASM_STREAM_CMD_FLUSH 0x00010BCE
8307 + #define ASM_SESSION_CMD_PAUSE 0x00010BD3
8308 + #define ASM_DATA_CMD_EOS 0x00010BDB
8309 ++#define ASM_DATA_EVENT_RENDERED_EOS 0x00010C1C
8310 + #define ASM_NULL_POPP_TOPOLOGY 0x00010C68
8311 + #define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09
8312 + #define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
8313 +@@ -622,9 +623,6 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
8314 + case ASM_SESSION_CMD_SUSPEND:
8315 + client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE;
8316 + break;
8317 +- case ASM_DATA_CMD_EOS:
8318 +- client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
8319 +- break;
8320 + case ASM_STREAM_CMD_FLUSH:
8321 + client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
8322 + break;
8323 +@@ -727,6 +725,9 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
8324 + spin_unlock_irqrestore(&ac->lock, flags);
8325 + }
8326 +
8327 ++ break;
8328 ++ case ASM_DATA_EVENT_RENDERED_EOS:
8329 ++ client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
8330 + break;
8331 + }
8332 +
8333 +diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
8334 +index 7cd42fcfcf38..1707414cfa92 100644
8335 +--- a/sound/soc/rockchip/rockchip_pdm.c
8336 ++++ b/sound/soc/rockchip/rockchip_pdm.c
8337 +@@ -590,8 +590,10 @@ static int rockchip_pdm_resume(struct device *dev)
8338 + int ret;
8339 +
8340 + ret = pm_runtime_get_sync(dev);
8341 +- if (ret < 0)
8342 ++ if (ret < 0) {
8343 ++ pm_runtime_put(dev);
8344 + return ret;
8345 ++ }
8346 +
8347 + ret = regcache_sync(pdm->regmap);
8348 +
8349 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
8350 +index 39ce61c5b874..fde097a7aad3 100644
8351 +--- a/sound/soc/soc-pcm.c
8352 ++++ b/sound/soc/soc-pcm.c
8353 +@@ -2749,15 +2749,15 @@ static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new)
8354 + int count, paths;
8355 + int ret;
8356 +
8357 ++ if (!fe->dai_link->dynamic)
8358 ++ return 0;
8359 ++
8360 + if (fe->num_cpus > 1) {
8361 + dev_err(fe->dev,
8362 + "%s doesn't support Multi CPU yet\n", __func__);
8363 + return -EINVAL;
8364 + }
8365 +
8366 +- if (!fe->dai_link->dynamic)
8367 +- return 0;
8368 +-
8369 + /* only check active links */
8370 + if (!fe->cpu_dai->active)
8371 + return 0;
8372 +diff --git a/sound/usb/format.c b/sound/usb/format.c
8373 +index 5ffb457cc88c..1b28d01d1f4c 100644
8374 +--- a/sound/usb/format.c
8375 ++++ b/sound/usb/format.c
8376 +@@ -394,8 +394,9 @@ skip_rate:
8377 + return nr_rates;
8378 + }
8379 +
8380 +-/* Line6 Helix series don't support the UAC2_CS_RANGE usb function
8381 +- * call. Return a static table of known clock rates.
8382 ++/* Line6 Helix series and the Rode Rodecaster Pro don't support the
8383 ++ * UAC2_CS_RANGE usb function call. Return a static table of known
8384 ++ * clock rates.
8385 + */
8386 + static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
8387 + struct audioformat *fp)
8388 +@@ -408,6 +409,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
8389 + case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
8390 + case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
8391 + case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
8392 ++ case USB_ID(0x19f7, 0x0011): /* Rode Rodecaster Pro */
8393 + return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
8394 + }
8395 +
8396 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
8397 +index 15769f266790..eab0fd4fd7c3 100644
8398 +--- a/sound/usb/mixer.c
8399 ++++ b/sound/usb/mixer.c
8400 +@@ -581,8 +581,9 @@ static int check_matrix_bitmap(unsigned char *bmap,
8401 + * if failed, give up and free the control instance.
8402 + */
8403 +
8404 +-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
8405 +- struct snd_kcontrol *kctl)
8406 ++int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
8407 ++ struct snd_kcontrol *kctl,
8408 ++ bool is_std_info)
8409 + {
8410 + struct usb_mixer_interface *mixer = list->mixer;
8411 + int err;
8412 +@@ -596,6 +597,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
8413 + return err;
8414 + }
8415 + list->kctl = kctl;
8416 ++ list->is_std_info = is_std_info;
8417 + list->next_id_elem = mixer->id_elems[list->id];
8418 + mixer->id_elems[list->id] = list;
8419 + return 0;
8420 +@@ -3234,8 +3236,11 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
8421 + unitid = delegate_notify(mixer, unitid, NULL, NULL);
8422 +
8423 + for_each_mixer_elem(list, mixer, unitid) {
8424 +- struct usb_mixer_elem_info *info =
8425 +- mixer_elem_list_to_info(list);
8426 ++ struct usb_mixer_elem_info *info;
8427 ++
8428 ++ if (!list->is_std_info)
8429 ++ continue;
8430 ++ info = mixer_elem_list_to_info(list);
8431 + /* invalidate cache, so the value is read from the device */
8432 + info->cached = 0;
8433 + snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
8434 +@@ -3315,6 +3320,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
8435 +
8436 + if (!list->kctl)
8437 + continue;
8438 ++ if (!list->is_std_info)
8439 ++ continue;
8440 +
8441 + info = mixer_elem_list_to_info(list);
8442 + if (count > 1 && info->control != control)
8443 +diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
8444 +index 41ec9dc4139b..c29e27ac43a7 100644
8445 +--- a/sound/usb/mixer.h
8446 ++++ b/sound/usb/mixer.h
8447 +@@ -66,6 +66,7 @@ struct usb_mixer_elem_list {
8448 + struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */
8449 + struct snd_kcontrol *kctl;
8450 + unsigned int id;
8451 ++ bool is_std_info;
8452 + usb_mixer_elem_dump_func_t dump;
8453 + usb_mixer_elem_resume_func_t resume;
8454 + };
8455 +@@ -103,8 +104,12 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid);
8456 + int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
8457 + int request, int validx, int value_set);
8458 +
8459 +-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
8460 +- struct snd_kcontrol *kctl);
8461 ++int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
8462 ++ struct snd_kcontrol *kctl,
8463 ++ bool is_std_info);
8464 ++
8465 ++#define snd_usb_mixer_add_control(list, kctl) \
8466 ++ snd_usb_mixer_add_list(list, kctl, true)
8467 +
8468 + void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
8469 + struct usb_mixer_interface *mixer,
8470 +diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
8471 +index aad2683ff793..260607144f56 100644
8472 +--- a/sound/usb/mixer_quirks.c
8473 ++++ b/sound/usb/mixer_quirks.c
8474 +@@ -158,7 +158,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
8475 + return -ENOMEM;
8476 + }
8477 + kctl->private_free = snd_usb_mixer_elem_free;
8478 +- return snd_usb_mixer_add_control(list, kctl);
8479 ++ /* don't use snd_usb_mixer_add_control() here, this is a special list element */
8480 ++ return snd_usb_mixer_add_list(list, kctl, false);
8481 + }
8482 +
8483 + /*
8484 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
8485 +index d61c2f1095b5..39aec83f8aca 100644
8486 +--- a/sound/usb/pcm.c
8487 ++++ b/sound/usb/pcm.c
8488 +@@ -367,6 +367,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
8489 + ifnum = 0;
8490 + goto add_sync_ep_from_ifnum;
8491 + case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
8492 ++ case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
8493 + ep = 0x81;
8494 + ifnum = 2;
8495 + goto add_sync_ep_from_ifnum;
8496 +@@ -1782,6 +1783,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
8497 + return 0;
8498 + case SNDRV_PCM_TRIGGER_STOP:
8499 + stop_endpoints(subs);
8500 ++ subs->data_endpoint->retire_data_urb = NULL;
8501 + subs->running = 0;
8502 + return 0;
8503 + case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
8504 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
8505 +index d8a765be5dfe..d7d900ebcf37 100644
8506 +--- a/sound/usb/quirks.c
8507 ++++ b/sound/usb/quirks.c
8508 +@@ -1505,6 +1505,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
8509 + static bool is_itf_usb_dsd_dac(unsigned int id)
8510 + {
8511 + switch (id) {
8512 ++ case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */
8513 + case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
8514 + case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
8515 + case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
8516 +@@ -1646,6 +1647,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
8517 + chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
8518 + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
8519 + usleep_range(1000, 2000);
8520 ++
8521 ++ /*
8522 ++ * Samsung USBC Headset (AKG) need a tiny delay after each
8523 ++ * class compliant request. (Model number: AAM625R or AAM627R)
8524 ++ */
8525 ++ if (chip->usb_id == USB_ID(0x04e8, 0xa051) &&
8526 ++ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
8527 ++ usleep_range(5000, 6000);
8528 + }
8529 +
8530 + /*
8531 +@@ -1843,6 +1852,7 @@ struct registration_quirk {
8532 + static const struct registration_quirk registration_quirks[] = {
8533 + REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */
8534 + REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */
8535 ++ REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */
8536 + { 0 } /* terminator */
8537 + };
8538 +
8539 +diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
8540 +index 7897c8f4d363..ef574087f1e1 100644
8541 +--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
8542 ++++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c
8543 +@@ -480,10 +480,9 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay)
8544 +
8545 + if (hystart_detect & HYSTART_DELAY) {
8546 + /* obtain the minimum delay of more than sampling packets */
8547 ++ if (ca->curr_rtt > delay)
8548 ++ ca->curr_rtt = delay;
8549 + if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
8550 +- if (ca->curr_rtt > delay)
8551 +- ca->curr_rtt = delay;
8552 +-
8553 + ca->sample_cnt++;
8554 + } else {
8555 + if (ca->curr_rtt > ca->delay_min +
8556 +diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
8557 +index 383bac05ac32..ceaad78e9667 100644
8558 +--- a/tools/testing/selftests/net/so_txtime.c
8559 ++++ b/tools/testing/selftests/net/so_txtime.c
8560 +@@ -15,8 +15,9 @@
8561 + #include <inttypes.h>
8562 + #include <linux/net_tstamp.h>
8563 + #include <linux/errqueue.h>
8564 ++#include <linux/if_ether.h>
8565 + #include <linux/ipv6.h>
8566 +-#include <linux/tcp.h>
8567 ++#include <linux/udp.h>
8568 + #include <stdbool.h>
8569 + #include <stdlib.h>
8570 + #include <stdio.h>
8571 +@@ -140,8 +141,8 @@ static void do_recv_errqueue_timeout(int fdt)
8572 + {
8573 + char control[CMSG_SPACE(sizeof(struct sock_extended_err)) +
8574 + CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
8575 +- char data[sizeof(struct ipv6hdr) +
8576 +- sizeof(struct tcphdr) + 1];
8577 ++ char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) +
8578 ++ sizeof(struct udphdr) + 1];
8579 + struct sock_extended_err *err;
8580 + struct msghdr msg = {0};
8581 + struct iovec iov = {0};
8582 +@@ -159,6 +160,8 @@ static void do_recv_errqueue_timeout(int fdt)
8583 + msg.msg_controllen = sizeof(control);
8584 +
8585 + while (1) {
8586 ++ const char *reason;
8587 ++
8588 + ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
8589 + if (ret == -1 && errno == EAGAIN)
8590 + break;
8591 +@@ -176,14 +179,30 @@ static void do_recv_errqueue_timeout(int fdt)
8592 + err = (struct sock_extended_err *)CMSG_DATA(cm);
8593 + if (err->ee_origin != SO_EE_ORIGIN_TXTIME)
8594 + error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin);
8595 +- if (err->ee_code != ECANCELED)
8596 +- error(1, 0, "errqueue: code 0x%x\n", err->ee_code);
8597 ++
8598 ++ switch (err->ee_errno) {
8599 ++ case ECANCELED:
8600 ++ if (err->ee_code != SO_EE_CODE_TXTIME_MISSED)
8601 ++ error(1, 0, "errqueue: unknown ECANCELED %u\n",
8602 ++ err->ee_code);
8603 ++ reason = "missed txtime";
8604 ++ break;
8605 ++ case EINVAL:
8606 ++ if (err->ee_code != SO_EE_CODE_TXTIME_INVALID_PARAM)
8607 ++ error(1, 0, "errqueue: unknown EINVAL %u\n",
8608 ++ err->ee_code);
8609 ++ reason = "invalid txtime";
8610 ++ break;
8611 ++ default:
8612 ++ error(1, 0, "errqueue: errno %u code %u\n",
8613 ++ err->ee_errno, err->ee_code);
8614 ++ };
8615 +
8616 + tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info;
8617 + tstamp -= (int64_t) glob_tstart;
8618 + tstamp /= 1000 * 1000;
8619 +- fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n",
8620 +- data[ret - 1], tstamp);
8621 ++ fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped: %s\n",
8622 ++ data[ret - 1], tstamp, reason);
8623 +
8624 + msg.msg_flags = 0;
8625 + msg.msg_controllen = sizeof(control);
8626 +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
8627 +index ca35dd8848b0..af3df79d8163 100644
8628 +--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
8629 ++++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
8630 +@@ -7,7 +7,7 @@ noarg:
8631 + # The EBB handler is 64-bit code and everything links against it
8632 + CFLAGS += -m64
8633 +
8634 +-TMPOUT = $(OUTPUT)/
8635 ++TMPOUT = $(OUTPUT)/TMPDIR/
8636 + # Toolchains may build PIE by default which breaks the assembly
8637 + no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
8638 + $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
8639 +diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
8640 +index 17a1f53ceba0..d77f4829f1e0 100755
8641 +--- a/tools/testing/selftests/wireguard/netns.sh
8642 ++++ b/tools/testing/selftests/wireguard/netns.sh
8643 +@@ -587,9 +587,20 @@ ip0 link set wg0 up
8644 + kill $ncat_pid
8645 + ip0 link del wg0
8646 +
8647 ++# Ensure there aren't circular reference loops
8648 ++ip1 link add wg1 type wireguard
8649 ++ip2 link add wg2 type wireguard
8650 ++ip1 link set wg1 netns $netns2
8651 ++ip2 link set wg2 netns $netns1
8652 ++pp ip netns delete $netns1
8653 ++pp ip netns delete $netns2
8654 ++pp ip netns add $netns1
8655 ++pp ip netns add $netns2
8656 ++
8657 ++sleep 2 # Wait for cleanup and grace periods
8658 + declare -A objects
8659 + while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
8660 +- [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue
8661 ++ [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ ?[0-9]*)\ .*(created|destroyed).* ]] || continue
8662 + objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}"
8663 + done < /dev/kmsg
8664 + alldeleted=1