
From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Wed, 13 Oct 2021 09:35:45
Message-Id: 1634117703.37557341c0895a26a577adcc6994453a28bd71cc.alicef@gentoo
1 commit: 37557341c0895a26a577adcc6994453a28bd71cc
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Wed Oct 13 09:34:23 2021 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Wed Oct 13 09:35:03 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=37557341
7
8 Linux patch 5.10.73
9
10 Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>
11
12 0000_README | 4 +
13 1072_linux-5.10.73.patch | 2474 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2478 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 42e4628..9e6befb 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -331,6 +331,10 @@ Patch: 1071_linux-5.10.72.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.72
23
24 +Patch: 1072_linux-5.10.73.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.73
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1072_linux-5.10.73.patch b/1072_linux-5.10.73.patch
33 new file mode 100644
34 index 0000000..5327e55
35 --- /dev/null
36 +++ b/1072_linux-5.10.73.patch
37 @@ -0,0 +1,2474 @@
38 +diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
39 +index f8622bd0f61ee..f0e0345da498f 100644
40 +--- a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
41 ++++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
42 +@@ -18,7 +18,7 @@ properties:
43 + const: ti,sn65dsi86
44 +
45 + reg:
46 +- const: 0x2d
47 ++ enum: [ 0x2c, 0x2d ]
48 +
49 + enable-gpios:
50 + maxItems: 1
51 +diff --git a/Makefile b/Makefile
52 +index 48211c8503d4e..3f62cea9afc0e 100644
53 +--- a/Makefile
54 ++++ b/Makefile
55 +@@ -1,7 +1,7 @@
56 + # SPDX-License-Identifier: GPL-2.0
57 + VERSION = 5
58 + PATCHLEVEL = 10
59 +-SUBLEVEL = 72
60 ++SUBLEVEL = 73
61 + EXTRAVERSION =
62 + NAME = Dare mighty things
63 +
64 +diff --git a/arch/arm/boot/dts/imx53-m53menlo.dts b/arch/arm/boot/dts/imx53-m53menlo.dts
65 +index d3082b9774e40..4f88e96d81ddb 100644
66 +--- a/arch/arm/boot/dts/imx53-m53menlo.dts
67 ++++ b/arch/arm/boot/dts/imx53-m53menlo.dts
68 +@@ -56,6 +56,7 @@
69 + panel {
70 + compatible = "edt,etm0700g0dh6";
71 + pinctrl-0 = <&pinctrl_display_gpio>;
72 ++ pinctrl-names = "default";
73 + enable-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>;
74 +
75 + port {
76 +@@ -76,8 +77,7 @@
77 + regulator-name = "vbus";
78 + regulator-min-microvolt = <5000000>;
79 + regulator-max-microvolt = <5000000>;
80 +- gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
81 +- enable-active-high;
82 ++ gpio = <&gpio1 2 0>;
83 + };
84 + };
85 +
86 +diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
87 +index 9148a01ed6d9f..ebc0892e37c7a 100644
88 +--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
89 ++++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
90 +@@ -5,6 +5,7 @@
91 + #include <dt-bindings/gpio/gpio.h>
92 + #include <dt-bindings/interrupt-controller/irq.h>
93 + #include <dt-bindings/input/input.h>
94 ++#include <dt-bindings/leds/common.h>
95 + #include <dt-bindings/pwm/pwm.h>
96 +
97 + / {
98 +@@ -275,6 +276,7 @@
99 + led-cur = /bits/ 8 <0x20>;
100 + max-cur = /bits/ 8 <0x60>;
101 + reg = <0>;
102 ++ color = <LED_COLOR_ID_RED>;
103 + };
104 +
105 + chan@1 {
106 +@@ -282,6 +284,7 @@
107 + led-cur = /bits/ 8 <0x20>;
108 + max-cur = /bits/ 8 <0x60>;
109 + reg = <1>;
110 ++ color = <LED_COLOR_ID_GREEN>;
111 + };
112 +
113 + chan@2 {
114 +@@ -289,6 +292,7 @@
115 + led-cur = /bits/ 8 <0x20>;
116 + max-cur = /bits/ 8 <0x60>;
117 + reg = <2>;
118 ++ color = <LED_COLOR_ID_BLUE>;
119 + };
120 +
121 + chan@3 {
122 +@@ -296,6 +300,7 @@
123 + led-cur = /bits/ 8 <0x0>;
124 + max-cur = /bits/ 8 <0x0>;
125 + reg = <3>;
126 ++ color = <LED_COLOR_ID_WHITE>;
127 + };
128 + };
129 +
130 +diff --git a/arch/arm/boot/dts/imx6qdl-pico.dtsi b/arch/arm/boot/dts/imx6qdl-pico.dtsi
131 +index 5de4ccb979163..f7a56d6b160c8 100644
132 +--- a/arch/arm/boot/dts/imx6qdl-pico.dtsi
133 ++++ b/arch/arm/boot/dts/imx6qdl-pico.dtsi
134 +@@ -176,7 +176,18 @@
135 + pinctrl-0 = <&pinctrl_enet>;
136 + phy-mode = "rgmii-id";
137 + phy-reset-gpios = <&gpio1 26 GPIO_ACTIVE_LOW>;
138 ++ phy-handle = <&phy>;
139 + status = "okay";
140 ++
141 ++ mdio {
142 ++ #address-cells = <1>;
143 ++ #size-cells = <0>;
144 ++
145 ++ phy: ethernet-phy@1 {
146 ++ reg = <1>;
147 ++ qca,clk-out-frequency = <125000000>;
148 ++ };
149 ++ };
150 + };
151 +
152 + &hdmi {
153 +diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts
154 +index c5b9037184149..7d530ae3483b8 100644
155 +--- a/arch/arm/boot/dts/omap3430-sdp.dts
156 ++++ b/arch/arm/boot/dts/omap3430-sdp.dts
157 +@@ -101,7 +101,7 @@
158 +
159 + nand@1,0 {
160 + compatible = "ti,omap2-nand";
161 +- reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
162 ++ reg = <1 0 4>; /* CS1, offset 0, IO size 4 */
163 + interrupt-parent = <&gpmc>;
164 + interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
165 + <1 IRQ_TYPE_NONE>; /* termcount */
166 +diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
167 +index e36d590e83732..72c4a9fc41a20 100644
168 +--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
169 ++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
170 +@@ -198,7 +198,7 @@
171 + clock-frequency = <19200000>;
172 + };
173 +
174 +- pxo_board {
175 ++ pxo_board: pxo_board {
176 + compatible = "fixed-clock";
177 + #clock-cells = <0>;
178 + clock-frequency = <27000000>;
179 +@@ -1148,7 +1148,7 @@
180 + };
181 +
182 + gpu: adreno-3xx@4300000 {
183 +- compatible = "qcom,adreno-3xx";
184 ++ compatible = "qcom,adreno-320.2", "qcom,adreno";
185 + reg = <0x04300000 0x20000>;
186 + reg-names = "kgsl_3d0_reg_memory";
187 + interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
188 +@@ -1163,7 +1163,6 @@
189 + <&mmcc GFX3D_AHB_CLK>,
190 + <&mmcc GFX3D_AXI_CLK>,
191 + <&mmcc MMSS_IMEM_AHB_CLK>;
192 +- qcom,chipid = <0x03020002>;
193 +
194 + iommus = <&gfx3d 0
195 + &gfx3d 1
196 +@@ -1306,7 +1305,7 @@
197 + reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator";
198 + clock-names = "iface_clk", "ref";
199 + clocks = <&mmcc DSI_M_AHB_CLK>,
200 +- <&cxo_board>;
201 ++ <&pxo_board>;
202 + };
203 +
204 +
205 +diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
206 +index 120f9aa6fff32..3f015cb6ec2b0 100644
207 +--- a/arch/arm/mach-at91/pm.c
208 ++++ b/arch/arm/mach-at91/pm.c
209 +@@ -517,18 +517,22 @@ static const struct of_device_id ramc_ids[] __initconst = {
210 + { /*sentinel*/ }
211 + };
212 +
213 +-static __init void at91_dt_ramc(void)
214 ++static __init int at91_dt_ramc(void)
215 + {
216 + struct device_node *np;
217 + const struct of_device_id *of_id;
218 + int idx = 0;
219 + void *standby = NULL;
220 + const struct ramc_info *ramc;
221 ++ int ret;
222 +
223 + for_each_matching_node_and_match(np, ramc_ids, &of_id) {
224 + soc_pm.data.ramc[idx] = of_iomap(np, 0);
225 +- if (!soc_pm.data.ramc[idx])
226 +- panic(pr_fmt("unable to map ramc[%d] cpu registers\n"), idx);
227 ++ if (!soc_pm.data.ramc[idx]) {
228 ++ pr_err("unable to map ramc[%d] cpu registers\n", idx);
229 ++ ret = -ENOMEM;
230 ++ goto unmap_ramc;
231 ++ }
232 +
233 + ramc = of_id->data;
234 + if (!standby)
235 +@@ -538,15 +542,26 @@ static __init void at91_dt_ramc(void)
236 + idx++;
237 + }
238 +
239 +- if (!idx)
240 +- panic(pr_fmt("unable to find compatible ram controller node in dtb\n"));
241 ++ if (!idx) {
242 ++ pr_err("unable to find compatible ram controller node in dtb\n");
243 ++ ret = -ENODEV;
244 ++ goto unmap_ramc;
245 ++ }
246 +
247 + if (!standby) {
248 + pr_warn("ramc no standby function available\n");
249 +- return;
250 ++ return 0;
251 + }
252 +
253 + at91_cpuidle_device.dev.platform_data = standby;
254 ++
255 ++ return 0;
256 ++
257 ++unmap_ramc:
258 ++ while (idx)
259 ++ iounmap(soc_pm.data.ramc[--idx]);
260 ++
261 ++ return ret;
262 + }
263 +
264 + static void at91rm9200_idle(void)
265 +@@ -869,6 +884,8 @@ static void __init at91_pm_init(void (*pm_idle)(void))
266 +
267 + void __init at91rm9200_pm_init(void)
268 + {
269 ++ int ret;
270 ++
271 + if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
272 + return;
273 +
274 +@@ -880,7 +897,9 @@ void __init at91rm9200_pm_init(void)
275 + soc_pm.data.standby_mode = AT91_PM_STANDBY;
276 + soc_pm.data.suspend_mode = AT91_PM_ULP0;
277 +
278 +- at91_dt_ramc();
279 ++ ret = at91_dt_ramc();
280 ++ if (ret)
281 ++ return;
282 +
283 + /*
284 + * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
285 +@@ -895,13 +914,17 @@ void __init sam9x60_pm_init(void)
286 + static const int modes[] __initconst = {
287 + AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
288 + };
289 ++ int ret;
290 +
291 + if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
292 + return;
293 +
294 + at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
295 + at91_pm_modes_init();
296 +- at91_dt_ramc();
297 ++ ret = at91_dt_ramc();
298 ++ if (ret)
299 ++ return;
300 ++
301 + at91_pm_init(NULL);
302 +
303 + soc_pm.ws_ids = sam9x60_ws_ids;
304 +@@ -910,6 +933,8 @@ void __init sam9x60_pm_init(void)
305 +
306 + void __init at91sam9_pm_init(void)
307 + {
308 ++ int ret;
309 ++
310 + if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
311 + return;
312 +
313 +@@ -921,7 +946,10 @@ void __init at91sam9_pm_init(void)
314 + soc_pm.data.standby_mode = AT91_PM_STANDBY;
315 + soc_pm.data.suspend_mode = AT91_PM_ULP0;
316 +
317 +- at91_dt_ramc();
318 ++ ret = at91_dt_ramc();
319 ++ if (ret)
320 ++ return;
321 ++
322 + at91_pm_init(at91sam9_idle);
323 + }
324 +
325 +@@ -930,12 +958,16 @@ void __init sama5_pm_init(void)
326 + static const int modes[] __initconst = {
327 + AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
328 + };
329 ++ int ret;
330 +
331 + if (!IS_ENABLED(CONFIG_SOC_SAMA5))
332 + return;
333 +
334 + at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
335 +- at91_dt_ramc();
336 ++ ret = at91_dt_ramc();
337 ++ if (ret)
338 ++ return;
339 ++
340 + at91_pm_init(NULL);
341 + }
342 +
343 +@@ -945,13 +977,17 @@ void __init sama5d2_pm_init(void)
344 + AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
345 + AT91_PM_BACKUP,
346 + };
347 ++ int ret;
348 +
349 + if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
350 + return;
351 +
352 + at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
353 + at91_pm_modes_init();
354 +- at91_dt_ramc();
355 ++ ret = at91_dt_ramc();
356 ++ if (ret)
357 ++ return;
358 ++
359 + at91_pm_init(NULL);
360 +
361 + soc_pm.ws_ids = sama5d2_ws_ids;
362 +diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
363 +index 40c74b4c4d730..e24409c1f5d39 100644
364 +--- a/arch/arm/mach-imx/pm-imx6.c
365 ++++ b/arch/arm/mach-imx/pm-imx6.c
366 +@@ -9,6 +9,7 @@
367 + #include <linux/io.h>
368 + #include <linux/irq.h>
369 + #include <linux/genalloc.h>
370 ++#include <linux/irqchip/arm-gic.h>
371 + #include <linux/mfd/syscon.h>
372 + #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
373 + #include <linux/of.h>
374 +@@ -618,6 +619,7 @@ static void __init imx6_pm_common_init(const struct imx6_pm_socdata
375 +
376 + static void imx6_pm_stby_poweroff(void)
377 + {
378 ++ gic_cpu_if_down(0);
379 + imx6_set_lpm(STOP_POWER_OFF);
380 + imx6q_suspend_finish(0);
381 +
382 +diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
383 +index 83d595ebcf1f6..9443f129859b2 100644
384 +--- a/arch/arm/mach-omap2/omap_hwmod.c
385 ++++ b/arch/arm/mach-omap2/omap_hwmod.c
386 +@@ -3618,6 +3618,8 @@ int omap_hwmod_init_module(struct device *dev,
387 + oh->flags |= HWMOD_SWSUP_SIDLE_ACT;
388 + if (data->cfg->quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
389 + oh->flags |= HWMOD_SWSUP_MSTANDBY;
390 ++ if (data->cfg->quirks & SYSC_QUIRK_CLKDM_NOAUTO)
391 ++ oh->flags |= HWMOD_CLKDM_NOAUTO;
392 +
393 + error = omap_hwmod_check_module(dev, oh, data, sysc_fields,
394 + rev_offs, sysc_offs, syss_offs,
395 +diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
396 +index ce8b043263521..1214e39aad5ec 100644
397 +--- a/arch/arm/net/bpf_jit_32.c
398 ++++ b/arch/arm/net/bpf_jit_32.c
399 +@@ -36,6 +36,10 @@
400 + * +-----+
401 + * |RSVD | JIT scratchpad
402 + * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
403 ++ * | ... | caller-saved registers
404 ++ * +-----+
405 ++ * | ... | arguments passed on stack
406 ++ * ARM_SP during call => +-----|
407 + * | |
408 + * | ... | Function call stack
409 + * | |
410 +@@ -63,6 +67,12 @@
411 + *
412 + * When popping registers off the stack at the end of a BPF function, we
413 + * reference them via the current ARM_FP register.
414 ++ *
415 ++ * Some eBPF operations are implemented via a call to a helper function.
416 ++ * Such calls are "invisible" in the eBPF code, so it is up to the calling
417 ++ * program to preserve any caller-saved ARM registers during the call. The
418 ++ * JIT emits code to push and pop those registers onto the stack, immediately
419 ++ * above the callee stack frame.
420 + */
421 + #define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
422 + 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
423 +@@ -70,6 +80,8 @@
424 + #define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
425 + #define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC)
426 +
427 ++#define CALLER_MASK (1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3)
428 ++
429 + enum {
430 + /* Stack layout - these are offsets from (top of stack - 4) */
431 + BPF_R2_HI,
432 +@@ -464,6 +476,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
433 +
434 + static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
435 + {
436 ++ const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
437 + const s8 *tmp = bpf2a32[TMP_REG_1];
438 +
439 + #if __LINUX_ARM_ARCH__ == 7
440 +@@ -495,11 +508,17 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
441 + emit(ARM_MOV_R(ARM_R0, rm), ctx);
442 + }
443 +
444 ++ /* Push caller-saved registers on stack */
445 ++ emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx);
446 ++
447 + /* Call appropriate function */
448 + emit_mov_i(ARM_IP, op == BPF_DIV ?
449 + (u32)jit_udiv32 : (u32)jit_mod32, ctx);
450 + emit_blx_r(ARM_IP, ctx);
451 +
452 ++ /* Restore caller-saved registers from stack */
453 ++ emit(ARM_POP(CALLER_MASK & ~exclude_mask), ctx);
454 ++
455 + /* Save return value */
456 + if (rd != ARM_R0)
457 + emit(ARM_MOV_R(rd, ARM_R0), ctx);
458 +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
459 +index 5f42904d53ab6..580690057601c 100644
460 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
461 ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
462 +@@ -386,6 +386,24 @@
463 + status = "disabled";
464 + };
465 +
466 ++ can0: can@2180000 {
467 ++ compatible = "fsl,ls1028ar1-flexcan", "fsl,lx2160ar1-flexcan";
468 ++ reg = <0x0 0x2180000 0x0 0x10000>;
469 ++ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
470 ++ clocks = <&sysclk>, <&clockgen 4 1>;
471 ++ clock-names = "ipg", "per";
472 ++ status = "disabled";
473 ++ };
474 ++
475 ++ can1: can@2190000 {
476 ++ compatible = "fsl,ls1028ar1-flexcan", "fsl,lx2160ar1-flexcan";
477 ++ reg = <0x0 0x2190000 0x0 0x10000>;
478 ++ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
479 ++ clocks = <&sysclk>, <&clockgen 4 1>;
480 ++ clock-names = "ipg", "per";
481 ++ status = "disabled";
482 ++ };
483 ++
484 + duart0: serial@21c0500 {
485 + compatible = "fsl,ns16550", "ns16550a";
486 + reg = <0x00 0x21c0500 0x0 0x100>;
487 +diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
488 +index 1b6406927509f..82edcd74ce983 100644
489 +--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
490 ++++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
491 +@@ -48,7 +48,7 @@
492 + #size-cells = <0>;
493 +
494 + pon: power-on@800 {
495 +- compatible = "qcom,pm8916-pon";
496 ++ compatible = "qcom,pm8998-pon";
497 + reg = <0x0800>;
498 + pwrkey {
499 + compatible = "qcom,pm8941-pwrkey";
500 +diff --git a/arch/powerpc/boot/dts/fsl/t1023rdb.dts b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
501 +index 5ba6fbfca2742..f82f85c65964c 100644
502 +--- a/arch/powerpc/boot/dts/fsl/t1023rdb.dts
503 ++++ b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
504 +@@ -154,7 +154,7 @@
505 +
506 + fm1mac3: ethernet@e4000 {
507 + phy-handle = <&sgmii_aqr_phy3>;
508 +- phy-connection-type = "sgmii-2500";
509 ++ phy-connection-type = "2500base-x";
510 + sleep = <&rcpm 0x20000000>;
511 + };
512 +
513 +diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
514 +index a1c7441940184..9ac0651795cf6 100644
515 +--- a/arch/powerpc/kernel/dma-iommu.c
516 ++++ b/arch/powerpc/kernel/dma-iommu.c
517 +@@ -117,6 +117,15 @@ u64 dma_iommu_get_required_mask(struct device *dev)
518 + struct iommu_table *tbl = get_iommu_table_base(dev);
519 + u64 mask;
520 +
521 ++ if (dev_is_pci(dev)) {
522 ++ u64 bypass_mask = dma_direct_get_required_mask(dev);
523 ++
524 ++ if (dma_iommu_dma_supported(dev, bypass_mask)) {
525 ++ dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
526 ++ return bypass_mask;
527 ++ }
528 ++ }
529 ++
530 + if (!tbl)
531 + return 0;
532 +
533 +diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
534 +index 9d3b468bd2d7a..10df278dc3fbe 100644
535 +--- a/arch/powerpc/kernel/exceptions-64s.S
536 ++++ b/arch/powerpc/kernel/exceptions-64s.S
537 +@@ -1715,27 +1715,30 @@ EXC_COMMON_BEGIN(program_check_common)
538 + */
539 +
540 + andi. r10,r12,MSR_PR
541 +- bne 2f /* If userspace, go normal path */
542 ++ bne .Lnormal_stack /* If userspace, go normal path */
543 +
544 + andis. r10,r12,(SRR1_PROGTM)@h
545 +- bne 1f /* If TM, emergency */
546 ++ bne .Lemergency_stack /* If TM, emergency */
547 +
548 + cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */
549 +- blt 2f /* normal path if not */
550 ++ blt .Lnormal_stack /* normal path if not */
551 +
552 + /* Use the emergency stack */
553 +-1: andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
554 ++.Lemergency_stack:
555 ++ andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
556 + /* 3 in EXCEPTION_PROLOG_COMMON */
557 + mr r10,r1 /* Save r1 */
558 + ld r1,PACAEMERGSP(r13) /* Use emergency stack */
559 + subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
560 + __ISTACK(program_check)=0
561 + __GEN_COMMON_BODY program_check
562 +- b 3f
563 +-2:
564 ++ b .Ldo_program_check
565 ++
566 ++.Lnormal_stack:
567 + __ISTACK(program_check)=1
568 + __GEN_COMMON_BODY program_check
569 +-3:
570 ++
571 ++.Ldo_program_check:
572 + addi r3,r1,STACK_FRAME_OVERHEAD
573 + bl program_check_exception
574 + REST_NVGPRS(r1) /* instruction emulation may change GPRs */
575 +diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
576 +index 658ca2bab13cc..0752967f351bb 100644
577 +--- a/arch/powerpc/net/bpf_jit_comp64.c
578 ++++ b/arch/powerpc/net/bpf_jit_comp64.c
579 +@@ -347,18 +347,25 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
580 + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
581 + goto bpf_alu32_trunc;
582 + case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
583 +- case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
584 + case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
585 ++ if (!imm) {
586 ++ goto bpf_alu32_trunc;
587 ++ } else if (imm >= -32768 && imm < 32768) {
588 ++ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
589 ++ } else {
590 ++ PPC_LI32(b2p[TMP_REG_1], imm);
591 ++ EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
592 ++ }
593 ++ goto bpf_alu32_trunc;
594 ++ case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
595 + case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
596 +- if (BPF_OP(code) == BPF_SUB)
597 +- imm = -imm;
598 +- if (imm) {
599 +- if (imm >= -32768 && imm < 32768)
600 +- EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
601 +- else {
602 +- PPC_LI32(b2p[TMP_REG_1], imm);
603 +- EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
604 +- }
605 ++ if (!imm) {
606 ++ goto bpf_alu32_trunc;
607 ++ } else if (imm > -32768 && imm <= 32768) {
608 ++ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
609 ++ } else {
610 ++ PPC_LI32(b2p[TMP_REG_1], imm);
611 ++ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
612 + }
613 + goto bpf_alu32_trunc;
614 + case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
615 +diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
616 +index cf024fa37bda0..7ed38ebd0c7b6 100644
617 +--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
618 ++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
619 +@@ -868,6 +868,10 @@ static int __init eeh_pseries_init(void)
620 + if (is_kdump_kernel() || reset_devices) {
621 + pr_info("Issue PHB reset ...\n");
622 + list_for_each_entry(phb, &hose_list, list_node) {
623 ++ // Skip if the slot is empty
624 ++ if (list_empty(&PCI_DN(phb->dn)->child_list))
625 ++ continue;
626 ++
627 + pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
628 + config_addr = pseries_eeh_get_pe_config_addr(pdn);
629 +
630 +diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
631 +index 4b989ae15d59f..8062996c2dfd0 100644
632 +--- a/arch/riscv/include/uapi/asm/unistd.h
633 ++++ b/arch/riscv/include/uapi/asm/unistd.h
634 +@@ -18,9 +18,10 @@
635 + #ifdef __LP64__
636 + #define __ARCH_WANT_NEW_STAT
637 + #define __ARCH_WANT_SET_GET_RLIMIT
638 +-#define __ARCH_WANT_SYS_CLONE3
639 + #endif /* __LP64__ */
640 +
641 ++#define __ARCH_WANT_SYS_CLONE3
642 ++
643 + #include <asm-generic/unistd.h>
644 +
645 + /*
646 +diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
647 +index 3f1d35e7c98a6..73d45931a053a 100644
648 +--- a/arch/riscv/kernel/vdso.c
649 ++++ b/arch/riscv/kernel/vdso.c
650 +@@ -65,7 +65,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
651 +
652 + vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
653 +
654 +- mmap_write_lock(mm);
655 ++ if (mmap_write_lock_killable(mm))
656 ++ return -EINTR;
657 ++
658 + vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
659 + if (IS_ERR_VALUE(vdso_base)) {
660 + ret = vdso_base;
661 +diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
662 +index 094118663285d..89f81067e09ed 100644
663 +--- a/arch/riscv/mm/cacheflush.c
664 ++++ b/arch/riscv/mm/cacheflush.c
665 +@@ -16,6 +16,8 @@ static void ipi_remote_fence_i(void *info)
666 +
667 + void flush_icache_all(void)
668 + {
669 ++ local_flush_icache_all();
670 ++
671 + if (IS_ENABLED(CONFIG_RISCV_SBI))
672 + sbi_remote_fence_i(NULL);
673 + else
674 +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
675 +index 8d9047d2d1e11..cd0cbdafedbd2 100644
676 +--- a/arch/s390/net/bpf_jit_comp.c
677 ++++ b/arch/s390/net/bpf_jit_comp.c
678 +@@ -1775,7 +1775,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
679 + jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
680 + if (jit.addrs == NULL) {
681 + fp = orig_fp;
682 +- goto out;
683 ++ goto free_addrs;
684 + }
685 + /*
686 + * Three initial passes:
687 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
688 +index f3c8a8110f60c..4201d0cf5f835 100644
689 +--- a/arch/x86/Kconfig
690 ++++ b/arch/x86/Kconfig
691 +@@ -1415,7 +1415,7 @@ config HIGHMEM4G
692 +
693 + config HIGHMEM64G
694 + bool "64GB"
695 +- depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
696 ++ depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
697 + select X86_PAE
698 + help
699 + Select this if you have a 32-bit processor and more than 4
700 +diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
701 +index 6fe54b2813c13..4a382fb6a9ef8 100644
702 +--- a/arch/x86/include/asm/entry-common.h
703 ++++ b/arch/x86/include/asm/entry-common.h
704 +@@ -24,7 +24,7 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
705 + * For !SMAP hardware we patch out CLAC on entry.
706 + */
707 + if (boot_cpu_has(X86_FEATURE_SMAP) ||
708 +- (IS_ENABLED(CONFIG_64_BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
709 ++ (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
710 + mask |= X86_EFLAGS_AC;
711 +
712 + WARN_ON_ONCE(flags & mask);
713 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
714 +index 25148ebd36341..ec21f5e9ffd05 100644
715 +--- a/arch/x86/kernel/cpu/common.c
716 ++++ b/arch/x86/kernel/cpu/common.c
717 +@@ -320,6 +320,7 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
718 + #ifdef CONFIG_X86_SMAP
719 + cr4_set_bits(X86_CR4_SMAP);
720 + #else
721 ++ clear_cpu_cap(c, X86_FEATURE_SMAP);
722 + cr4_clear_bits(X86_CR4_SMAP);
723 + #endif
724 + }
725 +diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
726 +index a4b5af03dcc1b..0c6d1dc59fa21 100644
727 +--- a/arch/x86/kernel/early-quirks.c
728 ++++ b/arch/x86/kernel/early-quirks.c
729 +@@ -711,12 +711,6 @@ static struct chipset early_qrk[] __initdata = {
730 + */
731 + { PCI_VENDOR_ID_INTEL, 0x0f00,
732 + PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
733 +- { PCI_VENDOR_ID_INTEL, 0x3e20,
734 +- PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
735 +- { PCI_VENDOR_ID_INTEL, 0x3ec4,
736 +- PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
737 +- { PCI_VENDOR_ID_INTEL, 0x8a12,
738 +- PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
739 + { PCI_VENDOR_ID_BROADCOM, 0x4331,
740 + PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
741 + {}
742 +diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
743 +index 7a50f0b62a709..4ab7a9757e521 100644
744 +--- a/arch/x86/kernel/hpet.c
745 ++++ b/arch/x86/kernel/hpet.c
746 +@@ -9,6 +9,7 @@
747 +
748 + #include <asm/hpet.h>
749 + #include <asm/time.h>
750 ++#include <asm/mwait.h>
751 +
752 + #undef pr_fmt
753 + #define pr_fmt(fmt) "hpet: " fmt
754 +@@ -806,6 +807,83 @@ static bool __init hpet_counting(void)
755 + return false;
756 + }
757 +
758 ++static bool __init mwait_pc10_supported(void)
759 ++{
760 ++ unsigned int eax, ebx, ecx, mwait_substates;
761 ++
762 ++ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
763 ++ return false;
764 ++
765 ++ if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
766 ++ return false;
767 ++
768 ++ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
769 ++ return false;
770 ++
771 ++ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
772 ++
773 ++ return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
774 ++ (ecx & CPUID5_ECX_INTERRUPT_BREAK) &&
775 ++ (mwait_substates & (0xF << 28));
776 ++}
777 ++
778 ++/*
779 ++ * Check whether the system supports PC10. If so force disable HPET as that
780 ++ * stops counting in PC10. This check is overbroad as it does not take any
781 ++ * of the following into account:
782 ++ *
783 ++ * - ACPI tables
784 ++ * - Enablement of intel_idle
785 ++ * - Command line arguments which limit intel_idle C-state support
786 ++ *
787 ++ * That's perfectly fine. HPET is a piece of hardware designed by committee
788 ++ * and the only reasons why it is still in use on modern systems is the
789 ++ * fact that it is impossible to reliably query TSC and CPU frequency via
790 ++ * CPUID or firmware.
791 ++ *
792 ++ * If HPET is functional it is useful for calibrating TSC, but this can be
793 ++ * done via PMTIMER as well which seems to be the last remaining timer on
794 ++ * X86/INTEL platforms that has not been completely wreckaged by feature
795 ++ * creep.
796 ++ *
797 ++ * In theory HPET support should be removed altogether, but there are older
798 ++ * systems out there which depend on it because TSC and APIC timer are
799 ++ * dysfunctional in deeper C-states.
800 ++ *
801 ++ * It's only 20 years now that hardware people have been asked to provide
802 ++ * reliable and discoverable facilities which can be used for timekeeping
803 ++ * and per CPU timer interrupts.
804 ++ *
805 ++ * The probability that this problem is going to be solved in the
806 ++ * forseeable future is close to zero, so the kernel has to be cluttered
807 ++ * with heuristics to keep up with the ever growing amount of hardware and
808 ++ * firmware trainwrecks. Hopefully some day hardware people will understand
809 ++ * that the approach of "This can be fixed in software" is not sustainable.
810 ++ * Hope dies last...
811 ++ */
812 ++static bool __init hpet_is_pc10_damaged(void)
813 ++{
814 ++ unsigned long long pcfg;
815 ++
816 ++ /* Check whether PC10 substates are supported */
817 ++ if (!mwait_pc10_supported())
818 ++ return false;
819 ++
820 ++ /* Check whether PC10 is enabled in PKG C-state limit */
821 ++ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
822 ++ if ((pcfg & 0xF) < 8)
823 ++ return false;
824 ++
825 ++ if (hpet_force_user) {
826 ++ pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n");
827 ++ return false;
828 ++ }
829 ++
830 ++ pr_info("HPET dysfunctional in PC10. Force disabled.\n");
831 ++ boot_hpet_disable = true;
832 ++ return true;
833 ++}
834 ++
835 + /**
836 + * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
837 + */
838 +@@ -819,6 +897,9 @@ int __init hpet_enable(void)
839 + if (!is_hpet_capable())
840 + return 0;
841 +
842 ++ if (hpet_is_pc10_damaged())
843 ++ return 0;
844 ++
845 + hpet_set_mapping();
846 + if (!hpet_virt_address)
847 + return 0;
848 +diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
849 +index ecb20b17b7df6..82db4014deb21 100644
850 +--- a/arch/x86/kernel/sev-es-shared.c
851 ++++ b/arch/x86/kernel/sev-es-shared.c
852 +@@ -130,6 +130,8 @@ static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
853 + } else {
854 + ret = ES_VMM_ERROR;
855 + }
856 ++ } else if (ghcb->save.sw_exit_info_1 & 0xffffffff) {
857 ++ ret = ES_VMM_ERROR;
858 + } else {
859 + ret = ES_OK;
860 + }
861 +diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
862 +index ee2beda590d0d..1d4a00e767ece 100644
863 +--- a/arch/x86/platform/olpc/olpc.c
864 ++++ b/arch/x86/platform/olpc/olpc.c
865 +@@ -274,7 +274,7 @@ static struct olpc_ec_driver ec_xo1_driver = {
866 +
867 + static struct olpc_ec_driver ec_xo1_5_driver = {
868 + .ec_cmd = olpc_xo1_ec_cmd,
869 +-#ifdef CONFIG_OLPC_XO1_5_SCI
870 ++#ifdef CONFIG_OLPC_XO15_SCI
871 + /*
872 + * XO-1.5 EC wakeups are available when olpc-xo15-sci driver is
873 + * compiled in
874 +diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
875 +index 7cbf68ca71060..6fc05cba61a27 100644
876 +--- a/arch/xtensa/include/asm/kmem_layout.h
877 ++++ b/arch/xtensa/include/asm/kmem_layout.h
878 +@@ -78,7 +78,7 @@
879 + #endif
880 + #define XCHAL_KIO_SIZE 0x10000000
881 +
882 +-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
883 ++#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
884 + #define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
885 + #ifndef __ASSEMBLY__
886 + extern unsigned long xtensa_kio_paddr;
887 +diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
888 +index a48bf2d10ac2d..80cc9770a8d2d 100644
889 +--- a/arch/xtensa/kernel/irq.c
890 ++++ b/arch/xtensa/kernel/irq.c
891 +@@ -145,7 +145,7 @@ unsigned xtensa_get_ext_irq_no(unsigned irq)
892 +
893 + void __init init_IRQ(void)
894 + {
895 +-#ifdef CONFIG_OF
896 ++#ifdef CONFIG_USE_OF
897 + irqchip_init();
898 + #else
899 + #ifdef CONFIG_HAVE_SMP
900 +diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
901 +index ed184106e4cf9..ee9082a142feb 100644
902 +--- a/arch/xtensa/kernel/setup.c
903 ++++ b/arch/xtensa/kernel/setup.c
904 +@@ -63,7 +63,7 @@ extern unsigned long initrd_end;
905 + extern int initrd_below_start_ok;
906 + #endif
907 +
908 +-#ifdef CONFIG_OF
909 ++#ifdef CONFIG_USE_OF
910 + void *dtb_start = __dtb_start;
911 + #endif
912 +
913 +@@ -125,7 +125,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
914 +
915 + #endif /* CONFIG_BLK_DEV_INITRD */
916 +
917 +-#ifdef CONFIG_OF
918 ++#ifdef CONFIG_USE_OF
919 +
920 + static int __init parse_tag_fdt(const bp_tag_t *tag)
921 + {
922 +@@ -135,7 +135,7 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
923 +
924 + __tagtable(BP_TAG_FDT, parse_tag_fdt);
925 +
926 +-#endif /* CONFIG_OF */
927 ++#endif /* CONFIG_USE_OF */
928 +
929 + static int __init parse_tag_cmdline(const bp_tag_t* tag)
930 + {
931 +@@ -183,7 +183,7 @@ static int __init parse_bootparam(const bp_tag_t *tag)
932 + }
933 + #endif
934 +
935 +-#ifdef CONFIG_OF
936 ++#ifdef CONFIG_USE_OF
937 +
938 + #if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
939 + unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
940 +@@ -232,7 +232,7 @@ void __init early_init_devtree(void *params)
941 + strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
942 + }
943 +
944 +-#endif /* CONFIG_OF */
945 ++#endif /* CONFIG_USE_OF */
946 +
947 + /*
948 + * Initialize architecture. (Early stage)
949 +@@ -253,7 +253,7 @@ void __init init_arch(bp_tag_t *bp_start)
950 + if (bp_start)
951 + parse_bootparam(bp_start);
952 +
953 +-#ifdef CONFIG_OF
954 ++#ifdef CONFIG_USE_OF
955 + early_init_devtree(dtb_start);
956 + #endif
957 +
958 +diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
959 +index fd2193df8a145..511bb92518f28 100644
960 +--- a/arch/xtensa/mm/mmu.c
961 ++++ b/arch/xtensa/mm/mmu.c
962 +@@ -100,7 +100,7 @@ void init_mmu(void)
963 +
964 + void init_kio(void)
965 + {
966 +-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
967 ++#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
968 + /*
969 + * Update the IO area mapping in case xtensa_kio_paddr has changed
970 + */
971 +diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
972 +index 159b57c6dc4df..02341fd66e8d2 100644
973 +--- a/drivers/bus/ti-sysc.c
974 ++++ b/drivers/bus/ti-sysc.c
975 +@@ -1464,6 +1464,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
976 + /* Quirks that need to be set based on detected module */
977 + SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
978 + SYSC_MODULE_QUIRK_AESS),
979 ++ /* Errata i893 handling for dra7 dcan1 and 2 */
980 ++ SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
981 ++ SYSC_QUIRK_CLKDM_NOAUTO),
982 + SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
983 + SYSC_QUIRK_CLKDM_NOAUTO),
984 + SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
985 +@@ -2922,6 +2925,7 @@ static int sysc_init_soc(struct sysc *ddata)
986 + break;
987 + case SOC_AM3:
988 + sysc_add_disabled(0x48310000); /* rng */
989 ++ break;
990 + default:
991 + break;
992 + };
993 +diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
994 +index b8c31b697797e..66f32d965c723 100644
995 +--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
996 ++++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
997 +@@ -704,6 +704,7 @@ static const struct file_operations nv50_crc_flip_threshold_fops = {
998 + .open = nv50_crc_debugfs_flip_threshold_open,
999 + .read = seq_read,
1000 + .write = nv50_crc_debugfs_flip_threshold_set,
1001 ++ .release = single_release,
1002 + };
1003 +
1004 + int nv50_head_crc_late_register(struct nv50_head *head)
1005 +diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
1006 +index 61826cac3061a..be649d14f8797 100644
1007 +--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
1008 ++++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
1009 +@@ -51,6 +51,7 @@ nv50_head_flush_clr(struct nv50_head *head,
1010 + void
1011 + nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
1012 + {
1013 ++ if (asyh->set.curs ) head->func->curs_set(head, asyh);
1014 + if (asyh->set.olut ) {
1015 + asyh->olut.offset = nv50_lut_load(&head->olut,
1016 + asyh->olut.buffer,
1017 +@@ -66,7 +67,6 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
1018 + if (asyh->set.view ) head->func->view (head, asyh);
1019 + if (asyh->set.mode ) head->func->mode (head, asyh);
1020 + if (asyh->set.core ) head->func->core_set(head, asyh);
1021 +- if (asyh->set.curs ) head->func->curs_set(head, asyh);
1022 + if (asyh->set.base ) head->func->base (head, asyh);
1023 + if (asyh->set.ovly ) head->func->ovly (head, asyh);
1024 + if (asyh->set.dither ) head->func->dither (head, asyh);
1025 +diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
1026 +index c2bc05eb2e54a..1cbe01048b930 100644
1027 +--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
1028 ++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
1029 +@@ -207,6 +207,7 @@ static const struct file_operations nouveau_pstate_fops = {
1030 + .open = nouveau_debugfs_pstate_open,
1031 + .read = seq_read,
1032 + .write = nouveau_debugfs_pstate_set,
1033 ++ .release = single_release,
1034 + };
1035 +
1036 + static struct drm_info_list nouveau_debugfs_list[] = {
1037 +diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
1038 +index c2051380d18c0..6504ebec11901 100644
1039 +--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
1040 ++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
1041 +@@ -196,10 +196,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
1042 + }
1043 +
1044 + ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
1045 +- if (ret) {
1046 +- nouveau_bo_ref(NULL, &nvbo);
1047 ++ if (ret)
1048 + return ret;
1049 +- }
1050 +
1051 + /* we restrict allowed domains on nv50+ to only the types
1052 + * that were requested at creation time. not possibly on
1053 +diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
1054 +index f75fb157f2ff7..016b877051dab 100644
1055 +--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
1056 ++++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
1057 +@@ -216,11 +216,13 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
1058 + goto err_disable_clk_tmds;
1059 + }
1060 +
1061 ++ ret = sun8i_hdmi_phy_init(hdmi->phy);
1062 ++ if (ret)
1063 ++ goto err_disable_clk_tmds;
1064 ++
1065 + drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
1066 + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
1067 +
1068 +- sun8i_hdmi_phy_init(hdmi->phy);
1069 +-
1070 + plat_data->mode_valid = hdmi->quirks->mode_valid;
1071 + plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe;
1072 + sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
1073 +@@ -262,6 +264,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
1074 + struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
1075 +
1076 + dw_hdmi_unbind(hdmi->hdmi);
1077 ++ sun8i_hdmi_phy_deinit(hdmi->phy);
1078 + clk_disable_unprepare(hdmi->clk_tmds);
1079 + reset_control_assert(hdmi->rst_ctrl);
1080 + gpiod_set_value(hdmi->ddc_en, 0);
1081 +diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
1082 +index 74f6ed0e25709..bffe1b9cd3dcb 100644
1083 +--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
1084 ++++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
1085 +@@ -169,6 +169,7 @@ struct sun8i_hdmi_phy {
1086 + struct clk *clk_phy;
1087 + struct clk *clk_pll0;
1088 + struct clk *clk_pll1;
1089 ++ struct device *dev;
1090 + unsigned int rcal;
1091 + struct regmap *regs;
1092 + struct reset_control *rst_phy;
1093 +@@ -205,7 +206,8 @@ encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
1094 +
1095 + int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
1096 +
1097 +-void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
1098 ++int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
1099 ++void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy);
1100 + void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
1101 + struct dw_hdmi_plat_data *plat_data);
1102 +
1103 +diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
1104 +index c9239708d398c..b64d93da651d2 100644
1105 +--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
1106 ++++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
1107 +@@ -506,9 +506,60 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
1108 + phy->rcal = (val & SUN8I_HDMI_PHY_ANA_STS_RCAL_MASK) >> 2;
1109 + }
1110 +
1111 +-void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
1112 ++int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
1113 + {
1114 ++ int ret;
1115 ++
1116 ++ ret = reset_control_deassert(phy->rst_phy);
1117 ++ if (ret) {
1118 ++ dev_err(phy->dev, "Cannot deassert phy reset control: %d\n", ret);
1119 ++ return ret;
1120 ++ }
1121 ++
1122 ++ ret = clk_prepare_enable(phy->clk_bus);
1123 ++ if (ret) {
1124 ++ dev_err(phy->dev, "Cannot enable bus clock: %d\n", ret);
1125 ++ goto err_assert_rst_phy;
1126 ++ }
1127 ++
1128 ++ ret = clk_prepare_enable(phy->clk_mod);
1129 ++ if (ret) {
1130 ++ dev_err(phy->dev, "Cannot enable mod clock: %d\n", ret);
1131 ++ goto err_disable_clk_bus;
1132 ++ }
1133 ++
1134 ++ if (phy->variant->has_phy_clk) {
1135 ++ ret = sun8i_phy_clk_create(phy, phy->dev,
1136 ++ phy->variant->has_second_pll);
1137 ++ if (ret) {
1138 ++ dev_err(phy->dev, "Couldn't create the PHY clock\n");
1139 ++ goto err_disable_clk_mod;
1140 ++ }
1141 ++
1142 ++ clk_prepare_enable(phy->clk_phy);
1143 ++ }
1144 ++
1145 + phy->variant->phy_init(phy);
1146 ++
1147 ++ return 0;
1148 ++
1149 ++err_disable_clk_mod:
1150 ++ clk_disable_unprepare(phy->clk_mod);
1151 ++err_disable_clk_bus:
1152 ++ clk_disable_unprepare(phy->clk_bus);
1153 ++err_assert_rst_phy:
1154 ++ reset_control_assert(phy->rst_phy);
1155 ++
1156 ++ return ret;
1157 ++}
1158 ++
1159 ++void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy)
1160 ++{
1161 ++ clk_disable_unprepare(phy->clk_mod);
1162 ++ clk_disable_unprepare(phy->clk_bus);
1163 ++ clk_disable_unprepare(phy->clk_phy);
1164 ++
1165 ++ reset_control_assert(phy->rst_phy);
1166 + }
1167 +
1168 + void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
1169 +@@ -638,6 +689,7 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
1170 + return -ENOMEM;
1171 +
1172 + phy->variant = (struct sun8i_hdmi_phy_variant *)match->data;
1173 ++ phy->dev = dev;
1174 +
1175 + ret = of_address_to_resource(node, 0, &res);
1176 + if (ret) {
1177 +@@ -696,47 +748,10 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
1178 + goto err_put_clk_pll1;
1179 + }
1180 +
1181 +- ret = reset_control_deassert(phy->rst_phy);
1182 +- if (ret) {
1183 +- dev_err(dev, "Cannot deassert phy reset control: %d\n", ret);
1184 +- goto err_put_rst_phy;
1185 +- }
1186 +-
1187 +- ret = clk_prepare_enable(phy->clk_bus);
1188 +- if (ret) {
1189 +- dev_err(dev, "Cannot enable bus clock: %d\n", ret);
1190 +- goto err_deassert_rst_phy;
1191 +- }
1192 +-
1193 +- ret = clk_prepare_enable(phy->clk_mod);
1194 +- if (ret) {
1195 +- dev_err(dev, "Cannot enable mod clock: %d\n", ret);
1196 +- goto err_disable_clk_bus;
1197 +- }
1198 +-
1199 +- if (phy->variant->has_phy_clk) {
1200 +- ret = sun8i_phy_clk_create(phy, dev,
1201 +- phy->variant->has_second_pll);
1202 +- if (ret) {
1203 +- dev_err(dev, "Couldn't create the PHY clock\n");
1204 +- goto err_disable_clk_mod;
1205 +- }
1206 +-
1207 +- clk_prepare_enable(phy->clk_phy);
1208 +- }
1209 +-
1210 + platform_set_drvdata(pdev, phy);
1211 +
1212 + return 0;
1213 +
1214 +-err_disable_clk_mod:
1215 +- clk_disable_unprepare(phy->clk_mod);
1216 +-err_disable_clk_bus:
1217 +- clk_disable_unprepare(phy->clk_bus);
1218 +-err_deassert_rst_phy:
1219 +- reset_control_assert(phy->rst_phy);
1220 +-err_put_rst_phy:
1221 +- reset_control_put(phy->rst_phy);
1222 + err_put_clk_pll1:
1223 + clk_put(phy->clk_pll1);
1224 + err_put_clk_pll0:
1225 +@@ -753,12 +768,6 @@ static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
1226 + {
1227 + struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
1228 +
1229 +- clk_disable_unprepare(phy->clk_mod);
1230 +- clk_disable_unprepare(phy->clk_bus);
1231 +- clk_disable_unprepare(phy->clk_phy);
1232 +-
1233 +- reset_control_assert(phy->rst_phy);
1234 +-
1235 + reset_control_put(phy->rst_phy);
1236 +
1237 + clk_put(phy->clk_pll0);
1238 +diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
1239 +index 1a5f1ccd1d2f7..0af2784cbd0d9 100644
1240 +--- a/drivers/i2c/busses/i2c-mt65xx.c
1241 ++++ b/drivers/i2c/busses/i2c-mt65xx.c
1242 +@@ -41,6 +41,8 @@
1243 + #define I2C_HANDSHAKE_RST 0x0020
1244 + #define I2C_FIFO_ADDR_CLR 0x0001
1245 + #define I2C_DELAY_LEN 0x0002
1246 ++#define I2C_ST_START_CON 0x8001
1247 ++#define I2C_FS_START_CON 0x1800
1248 + #define I2C_TIME_CLR_VALUE 0x0000
1249 + #define I2C_TIME_DEFAULT_VALUE 0x0003
1250 + #define I2C_WRRD_TRANAC_VALUE 0x0002
1251 +@@ -479,6 +481,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
1252 + {
1253 + u16 control_reg;
1254 + u16 intr_stat_reg;
1255 ++ u16 ext_conf_val;
1256 +
1257 + mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
1258 + intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
1259 +@@ -517,8 +520,13 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
1260 + if (i2c->dev_comp->ltiming_adjust)
1261 + mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING);
1262 +
1263 ++ if (i2c->speed_hz <= I2C_MAX_STANDARD_MODE_FREQ)
1264 ++ ext_conf_val = I2C_ST_START_CON;
1265 ++ else
1266 ++ ext_conf_val = I2C_FS_START_CON;
1267 ++
1268 + if (i2c->dev_comp->timing_adjust) {
1269 +- mtk_i2c_writew(i2c, i2c->ac_timing.ext, OFFSET_EXT_CONF);
1270 ++ ext_conf_val = i2c->ac_timing.ext;
1271 + mtk_i2c_writew(i2c, i2c->ac_timing.inter_clk_div,
1272 + OFFSET_CLOCK_DIV);
1273 + mtk_i2c_writew(i2c, I2C_SCL_MIS_COMP_VALUE,
1274 +@@ -543,6 +551,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
1275 + OFFSET_HS_STA_STO_AC_TIMING);
1276 + }
1277 + }
1278 ++ mtk_i2c_writew(i2c, ext_conf_val, OFFSET_EXT_CONF);
1279 +
1280 + /* If use i2c pin from PMIC mt6397 side, need set PATH_DIR first */
1281 + if (i2c->have_pmic)
1282 +diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
1283 +index 37c510d9347a7..4b136d8710743 100644
1284 +--- a/drivers/i2c/i2c-core-acpi.c
1285 ++++ b/drivers/i2c/i2c-core-acpi.c
1286 +@@ -426,6 +426,7 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
1287 + break;
1288 +
1289 + i2c_acpi_register_device(adapter, adev, &info);
1290 ++ put_device(&adapter->dev);
1291 + break;
1292 + case ACPI_RECONFIG_DEVICE_REMOVE:
1293 + if (!acpi_device_enumerated(adev))
1294 +diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
1295 +index d3f40c9a8c6c8..b274083a6e635 100644
1296 +--- a/drivers/mmc/host/meson-gx-mmc.c
1297 ++++ b/drivers/mmc/host/meson-gx-mmc.c
1298 +@@ -735,7 +735,7 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
1299 + writel(start, host->regs + SD_EMMC_START);
1300 + }
1301 +
1302 +-/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
1303 ++/* local sg copy for dram_access_quirk */
1304 + static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
1305 + size_t buflen, bool to_buffer)
1306 + {
1307 +@@ -753,21 +753,27 @@ static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data
1308 + sg_miter_start(&miter, sgl, nents, sg_flags);
1309 +
1310 + while ((offset < buflen) && sg_miter_next(&miter)) {
1311 +- unsigned int len;
1312 ++ unsigned int buf_offset = 0;
1313 ++ unsigned int len, left;
1314 ++ u32 *buf = miter.addr;
1315 +
1316 + len = min(miter.length, buflen - offset);
1317 ++ left = len;
1318 +
1319 +- /* When dram_access_quirk, the bounce buffer is a iomem mapping */
1320 +- if (host->dram_access_quirk) {
1321 +- if (to_buffer)
1322 +- memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
1323 +- else
1324 +- memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
1325 ++ if (to_buffer) {
1326 ++ do {
1327 ++ writel(*buf++, host->bounce_iomem_buf + offset + buf_offset);
1328 ++
1329 ++ buf_offset += 4;
1330 ++ left -= 4;
1331 ++ } while (left);
1332 + } else {
1333 +- if (to_buffer)
1334 +- memcpy(host->bounce_buf + offset, miter.addr, len);
1335 +- else
1336 +- memcpy(miter.addr, host->bounce_buf + offset, len);
1337 ++ do {
1338 ++ *buf++ = readl(host->bounce_iomem_buf + offset + buf_offset);
1339 ++
1340 ++ buf_offset += 4;
1341 ++ left -= 4;
1342 ++ } while (left);
1343 + }
1344 +
1345 + offset += len;
1346 +@@ -819,7 +825,11 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
1347 + if (data->flags & MMC_DATA_WRITE) {
1348 + cmd_cfg |= CMD_CFG_DATA_WR;
1349 + WARN_ON(xfer_bytes > host->bounce_buf_size);
1350 +- meson_mmc_copy_buffer(host, data, xfer_bytes, true);
1351 ++ if (host->dram_access_quirk)
1352 ++ meson_mmc_copy_buffer(host, data, xfer_bytes, true);
1353 ++ else
1354 ++ sg_copy_to_buffer(data->sg, data->sg_len,
1355 ++ host->bounce_buf, xfer_bytes);
1356 + dma_wmb();
1357 + }
1358 +
1359 +@@ -838,12 +848,43 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
1360 + writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
1361 + }
1362 +
1363 ++static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data *data)
1364 ++{
1365 ++ struct scatterlist *sg;
1366 ++ int i;
1367 ++
1368 ++ /* Reject request if any element offset or size is not 32bit aligned */
1369 ++ for_each_sg(data->sg, sg, data->sg_len, i) {
1370 ++ if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
1371 ++ !IS_ALIGNED(sg->length, sizeof(u32))) {
1372 ++ dev_err(mmc_dev(mmc), "unaligned sg offset %u len %u\n",
1373 ++ data->sg->offset, data->sg->length);
1374 ++ return -EINVAL;
1375 ++ }
1376 ++ }
1377 ++
1378 ++ return 0;
1379 ++}
1380 ++
1381 + static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
1382 + {
1383 + struct meson_host *host = mmc_priv(mmc);
1384 + bool needs_pre_post_req = mrq->data &&
1385 + !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
1386 +
1387 ++ /*
1388 ++ * The memory at the end of the controller used as bounce buffer for
1389 ++ * the dram_access_quirk only accepts 32bit read/write access,
1390 ++ * check the aligment and length of the data before starting the request.
1391 ++ */
1392 ++ if (host->dram_access_quirk && mrq->data) {
1393 ++ mrq->cmd->error = meson_mmc_validate_dram_access(mmc, mrq->data);
1394 ++ if (mrq->cmd->error) {
1395 ++ mmc_request_done(mmc, mrq);
1396 ++ return;
1397 ++ }
1398 ++ }
1399 ++
1400 + if (needs_pre_post_req) {
1401 + meson_mmc_get_transfer_mode(mmc, mrq);
1402 + if (!meson_mmc_desc_chain_mode(mrq->data))
1403 +@@ -988,7 +1029,11 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
1404 + if (meson_mmc_bounce_buf_read(data)) {
1405 + xfer_bytes = data->blksz * data->blocks;
1406 + WARN_ON(xfer_bytes > host->bounce_buf_size);
1407 +- meson_mmc_copy_buffer(host, data, xfer_bytes, false);
1408 ++ if (host->dram_access_quirk)
1409 ++ meson_mmc_copy_buffer(host, data, xfer_bytes, false);
1410 ++ else
1411 ++ sg_copy_from_buffer(data->sg, data->sg_len,
1412 ++ host->bounce_buf, xfer_bytes);
1413 + }
1414 +
1415 + next_cmd = meson_mmc_get_next_command(cmd);
1416 +diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
1417 +index 5564d7b23e7cd..d1a1c548c515f 100644
1418 +--- a/drivers/mmc/host/sdhci-of-at91.c
1419 ++++ b/drivers/mmc/host/sdhci-of-at91.c
1420 +@@ -11,6 +11,7 @@
1421 + #include <linux/delay.h>
1422 + #include <linux/err.h>
1423 + #include <linux/io.h>
1424 ++#include <linux/iopoll.h>
1425 + #include <linux/kernel.h>
1426 + #include <linux/mmc/host.h>
1427 + #include <linux/mmc/slot-gpio.h>
1428 +@@ -61,7 +62,6 @@ static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
1429 + static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
1430 + {
1431 + u16 clk;
1432 +- unsigned long timeout;
1433 +
1434 + host->mmc->actual_clock = 0;
1435 +
1436 +@@ -86,16 +86,11 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
1437 + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1438 +
1439 + /* Wait max 20 ms */
1440 +- timeout = 20;
1441 +- while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1442 +- & SDHCI_CLOCK_INT_STABLE)) {
1443 +- if (timeout == 0) {
1444 +- pr_err("%s: Internal clock never stabilised.\n",
1445 +- mmc_hostname(host->mmc));
1446 +- return;
1447 +- }
1448 +- timeout--;
1449 +- mdelay(1);
1450 ++ if (read_poll_timeout(sdhci_readw, clk, (clk & SDHCI_CLOCK_INT_STABLE),
1451 ++ 1000, 20000, false, host, SDHCI_CLOCK_CONTROL)) {
1452 ++ pr_err("%s: Internal clock never stabilised.\n",
1453 ++ mmc_hostname(host->mmc));
1454 ++ return;
1455 + }
1456 +
1457 + clk |= SDHCI_CLOCK_CARD_EN;
1458 +@@ -114,6 +109,7 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
1459 + {
1460 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1461 + struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
1462 ++ unsigned int tmp;
1463 +
1464 + sdhci_reset(host, mask);
1465 +
1466 +@@ -126,6 +122,10 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
1467 +
1468 + sdhci_writel(host, calcr | SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
1469 + SDMMC_CALCR);
1470 ++
1471 ++ if (read_poll_timeout(sdhci_readl, tmp, !(tmp & SDMMC_CALCR_EN),
1472 ++ 10, 20000, false, host, SDMMC_CALCR))
1473 ++ dev_err(mmc_dev(host->mmc), "Failed to calibrate\n");
1474 + }
1475 + }
1476 +
1477 +diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
1478 +index f5c80229ea966..cfb174624d4ee 100644
1479 +--- a/drivers/net/ethernet/google/gve/gve.h
1480 ++++ b/drivers/net/ethernet/google/gve/gve.h
1481 +@@ -472,7 +472,7 @@ struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
1482 + gve_num_tx_qpls(priv));
1483 +
1484 + /* we are out of rx qpls */
1485 +- if (id == priv->qpl_cfg.qpl_map_size)
1486 ++ if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
1487 + return NULL;
1488 +
1489 + set_bit(id, priv->qpl_cfg.qpl_id_map);
1490 +diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
1491 +index 0b714b606ba19..fd52218f48846 100644
1492 +--- a/drivers/net/ethernet/google/gve/gve_main.c
1493 ++++ b/drivers/net/ethernet/google/gve/gve_main.c
1494 +@@ -30,6 +30,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
1495 + {
1496 + struct gve_priv *priv = netdev_priv(dev);
1497 + unsigned int start;
1498 ++ u64 packets, bytes;
1499 + int ring;
1500 +
1501 + if (priv->rx) {
1502 +@@ -37,10 +38,12 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
1503 + do {
1504 + start =
1505 + u64_stats_fetch_begin(&priv->rx[ring].statss);
1506 +- s->rx_packets += priv->rx[ring].rpackets;
1507 +- s->rx_bytes += priv->rx[ring].rbytes;
1508 ++ packets = priv->rx[ring].rpackets;
1509 ++ bytes = priv->rx[ring].rbytes;
1510 + } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
1511 + start));
1512 ++ s->rx_packets += packets;
1513 ++ s->rx_bytes += bytes;
1514 + }
1515 + }
1516 + if (priv->tx) {
1517 +@@ -48,10 +51,12 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
1518 + do {
1519 + start =
1520 + u64_stats_fetch_begin(&priv->tx[ring].statss);
1521 +- s->tx_packets += priv->tx[ring].pkt_done;
1522 +- s->tx_bytes += priv->tx[ring].bytes_done;
1523 ++ packets = priv->tx[ring].pkt_done;
1524 ++ bytes = priv->tx[ring].bytes_done;
1525 + } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
1526 + start));
1527 ++ s->tx_packets += packets;
1528 ++ s->tx_bytes += bytes;
1529 + }
1530 + }
1531 + }
1532 +@@ -71,6 +76,9 @@ static int gve_alloc_counter_array(struct gve_priv *priv)
1533 +
1534 + static void gve_free_counter_array(struct gve_priv *priv)
1535 + {
1536 ++ if (!priv->counter_array)
1537 ++ return;
1538 ++
1539 + dma_free_coherent(&priv->pdev->dev,
1540 + priv->num_event_counters *
1541 + sizeof(*priv->counter_array),
1542 +@@ -131,6 +139,9 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
1543 +
1544 + static void gve_free_stats_report(struct gve_priv *priv)
1545 + {
1546 ++ if (!priv->stats_report)
1547 ++ return;
1548 ++
1549 + del_timer_sync(&priv->stats_report_timer);
1550 + dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
1551 + priv->stats_report, priv->stats_report_bus);
1552 +@@ -301,18 +312,19 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
1553 + {
1554 + int i;
1555 +
1556 +- if (priv->msix_vectors) {
1557 +- /* Free the irqs */
1558 +- for (i = 0; i < priv->num_ntfy_blks; i++) {
1559 +- struct gve_notify_block *block = &priv->ntfy_blocks[i];
1560 +- int msix_idx = i;
1561 ++ if (!priv->msix_vectors)
1562 ++ return;
1563 +
1564 +- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
1565 +- NULL);
1566 +- free_irq(priv->msix_vectors[msix_idx].vector, block);
1567 +- }
1568 +- free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
1569 ++ /* Free the irqs */
1570 ++ for (i = 0; i < priv->num_ntfy_blks; i++) {
1571 ++ struct gve_notify_block *block = &priv->ntfy_blocks[i];
1572 ++ int msix_idx = i;
1573 ++
1574 ++ irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
1575 ++ NULL);
1576 ++ free_irq(priv->msix_vectors[msix_idx].vector, block);
1577 + }
1578 ++ free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
1579 + dma_free_coherent(&priv->pdev->dev,
1580 + priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
1581 + priv->ntfy_blocks, priv->ntfy_block_bus);
1582 +@@ -975,9 +987,10 @@ static void gve_handle_reset(struct gve_priv *priv)
1583 +
1584 + void gve_handle_report_stats(struct gve_priv *priv)
1585 + {
1586 +- int idx, stats_idx = 0, tx_bytes;
1587 +- unsigned int start = 0;
1588 + struct stats *stats = priv->stats_report->stats;
1589 ++ int idx, stats_idx = 0;
1590 ++ unsigned int start = 0;
1591 ++ u64 tx_bytes;
1592 +
1593 + if (!gve_get_report_stats(priv))
1594 + return;
1595 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
1596 +index bc648ce0743c7..52c2d6fdeb7a0 100644
1597 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
1598 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
1599 +@@ -4839,7 +4839,8 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
1600 + {
1601 + int i;
1602 +
1603 +- i40e_free_misc_vector(pf);
1604 ++ if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
1605 ++ i40e_free_misc_vector(pf);
1606 +
1607 + i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
1608 + I40E_IWARP_IRQ_PILE_ID);
1609 +@@ -9662,7 +9663,7 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
1610 + if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
1611 + /* retry with a larger buffer */
1612 + buf_len = data_size;
1613 +- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
1614 ++ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
1615 + dev_info(&pf->pdev->dev,
1616 + "capability discovery failed, err %s aq_err %s\n",
1617 + i40e_stat_str(&pf->hw, err),
1618 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1619 +index f327b78261ec4..117a593414537 100644
1620 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1621 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1622 +@@ -999,14 +999,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
1623 + goto csum_unnecessary;
1624 +
1625 + if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
1626 +- u8 ipproto = get_ip_proto(skb, network_depth, proto);
1627 +-
1628 +- if (unlikely(ipproto == IPPROTO_SCTP))
1629 ++ if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
1630 + goto csum_unnecessary;
1631 +
1632 +- if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1633 +- goto csum_none;
1634 +-
1635 + stats->csum_complete++;
1636 + skb->ip_summed = CHECKSUM_COMPLETE;
1637 + skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1638 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
1639 +index 3e19b1721303f..b00c7d47833f3 100644
1640 +--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
1641 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
1642 +@@ -79,12 +79,16 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
1643 + int dest_num = 0;
1644 + int err = 0;
1645 +
1646 +- if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
1647 ++ if (vport->egress.legacy.drop_counter) {
1648 ++ drop_counter = vport->egress.legacy.drop_counter;
1649 ++ } else if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
1650 + drop_counter = mlx5_fc_create(esw->dev, false);
1651 +- if (IS_ERR(drop_counter))
1652 ++ if (IS_ERR(drop_counter)) {
1653 + esw_warn(esw->dev,
1654 + "vport[%d] configure egress drop rule counter err(%ld)\n",
1655 + vport->vport, PTR_ERR(drop_counter));
1656 ++ drop_counter = NULL;
1657 ++ }
1658 + vport->egress.legacy.drop_counter = drop_counter;
1659 + }
1660 +
1661 +@@ -123,7 +127,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
1662 + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
1663 +
1664 + /* Attach egress drop flow counter */
1665 +- if (!IS_ERR_OR_NULL(drop_counter)) {
1666 ++ if (drop_counter) {
1667 + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1668 + drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1669 + drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
1670 +@@ -162,7 +166,7 @@ void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw,
1671 + esw_acl_egress_table_destroy(vport);
1672 +
1673 + clean_drop_counter:
1674 +- if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) {
1675 ++ if (vport->egress.legacy.drop_counter) {
1676 + mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
1677 + vport->egress.legacy.drop_counter = NULL;
1678 + }
1679 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
1680 +index d64fad2823e73..45570d0a58d2f 100644
1681 +--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
1682 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
1683 +@@ -160,7 +160,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
1684 +
1685 + esw_acl_ingress_lgcy_rules_destroy(vport);
1686 +
1687 +- if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
1688 ++ if (vport->ingress.legacy.drop_counter) {
1689 ++ counter = vport->ingress.legacy.drop_counter;
1690 ++ } else if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
1691 + counter = mlx5_fc_create(esw->dev, false);
1692 + if (IS_ERR(counter)) {
1693 + esw_warn(esw->dev,
1694 +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
1695 +index b848439fa837c..2645ca35103c9 100644
1696 +--- a/drivers/net/phy/mdio_bus.c
1697 ++++ b/drivers/net/phy/mdio_bus.c
1698 +@@ -534,6 +534,13 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
1699 + bus->dev.groups = NULL;
1700 + dev_set_name(&bus->dev, "%s", bus->id);
1701 +
1702 ++ /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
1703 ++ * the device in mdiobus_free()
1704 ++ *
1705 ++ * State will be updated later in this function in case of success
1706 ++ */
1707 ++ bus->state = MDIOBUS_UNREGISTERED;
1708 ++
1709 + err = device_register(&bus->dev);
1710 + if (err) {
1711 + pr_err("mii_bus %s failed to register\n", bus->id);
1712 +diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
1713 +index 2fff62695455d..32c34c728c7a1 100644
1714 +--- a/drivers/net/phy/sfp.c
1715 ++++ b/drivers/net/phy/sfp.c
1716 +@@ -133,7 +133,7 @@ static const char * const sm_state_strings[] = {
1717 + [SFP_S_LINK_UP] = "link_up",
1718 + [SFP_S_TX_FAULT] = "tx_fault",
1719 + [SFP_S_REINIT] = "reinit",
1720 +- [SFP_S_TX_DISABLE] = "rx_disable",
1721 ++ [SFP_S_TX_DISABLE] = "tx_disable",
1722 + };
1723 +
1724 + static const char *sm_state_to_str(unsigned short sm_state)
1725 +diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
1726 +index f35cd8de228e4..6914b37bb0fbc 100644
1727 +--- a/drivers/net/wireless/ath/ath5k/Kconfig
1728 ++++ b/drivers/net/wireless/ath/ath5k/Kconfig
1729 +@@ -3,9 +3,7 @@ config ATH5K
1730 + tristate "Atheros 5xxx wireless cards support"
1731 + depends on (PCI || ATH25) && MAC80211
1732 + select ATH_COMMON
1733 +- select MAC80211_LEDS
1734 +- select LEDS_CLASS
1735 +- select NEW_LEDS
1736 ++ select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
1737 + select ATH5K_AHB if ATH25
1738 + select ATH5K_PCI if !ATH25
1739 + help
1740 +diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
1741 +index 6a2a168567630..33e9928af3635 100644
1742 +--- a/drivers/net/wireless/ath/ath5k/led.c
1743 ++++ b/drivers/net/wireless/ath/ath5k/led.c
1744 +@@ -89,7 +89,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
1745 +
1746 + void ath5k_led_enable(struct ath5k_hw *ah)
1747 + {
1748 +- if (test_bit(ATH_STAT_LEDSOFT, ah->status)) {
1749 ++ if (IS_ENABLED(CONFIG_MAC80211_LEDS) &&
1750 ++ test_bit(ATH_STAT_LEDSOFT, ah->status)) {
1751 + ath5k_hw_set_gpio_output(ah, ah->led_pin);
1752 + ath5k_led_off(ah);
1753 + }
1754 +@@ -104,7 +105,8 @@ static void ath5k_led_on(struct ath5k_hw *ah)
1755 +
1756 + void ath5k_led_off(struct ath5k_hw *ah)
1757 + {
1758 +- if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
1759 ++ if (!IS_ENABLED(CONFIG_MAC80211_LEDS) ||
1760 ++ !test_bit(ATH_STAT_LEDSOFT, ah->status))
1761 + return;
1762 + ath5k_hw_set_gpio(ah, ah->led_pin, !ah->led_on);
1763 + }
1764 +@@ -146,7 +148,7 @@ ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
1765 + static void
1766 + ath5k_unregister_led(struct ath5k_led *led)
1767 + {
1768 +- if (!led->ah)
1769 ++ if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || !led->ah)
1770 + return;
1771 + led_classdev_unregister(&led->led_dev);
1772 + ath5k_led_off(led->ah);
1773 +@@ -169,7 +171,7 @@ int ath5k_init_leds(struct ath5k_hw *ah)
1774 + char name[ATH5K_LED_MAX_NAME_LEN + 1];
1775 + const struct pci_device_id *match;
1776 +
1777 +- if (!ah->pdev)
1778 ++ if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || !ah->pdev)
1779 + return 0;
1780 +
1781 + #ifdef CONFIG_ATH5K_AHB
1782 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
1783 +index 90b12e201795c..4e43efd5d1ea1 100644
1784 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
1785 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
1786 +@@ -635,6 +635,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
1787 + IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
1788 + IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
1789 + IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
1790 ++ IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name),
1791 ++ IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name),
1792 + IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
1793 + IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
1794 + IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
1795 +diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
1796 +index 44e15f0e3a2ed..ad3e3cde1c20d 100644
1797 +--- a/drivers/pci/controller/pci-hyperv.c
1798 ++++ b/drivers/pci/controller/pci-hyperv.c
1799 +@@ -3259,9 +3259,17 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
1800 + return 0;
1801 +
1802 + if (!keep_devs) {
1803 +- /* Delete any children which might still exist. */
1804 ++ struct list_head removed;
1805 ++
1806 ++ /* Move all present children to the list on stack */
1807 ++ INIT_LIST_HEAD(&removed);
1808 + spin_lock_irqsave(&hbus->device_list_lock, flags);
1809 +- list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry) {
1810 ++ list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
1811 ++ list_move_tail(&hpdev->list_entry, &removed);
1812 ++ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1813 ++
1814 ++ /* Remove all children in the list */
1815 ++ list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
1816 + list_del(&hpdev->list_entry);
1817 + if (hpdev->pci_slot)
1818 + pci_destroy_slot(hpdev->pci_slot);
1819 +@@ -3269,7 +3277,6 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
1820 + put_pcichild(hpdev);
1821 + put_pcichild(hpdev);
1822 + }
1823 +- spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1824 + }
1825 +
1826 + ret = hv_send_resources_released(hdev);
1827 +diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
1828 +index ce10ecd41ba0f..9492ed09518ff 100644
1829 +--- a/drivers/ptp/ptp_pch.c
1830 ++++ b/drivers/ptp/ptp_pch.c
1831 +@@ -651,6 +651,7 @@ static const struct pci_device_id pch_ieee1588_pcidev_id[] = {
1832 + },
1833 + {0}
1834 + };
1835 ++MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id);
1836 +
1837 + static SIMPLE_DEV_PM_OPS(pch_pm_ops, pch_suspend, pch_resume);
1838 +
1839 +diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
1840 +index eba7f76f9d61a..6034cd8992b0e 100644
1841 +--- a/drivers/soc/qcom/mdt_loader.c
1842 ++++ b/drivers/soc/qcom/mdt_loader.c
1843 +@@ -98,7 +98,7 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len)
1844 + if (ehdr->e_phnum < 2)
1845 + return ERR_PTR(-EINVAL);
1846 +
1847 +- if (phdrs[0].p_type == PT_LOAD || phdrs[1].p_type == PT_LOAD)
1848 ++ if (phdrs[0].p_type == PT_LOAD)
1849 + return ERR_PTR(-EINVAL);
1850 +
1851 + if ((phdrs[1].p_flags & QCOM_MDT_TYPE_MASK) != QCOM_MDT_TYPE_HASH)
1852 +diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
1853 +index e0620416e5743..60c82dcaa8d1d 100644
1854 +--- a/drivers/soc/qcom/socinfo.c
1855 ++++ b/drivers/soc/qcom/socinfo.c
1856 +@@ -521,7 +521,7 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
1857 + /* Feed the soc specific unique data into entropy pool */
1858 + add_device_randomness(info, item_size);
1859 +
1860 +- platform_set_drvdata(pdev, qs->soc_dev);
1861 ++ platform_set_drvdata(pdev, qs);
1862 +
1863 + return 0;
1864 + }
1865 +diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
1866 +index fb067b5e4a977..4a782bfd753c3 100644
1867 +--- a/drivers/soc/ti/omap_prm.c
1868 ++++ b/drivers/soc/ti/omap_prm.c
1869 +@@ -509,25 +509,28 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
1870 + writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);
1871 + spin_unlock_irqrestore(&reset->lock, flags);
1872 +
1873 +- if (!has_rstst)
1874 +- goto exit;
1875 ++ /* wait for the reset bit to clear */
1876 ++ ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
1877 ++ reset->prm->data->rstctrl,
1878 ++ v, !(v & BIT(id)), 1,
1879 ++ OMAP_RESET_MAX_WAIT);
1880 ++ if (ret)
1881 ++ pr_err("%s: timedout waiting for %s:%lu\n", __func__,
1882 ++ reset->prm->data->name, id);
1883 +
1884 + /* wait for the status to be set */
1885 +- ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
1886 ++ if (has_rstst) {
1887 ++ ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
1888 + reset->prm->data->rstst,
1889 + v, v & BIT(st_bit), 1,
1890 + OMAP_RESET_MAX_WAIT);
1891 +- if (ret)
1892 +- pr_err("%s: timedout waiting for %s:%lu\n", __func__,
1893 +- reset->prm->data->name, id);
1894 ++ if (ret)
1895 ++ pr_err("%s: timedout waiting for %s:%lu\n", __func__,
1896 ++ reset->prm->data->name, id);
1897 ++ }
1898 +
1899 +-exit:
1900 +- if (reset->clkdm) {
1901 +- /* At least dra7 iva needs a delay before clkdm idle */
1902 +- if (has_rstst)
1903 +- udelay(1);
1904 ++ if (reset->clkdm)
1905 + pdata->clkdm_allow_idle(reset->clkdm);
1906 +- }
1907 +
1908 + return ret;
1909 + }
1910 +diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
1911 +index ee565bdb44d65..b4c6527fe5f66 100644
1912 +--- a/drivers/usb/chipidea/ci_hdrc_imx.c
1913 ++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
1914 +@@ -425,11 +425,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
1915 + data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
1916 + if (IS_ERR(data->phy)) {
1917 + ret = PTR_ERR(data->phy);
1918 +- /* Return -EINVAL if no usbphy is available */
1919 +- if (ret == -ENODEV)
1920 +- data->phy = NULL;
1921 +- else
1922 +- goto err_clk;
1923 ++ if (ret == -ENODEV) {
1924 ++ data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
1925 ++ if (IS_ERR(data->phy)) {
1926 ++ ret = PTR_ERR(data->phy);
1927 ++ if (ret == -ENODEV)
1928 ++ data->phy = NULL;
1929 ++ else
1930 ++ goto err_clk;
1931 ++ }
1932 ++ }
1933 + }
1934 +
1935 + pdata.usb_phy = data->phy;
1936 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1937 +index 7748b1335558e..7950d5b3af429 100644
1938 +--- a/drivers/usb/class/cdc-acm.c
1939 ++++ b/drivers/usb/class/cdc-acm.c
1940 +@@ -340,6 +340,9 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
1941 + acm->iocount.overrun++;
1942 + spin_unlock_irqrestore(&acm->read_lock, flags);
1943 +
1944 ++ if (newctrl & ACM_CTRL_BRK)
1945 ++ tty_flip_buffer_push(&acm->port);
1946 ++
1947 + if (difference)
1948 + wake_up_all(&acm->wioctl);
1949 +
1950 +@@ -475,11 +478,16 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
1951 +
1952 + static void acm_process_read_urb(struct acm *acm, struct urb *urb)
1953 + {
1954 ++ unsigned long flags;
1955 ++
1956 + if (!urb->actual_length)
1957 + return;
1958 +
1959 ++ spin_lock_irqsave(&acm->read_lock, flags);
1960 + tty_insert_flip_string(&acm->port, urb->transfer_buffer,
1961 + urb->actual_length);
1962 ++ spin_unlock_irqrestore(&acm->read_lock, flags);
1963 ++
1964 + tty_flip_buffer_push(&acm->port);
1965 + }
1966 +
1967 +diff --git a/drivers/usb/common/Kconfig b/drivers/usb/common/Kconfig
1968 +index 5e8a04e3dd3c8..b856622431a73 100644
1969 +--- a/drivers/usb/common/Kconfig
1970 ++++ b/drivers/usb/common/Kconfig
1971 +@@ -6,8 +6,7 @@ config USB_COMMON
1972 +
1973 + config USB_LED_TRIG
1974 + bool "USB LED Triggers"
1975 +- depends on LEDS_CLASS && LEDS_TRIGGERS
1976 +- select USB_COMMON
1977 ++ depends on LEDS_CLASS && USB_COMMON && LEDS_TRIGGERS
1978 + help
1979 + This option adds LED triggers for USB host and/or gadget activity.
1980 +
1981 +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
1982 +index 0b08dd3b19eb0..291d020427924 100644
1983 +--- a/drivers/usb/typec/tcpm/tcpm.c
1984 ++++ b/drivers/usb/typec/tcpm/tcpm.c
1985 +@@ -3922,6 +3922,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
1986 + tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
1987 + break;
1988 + case SRC_ATTACHED:
1989 ++ case SRC_STARTUP:
1990 + case SRC_SEND_CAPABILITIES:
1991 + case SRC_READY:
1992 + if (tcpm_port_is_disconnected(port) ||
1993 +diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
1994 +index 31270a8986e8e..8f8ca1f88fe21 100644
1995 +--- a/drivers/video/fbdev/gbefb.c
1996 ++++ b/drivers/video/fbdev/gbefb.c
1997 +@@ -1269,7 +1269,7 @@ static struct platform_device *gbefb_device;
1998 + static int __init gbefb_init(void)
1999 + {
2000 + int ret = platform_driver_register(&gbefb_driver);
2001 +- if (!ret) {
2002 ++ if (IS_ENABLED(CONFIG_SGI_IP32) && !ret) {
2003 + gbefb_device = platform_device_alloc("gbefb", 0);
2004 + if (gbefb_device) {
2005 + ret = platform_device_add(gbefb_device);
2006 +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
2007 +index 15d4b1ef19f83..1911a62a6d9c1 100644
2008 +--- a/drivers/xen/balloon.c
2009 ++++ b/drivers/xen/balloon.c
2010 +@@ -491,12 +491,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
2011 + }
2012 +
2013 + /*
2014 +- * Stop waiting if either state is not BP_EAGAIN and ballooning action is
2015 +- * needed, or if the credit has changed while state is BP_EAGAIN.
2016 ++ * Stop waiting if either state is BP_DONE and ballooning action is
2017 ++ * needed, or if the credit has changed while state is not BP_DONE.
2018 + */
2019 + static bool balloon_thread_cond(enum bp_state state, long credit)
2020 + {
2021 +- if (state != BP_EAGAIN)
2022 ++ if (state == BP_DONE)
2023 + credit = 0;
2024 +
2025 + return current_credit() != credit || kthread_should_stop();
2026 +@@ -516,10 +516,19 @@ static int balloon_thread(void *unused)
2027 +
2028 + set_freezable();
2029 + for (;;) {
2030 +- if (state == BP_EAGAIN)
2031 +- timeout = balloon_stats.schedule_delay * HZ;
2032 +- else
2033 ++ switch (state) {
2034 ++ case BP_DONE:
2035 ++ case BP_ECANCELED:
2036 + timeout = 3600 * HZ;
2037 ++ break;
2038 ++ case BP_EAGAIN:
2039 ++ timeout = balloon_stats.schedule_delay * HZ;
2040 ++ break;
2041 ++ case BP_WAIT:
2042 ++ timeout = HZ;
2043 ++ break;
2044 ++ }
2045 ++
2046 + credit = current_credit();
2047 +
2048 + wait_event_freezable_timeout(balloon_thread_wq,
2049 +diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
2050 +index 720a7b7abd46d..fe8df32bb612b 100644
2051 +--- a/drivers/xen/privcmd.c
2052 ++++ b/drivers/xen/privcmd.c
2053 +@@ -803,11 +803,12 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
2054 + unsigned int domid =
2055 + (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
2056 + DOMID_SELF : kdata.dom;
2057 +- int num;
2058 ++ int num, *errs = (int *)pfns;
2059 +
2060 ++ BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
2061 + num = xen_remap_domain_mfn_array(vma,
2062 + kdata.addr & PAGE_MASK,
2063 +- pfns, kdata.num, (int *)pfns,
2064 ++ pfns, kdata.num, errs,
2065 + vma->vm_page_prot,
2066 + domid,
2067 + vma->vm_private_data);
2068 +@@ -817,7 +818,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
2069 + unsigned int i;
2070 +
2071 + for (i = 0; i < num; i++) {
2072 +- rc = pfns[i];
2073 ++ rc = errs[i];
2074 + if (rc < 0)
2075 + break;
2076 + }
2077 +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
2078 +index 5f5169b9c2e90..46f825cf53f4f 100644
2079 +--- a/fs/nfsd/nfs4xdr.c
2080 ++++ b/fs/nfsd/nfs4xdr.c
2081 +@@ -3427,15 +3427,18 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
2082 + goto fail;
2083 + cd->rd_maxcount -= entry_bytes;
2084 + /*
2085 +- * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
2086 +- * let's always let through the first entry, at least:
2087 ++ * RFC 3530 14.2.24 describes rd_dircount as only a "hint", and
2088 ++ * notes that it could be zero. If it is zero, then the server
2089 ++ * should enforce only the rd_maxcount value.
2090 + */
2091 +- if (!cd->rd_dircount)
2092 +- goto fail;
2093 +- name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
2094 +- if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
2095 +- goto fail;
2096 +- cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
2097 ++ if (cd->rd_dircount) {
2098 ++ name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
2099 ++ if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
2100 ++ goto fail;
2101 ++ cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
2102 ++ if (!cd->rd_dircount)
2103 ++ cd->rd_maxcount = 0;
2104 ++ }
2105 +
2106 + cd->cookie_offset = cookie_offset;
2107 + skip_entry:
2108 +diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
2109 +index 0759e589ab52b..ddf2b375632b7 100644
2110 +--- a/fs/nfsd/nfsctl.c
2111 ++++ b/fs/nfsd/nfsctl.c
2112 +@@ -1547,7 +1547,7 @@ static int __init init_nfsd(void)
2113 + goto out_free_all;
2114 + return 0;
2115 + out_free_all:
2116 +- unregister_pernet_subsys(&nfsd_net_ops);
2117 ++ unregister_filesystem(&nfsd_fs_type);
2118 + out_free_exports:
2119 + remove_proc_entry("fs/nfs/exports", NULL);
2120 + remove_proc_entry("fs/nfs", NULL);
2121 +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
2122 +index 08b595c526d74..16955a307dcd9 100644
2123 +--- a/fs/overlayfs/dir.c
2124 ++++ b/fs/overlayfs/dir.c
2125 +@@ -1214,9 +1214,13 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
2126 + goto out_dput;
2127 + }
2128 + } else {
2129 +- if (!d_is_negative(newdentry) &&
2130 +- (!new_opaque || !ovl_is_whiteout(newdentry)))
2131 +- goto out_dput;
2132 ++ if (!d_is_negative(newdentry)) {
2133 ++ if (!new_opaque || !ovl_is_whiteout(newdentry))
2134 ++ goto out_dput;
2135 ++ } else {
2136 ++ if (flags & RENAME_EXCHANGE)
2137 ++ goto out_dput;
2138 ++ }
2139 + }
2140 +
2141 + if (olddentry == trap)
2142 +diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
2143 +index 5c5c3972ebd0a..f7135777cb4eb 100644
2144 +--- a/fs/overlayfs/file.c
2145 ++++ b/fs/overlayfs/file.c
2146 +@@ -301,6 +301,12 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2147 + if (ret)
2148 + return ret;
2149 +
2150 ++ ret = -EINVAL;
2151 ++ if (iocb->ki_flags & IOCB_DIRECT &&
2152 ++ (!real.file->f_mapping->a_ops ||
2153 ++ !real.file->f_mapping->a_ops->direct_IO))
2154 ++ goto out_fdput;
2155 ++
2156 + old_cred = ovl_override_creds(file_inode(file)->i_sb);
2157 + if (is_sync_kiocb(iocb)) {
2158 + ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
2159 +@@ -325,7 +331,7 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2160 + out:
2161 + revert_creds(old_cred);
2162 + ovl_file_accessed(file);
2163 +-
2164 ++out_fdput:
2165 + fdput(real);
2166 +
2167 + return ret;
2168 +@@ -354,6 +360,12 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
2169 + if (ret)
2170 + goto out_unlock;
2171 +
2172 ++ ret = -EINVAL;
2173 ++ if (iocb->ki_flags & IOCB_DIRECT &&
2174 ++ (!real.file->f_mapping->a_ops ||
2175 ++ !real.file->f_mapping->a_ops->direct_IO))
2176 ++ goto out_fdput;
2177 ++
2178 + if (!ovl_should_sync(OVL_FS(inode->i_sb)))
2179 + ifl &= ~(IOCB_DSYNC | IOCB_SYNC);
2180 +
2181 +@@ -389,6 +401,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
2182 + }
2183 + out:
2184 + revert_creds(old_cred);
2185 ++out_fdput:
2186 + fdput(real);
2187 +
2188 + out_unlock:
2189 +diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
2190 +index ebf60848d5eb7..4477873ac3a0b 100644
2191 +--- a/kernel/bpf/stackmap.c
2192 ++++ b/kernel/bpf/stackmap.c
2193 +@@ -64,7 +64,8 @@ static inline int stack_map_data_size(struct bpf_map *map)
2194 +
2195 + static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
2196 + {
2197 +- u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
2198 ++ u64 elem_size = sizeof(struct stack_map_bucket) +
2199 ++ (u64)smap->map.value_size;
2200 + int err;
2201 +
2202 + smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
2203 +diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
2204 +index 73f71c22f4c03..31b00ba5dcc84 100644
2205 +--- a/net/bridge/br_netlink.c
2206 ++++ b/net/bridge/br_netlink.c
2207 +@@ -1590,7 +1590,8 @@ static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
2208 + }
2209 +
2210 + return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
2211 +- nla_total_size(sizeof(struct br_mcast_stats)) +
2212 ++ nla_total_size_64bit(sizeof(struct br_mcast_stats)) +
2213 ++ (p ? nla_total_size_64bit(sizeof(p->stp_xstats)) : 0) +
2214 + nla_total_size(0);
2215 + }
2216 +
2217 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2218 +index 7266571d5c7e2..27ffa83ffeb3c 100644
2219 +--- a/net/core/rtnetlink.c
2220 ++++ b/net/core/rtnetlink.c
2221 +@@ -5257,7 +5257,7 @@ nla_put_failure:
2222 + static size_t if_nlmsg_stats_size(const struct net_device *dev,
2223 + u32 filter_mask)
2224 + {
2225 +- size_t size = 0;
2226 ++ size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
2227 +
2228 + if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
2229 + size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
2230 +diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
2231 +index 45fb450b45227..f3fd5c911ed09 100644
2232 +--- a/net/ipv4/inet_hashtables.c
2233 ++++ b/net/ipv4/inet_hashtables.c
2234 +@@ -242,8 +242,10 @@ static inline int compute_score(struct sock *sk, struct net *net,
2235 +
2236 + if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
2237 + return -1;
2238 ++ score = sk->sk_bound_dev_if ? 2 : 1;
2239 +
2240 +- score = sk->sk_family == PF_INET ? 2 : 1;
2241 ++ if (sk->sk_family == PF_INET)
2242 ++ score++;
2243 + if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
2244 + score++;
2245 + }
2246 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2247 +index bd7fd9b1f24c8..655f0d8a13d36 100644
2248 +--- a/net/ipv4/udp.c
2249 ++++ b/net/ipv4/udp.c
2250 +@@ -390,7 +390,8 @@ static int compute_score(struct sock *sk, struct net *net,
2251 + dif, sdif);
2252 + if (!dev_match)
2253 + return -1;
2254 +- score += 4;
2255 ++ if (sk->sk_bound_dev_if)
2256 ++ score += 4;
2257 +
2258 + if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
2259 + score++;
2260 +diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
2261 +index 55c290d556059..67c9114835c84 100644
2262 +--- a/net/ipv6/inet6_hashtables.c
2263 ++++ b/net/ipv6/inet6_hashtables.c
2264 +@@ -106,7 +106,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
2265 + if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
2266 + return -1;
2267 +
2268 +- score = 1;
2269 ++ score = sk->sk_bound_dev_if ? 2 : 1;
2270 + if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
2271 + score++;
2272 + }
2273 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
2274 +index 1943ae5103eb6..bae6b51a9bd46 100644
2275 +--- a/net/ipv6/udp.c
2276 ++++ b/net/ipv6/udp.c
2277 +@@ -133,7 +133,8 @@ static int compute_score(struct sock *sk, struct net *net,
2278 + dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
2279 + if (!dev_match)
2280 + return -1;
2281 +- score++;
2282 ++ if (sk->sk_bound_dev_if)
2283 ++ score++;
2284 +
2285 + if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
2286 + score++;
2287 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2288 +index 8434da3c0487a..0886267ea81ef 100644
2289 +--- a/net/netlink/af_netlink.c
2290 ++++ b/net/netlink/af_netlink.c
2291 +@@ -586,7 +586,10 @@ static int netlink_insert(struct sock *sk, u32 portid)
2292 +
2293 + /* We need to ensure that the socket is hashed and visible. */
2294 + smp_wmb();
2295 +- nlk_sk(sk)->bound = portid;
2296 ++ /* Paired with lockless reads from netlink_bind(),
2297 ++ * netlink_connect() and netlink_sendmsg().
2298 ++ */
2299 ++ WRITE_ONCE(nlk_sk(sk)->bound, portid);
2300 +
2301 + err:
2302 + release_sock(sk);
2303 +@@ -1004,7 +1007,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
2304 + if (nlk->ngroups < BITS_PER_LONG)
2305 + groups &= (1UL << nlk->ngroups) - 1;
2306 +
2307 +- bound = nlk->bound;
2308 ++ /* Paired with WRITE_ONCE() in netlink_insert() */
2309 ++ bound = READ_ONCE(nlk->bound);
2310 + if (bound) {
2311 + /* Ensure nlk->portid is up-to-date. */
2312 + smp_rmb();
2313 +@@ -1090,8 +1094,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
2314 +
2315 + /* No need for barriers here as we return to user-space without
2316 + * using any of the bound attributes.
2317 ++ * Paired with WRITE_ONCE() in netlink_insert().
2318 + */
2319 +- if (!nlk->bound)
2320 ++ if (!READ_ONCE(nlk->bound))
2321 + err = netlink_autobind(sock);
2322 +
2323 + if (err == 0) {
2324 +@@ -1880,7 +1885,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2325 + dst_group = nlk->dst_group;
2326 + }
2327 +
2328 +- if (!nlk->bound) {
2329 ++ /* Paired with WRITE_ONCE() in netlink_insert() */
2330 ++ if (!READ_ONCE(nlk->bound)) {
2331 + err = netlink_autobind(sock);
2332 + if (err)
2333 + goto out;
2334 +diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
2335 +index a579a4131d22d..e1040421b7979 100644
2336 +--- a/net/sched/sch_fifo.c
2337 ++++ b/net/sched/sch_fifo.c
2338 +@@ -233,6 +233,9 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
2339 + if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
2340 + return 0;
2341 +
2342 ++ if (!q->ops->change)
2343 ++ return 0;
2344 ++
2345 + nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
2346 + if (nla) {
2347 + nla->nla_type = RTM_NEWQDISC;
2348 +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
2349 +index cb5e5220da552..93899559ba6d2 100644
2350 +--- a/net/sched/sch_taprio.c
2351 ++++ b/net/sched/sch_taprio.c
2352 +@@ -1630,6 +1630,10 @@ static void taprio_destroy(struct Qdisc *sch)
2353 + list_del(&q->taprio_list);
2354 + spin_unlock(&taprio_list_lock);
2355 +
2356 ++ /* Note that taprio_reset() might not be called if an error
2357 ++ * happens in qdisc_create(), after taprio_init() has been called.
2358 ++ */
2359 ++ hrtimer_cancel(&q->advance_timer);
2360 +
2361 + taprio_disable_offload(dev, q, NULL);
2362 +
2363 +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
2364 +index e22f2d65457da..f5111d62972d3 100644
2365 +--- a/net/sunrpc/auth_gss/svcauth_gss.c
2366 ++++ b/net/sunrpc/auth_gss/svcauth_gss.c
2367 +@@ -643,7 +643,7 @@ static bool gss_check_seq_num(const struct svc_rqst *rqstp, struct rsc *rsci,
2368 + }
2369 + __set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
2370 + goto ok;
2371 +- } else if (seq_num <= sd->sd_max - GSS_SEQ_WIN) {
2372 ++ } else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
2373 + goto toolow;
2374 + }
2375 + if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
2376 +diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
2377 +index dcfdf6a322dc4..c679a79aef513 100644
2378 +--- a/tools/perf/pmu-events/jevents.c
2379 ++++ b/tools/perf/pmu-events/jevents.c
2380 +@@ -1100,12 +1100,13 @@ static int process_one_file(const char *fpath, const struct stat *sb,
2381 + */
2382 + int main(int argc, char *argv[])
2383 + {
2384 +- int rc, ret = 0;
2385 ++ int rc, ret = 0, empty_map = 0;
2386 + int maxfds;
2387 + char ldirname[PATH_MAX];
2388 + const char *arch;
2389 + const char *output_file;
2390 + const char *start_dirname;
2391 ++ char *err_string_ext = "";
2392 + struct stat stbuf;
2393 +
2394 + prog = basename(argv[0]);
2395 +@@ -1133,7 +1134,8 @@ int main(int argc, char *argv[])
2396 + /* If architecture does not have any event lists, bail out */
2397 + if (stat(ldirname, &stbuf) < 0) {
2398 + pr_info("%s: Arch %s has no PMU event lists\n", prog, arch);
2399 +- goto empty_map;
2400 ++ empty_map = 1;
2401 ++ goto err_close_eventsfp;
2402 + }
2403 +
2404 + /* Include pmu-events.h first */
2405 +@@ -1150,75 +1152,60 @@ int main(int argc, char *argv[])
2406 + */
2407 +
2408 + maxfds = get_maxfds();
2409 +- mapfile = NULL;
2410 + rc = nftw(ldirname, preprocess_arch_std_files, maxfds, 0);
2411 +- if (rc && verbose) {
2412 +- pr_info("%s: Error preprocessing arch standard files %s\n",
2413 +- prog, ldirname);
2414 +- goto empty_map;
2415 +- } else if (rc < 0) {
2416 +- /* Make build fail */
2417 +- fclose(eventsfp);
2418 +- free_arch_std_events();
2419 +- return 1;
2420 +- } else if (rc) {
2421 +- goto empty_map;
2422 +- }
2423 ++ if (rc)
2424 ++ goto err_processing_std_arch_event_dir;
2425 +
2426 + rc = nftw(ldirname, process_one_file, maxfds, 0);
2427 +- if (rc && verbose) {
2428 +- pr_info("%s: Error walking file tree %s\n", prog, ldirname);
2429 +- goto empty_map;
2430 +- } else if (rc < 0) {
2431 +- /* Make build fail */
2432 +- fclose(eventsfp);
2433 +- free_arch_std_events();
2434 +- ret = 1;
2435 +- goto out_free_mapfile;
2436 +- } else if (rc) {
2437 +- goto empty_map;
2438 +- }
2439 ++ if (rc)
2440 ++ goto err_processing_dir;
2441 +
2442 + sprintf(ldirname, "%s/test", start_dirname);
2443 +
2444 + rc = nftw(ldirname, process_one_file, maxfds, 0);
2445 +- if (rc && verbose) {
2446 +- pr_info("%s: Error walking file tree %s rc=%d for test\n",
2447 +- prog, ldirname, rc);
2448 +- goto empty_map;
2449 +- } else if (rc < 0) {
2450 +- /* Make build fail */
2451 +- free_arch_std_events();
2452 +- ret = 1;
2453 +- goto out_free_mapfile;
2454 +- } else if (rc) {
2455 +- goto empty_map;
2456 +- }
2457 ++ if (rc)
2458 ++ goto err_processing_dir;
2459 +
2460 + if (close_table)
2461 + print_events_table_suffix(eventsfp);
2462 +
2463 + if (!mapfile) {
2464 + pr_info("%s: No CPU->JSON mapping?\n", prog);
2465 +- goto empty_map;
2466 ++ empty_map = 1;
2467 ++ goto err_close_eventsfp;
2468 + }
2469 +
2470 +- if (process_mapfile(eventsfp, mapfile)) {
2471 ++ rc = process_mapfile(eventsfp, mapfile);
2472 ++ fclose(eventsfp);
2473 ++ if (rc) {
2474 + pr_info("%s: Error processing mapfile %s\n", prog, mapfile);
2475 + /* Make build fail */
2476 +- fclose(eventsfp);
2477 +- free_arch_std_events();
2478 + ret = 1;
2479 ++ goto err_out;
2480 + }
2481 +
2482 ++ free_arch_std_events();
2483 ++ free(mapfile);
2484 ++ return 0;
2485 +
2486 +- goto out_free_mapfile;
2487 +-
2488 +-empty_map:
2489 ++err_processing_std_arch_event_dir:
2490 ++ err_string_ext = " for std arch event";
2491 ++err_processing_dir:
2492 ++ if (verbose) {
2493 ++ pr_info("%s: Error walking file tree %s%s\n", prog, ldirname,
2494 ++ err_string_ext);
2495 ++ empty_map = 1;
2496 ++ } else if (rc < 0) {
2497 ++ ret = 1;
2498 ++ } else {
2499 ++ empty_map = 1;
2500 ++ }
2501 ++err_close_eventsfp:
2502 + fclose(eventsfp);
2503 +- create_empty_mapping(output_file);
2504 ++ if (empty_map)
2505 ++ create_empty_mapping(output_file);
2506 ++err_out:
2507 + free_arch_std_events();
2508 +-out_free_mapfile:
2509 + free(mapfile);
2510 + return ret;
2511 + }