From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.5 commit in: /
Date: Thu, 12 Mar 2020 09:56:28
Message-Id: 1584006968.087942a4f21321a177630fffaf20442b4aa9c6d3.mpagano@gentoo
commit: 087942a4f21321a177630fffaf20442b4aa9c6d3
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 12 09:56:08 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar 12 09:56:08 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=087942a4

Linux patch 5.5.9

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1008_linux-5.5.9.patch | 6714 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6718 insertions(+)

diff --git a/0000_README b/0000_README
index e58ee4a..170d7c9 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-5.5.8.patch
From: http://www.kernel.org
Desc: Linux 5.5.8

+Patch: 1008_linux-5.5.9.patch
+From: http://www.kernel.org
+Desc: Linux 5.5.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-5.5.9.patch b/1008_linux-5.5.9.patch
new file mode 100644
index 0000000..21dd99d
--- /dev/null
+++ b/1008_linux-5.5.9.patch
@@ -0,0 +1,6714 @@
+diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
+index f79683a628f0..1238e68ac1b4 100644
+--- a/Documentation/devicetree/bindings/arm/fsl.yaml
++++ b/Documentation/devicetree/bindings/arm/fsl.yaml
+@@ -139,7 +139,7 @@ properties:
+ items:
+ - enum:
+ - armadeus,imx6dl-apf6 # APF6 (Solo) SoM
+- - armadeus,imx6dl-apf6dldev # APF6 (Solo) SoM on APF6Dev board
++ - armadeus,imx6dl-apf6dev # APF6 (Solo) SoM on APF6Dev board
+ - eckelmann,imx6dl-ci4x10
+ - emtrion,emcon-mx6 # emCON-MX6S or emCON-MX6DL SoM
+ - emtrion,emcon-mx6-avari # emCON-MX6S or emCON-MX6DL SoM on Avari Base
+diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+index f5cdac8b2847..8b005192f6e8 100644
+--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
++++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+@@ -161,7 +161,7 @@ The regulator node houses sub-nodes for each regulator within the device. Each
+ sub-node is identified using the node's name, with valid values listed for each
+ of the PMICs below.
+
+-pm8005:
++pm8004:
+ s2, s5
+
+ pm8005:
+diff --git a/Makefile b/Makefile
+index a1e5190e4721..8b786a8a7289 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 5
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
+index f3ced6df0c9b..9f66f96d09c9 100644
+--- a/arch/arm/boot/dts/am437x-idk-evm.dts
++++ b/arch/arm/boot/dts/am437x-idk-evm.dts
+@@ -526,11 +526,11 @@
+ * Supply voltage supervisor on board will not allow opp50 so
+ * disable it and set opp100 as suspend OPP.
+ */
+- opp50@300000000 {
++ opp50-300000000 {
+ status = "disabled";
+ };
+
+- opp100@600000000 {
++ opp100-600000000 {
+ opp-suspend;
+ };
+ };
+diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
+index cdcba3f561c4..9f6fbe4c1fee 100644
+--- a/arch/arm/boot/dts/dra76x.dtsi
++++ b/arch/arm/boot/dts/dra76x.dtsi
+@@ -86,3 +86,8 @@
+ &usb4_tm {
+ status = "disabled";
+ };
++
++&mmc3 {
++ /* dra76x is not affected by i887 */
++ max-frequency = <96000000>;
++};
+diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
+index 93e1eb83bed9..d7d98d2069df 100644
+--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
++++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
+@@ -796,16 +796,6 @@
+ clock-div = <1>;
+ };
+
+- ipu1_gfclk_mux: ipu1_gfclk_mux@520 {
+- #clock-cells = <0>;
+- compatible = "ti,mux-clock";
+- clocks = <&dpll_abe_m2x2_ck>, <&dpll_core_h22x2_ck>;
+- ti,bit-shift = <24>;
+- reg = <0x0520>;
+- assigned-clocks = <&ipu1_gfclk_mux>;
+- assigned-clock-parents = <&dpll_core_h22x2_ck>;
+- };
+-
+ dummy_ck: dummy_ck {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+@@ -1564,6 +1554,8 @@
+ compatible = "ti,clkctrl";
+ reg = <0x20 0x4>;
+ #clock-cells = <2>;
++ assigned-clocks = <&ipu1_clkctrl DRA7_IPU1_MMU_IPU1_CLKCTRL 24>;
++ assigned-clock-parents = <&dpll_core_h22x2_ck>;
+ };
+
+ ipu_clkctrl: ipu-clkctrl@50 {
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
+index 6486df3e2942..881cea0b61ba 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
+@@ -183,7 +183,6 @@
+ pinctrl-0 = <&pinctrl_usdhc4>;
+ bus-width = <8>;
+ non-removable;
+- vmmc-supply = <&vdd_emmc_1p8>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
+index d05be3f0e2a7..04717cf69db0 100644
+--- a/arch/arm/boot/dts/imx7-colibri.dtsi
++++ b/arch/arm/boot/dts/imx7-colibri.dtsi
+@@ -336,7 +336,6 @@
+ assigned-clock-rates = <400000000>;
+ bus-width = <8>;
+ fsl,tuning-step = <2>;
+- max-frequency = <100000000>;
+ vmmc-supply = <&reg_module_3v3>;
+ vqmmc-supply = <&reg_DCDC3>;
+ non-removable;
+diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
+index d8acd7cc7918..497434f0629e 100644
+--- a/arch/arm/boot/dts/imx7d.dtsi
++++ b/arch/arm/boot/dts/imx7d.dtsi
+@@ -44,7 +44,7 @@
+ opp-hz = /bits/ 64 <792000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <150000>;
+- opp-supported-hw = <0xd>, <0xf>;
++ opp-supported-hw = <0xd>, <0x7>;
+ opp-suspend;
+ };
+
+@@ -52,7 +52,7 @@
+ opp-hz = /bits/ 64 <996000000>;
+ opp-microvolt = <1100000>;
+ clock-latency-ns = <150000>;
+- opp-supported-hw = <0xc>, <0xf>;
++ opp-supported-hw = <0xc>, <0x7>;
+ opp-suspend;
+ };
+
+@@ -60,7 +60,7 @@
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1225000>;
+ clock-latency-ns = <150000>;
+- opp-supported-hw = <0x8>, <0xf>;
++ opp-supported-hw = <0x8>, <0x3>;
+ opp-suspend;
+ };
+ };
+diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
+index 2f6977ada447..63d9f4a066e3 100644
+--- a/arch/arm/boot/dts/ls1021a.dtsi
++++ b/arch/arm/boot/dts/ls1021a.dtsi
+@@ -728,7 +728,7 @@
+ };
+
+ mdio0: mdio@2d24000 {
+- compatible = "fsl,etsec2-mdio";
++ compatible = "gianfar";
+ device_type = "mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -737,7 +737,7 @@
+ };
+
+ mdio1: mdio@2d64000 {
+- compatible = "fsl,etsec2-mdio";
++ compatible = "gianfar";
+ device_type = "mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
+index 35ff620537e6..03506ce46149 100644
+--- a/arch/arm/mach-imx/Makefile
++++ b/arch/arm/mach-imx/Makefile
+@@ -91,6 +91,8 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
+ obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
+ obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
+ endif
++AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
++obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
+ obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
+
+ obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
+diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
+index 912aeceb4ff8..5aa5796cff0e 100644
+--- a/arch/arm/mach-imx/common.h
++++ b/arch/arm/mach-imx/common.h
+@@ -109,17 +109,17 @@ void imx_cpu_die(unsigned int cpu);
+ int imx_cpu_kill(unsigned int cpu);
+
+ #ifdef CONFIG_SUSPEND
+-void v7_cpu_resume(void);
+ void imx53_suspend(void __iomem *ocram_vbase);
+ extern const u32 imx53_suspend_sz;
+ void imx6_suspend(void __iomem *ocram_vbase);
+ #else
+-static inline void v7_cpu_resume(void) {}
+ static inline void imx53_suspend(void __iomem *ocram_vbase) {}
+ static const u32 imx53_suspend_sz;
+ static inline void imx6_suspend(void __iomem *ocram_vbase) {}
+ #endif
+
++void v7_cpu_resume(void);
++
+ void imx6_pm_ccm_init(const char *ccm_compat);
+ void imx6q_pm_init(void);
+ void imx6dl_pm_init(void);
+diff --git a/arch/arm/mach-imx/resume-imx6.S b/arch/arm/mach-imx/resume-imx6.S
+new file mode 100644
+index 000000000000..5bd1ba7ef15b
+--- /dev/null
++++ b/arch/arm/mach-imx/resume-imx6.S
+@@ -0,0 +1,24 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright 2014 Freescale Semiconductor, Inc.
++ */
++
++#include <linux/linkage.h>
++#include <asm/assembler.h>
++#include <asm/asm-offsets.h>
++#include <asm/hardware/cache-l2x0.h>
++#include "hardware.h"
++
++/*
++ * The following code must assume it is running from physical address
++ * where absolute virtual addresses to the data section have to be
++ * turned into relative ones.
++ */
++
++ENTRY(v7_cpu_resume)
++ bl v7_invalidate_l1
++#ifdef CONFIG_CACHE_L2X0
++ bl l2c310_early_resume
++#endif
++ b cpu_resume
++ENDPROC(v7_cpu_resume)
+diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
+index 062391ff13da..1eabf2d2834b 100644
+--- a/arch/arm/mach-imx/suspend-imx6.S
++++ b/arch/arm/mach-imx/suspend-imx6.S
+@@ -327,17 +327,3 @@ resume:
+
+ ret lr
+ ENDPROC(imx6_suspend)
+-
+-/*
+- * The following code must assume it is running from physical address
+- * where absolute virtual addresses to the data section have to be
+- * turned into relative ones.
+- */
+-
+-ENTRY(v7_cpu_resume)
+- bl v7_invalidate_l1
+-#ifdef CONFIG_CACHE_L2X0
+- bl l2c310_early_resume
+-#endif
+- b cpu_resume
+-ENDPROC(v7_cpu_resume)
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+index f82f25c1a5f9..d5dc12878dfe 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+@@ -327,7 +327,7 @@
+ #size-cells = <0>;
+
+ bus-width = <4>;
+- max-frequency = <50000000>;
++ max-frequency = <60000000>;
+
+ non-removable;
+ disable-wp;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+index a8bb3fa9fec9..cb1b48f5b8b1 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+@@ -593,6 +593,7 @@
+ compatible = "brcm,bcm43438-bt";
+ interrupt-parent = <&gpio_intc>;
+ interrupts = <95 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "host-wakeup";
+ shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+ max-speed = <2000000>;
+ clocks = <&wifi32k>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+index d3d26cca7d52..13460a360c6a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
++++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+@@ -52,11 +52,6 @@
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+-
+- ethphy1: ethernet-phy@1 {
+- compatible = "ethernet-phy-ieee802.3-c22";
+- reg = <1>;
+- };
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+index d43e1299c8ef..b47f2ce160a4 100644
+--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
++++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+@@ -102,7 +102,7 @@
+ };
+
+ gmac0: ethernet@ff800000 {
+- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
++ compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
+ reg = <0xff800000 0x2000>;
+ interrupts = <0 90 4>;
+ interrupt-names = "macirq";
+@@ -117,7 +117,7 @@
+ };
+
+ gmac1: ethernet@ff802000 {
+- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
++ compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
+ reg = <0xff802000 0x2000>;
+ interrupts = <0 91 4>;
+ interrupt-names = "macirq";
+@@ -132,7 +132,7 @@
+ };
+
+ gmac2: ethernet@ff804000 {
+- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
++ compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
+ reg = <0xff804000 0x2000>;
+ interrupts = <0 92 4>;
+ interrupt-names = "macirq";
+diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
+index da09c884cc30..f00b394988a2 100644
+--- a/arch/csky/Kconfig
++++ b/arch/csky/Kconfig
+@@ -37,6 +37,7 @@ config CSKY
+ select GX6605S_TIMER if CPU_CK610
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_AUDITSYSCALL
++ select HAVE_COPY_THREAD_TLS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_GRAPH_TRACER
+@@ -75,7 +76,7 @@ config CPU_HAS_TLBI
+ config CPU_HAS_LDSTEX
+ bool
+ help
+- For SMP, CPU needs "ldex&stex" instrcutions to atomic operations.
++ For SMP, CPU needs "ldex&stex" instructions for atomic operations.
+
+ config CPU_NEED_TLBSYNC
+ bool
+diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h
+index 7ab78bd0f3b1..f35a9f3315ee 100644
+--- a/arch/csky/abiv1/inc/abi/entry.h
++++ b/arch/csky/abiv1/inc/abi/entry.h
+@@ -16,14 +16,16 @@
+ #define LSAVE_A4 40
+ #define LSAVE_A5 44
+
++#define usp ss1
++
+ .macro USPTOKSP
+- mtcr sp, ss1
++ mtcr sp, usp
+ mfcr sp, ss0
+ .endm
+
+ .macro KSPTOUSP
+ mtcr sp, ss0
+- mfcr sp, ss1
++ mfcr sp, usp
+ .endm
+
+ .macro SAVE_ALL epc_inc
+@@ -45,7 +47,13 @@
+ add lr, r13
+ stw lr, (sp, 8)
+
++ mov lr, sp
++ addi lr, 32
++ addi lr, 32
++ addi lr, 16
++ bt 2f
+ mfcr lr, ss1
++2:
+ stw lr, (sp, 16)
+
+ stw a0, (sp, 20)
+@@ -79,9 +87,10 @@
+ ldw a0, (sp, 12)
+ mtcr a0, epsr
+ btsti a0, 31
++ bt 1f
+ ldw a0, (sp, 16)
+ mtcr a0, ss1
+-
++1:
+ ldw a0, (sp, 24)
+ ldw a1, (sp, 28)
+ ldw a2, (sp, 32)
+@@ -102,9 +111,9 @@
+ addi sp, 32
+ addi sp, 8
+
+- bt 1f
++ bt 2f
+ KSPTOUSP
+-1:
++2:
+ rte
+ .endm
+
+diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h
+index 9897a16b45e5..94a7a58765df 100644
+--- a/arch/csky/abiv2/inc/abi/entry.h
++++ b/arch/csky/abiv2/inc/abi/entry.h
+@@ -31,7 +31,13 @@
+
+ mfcr lr, epsr
+ stw lr, (sp, 12)
++ btsti lr, 31
++ bf 1f
++ addi lr, sp, 152
++ br 2f
++1:
+ mfcr lr, usp
++2:
+ stw lr, (sp, 16)
+
+ stw a0, (sp, 20)
+@@ -64,8 +70,10 @@
+ mtcr a0, epc
+ ldw a0, (sp, 12)
+ mtcr a0, epsr
++ btsti a0, 31
+ ldw a0, (sp, 16)
+ mtcr a0, usp
++ mtcr a0, ss0
+
+ #ifdef CONFIG_CPU_HAS_HILO
+ ldw a0, (sp, 140)
+@@ -86,6 +94,9 @@
+ addi sp, 40
+ ldm r16-r30, (sp)
+ addi sp, 72
++ bf 1f
++ mfcr sp, ss0
++1:
+ rte
+ .endm
+
+diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
+index 211c983c7282..ba4018929733 100644
+--- a/arch/csky/include/uapi/asm/unistd.h
++++ b/arch/csky/include/uapi/asm/unistd.h
+@@ -1,7 +1,10 @@
+ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
++#define __ARCH_WANT_STAT64
++#define __ARCH_WANT_NEW_STAT
+ #define __ARCH_WANT_SYS_CLONE
++#define __ARCH_WANT_SYS_CLONE3
+ #define __ARCH_WANT_SET_GET_RLIMIT
+ #define __ARCH_WANT_TIME32_SYSCALLS
+ #include <asm-generic/unistd.h>
+diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S
+index 5b84f11485ae..3821ef9b7567 100644
+--- a/arch/csky/kernel/atomic.S
++++ b/arch/csky/kernel/atomic.S
+@@ -17,10 +17,12 @@ ENTRY(csky_cmpxchg)
+ mfcr a3, epc
+ addi a3, TRAP0_SIZE
+
+- subi sp, 8
++ subi sp, 16
+ stw a3, (sp, 0)
+ mfcr a3, epsr
+ stw a3, (sp, 4)
++ mfcr a3, usp
++ stw a3, (sp, 8)
+
+ psrset ee
+ #ifdef CONFIG_CPU_HAS_LDSTEX
+@@ -47,7 +49,9 @@ ENTRY(csky_cmpxchg)
+ mtcr a3, epc
+ ldw a3, (sp, 4)
+ mtcr a3, epsr
+- addi sp, 8
++ ldw a3, (sp, 8)
++ mtcr a3, usp
++ addi sp, 16
+ KSPTOUSP
+ rte
+ END(csky_cmpxchg)
+diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
+index f320d9248a22..397962e11bd1 100644
+--- a/arch/csky/kernel/process.c
++++ b/arch/csky/kernel/process.c
+@@ -34,10 +34,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
+ return sw->r15;
+ }
+
+-int copy_thread(unsigned long clone_flags,
++int copy_thread_tls(unsigned long clone_flags,
+ unsigned long usp,
+ unsigned long kthread_arg,
+- struct task_struct *p)
++ struct task_struct *p,
++ unsigned long tls)
+ {
+ struct switch_stack *childstack;
+ struct pt_regs *childregs = task_pt_regs(p);
+@@ -64,7 +65,7 @@ int copy_thread(unsigned long clone_flags,
+ childregs->usp = usp;
+ if (clone_flags & CLONE_SETTLS)
+ task_thread_info(p)->tp_value = childregs->tls
+- = childregs->regs[0];
++ = tls;
+
+ childregs->a0 = 0;
+ childstack->r15 = (unsigned long) ret_from_fork;
+diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
+index b753d382e4ce..0bb0954d5570 100644
+--- a/arch/csky/kernel/smp.c
++++ b/arch/csky/kernel/smp.c
+@@ -120,7 +120,7 @@ void __init setup_smp_ipi(void)
+ int rc;
+
+ if (ipi_irq == 0)
+- panic("%s IRQ mapping failed\n", __func__);
++ return;
+
+ rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
+ &ipi_dummy_dev);
+diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile
+index c94ef6481098..efb7ebab342b 100644
+--- a/arch/csky/mm/Makefile
++++ b/arch/csky/mm/Makefile
+@@ -1,8 +1,10 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ ifeq ($(CONFIG_CPU_HAS_CACHEV2),y)
+ obj-y += cachev2.o
++CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE)
+ else
+ obj-y += cachev1.o
++CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE)
+ endif
+
+ obj-y += dma-mapping.o
+diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
+index d4c2292ea46b..00e96278b377 100644
+--- a/arch/csky/mm/init.c
++++ b/arch/csky/mm/init.c
+@@ -31,6 +31,7 @@
+
+ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+ pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
++EXPORT_SYMBOL(invalid_pte_table);
+ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+ __page_aligned_bss;
+ EXPORT_SYMBOL(empty_zero_page);
+diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
+index e745abc5457a..245be4fafe13 100644
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -2193,11 +2193,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
+ * oprofile_cpu_type already has a value, then we are
+ * possibly overriding a real PVR with a logical one,
+ * and, in that case, keep the current value for
+- * oprofile_cpu_type.
++ * oprofile_cpu_type. Futhermore, let's ensure that the
++ * fix for the PMAO bug is enabled on compatibility mode.
+ */
+ if (old.oprofile_cpu_type != NULL) {
+ t->oprofile_cpu_type = old.oprofile_cpu_type;
+ t->oprofile_type = old.oprofile_type;
++ t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
+ }
+ }
+
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index f5535eae637f..ab81a727e273 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -369,7 +369,9 @@ static inline bool flush_coherent_icache(unsigned long addr)
+ */
+ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+ mb(); /* sync */
++ allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
+ icbi((void *)addr);
++ prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
+ mb(); /* sync */
+ isync();
+ return true;
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index e0e3a465bbfd..8dfa2cf1f05c 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -146,7 +146,7 @@ all: bzImage
+ #KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg...
+ KBUILD_IMAGE := $(boot)/bzImage
+
+-install: vmlinux
++install:
+ $(Q)$(MAKE) $(build)=$(boot) $@
+
+ bzImage: vmlinux
+diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
+index e2c47d3a1c89..0ff9261c915e 100644
+--- a/arch/s390/boot/Makefile
++++ b/arch/s390/boot/Makefile
+@@ -70,7 +70,7 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
+ $(obj)/startup.a: $(OBJECTS) FORCE
+ $(call if_changed,ar)
+
+-install: $(CONFIGURE) $(obj)/bzImage
++install:
+ sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
+ System.map "$(INSTALL_PATH)"
+
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 7b03037a8475..9c578ad5409e 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -750,6 +750,12 @@ static inline int pmd_write(pmd_t pmd)
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
+ }
+
++#define pud_write pud_write
++static inline int pud_write(pud_t pud)
++{
++ return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
++}
++
+ static inline int pmd_dirty(pmd_t pmd)
+ {
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
+diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
+index 71e3f0146cda..7870cf834533 100644
+--- a/arch/s390/include/asm/qdio.h
++++ b/arch/s390/include/asm/qdio.h
+@@ -227,7 +227,7 @@ struct qdio_buffer {
+ * @sbal: absolute SBAL address
+ */
+ struct sl_element {
+- unsigned long sbal;
++ u64 sbal;
+ } __attribute__ ((packed));
+
+ /**
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index bc61ea18e88d..60716d18ce5a 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -424,7 +424,7 @@ static void zpci_map_resources(struct pci_dev *pdev)
+
+ if (zpci_use_mio(zdev))
+ pdev->resource[i].start =
+- (resource_size_t __force) zdev->bars[i].mio_wb;
++ (resource_size_t __force) zdev->bars[i].mio_wt;
+ else
+ pdev->resource[i].start = (resource_size_t __force)
+ pci_iomap_range_fh(pdev, i, 0, 0);
+@@ -531,7 +531,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ flags |= IORESOURCE_MEM_64;
+
+ if (zpci_use_mio(zdev))
+- addr = (unsigned long) zdev->bars[i].mio_wb;
++ addr = (unsigned long) zdev->bars[i].mio_wt;
+ else
+ addr = ZPCI_ADDR(entry);
+ size = 1UL << zdev->bars[i].size;
+diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c
+index 748456c365f4..9557c5a15b91 100644
+--- a/arch/x86/boot/compressed/kaslr_64.c
++++ b/arch/x86/boot/compressed/kaslr_64.c
+@@ -29,9 +29,6 @@
+ #define __PAGE_OFFSET __PAGE_OFFSET_BASE
+ #include "../../mm/ident_map.c"
+
+-/* Used by pgtable.h asm code to force instruction serialization. */
+-unsigned long __force_order;
+-
+ /* Used to track our page table allocation area. */
+ struct alloc_pgt_data {
+ unsigned char *pgt_buf;
+diff --git a/arch/x86/include/asm/io_bitmap.h b/arch/x86/include/asm/io_bitmap.h
+index 02c6ef8f7667..07344d82e88e 100644
+--- a/arch/x86/include/asm/io_bitmap.h
++++ b/arch/x86/include/asm/io_bitmap.h
+@@ -19,7 +19,14 @@ struct task_struct;
+ void io_bitmap_share(struct task_struct *tsk);
+ void io_bitmap_exit(void);
+
+-void tss_update_io_bitmap(void);
++void native_tss_update_io_bitmap(void);
++
++#ifdef CONFIG_PARAVIRT_XXL
++#include <asm/paravirt.h>
++#else
++#define tss_update_io_bitmap native_tss_update_io_bitmap
++#endif
++
+ #else
+ static inline void io_bitmap_share(struct task_struct *tsk) { }
+ static inline void io_bitmap_exit(void) { }
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index 86e7317eb31f..694d8daf4983 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -295,6 +295,13 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
+ PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
+ }
+
++#ifdef CONFIG_X86_IOPL_IOPERM
++static inline void tss_update_io_bitmap(void)
++{
++ PVOP_VCALL0(cpu.update_io_bitmap);
++}
++#endif
++
+ static inline void paravirt_activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+ {
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 84812964d3dd..732f62e04ddb 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -140,6 +140,10 @@ struct pv_cpu_ops {
+
+ void (*load_sp0)(unsigned long sp0);
+
++#ifdef CONFIG_X86_IOPL_IOPERM
++ void (*update_io_bitmap)(void);
++#endif
++
+ void (*wbinvd)(void);
+
+ /* cpuid emulation, mostly so that caps bits can be disabled */
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 2e4d90294fe6..9761e9c56756 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -462,7 +462,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
+ * cpuid bit to be set. We need to ensure that we
+ * update that bit in this CPU's "cpu_info".
+ */
+- get_cpu_cap(c);
++ set_cpu_cap(c, X86_FEATURE_OSPKE);
+ }
+
+ #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 789f5e4f89de..c131ba4e70ef 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -30,6 +30,7 @@
+ #include <asm/timer.h>
+ #include <asm/special_insns.h>
+ #include <asm/tlb.h>
++#include <asm/io_bitmap.h>
+
+ /*
+ * nop stub, which must not clobber anything *including the stack* to
+@@ -341,6 +342,10 @@ struct paravirt_patch_template pv_ops = {
+ .cpu.iret = native_iret,
+ .cpu.swapgs = native_swapgs,
+
++#ifdef CONFIG_X86_IOPL_IOPERM
++ .cpu.update_io_bitmap = native_tss_update_io_bitmap,
++#endif
++
+ .cpu.start_context_switch = paravirt_nop,
+ .cpu.end_context_switch = paravirt_nop,
+
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 61e93a318983..3363e71589dd 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -374,7 +374,7 @@ static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
+ /**
+ * tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
+ */
+-void tss_update_io_bitmap(void)
++void native_tss_update_io_bitmap(void)
+ {
+ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+ struct thread_struct *t = &current->thread;
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index 52a1e5192fa8..fe0e647411da 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -316,7 +316,7 @@ void efi_sync_low_kernel_mappings(void)
+ static inline phys_addr_t
+ virt_to_phys_or_null_size(void *va, unsigned long size)
+ {
+- bool bad_size;
++ phys_addr_t pa;
+
+ if (!va)
+ return 0;
+@@ -324,16 +324,13 @@ virt_to_phys_or_null_size(void *va, unsigned long size)
+ if (virt_addr_valid(va))
+ return virt_to_phys(va);
+
+- /*
+- * A fully aligned variable on the stack is guaranteed not to
+- * cross a page bounary. Try to catch strings on the stack by
+- * checking that 'size' is a power of two.
+- */
+- bad_size = size > PAGE_SIZE || !is_power_of_2(size);
++ pa = slow_virt_to_phys(va);
+
+- WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
++ /* check if the object crosses a page boundary */
++ if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
++ return 0;
+
+- return slow_virt_to_phys(va);
++ return pa;
+ }
+
+ #define virt_to_phys_or_null(addr) \
+@@ -791,6 +788,8 @@ static efi_status_t
+ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *data_size, void *data)
+ {
++ u8 buf[24] __aligned(8);
++ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ efi_status_t status;
+ u32 phys_name, phys_vendor, phys_attr;
+ u32 phys_data_size, phys_data;
+@@ -798,14 +797,19 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
++ *vnd = *vendor;
++
+ phys_data_size = virt_to_phys_or_null(data_size);
+- phys_vendor = virt_to_phys_or_null(vendor);
++ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_attr = virt_to_phys_or_null(attr);
+ phys_data = virt_to_phys_or_null_size(data, *data_size);
+
+- status = efi_thunk(get_variable, phys_name, phys_vendor,
+- phys_attr, phys_data_size, phys_data);
++ if (!phys_name || (data && !phys_data))
++ status = EFI_INVALID_PARAMETER;
++ else
++ status = efi_thunk(get_variable, phys_name, phys_vendor,
++ phys_attr, phys_data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+@@ -816,19 +820,25 @@ static efi_status_t
+ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data)
+ {
++ u8 buf[24] __aligned(8);
++ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ u32 phys_name, phys_vendor, phys_data;
+ efi_status_t status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
++ *vnd = *vendor;
++
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+- phys_vendor = virt_to_phys_or_null(vendor);
++ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
+
+- /* If data_size is > sizeof(u32) we've got problems */
+- status = efi_thunk(set_variable, phys_name, phys_vendor,
+- attr, data_size, phys_data);
++ if (!phys_name || !phys_data)
++ status = EFI_INVALID_PARAMETER;
++ else
++ status = efi_thunk(set_variable, phys_name, phys_vendor,
++ attr, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+@@ -840,6 +850,8 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data)
+ {
++ u8 buf[24] __aligned(8);
++ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ u32 phys_name, phys_vendor, phys_data;
+ efi_status_t status;
+ unsigned long flags;
+@@ -847,13 +859,17 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+ if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ return EFI_NOT_READY;
+
++ *vnd = *vendor;
++
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+- phys_vendor = virt_to_phys_or_null(vendor);
++ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
+
+- /* If data_size is > sizeof(u32) we've got problems */
+- status = efi_thunk(set_variable, phys_name, phys_vendor,
+- attr, data_size, phys_data);
++ if (!phys_name || !phys_data)
++ status = EFI_INVALID_PARAMETER;
++ else
++ status = efi_thunk(set_variable, phys_name, phys_vendor,
++ attr, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+@@ -865,21 +881,29 @@ efi_thunk_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name,
+ efi_guid_t *vendor)
+ {
++ u8 buf[24] __aligned(8);
++ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ efi_status_t status;
+ u32 phys_name_size, phys_name, phys_vendor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
++ *vnd = *vendor;
++
+ phys_name_size = virt_to_phys_or_null(name_size);
+- phys_vendor = virt_to_phys_or_null(vendor);
++ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_name = virt_to_phys_or_null_size(name, *name_size);
+
+- status = efi_thunk(get_next_variable, phys_name_size,
+- phys_name, phys_vendor);
++ if (!phys_name)
++ status = EFI_INVALID_PARAMETER;
++ else
++ status = efi_thunk(get_next_variable, phys_name_size,
++ phys_name, phys_vendor);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
++ *vendor = *vnd;
+ return status;
+ }
+
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 1f756ffffe8b..507f4fb88fa7 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -72,6 +72,9 @@
+ #include <asm/mwait.h>
+ #include <asm/pci_x86.h>
+ #include <asm/cpu.h>
++#ifdef CONFIG_X86_IOPL_IOPERM
++#include <asm/io_bitmap.h>
++#endif
+
+ #ifdef CONFIG_ACPI
+ #include <linux/acpi.h>
+@@ -837,6 +840,25 @@ static void xen_load_sp0(unsigned long sp0)
+ this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
+ }
+
++#ifdef CONFIG_X86_IOPL_IOPERM
++static void xen_update_io_bitmap(void)
++{
++ struct physdev_set_iobitmap iobitmap;
++ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
++
++ native_tss_update_io_bitmap();
++
++ iobitmap.bitmap = (uint8_t *)(&tss->x86_tss) +
++ tss->x86_tss.io_bitmap_base;
++ if (tss->x86_tss.io_bitmap_base == IO_BITMAP_OFFSET_INVALID)
++ iobitmap.nr_ports = 0;
++ else
++ iobitmap.nr_ports = IO_BITMAP_BITS;
++
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
++}
++#endif
++
+ static void xen_io_delay(void)
+ {
+ }
+@@ -896,14 +918,15 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
+ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
+ {
+ int ret;
++#ifdef CONFIG_X86_64
++ unsigned int which;
++ u64 base;
++#endif
+
+ ret = 0;
+
+ switch (msr) {
+ #ifdef CONFIG_X86_64
+- unsigned which;
+- u64 base;
+-
+ case MSR_FS_BASE: which = SEGBASE_FS; goto set;
+ case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set;
+ case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set;
+@@ -1046,6 +1069,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .write_idt_entry = xen_write_idt_entry,
+ .load_sp0 = xen_load_sp0,
+
++#ifdef CONFIG_X86_IOPL_IOPERM
++ .update_io_bitmap = xen_update_io_bitmap,
++#endif
+ .io_delay = xen_io_delay,
+
+ /* Xen takes care of %gs when switching to usermode for us */
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+index e1419edde2ec..5a64607ce774 100644
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg)
+ kfree(bfqg);
+ }
+
+-static void bfqg_and_blkg_get(struct bfq_group *bfqg)
++void bfqg_and_blkg_get(struct bfq_group *bfqg)
+ {
+ /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+ bfqg_get(bfqg);
+@@ -651,6 +651,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
+ false, BFQQE_PREEMPTED);
+
++ /*
++ * get extra reference to prevent bfqq from being freed in
++ * next possible deactivate
++ */
++ bfqq->ref++;
++
+ if (bfq_bfqq_busy(bfqq))
+ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
+ else if (entity->on_st)
+@@ -670,6 +676,8 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+
+ if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
+ bfq_schedule_dispatch(bfqd);
++ /* release extra ref taken above */
++ bfq_put_queue(bfqq);
+ }
+
+ /**
+@@ -1398,6 +1406,10 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
+ return bfqq->bfqd->root_group;
+ }
+
++void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
++
++void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
++
+ struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
+ {
+ struct bfq_group *bfqg;
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 5c239c540c47..8fe4b6919511 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -614,6 +614,10 @@ bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfqq->pos_root = NULL;
+ }
+
++ /* oom_bfqq does not participate in queue merging */
++ if (bfqq == &bfqd->oom_bfqq)
++ return;
++
+ /*
+ * bfqq cannot be merged any longer (see comments in
+ * bfq_setup_cooperator): no point in adding bfqq into the
+@@ -4822,9 +4826,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
+ {
+ struct bfq_queue *item;
+ struct hlist_node *n;
+-#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ struct bfq_group *bfqg = bfqq_group(bfqq);
+-#endif
+
+ if (bfqq->bfqd)
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
+@@ -4897,9 +4899,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
+ bfqq->bfqd->last_completed_rq_bfqq = NULL;
+
+ kmem_cache_free(bfq_pool, bfqq);
+-#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ bfqg_and_blkg_put(bfqg);
+-#endif
+ }
+
+ static void bfq_put_cooperator(struct bfq_queue *bfqq)
+@@ -6387,10 +6387,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ hrtimer_cancel(&bfqd->idle_slice_timer);
+
+-#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ /* release oom-queue reference to root group */
+ bfqg_and_blkg_put(bfqd->root_group);
+
++#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
+ #else
+ spin_lock_irq(&bfqd->lock);
+diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
+index 8526f20c53bc..355b8bbd1033 100644
+--- a/block/bfq-iosched.h
++++ b/block/bfq-iosched.h
+@@ -921,6 +921,7 @@ struct bfq_group {
+
+ #else
+ struct bfq_group {
++ struct bfq_entity entity;
+ struct bfq_sched_data sched_data;
+
+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
+@@ -984,6 +985,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
+ struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
++void bfqg_and_blkg_get(struct bfq_group *bfqg);
+ void bfqg_and_blkg_put(struct bfq_group *bfqg);
+
+ #ifdef CONFIG_BFQ_GROUP_IOSCHED
+diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
+index 05f0bf4a1144..44079147e396 100644
+--- a/block/bfq-wf2q.c
++++ b/block/bfq-wf2q.c
+@@ -536,7 +536,9 @@ static void bfq_get_entity(struct bfq_entity *entity)
+ bfqq->ref++;
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
+ bfqq, bfqq->ref);
+- }
++ } else
++ bfqg_and_blkg_get(container_of(entity, struct bfq_group,
++ entity));
+ }
+
+ /**
+@@ -650,8 +652,14 @@ static void bfq_forget_entity(struct bfq_service_tree *st,
+
+ entity->on_st = false;
+ st->wsum -= entity->weight;
+- if (bfqq && !is_in_service)
++ if (is_in_service)
++ return;
++
++ if (bfqq)
+ bfq_put_queue(bfqq);
++ else
++ bfqg_and_blkg_put(container_of(entity, struct bfq_group,
++ entity));
+ }
+
+ /**
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 9fcc761031d8..59b217ffeb59 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -5226,6 +5226,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
+ binder_dev = container_of(filp->private_data,
+ struct binder_device, miscdev);
+ }
++ refcount_inc(&binder_dev->ref);
+ proc->context = &binder_dev->context;
+ binder_alloc_init(&proc->alloc);
+
+@@ -5403,6 +5404,7 @@ static int binder_node_release(struct binder_node *node, int refs)
+ static void binder_deferred_release(struct binder_proc *proc)
+ {
+ struct binder_context *context = proc->context;
++ struct binder_device *device;
+ struct rb_node *n;
+ int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
+
+@@ -5419,6 +5421,12 @@ static void binder_deferred_release(struct binder_proc *proc)
+ context->binder_context_mgr_node = NULL;
+ }
+ mutex_unlock(&context->context_mgr_node_lock);
++ device = container_of(proc->context, struct binder_device, context);
++ if (refcount_dec_and_test(&device->ref)) {
++ kfree(context->name);
++ kfree(device);
++ }
++ proc->context = NULL;
+ binder_inner_proc_lock(proc);
+ /*
+ * Make sure proc stays alive after we
+@@ -6075,6 +6083,7 @@ static int __init init_binder_device(const char *name)
+ binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
+ binder_device->miscdev.name = name;
+
++ refcount_set(&binder_device->ref, 1);
+ binder_device->context.binder_context_mgr_uid = INVALID_UID;
+ binder_device->context.name = name;
+ mutex_init(&binder_device->context.context_mgr_node_lock);
+diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
+index ae991097d14d..283d3cb9c16e 100644
+--- a/drivers/android/binder_internal.h
++++ b/drivers/android/binder_internal.h
+@@ -8,6 +8,7 @@
+ #include <linux/list.h>
+ #include <linux/miscdevice.h>
+ #include <linux/mutex.h>
++#include <linux/refcount.h>
+ #include <linux/stddef.h>
+ #include <linux/types.h>
+ #include <linux/uidgid.h>
+@@ -33,6 +34,7 @@ struct binder_device {
+ struct miscdevice miscdev;
+ struct binder_context context;
+ struct inode *binderfs_inode;
++ refcount_t ref;
+ };
+
+ /**
+diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
+index e2580e5316a2..110e41f920c2 100644
+--- a/drivers/android/binderfs.c
++++ b/drivers/android/binderfs.c
+@@ -154,6 +154,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
+ if (!name)
+ goto err;
+
++ refcount_set(&device->ref, 1);
+ device->binderfs_inode = inode;
+ device->context.binder_context_mgr_uid = INVALID_UID;
+ device->context.name = name;
+@@ -257,8 +258,10 @@ static void binderfs_evict_inode(struct inode *inode)
+ ida_free(&binderfs_minors, device->miscdev.minor);
+ mutex_unlock(&binderfs_minors_mutex);
+
+- kfree(device->context.name);
+- kfree(device);
++ if (refcount_dec_and_test(&device->ref)) {
++ kfree(device->context.name);
++ kfree(device);
++ }
+ }
+
+ /**
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 42a672456432..3306d5ae92a6 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -745,25 +745,31 @@ static void __device_links_queue_sync_state(struct device *dev,
+ /**
+ * device_links_flush_sync_list - Call sync_state() on a list of devices
+ * @list: List of devices to call sync_state() on
++ * @dont_lock_dev: Device for which lock is already held by the caller
+ *
+ * Calls sync_state() on all the devices that have been queued for it. This
+- * function is used in conjunction with __device_links_queue_sync_state().
++ * function is used in conjunction with __device_links_queue_sync_state(). The
++ * @dont_lock_dev parameter is useful when this function is called from a
++ * context where a device lock is already held.
+ */
+-static void device_links_flush_sync_list(struct list_head *list)
++static void device_links_flush_sync_list(struct list_head *list,
++ struct device *dont_lock_dev)
+ {
+ struct device *dev, *tmp;
+
+ list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
+ list_del_init(&dev->links.defer_sync);
+
+- device_lock(dev);
++ if (dev != dont_lock_dev)
++ device_lock(dev);
+
+ if (dev->bus->sync_state)
+ dev->bus->sync_state(dev);
+ else if (dev->driver && dev->driver->sync_state)
+ dev->driver->sync_state(dev);
+
+- device_unlock(dev);
++ if (dev != dont_lock_dev)
++ device_unlock(dev);
+
+ put_device(dev);
+ }
+@@ -801,7 +807,7 @@ void device_links_supplier_sync_state_resume(void)
+ out:
+ device_links_write_unlock();
+
+- device_links_flush_sync_list(&sync_list);
++ device_links_flush_sync_list(&sync_list, NULL);
+ }
+
+ static int sync_state_resume_initcall(void)
+@@ -865,6 +871,11 @@ void device_links_driver_bound(struct device *dev)
+ driver_deferred_probe_add(link->consumer);
+ }
+
++ if (defer_sync_state_count)
++ __device_links_supplier_defer_sync(dev);
++ else
++ __device_links_queue_sync_state(dev, &sync_list);
++
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+@@ -883,7 +894,7 @@ void device_links_driver_bound(struct device *dev)
+
+ device_links_write_unlock();
+
+- device_links_flush_sync_list(&sync_list);
++ device_links_flush_sync_list(&sync_list, dev);
+ }
+
+ static void device_link_drop_managed(struct device_link *link)
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 3d79b074f958..c42447d5d5a8 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1406,7 +1406,7 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
+ }
+
+ /* 1-wire needs module's internal clocks enabled for reset */
+-static void sysc_clk_enable_quirk_hdq1w(struct sysc *ddata)
++static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
+ {
+ int offset = 0x0c; /* HDQ_CTRL_STATUS */
+ u16 val;
+@@ -1494,7 +1494,7 @@ static void sysc_init_module_quirks(struct sysc *ddata)
+ return;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
+- ddata->clk_enable_quirk = sysc_clk_enable_quirk_hdq1w;
++ ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w;
+
+ return;
+ }
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index ce41cd9b758a..2427398ff22a 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -108,6 +108,7 @@ static int dma_buf_release(struct inode *inode, struct file *file)
+ dma_resv_fini(dmabuf->resv);
+
+ module_put(dmabuf->owner);
++ kfree(dmabuf->name);
+ kfree(dmabuf);
+ return 0;
+ }
+diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
+index e51d836afcc7..1092d4ce723e 100644
+--- a/drivers/dma/coh901318.c
++++ b/drivers/dma/coh901318.c
+@@ -1947,8 +1947,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
+ return;
+ }
+
+- spin_lock(&cohc->lock);
+-
+ /*
+ * When we reach this point, at least one queue item
+ * should have been moved over from cohc->queue to
+@@ -1969,8 +1967,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
+ if (coh901318_queue_start(cohc) == NULL)
+ cohc->busy = 0;
+
+- spin_unlock(&cohc->lock);
+-
+ /*
+ * This tasklet will remove items from cohc->active
+ * and thus terminates them.
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index c27e206a764c..67736c801f3c 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -1328,13 +1328,14 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
+
+ sdma_channel_synchronize(chan);
+
+- if (sdmac->event_id0)
++ if (sdmac->event_id0 >= 0)
+ sdma_event_disable(sdmac, sdmac->event_id0);
+ if (sdmac->event_id1)
+ sdma_event_disable(sdmac, sdmac->event_id1);
+
+ sdmac->event_id0 = 0;
+ sdmac->event_id1 = 0;
++ sdmac->context_loaded = false;
+
+ sdma_set_channel_priority(sdmac, 0);
+
+@@ -1628,7 +1629,7 @@ static int sdma_config(struct dma_chan *chan,
+ memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
+
+ /* Set ENBLn earlier to make sure dma request triggered after that */
+- if (sdmac->event_id0) {
++ if (sdmac->event_id0 >= 0) {
+ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+ return -EINVAL;
+ sdma_event_enable(sdmac, sdmac->event_id0);
+diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
+index 3a45079d11ec..4a750e29bfb5 100644
+--- a/drivers/dma/tegra20-apb-dma.c
++++ b/drivers/dma/tegra20-apb-dma.c
+@@ -281,7 +281,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
+
+ /* Do not allocate if desc are waiting for ack */
+ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
+- if (async_tx_test_ack(&dma_desc->txd)) {
++ if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
+ list_del(&dma_desc->node);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ dma_desc->txd.flags = 0;
+@@ -756,10 +756,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
+ bool was_busy;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+- if (list_empty(&tdc->pending_sg_req)) {
+- spin_unlock_irqrestore(&tdc->lock, flags);
+- return 0;
+- }
+
+ if (!tdc->busy)
+ goto skip_dma_stop;
+diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
+index 2d263382d797..880ffd833718 100644
+--- a/drivers/edac/synopsys_edac.c
++++ b/drivers/edac/synopsys_edac.c
+@@ -479,20 +479,14 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
+ pinf = &p->ceinfo;
+ if (!priv->p_data->quirks) {
+ snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "DDR ECC error type:%s Row %d Bank %d Col %d ",
+- "CE", pinf->row, pinf->bank, pinf->col);
+- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "Bit Position: %d Data: 0x%08x\n",
++ "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
++ "CE", pinf->row, pinf->bank, pinf->col,
+ pinf->bitpos, pinf->data);
+ } else {
+ snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "DDR ECC error type:%s Row %d Bank %d Col %d ",
+- "CE", pinf->row, pinf->bank, pinf->col);
+- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "BankGroup Number %d Block Number %d ",
+- pinf->bankgrpnr, pinf->blknr);
+- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "Bit Position: %d Data: 0x%08x\n",
++ "DDR ECC error type:%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
++ "CE", pinf->row, pinf->bank, pinf->col,
++ pinf->bankgrpnr, pinf->blknr,
+ pinf->bitpos, pinf->data);
+ }
+
+@@ -509,10 +503,8 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
+ "UE", pinf->row, pinf->bank, pinf->col);
+ } else {
+ snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "DDR ECC error type :%s Row %d Bank %d Col %d ",
+- "UE", pinf->row, pinf->bank, pinf->col);
+- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "BankGroup Number %d Block Number %d",
++ "DDR ECC error type :%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d",
++ "UE", pinf->row, pinf->bank, pinf->col,
+ pinf->bankgrpnr, pinf->blknr);
+ }
+
1517 +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
1518 +index 2b02cb165f16..a9778591341b 100644
1519 +--- a/drivers/firmware/efi/efi.c
1520 ++++ b/drivers/firmware/efi/efi.c
1521 +@@ -552,7 +552,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
1522 +
1523 + seed = early_memremap(efi.rng_seed, sizeof(*seed));
1524 + if (seed != NULL) {
1525 +- size = seed->size;
1526 ++ size = READ_ONCE(seed->size);
1527 + early_memunmap(seed, sizeof(*seed));
1528 + } else {
1529 + pr_err("Could not map UEFI random seed!\n");
1530 +@@ -562,7 +562,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
1531 + sizeof(*seed) + size);
1532 + if (seed != NULL) {
1533 + pr_notice("seeding entropy pool\n");
1534 +- add_bootloader_randomness(seed->bits, seed->size);
1535 ++ add_bootloader_randomness(seed->bits, size);
1536 + early_memunmap(seed, sizeof(*seed) + size);
1537 + } else {
1538 + pr_err("Could not map UEFI random seed!\n");
1539 +diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
1540 +index 03b43b7a6d1d..f71eaa5bf52d 100644
1541 +--- a/drivers/firmware/imx/imx-scu.c
1542 ++++ b/drivers/firmware/imx/imx-scu.c
1543 +@@ -29,6 +29,7 @@ struct imx_sc_chan {
1544 + struct mbox_client cl;
1545 + struct mbox_chan *ch;
1546 + int idx;
1547 ++ struct completion tx_done;
1548 + };
1549 +
1550 + struct imx_sc_ipc {
1551 +@@ -100,6 +101,14 @@ int imx_scu_get_handle(struct imx_sc_ipc **ipc)
1552 + }
1553 + EXPORT_SYMBOL(imx_scu_get_handle);
1554 +
1555 ++/* Callback called when the word of a message is ack-ed, eg read by SCU */
1556 ++static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r)
1557 ++{
1558 ++ struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl);
1559 ++
1560 ++ complete(&sc_chan->tx_done);
1561 ++}
1562 ++
1563 + static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
1564 + {
1565 + struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl);
1566 +@@ -149,6 +158,19 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
1567 +
1568 + for (i = 0; i < hdr->size; i++) {
1569 + sc_chan = &sc_ipc->chans[i % 4];
1570 ++
1571 ++ /*
1572 ++ * SCU requires that all messages words are written
1573 ++ * sequentially but linux MU driver implements multiple
1574 ++ * independent channels for each register so ordering between
1575 ++ * different channels must be ensured by SCU API interface.
1576 ++ *
1577 ++ * Wait for tx_done before every send to ensure that no
1578 ++ * queueing happens at the mailbox channel level.
1579 ++ */
1580 ++ wait_for_completion(&sc_chan->tx_done);
1581 ++ reinit_completion(&sc_chan->tx_done);
1582 ++
1583 + ret = mbox_send_message(sc_chan->ch, &data[i]);
1584 + if (ret < 0)
1585 + return ret;
1586 +@@ -247,6 +269,11 @@ static int imx_scu_probe(struct platform_device *pdev)
1587 + cl->knows_txdone = true;
1588 + cl->rx_callback = imx_scu_rx_callback;
1589 +
1590 ++	/* Initialize the tx_done completion as "done" */
1591 ++ cl->tx_done = imx_scu_tx_done;
1592 ++ init_completion(&sc_chan->tx_done);
1593 ++ complete(&sc_chan->tx_done);
1594 ++
1595 + sc_chan->sc_ipc = sc_ipc;
1596 + sc_chan->idx = i % 4;
1597 + sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
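
The imx-scu hunks above implement a classic completion-based serialization:
the completion starts out "done", every send first waits for it and re-arms
it, and the mailbox tx_done callback completes it again once the SCU has
consumed the word. A kernel-style sketch of that pattern (names are
illustrative; this is not a standalone program):

#include <linux/completion.h>
#include <linux/mailbox_client.h>

struct chan_ctx {
	struct mbox_client cl;
	struct mbox_chan *ch;
	struct completion tx_done;
};

/* Called by the mailbox core once the previously sent word is consumed. */
static void tx_done_cb(struct mbox_client *cl, void *mssg, int r)
{
	struct chan_ctx *ctx = container_of(cl, struct chan_ctx, cl);

	complete(&ctx->tx_done);
}

static int send_words(struct chan_ctx *ctx, u32 *words, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		/* Block until the previous word is out, then re-arm, so no
		 * queueing can happen at the mailbox channel level. */
		wait_for_completion(&ctx->tx_done);
		reinit_completion(&ctx->tx_done);

		ret = mbox_send_message(ctx->ch, &words[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/* At setup time: ctx->cl.tx_done = tx_done_cb; init_completion(&ctx->tx_done);
 * complete(&ctx->tx_done); so the very first wait does not block. */
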
1598 +diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c
1599 +index 4b56a587dacd..d073cb3ce699 100644
1600 +--- a/drivers/firmware/imx/misc.c
1601 ++++ b/drivers/firmware/imx/misc.c
1602 +@@ -16,7 +16,7 @@ struct imx_sc_msg_req_misc_set_ctrl {
1603 + u32 ctrl;
1604 + u32 val;
1605 + u16 resource;
1606 +-} __packed;
1607 ++} __packed __aligned(4);
1608 +
1609 + struct imx_sc_msg_req_cpu_start {
1610 + struct imx_sc_rpc_msg hdr;
1611 +@@ -24,18 +24,18 @@ struct imx_sc_msg_req_cpu_start {
1612 + u32 address_lo;
1613 + u16 resource;
1614 + u8 enable;
1615 +-} __packed;
1616 ++} __packed __aligned(4);
1617 +
1618 + struct imx_sc_msg_req_misc_get_ctrl {
1619 + struct imx_sc_rpc_msg hdr;
1620 + u32 ctrl;
1621 + u16 resource;
1622 +-} __packed;
1623 ++} __packed __aligned(4);
1624 +
1625 + struct imx_sc_msg_resp_misc_get_ctrl {
1626 + struct imx_sc_rpc_msg hdr;
1627 + u32 val;
1628 +-} __packed;
1629 ++} __packed __aligned(4);
1630 +
1631 + /*
1632 + * This function sets a miscellaneous control value.
1633 +diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
1634 +index b556612207e5..af3ae0087de4 100644
1635 +--- a/drivers/firmware/imx/scu-pd.c
1636 ++++ b/drivers/firmware/imx/scu-pd.c
1637 +@@ -61,7 +61,7 @@ struct imx_sc_msg_req_set_resource_power_mode {
1638 + struct imx_sc_rpc_msg hdr;
1639 + u16 resource;
1640 + u8 mode;
1641 +-} __packed;
1642 ++} __packed __aligned(4);
1643 +
1644 + #define IMX_SCU_PD_NAME_SIZE 20
1645 + struct imx_sc_pm_domain {
1646 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1647 +index d9b8e3298d78..6b5b243af15d 100644
1648 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1649 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1650 +@@ -54,7 +54,7 @@
1651 + * In bring-up phase, it just used primary ring so set gfx ring number as 1 at
1652 + * first.
1653 + */
1654 +-#define GFX10_NUM_GFX_RINGS 2
1655 ++#define GFX10_NUM_GFX_RINGS_NV1X 1
1656 + #define GFX10_MEC_HPD_SIZE 2048
1657 +
1658 + #define F32_CE_PROGRAM_RAM_SIZE 65536
1659 +@@ -1286,7 +1286,7 @@ static int gfx_v10_0_sw_init(void *handle)
1660 + case CHIP_NAVI14:
1661 + case CHIP_NAVI12:
1662 + adev->gfx.me.num_me = 1;
1663 +- adev->gfx.me.num_pipe_per_me = 2;
1664 ++ adev->gfx.me.num_pipe_per_me = 1;
1665 + adev->gfx.me.num_queue_per_pipe = 1;
1666 + adev->gfx.mec.num_mec = 2;
1667 + adev->gfx.mec.num_pipe_per_mec = 4;
1668 +@@ -2692,18 +2692,20 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
1669 + amdgpu_ring_commit(ring);
1670 +
1671 + /* submit cs packet to copy state 0 to next available state */
1672 +- ring = &adev->gfx.gfx_ring[1];
1673 +- r = amdgpu_ring_alloc(ring, 2);
1674 +- if (r) {
1675 +- DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
1676 +- return r;
1677 +- }
1678 +-
1679 +- amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1680 +- amdgpu_ring_write(ring, 0);
1681 ++ if (adev->gfx.num_gfx_rings > 1) {
1682 ++ /* maximum supported gfx ring is 2 */
1683 ++ ring = &adev->gfx.gfx_ring[1];
1684 ++ r = amdgpu_ring_alloc(ring, 2);
1685 ++ if (r) {
1686 ++ DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
1687 ++ return r;
1688 ++ }
1689 +
1690 +- amdgpu_ring_commit(ring);
1691 ++ amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1692 ++ amdgpu_ring_write(ring, 0);
1693 +
1694 ++ amdgpu_ring_commit(ring);
1695 ++ }
1696 + return 0;
1697 + }
1698 +
1699 +@@ -2800,39 +2802,41 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
1700 + mutex_unlock(&adev->srbm_mutex);
1701 +
1702 + /* Init gfx ring 1 for pipe 1 */
1703 +- mutex_lock(&adev->srbm_mutex);
1704 +- gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
1705 +- ring = &adev->gfx.gfx_ring[1];
1706 +- rb_bufsz = order_base_2(ring->ring_size / 8);
1707 +- tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
1708 +- tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
1709 +- WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
1710 +- /* Initialize the ring buffer's write pointers */
1711 +- ring->wptr = 0;
1712 +- WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
1713 +- WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
1714 +-	/* Set the wb address whether it's enabled or not */
1715 +- rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
1716 +- WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
1717 +- WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
1718 +- CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
1719 +- wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1720 +- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
1721 +- lower_32_bits(wptr_gpu_addr));
1722 +- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
1723 +- upper_32_bits(wptr_gpu_addr));
1724 +-
1725 +- mdelay(1);
1726 +- WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
1727 +-
1728 +- rb_addr = ring->gpu_addr >> 8;
1729 +- WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
1730 +- WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
1731 +- WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
1732 +-
1733 +- gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
1734 +- mutex_unlock(&adev->srbm_mutex);
1735 +-
1736 ++ if (adev->gfx.num_gfx_rings > 1) {
1737 ++ mutex_lock(&adev->srbm_mutex);
1738 ++ gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
1739 ++ /* maximum supported gfx ring is 2 */
1740 ++ ring = &adev->gfx.gfx_ring[1];
1741 ++ rb_bufsz = order_base_2(ring->ring_size / 8);
1742 ++ tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
1743 ++ tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
1744 ++ WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
1745 ++ /* Initialize the ring buffer's write pointers */
1746 ++ ring->wptr = 0;
1747 ++ WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
1748 ++ WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
1749 ++	/* Set the wb address whether it's enabled or not */
1750 ++ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
1751 ++ WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
1752 ++ WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
1753 ++ CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
1754 ++ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1755 ++ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
1756 ++ lower_32_bits(wptr_gpu_addr));
1757 ++ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
1758 ++ upper_32_bits(wptr_gpu_addr));
1759 ++
1760 ++ mdelay(1);
1761 ++ WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
1762 ++
1763 ++ rb_addr = ring->gpu_addr >> 8;
1764 ++ WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
1765 ++ WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
1766 ++ WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
1767 ++
1768 ++ gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
1769 ++ mutex_unlock(&adev->srbm_mutex);
1770 ++ }
1771 + /* Switch to pipe 0 */
1772 + mutex_lock(&adev->srbm_mutex);
1773 + gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
1774 +@@ -3952,7 +3956,8 @@ static int gfx_v10_0_early_init(void *handle)
1775 + {
1776 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1777 +
1778 +- adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
1779 ++ adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
1780 ++
1781 + adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
1782 +
1783 + gfx_v10_0_set_kiq_pm4_funcs(adev);
1784 +diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1785 +index 9b415f6569a2..04fea3cc0cfa 100644
1786 +--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1787 ++++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1788 +@@ -222,7 +222,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
1789 + {
1790 + int ret = 0;
1791 +
1792 +- if (min <= 0 && max <= 0)
1793 ++ if (min < 0 && max < 0)
1794 + return -EINVAL;
1795 +
1796 + if (!smu_clk_dpm_is_enabled(smu, clk_type))
1797 +diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
1798 +index 094cfc46adac..29c11694406d 100644
1799 +--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
1800 ++++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
1801 +@@ -373,9 +373,6 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
1802 + {
1803 + int ret = 0;
1804 +
1805 +- if (max < min)
1806 +- return -EINVAL;
1807 +-
1808 + switch (clk_type) {
1809 + case SMU_GFXCLK:
1810 + case SMU_SCLK:
1811 +diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
1812 +index 6d4a29e99ae2..3035584f6dc7 100644
1813 +--- a/drivers/gpu/drm/drm_client_modeset.c
1814 ++++ b/drivers/gpu/drm/drm_client_modeset.c
1815 +@@ -951,7 +951,8 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
1816 + * depending on the hardware this may require the framebuffer
1817 + * to be in a specific tiling format.
1818 + */
1819 +- if ((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180 ||
1820 ++ if (((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0 &&
1821 ++ (*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180) ||
1822 + !plane->rotation_property)
1823 + return false;
1824 +
1825 +diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
1826 +index 0810d3ef6961..6c35407a50eb 100644
1827 +--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
1828 ++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
1829 +@@ -254,11 +254,16 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
1830 + if (ret)
1831 + goto err_zero_use;
1832 +
1833 +- if (obj->import_attach)
1834 ++ if (obj->import_attach) {
1835 + shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
1836 +- else
1837 ++ } else {
1838 ++ pgprot_t prot = PAGE_KERNEL;
1839 ++
1840 ++ if (!shmem->map_cached)
1841 ++ prot = pgprot_writecombine(prot);
1842 + shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
1843 +- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
1844 ++ VM_MAP, prot);
1845 ++ }
1846 +
1847 + if (!shmem->vaddr) {
1848 + DRM_DEBUG_KMS("Failed to vmap pages\n");
1849 +@@ -537,7 +542,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1850 + }
1851 +
1852 + vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
1853 +- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1854 ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1855 ++ if (!shmem->map_cached)
1856 ++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1857 + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1858 + vma->vm_ops = &drm_gem_shmem_vm_ops;
1859 +
1860 +diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
1861 +index 88232698d7a0..3fd35e6b9d53 100644
1862 +--- a/drivers/gpu/drm/drm_modes.c
1863 ++++ b/drivers/gpu/drm/drm_modes.c
1864 +@@ -1672,6 +1672,13 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
1865 + }
1866 + }
1867 +
1868 ++ if (!(rotation & DRM_MODE_ROTATE_MASK))
1869 ++ rotation |= DRM_MODE_ROTATE_0;
1870 ++
1871 ++ /* Make sure there is exactly one rotation defined */
1872 ++ if (!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK))
1873 ++ return -EINVAL;
1874 ++
1875 + mode->rotation_reflection = rotation;
1876 +
1877 + return 0;
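
The drm_modes.c hunk relies on the rotation bits being one-hot (the
DRM_MODE_ROTATE_0/90/180/270 values are individual bits), so after masking,
"exactly one rotation given" is equivalent to "the masked value is a power
of two". A small standalone illustration of that check (bit positions here
are chosen for the example):

#include <stdbool.h>
#include <stdio.h>

/* Userspace equivalent of the kernel's is_power_of_2(): true when exactly
 * one bit is set. */
static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned int rotate_0 = 1 << 0, rotate_90 = 1 << 1;

	printf("%d\n", is_power_of_2(rotate_0));	     /* 1: valid */
	printf("%d\n", is_power_of_2(rotate_0 | rotate_90)); /* 0: rejected */
	return 0;
}
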
1878 +diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
1879 +index 0da860200410..e2ac09894a6d 100644
1880 +--- a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
1881 ++++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
1882 +@@ -83,7 +83,6 @@
1883 + #define VSIZE_OFST 20
1884 + #define LDI_INT_EN 0x741C
1885 + #define FRAME_END_INT_EN_OFST 1
1886 +-#define UNDERFLOW_INT_EN_OFST 2
1887 + #define LDI_CTRL 0x7420
1888 + #define BPP_OFST 3
1889 + #define DATA_GATE_EN BIT(2)
1890 +diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
1891 +index 73cd28a6ea07..86000127d4ee 100644
1892 +--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
1893 ++++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
1894 +@@ -46,7 +46,6 @@ struct ade_hw_ctx {
1895 + struct clk *media_noc_clk;
1896 + struct clk *ade_pix_clk;
1897 + struct reset_control *reset;
1898 +- struct work_struct display_reset_wq;
1899 + bool power_on;
1900 + int irq;
1901 +
1902 +@@ -136,7 +135,6 @@ static void ade_init(struct ade_hw_ctx *ctx)
1903 + */
1904 + ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
1905 + FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
1906 +- ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1);
1907 + }
1908 +
1909 + static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
1910 +@@ -304,17 +302,6 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
1911 + MASK(1), 0);
1912 + }
1913 +
1914 +-static void drm_underflow_wq(struct work_struct *work)
1915 +-{
1916 +- struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx,
1917 +- display_reset_wq);
1918 +- struct drm_device *drm_dev = ctx->crtc->dev;
1919 +- struct drm_atomic_state *state;
1920 +-
1921 +- state = drm_atomic_helper_suspend(drm_dev);
1922 +- drm_atomic_helper_resume(drm_dev, state);
1923 +-}
1924 +-
1925 + static irqreturn_t ade_irq_handler(int irq, void *data)
1926 + {
1927 + struct ade_hw_ctx *ctx = data;
1928 +@@ -331,12 +318,6 @@ static irqreturn_t ade_irq_handler(int irq, void *data)
1929 + MASK(1), 1);
1930 + drm_crtc_handle_vblank(crtc);
1931 + }
1932 +- if (status & BIT(UNDERFLOW_INT_EN_OFST)) {
1933 +- ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST,
1934 +- MASK(1), 1);
1935 +- DRM_ERROR("LDI underflow!");
1936 +- schedule_work(&ctx->display_reset_wq);
1937 +- }
1938 +
1939 + return IRQ_HANDLED;
1940 + }
1941 +@@ -919,7 +900,6 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
1942 + if (ret)
1943 + return ERR_PTR(-EIO);
1944 +
1945 +- INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq);
1946 + ctx->crtc = crtc;
1947 +
1948 + return ctx;
1949 +diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
1950 +index 12ba74788cce..597e45977349 100644
1951 +--- a/drivers/gpu/drm/i915/display/intel_display_power.c
1952 ++++ b/drivers/gpu/drm/i915/display/intel_display_power.c
1953 +@@ -4471,13 +4471,19 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
1954 +
1955 + static void icl_mbus_init(struct drm_i915_private *dev_priv)
1956 + {
1957 +- u32 val;
1958 ++ u32 mask, val;
1959 +
1960 +- val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
1961 +- MBUS_ABOX_BT_CREDIT_POOL2(16) |
1962 +- MBUS_ABOX_B_CREDIT(1) |
1963 +- MBUS_ABOX_BW_CREDIT(1);
1964 ++ mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
1965 ++ MBUS_ABOX_BT_CREDIT_POOL2_MASK |
1966 ++ MBUS_ABOX_B_CREDIT_MASK |
1967 ++ MBUS_ABOX_BW_CREDIT_MASK;
1968 +
1969 ++ val = I915_READ(MBUS_ABOX_CTL);
1970 ++ val &= ~mask;
1971 ++ val |= MBUS_ABOX_BT_CREDIT_POOL1(16) |
1972 ++ MBUS_ABOX_BT_CREDIT_POOL2(16) |
1973 ++ MBUS_ABOX_B_CREDIT(1) |
1974 ++ MBUS_ABOX_BW_CREDIT(1);
1975 + I915_WRITE(MBUS_ABOX_CTL, val);
1976 + }
1977 +
1978 +diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1979 +index 29b2077b73d2..e81c2726f7fd 100644
1980 +--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1981 ++++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1982 +@@ -567,7 +567,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
1983 +
1984 + obj = i915_gem_object_create_internal(i915, size);
1985 + if (IS_ERR(obj))
1986 +- return PTR_ERR(obj);
1987 ++ return false;
1988 +
1989 + err = create_mmap_offset(obj);
1990 + i915_gem_object_put(obj);
1991 +diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
1992 +index 2ae14bc14931..cc917200bdeb 100644
1993 +--- a/drivers/gpu/drm/i915/i915_perf.c
1994 ++++ b/drivers/gpu/drm/i915/i915_perf.c
1995 +@@ -1950,9 +1950,10 @@ out:
1996 + return i915_vma_get(oa_bo->vma);
1997 + }
1998 +
1999 +-static int emit_oa_config(struct i915_perf_stream *stream,
2000 +- struct i915_oa_config *oa_config,
2001 +- struct intel_context *ce)
2002 ++static struct i915_request *
2003 ++emit_oa_config(struct i915_perf_stream *stream,
2004 ++ struct i915_oa_config *oa_config,
2005 ++ struct intel_context *ce)
2006 + {
2007 + struct i915_request *rq;
2008 + struct i915_vma *vma;
2009 +@@ -1960,7 +1961,7 @@ static int emit_oa_config(struct i915_perf_stream *stream,
2010 +
2011 + vma = get_oa_vma(stream, oa_config);
2012 + if (IS_ERR(vma))
2013 +- return PTR_ERR(vma);
2014 ++ return ERR_CAST(vma);
2015 +
2016 + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
2017 + if (err)
2018 +@@ -1983,13 +1984,17 @@ static int emit_oa_config(struct i915_perf_stream *stream,
2019 + err = rq->engine->emit_bb_start(rq,
2020 + vma->node.start, 0,
2021 + I915_DISPATCH_SECURE);
2022 ++ if (err)
2023 ++ goto err_add_request;
2024 ++
2025 ++ i915_request_get(rq);
2026 + err_add_request:
2027 + i915_request_add(rq);
2028 + err_vma_unpin:
2029 + i915_vma_unpin(vma);
2030 + err_vma_put:
2031 + i915_vma_put(vma);
2032 +- return err;
2033 ++ return err ? ERR_PTR(err) : rq;
2034 + }
2035 +
2036 + static struct intel_context *oa_context(struct i915_perf_stream *stream)
2037 +@@ -1997,7 +2002,8 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
2038 + return stream->pinned_ctx ?: stream->engine->kernel_context;
2039 + }
2040 +
2041 +-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
2042 ++static struct i915_request *
2043 ++hsw_enable_metric_set(struct i915_perf_stream *stream)
2044 + {
2045 + struct intel_uncore *uncore = stream->uncore;
2046 +
2047 +@@ -2408,7 +2414,8 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
2048 + return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
2049 + }
2050 +
2051 +-static int gen8_enable_metric_set(struct i915_perf_stream *stream)
2052 ++static struct i915_request *
2053 ++gen8_enable_metric_set(struct i915_perf_stream *stream)
2054 + {
2055 + struct intel_uncore *uncore = stream->uncore;
2056 + struct i915_oa_config *oa_config = stream->oa_config;
2057 +@@ -2450,12 +2457,13 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
2058 + */
2059 + ret = lrc_configure_all_contexts(stream, oa_config);
2060 + if (ret)
2061 +- return ret;
2062 ++ return ERR_PTR(ret);
2063 +
2064 + return emit_oa_config(stream, oa_config, oa_context(stream));
2065 + }
2066 +
2067 +-static int gen12_enable_metric_set(struct i915_perf_stream *stream)
2068 ++static struct i915_request *
2069 ++gen12_enable_metric_set(struct i915_perf_stream *stream)
2070 + {
2071 + struct intel_uncore *uncore = stream->uncore;
2072 + struct i915_oa_config *oa_config = stream->oa_config;
2073 +@@ -2488,7 +2496,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
2074 + */
2075 + ret = gen12_configure_all_contexts(stream, oa_config);
2076 + if (ret)
2077 +- return ret;
2078 ++ return ERR_PTR(ret);
2079 +
2080 + /*
2081 + * For Gen12, performance counters are context
2082 +@@ -2498,7 +2506,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
2083 + if (stream->ctx) {
2084 + ret = gen12_configure_oar_context(stream, true);
2085 + if (ret)
2086 +- return ret;
2087 ++ return ERR_PTR(ret);
2088 + }
2089 +
2090 + return emit_oa_config(stream, oa_config, oa_context(stream));
2091 +@@ -2693,6 +2701,20 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
2092 + .read = i915_oa_read,
2093 + };
2094 +
2095 ++static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
2096 ++{
2097 ++ struct i915_request *rq;
2098 ++
2099 ++ rq = stream->perf->ops.enable_metric_set(stream);
2100 ++ if (IS_ERR(rq))
2101 ++ return PTR_ERR(rq);
2102 ++
2103 ++ i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
2104 ++ i915_request_put(rq);
2105 ++
2106 ++ return 0;
2107 ++}
2108 ++
2109 + /**
2110 + * i915_oa_stream_init - validate combined props for OA stream and init
2111 + * @stream: An i915 perf stream
2112 +@@ -2826,7 +2848,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
2113 + stream->ops = &i915_oa_stream_ops;
2114 + perf->exclusive_stream = stream;
2115 +
2116 +- ret = perf->ops.enable_metric_set(stream);
2117 ++ ret = i915_perf_stream_enable_sync(stream);
2118 + if (ret) {
2119 + DRM_DEBUG("Unable to enable metric set\n");
2120 + goto err_enable;
2121 +@@ -3144,7 +3166,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
2122 + return -EINVAL;
2123 +
2124 + if (config != stream->oa_config) {
2125 +- int err;
2126 ++ struct i915_request *rq;
2127 +
2128 + /*
2129 + * If OA is bound to a specific context, emit the
2130 +@@ -3155,11 +3177,13 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
2131 + * When set globally, we use a low priority kernel context,
2132 + * so it will effectively take effect when idle.
2133 + */
2134 +- err = emit_oa_config(stream, config, oa_context(stream));
2135 +- if (err == 0)
2136 ++ rq = emit_oa_config(stream, config, oa_context(stream));
2137 ++ if (!IS_ERR(rq)) {
2138 + config = xchg(&stream->oa_config, config);
2139 +- else
2140 +- ret = err;
2141 ++ i915_request_put(rq);
2142 ++ } else {
2143 ++ ret = PTR_ERR(rq);
2144 ++ }
2145 + }
2146 +
2147 + i915_oa_config_put(config);
2148 +diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
2149 +index 74ddc20a0d37..171f95a97ef6 100644
2150 +--- a/drivers/gpu/drm/i915/i915_perf_types.h
2151 ++++ b/drivers/gpu/drm/i915/i915_perf_types.h
2152 +@@ -339,7 +339,8 @@ struct i915_oa_ops {
2153 + * counter reports being sampled. May apply system constraints such as
2154 + * disabling EU clock gating as required.
2155 + */
2156 +- int (*enable_metric_set)(struct i915_perf_stream *stream);
2157 ++ struct i915_request *
2158 ++ (*enable_metric_set)(struct i915_perf_stream *stream);
2159 +
2160 + /**
2161 + * @disable_metric_set: Remove system constraints associated with using
2162 +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
2163 +index 7f21307cda75..c26b2faa15cf 100644
2164 +--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
2165 ++++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
2166 +@@ -358,6 +358,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
2167 + /* Only DMA capable components need the LARB property */
2168 + comp->larb_dev = NULL;
2169 + if (type != MTK_DISP_OVL &&
2170 ++ type != MTK_DISP_OVL_2L &&
2171 + type != MTK_DISP_RDMA &&
2172 + type != MTK_DISP_WDMA)
2173 + return 0;
2174 +diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
2175 +index 05cc04f729d6..e1cc541e0ef2 100644
2176 +--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
2177 ++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
2178 +@@ -1109,8 +1109,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
2179 + ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
2180 + msecs_to_jiffies(50));
2181 + if (ret == 0)
2182 +- dev_warn(dev->dev, "pp done time out, lm=%d\n",
2183 +- mdp5_cstate->pipeline.mixer->lm);
2184 ++ dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
2185 ++ mdp5_cstate->pipeline.mixer->lm);
2186 + }
2187 +
2188 + static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
2189 +diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
2190 +index 271aa7bbca92..73127948f54d 100644
2191 +--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
2192 ++++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
2193 +@@ -336,7 +336,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
2194 + return num;
2195 + }
2196 +
2197 +-static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
2198 ++static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
2199 + struct drm_display_mode *mode)
2200 + {
2201 + int id = dsi_mgr_connector_get_id(connector);
2202 +@@ -479,6 +479,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
2203 + struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
2204 + struct mipi_dsi_host *host = msm_dsi->host;
2205 + struct drm_panel *panel = msm_dsi->panel;
2206 ++ struct msm_dsi_pll *src_pll;
2207 + bool is_dual_dsi = IS_DUAL_DSI();
2208 + int ret;
2209 +
2210 +@@ -519,6 +520,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
2211 + id, ret);
2212 + }
2213 +
2214 ++ /* Save PLL status if it is a clock source */
2215 ++ src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
2216 ++ msm_dsi_pll_save_state(src_pll);
2217 ++
2218 + ret = msm_dsi_host_power_off(host);
2219 + if (ret)
2220 + pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
2221 +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
2222 +index b0cfa67d2a57..f509ebd77500 100644
2223 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
2224 ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
2225 +@@ -724,10 +724,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
2226 + if (!phy || !phy->cfg->ops.disable)
2227 + return;
2228 +
2229 +- /* Save PLL status if it is a clock source */
2230 +- if (phy->usecase != MSM_DSI_PHY_SLAVE)
2231 +- msm_dsi_pll_save_state(phy->pll);
2232 +-
2233 + phy->cfg->ops.disable(phy);
2234 +
2235 + dsi_phy_regulator_disable(phy);
2236 +diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
2237 +index 8f6100db90ed..aa9385d5bfff 100644
2238 +--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
2239 ++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
2240 +@@ -411,6 +411,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
2241 + if (pll_10nm->slave)
2242 + dsi_pll_enable_pll_bias(pll_10nm->slave);
2243 +
2244 ++	rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
2245 ++ if (rc) {
2246 ++ pr_err("vco_set_rate failed, rc=%d\n", rc);
2247 ++ return rc;
2248 ++ }
2249 ++
2250 + /* Start PLL */
2251 + pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
2252 + 0x01);
2253 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
2254 +index 3107b0738e40..5d75f8cf6477 100644
2255 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
2256 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
2257 +@@ -601,33 +601,27 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
2258 + source_id = (fault_status >> 16);
2259 +
2260 + /* Page fault only */
2261 +- if ((status & mask) == BIT(i)) {
2262 +- WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
2263 +-
2264 ++ ret = -1;
2265 ++ if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
2266 + ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
2267 +- if (!ret) {
2268 +- mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
2269 +- status &= ~mask;
2270 +- continue;
2271 +- }
2272 +- }
2273 +
2274 +- /* terminal fault, print info about the fault */
2275 +- dev_err(pfdev->dev,
2276 +- "Unhandled Page fault in AS%d at VA 0x%016llX\n"
2277 +- "Reason: %s\n"
2278 +- "raw fault status: 0x%X\n"
2279 +- "decoded fault status: %s\n"
2280 +- "exception type 0x%X: %s\n"
2281 +- "access type 0x%X: %s\n"
2282 +- "source id 0x%X\n",
2283 +- i, addr,
2284 +- "TODO",
2285 +- fault_status,
2286 +- (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
2287 +- exception_type, panfrost_exception_name(pfdev, exception_type),
2288 +- access_type, access_type_name(pfdev, fault_status),
2289 +- source_id);
2290 ++ if (ret)
2291 ++ /* terminal fault, print info about the fault */
2292 ++ dev_err(pfdev->dev,
2293 ++ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
2294 ++ "Reason: %s\n"
2295 ++ "raw fault status: 0x%X\n"
2296 ++ "decoded fault status: %s\n"
2297 ++ "exception type 0x%X: %s\n"
2298 ++ "access type 0x%X: %s\n"
2299 ++ "source id 0x%X\n",
2300 ++ i, addr,
2301 ++ "TODO",
2302 ++ fault_status,
2303 ++ (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
2304 ++ exception_type, panfrost_exception_name(pfdev, exception_type),
2305 ++ access_type, access_type_name(pfdev, fault_status),
2306 ++ source_id);
2307 +
2308 + mmu_write(pfdev, MMU_INT_CLEAR, mask);
2309 +
2310 +diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
2311 +index 6d61a0eb5d64..84e6bc050bf2 100644
2312 +--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
2313 ++++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
2314 +@@ -53,6 +53,7 @@ cmdline_test(drm_cmdline_test_rotate_0)
2315 + cmdline_test(drm_cmdline_test_rotate_90)
2316 + cmdline_test(drm_cmdline_test_rotate_180)
2317 + cmdline_test(drm_cmdline_test_rotate_270)
2318 ++cmdline_test(drm_cmdline_test_rotate_multiple)
2319 + cmdline_test(drm_cmdline_test_rotate_invalid_val)
2320 + cmdline_test(drm_cmdline_test_rotate_truncated)
2321 + cmdline_test(drm_cmdline_test_hmirror)
2322 +diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
2323 +index 013de9d27c35..035f86c5d648 100644
2324 +--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
2325 ++++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
2326 +@@ -856,6 +856,17 @@ static int drm_cmdline_test_rotate_270(void *ignored)
2327 + return 0;
2328 + }
2329 +
2330 ++static int drm_cmdline_test_rotate_multiple(void *ignored)
2331 ++{
2332 ++ struct drm_cmdline_mode mode = { };
2333 ++
2334 ++ FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=0,rotate=90",
2335 ++ &no_connector,
2336 ++ &mode));
2337 ++
2338 ++ return 0;
2339 ++}
2340 ++
2341 + static int drm_cmdline_test_rotate_invalid_val(void *ignored)
2342 + {
2343 + struct drm_cmdline_mode mode = { };
2344 +@@ -888,7 +899,7 @@ static int drm_cmdline_test_hmirror(void *ignored)
2345 + FAIL_ON(!mode.specified);
2346 + FAIL_ON(mode.xres != 720);
2347 + FAIL_ON(mode.yres != 480);
2348 +- FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_X);
2349 ++ FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X));
2350 +
2351 + FAIL_ON(mode.refresh_specified);
2352 +
2353 +@@ -913,7 +924,7 @@ static int drm_cmdline_test_vmirror(void *ignored)
2354 + FAIL_ON(!mode.specified);
2355 + FAIL_ON(mode.xres != 720);
2356 + FAIL_ON(mode.yres != 480);
2357 +- FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_Y);
2358 ++ FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y));
2359 +
2360 + FAIL_ON(mode.refresh_specified);
2361 +
2362 +diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
2363 +index 8b803eb903b8..18b4881f4481 100644
2364 +--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
2365 ++++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
2366 +@@ -106,48 +106,128 @@ static const struct de2_fmt_info de2_formats[] = {
2367 + .rgb = true,
2368 + .csc = SUN8I_CSC_MODE_OFF,
2369 + },
2370 ++ {
2371 ++ /* for DE2 VI layer which ignores alpha */
2372 ++ .drm_fmt = DRM_FORMAT_XRGB4444,
2373 ++ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
2374 ++ .rgb = true,
2375 ++ .csc = SUN8I_CSC_MODE_OFF,
2376 ++ },
2377 + {
2378 + .drm_fmt = DRM_FORMAT_ABGR4444,
2379 + .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
2380 + .rgb = true,
2381 + .csc = SUN8I_CSC_MODE_OFF,
2382 + },
2383 ++ {
2384 ++ /* for DE2 VI layer which ignores alpha */
2385 ++ .drm_fmt = DRM_FORMAT_XBGR4444,
2386 ++ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
2387 ++ .rgb = true,
2388 ++ .csc = SUN8I_CSC_MODE_OFF,
2389 ++ },
2390 + {
2391 + .drm_fmt = DRM_FORMAT_RGBA4444,
2392 + .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
2393 + .rgb = true,
2394 + .csc = SUN8I_CSC_MODE_OFF,
2395 + },
2396 ++ {
2397 ++ /* for DE2 VI layer which ignores alpha */
2398 ++ .drm_fmt = DRM_FORMAT_RGBX4444,
2399 ++ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
2400 ++ .rgb = true,
2401 ++ .csc = SUN8I_CSC_MODE_OFF,
2402 ++ },
2403 + {
2404 + .drm_fmt = DRM_FORMAT_BGRA4444,
2405 + .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
2406 + .rgb = true,
2407 + .csc = SUN8I_CSC_MODE_OFF,
2408 + },
2409 ++ {
2410 ++ /* for DE2 VI layer which ignores alpha */
2411 ++ .drm_fmt = DRM_FORMAT_BGRX4444,
2412 ++ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
2413 ++ .rgb = true,
2414 ++ .csc = SUN8I_CSC_MODE_OFF,
2415 ++ },
2416 + {
2417 + .drm_fmt = DRM_FORMAT_ARGB1555,
2418 + .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
2419 + .rgb = true,
2420 + .csc = SUN8I_CSC_MODE_OFF,
2421 + },
2422 ++ {
2423 ++ /* for DE2 VI layer which ignores alpha */
2424 ++ .drm_fmt = DRM_FORMAT_XRGB1555,
2425 ++ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
2426 ++ .rgb = true,
2427 ++ .csc = SUN8I_CSC_MODE_OFF,
2428 ++ },
2429 + {
2430 + .drm_fmt = DRM_FORMAT_ABGR1555,
2431 + .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
2432 + .rgb = true,
2433 + .csc = SUN8I_CSC_MODE_OFF,
2434 + },
2435 ++ {
2436 ++ /* for DE2 VI layer which ignores alpha */
2437 ++ .drm_fmt = DRM_FORMAT_XBGR1555,
2438 ++ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
2439 ++ .rgb = true,
2440 ++ .csc = SUN8I_CSC_MODE_OFF,
2441 ++ },
2442 + {
2443 + .drm_fmt = DRM_FORMAT_RGBA5551,
2444 + .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
2445 + .rgb = true,
2446 + .csc = SUN8I_CSC_MODE_OFF,
2447 + },
2448 ++ {
2449 ++ /* for DE2 VI layer which ignores alpha */
2450 ++ .drm_fmt = DRM_FORMAT_RGBX5551,
2451 ++ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
2452 ++ .rgb = true,
2453 ++ .csc = SUN8I_CSC_MODE_OFF,
2454 ++ },
2455 + {
2456 + .drm_fmt = DRM_FORMAT_BGRA5551,
2457 + .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
2458 + .rgb = true,
2459 + .csc = SUN8I_CSC_MODE_OFF,
2460 + },
2461 ++ {
2462 ++ /* for DE2 VI layer which ignores alpha */
2463 ++ .drm_fmt = DRM_FORMAT_BGRX5551,
2464 ++ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
2465 ++ .rgb = true,
2466 ++ .csc = SUN8I_CSC_MODE_OFF,
2467 ++ },
2468 ++ {
2469 ++ .drm_fmt = DRM_FORMAT_ARGB2101010,
2470 ++ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
2471 ++ .rgb = true,
2472 ++ .csc = SUN8I_CSC_MODE_OFF,
2473 ++ },
2474 ++ {
2475 ++ .drm_fmt = DRM_FORMAT_ABGR2101010,
2476 ++ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
2477 ++ .rgb = true,
2478 ++ .csc = SUN8I_CSC_MODE_OFF,
2479 ++ },
2480 ++ {
2481 ++ .drm_fmt = DRM_FORMAT_RGBA1010102,
2482 ++ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
2483 ++ .rgb = true,
2484 ++ .csc = SUN8I_CSC_MODE_OFF,
2485 ++ },
2486 ++ {
2487 ++ .drm_fmt = DRM_FORMAT_BGRA1010102,
2488 ++ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
2489 ++ .rgb = true,
2490 ++ .csc = SUN8I_CSC_MODE_OFF,
2491 ++ },
2492 + {
2493 + .drm_fmt = DRM_FORMAT_UYVY,
2494 + .de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
2495 +@@ -196,12 +276,6 @@ static const struct de2_fmt_info de2_formats[] = {
2496 + .rgb = false,
2497 + .csc = SUN8I_CSC_MODE_YUV2RGB,
2498 + },
2499 +- {
2500 +- .drm_fmt = DRM_FORMAT_YUV444,
2501 +- .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
2502 +- .rgb = true,
2503 +- .csc = SUN8I_CSC_MODE_YUV2RGB,
2504 +- },
2505 + {
2506 + .drm_fmt = DRM_FORMAT_YUV422,
2507 + .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
2508 +@@ -220,12 +294,6 @@ static const struct de2_fmt_info de2_formats[] = {
2509 + .rgb = false,
2510 + .csc = SUN8I_CSC_MODE_YUV2RGB,
2511 + },
2512 +- {
2513 +- .drm_fmt = DRM_FORMAT_YVU444,
2514 +- .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
2515 +- .rgb = true,
2516 +- .csc = SUN8I_CSC_MODE_YVU2RGB,
2517 +- },
2518 + {
2519 + .drm_fmt = DRM_FORMAT_YVU422,
2520 + .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
2521 +@@ -244,6 +312,18 @@ static const struct de2_fmt_info de2_formats[] = {
2522 + .rgb = false,
2523 + .csc = SUN8I_CSC_MODE_YVU2RGB,
2524 + },
2525 ++ {
2526 ++ .drm_fmt = DRM_FORMAT_P010,
2527 ++ .de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
2528 ++ .rgb = false,
2529 ++ .csc = SUN8I_CSC_MODE_YUV2RGB,
2530 ++ },
2531 ++ {
2532 ++ .drm_fmt = DRM_FORMAT_P210,
2533 ++ .de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
2534 ++ .rgb = false,
2535 ++ .csc = SUN8I_CSC_MODE_YUV2RGB,
2536 ++ },
2537 + };
2538 +
2539 + const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
2540 +diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
2541 +index c6cc94057faf..345b28b0a80a 100644
2542 +--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
2543 ++++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
2544 +@@ -93,6 +93,10 @@
2545 + #define SUN8I_MIXER_FBFMT_ABGR1555 17
2546 + #define SUN8I_MIXER_FBFMT_RGBA5551 18
2547 + #define SUN8I_MIXER_FBFMT_BGRA5551 19
2548 ++#define SUN8I_MIXER_FBFMT_ARGB2101010 20
2549 ++#define SUN8I_MIXER_FBFMT_ABGR2101010 21
2550 ++#define SUN8I_MIXER_FBFMT_RGBA1010102 22
2551 ++#define SUN8I_MIXER_FBFMT_BGRA1010102 23
2552 +
2553 + #define SUN8I_MIXER_FBFMT_YUYV 0
2554 + #define SUN8I_MIXER_FBFMT_UYVY 1
2555 +@@ -109,6 +113,13 @@
2556 + /* format 12 is semi-planar YUV411 UVUV */
2557 + /* format 13 is semi-planar YUV411 VUVU */
2558 + #define SUN8I_MIXER_FBFMT_YUV411 14
2559 ++/* format 15 doesn't exist */
2560 ++/* format 16 is P010 YVU */
2561 ++#define SUN8I_MIXER_FBFMT_P010_YUV 17
2562 ++/* format 18 is P210 YVU */
2563 ++#define SUN8I_MIXER_FBFMT_P210_YUV 19
2564 ++/* format 20 is packed YVU444 10-bit */
2565 ++/* format 21 is packed YUV444 10-bit */
2566 +
2567 + /*
2568 + * Sub-engines listed below are unused for now. The EN registers are here only
2569 +diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
2570 +index 42d445d23773..b8398ca18b0f 100644
2571 +--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
2572 ++++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
2573 +@@ -398,24 +398,66 @@ static const struct drm_plane_funcs sun8i_vi_layer_funcs = {
2574 + };
2575 +
2576 + /*
2577 +- * While all RGB formats are supported, VI planes don't support
2578 +- * alpha blending, so there is no point having formats with alpha
2579 +- * channel if their opaque analog exist.
2580 ++ * While the DE2 VI layer supports the same RGB formats as the UI layer,
2581 ++ * the alpha channel is ignored. This structure lists all unique variants
2582 ++ * where the alpha channel is replaced with a "don't care" (X) channel.
2583 + */
2584 + static const u32 sun8i_vi_layer_formats[] = {
2585 ++ DRM_FORMAT_BGR565,
2586 ++ DRM_FORMAT_BGR888,
2587 ++ DRM_FORMAT_BGRX4444,
2588 ++ DRM_FORMAT_BGRX5551,
2589 ++ DRM_FORMAT_BGRX8888,
2590 ++ DRM_FORMAT_RGB565,
2591 ++ DRM_FORMAT_RGB888,
2592 ++ DRM_FORMAT_RGBX4444,
2593 ++ DRM_FORMAT_RGBX5551,
2594 ++ DRM_FORMAT_RGBX8888,
2595 ++ DRM_FORMAT_XBGR1555,
2596 ++ DRM_FORMAT_XBGR4444,
2597 ++ DRM_FORMAT_XBGR8888,
2598 ++ DRM_FORMAT_XRGB1555,
2599 ++ DRM_FORMAT_XRGB4444,
2600 ++ DRM_FORMAT_XRGB8888,
2601 ++
2602 ++ DRM_FORMAT_NV16,
2603 ++ DRM_FORMAT_NV12,
2604 ++ DRM_FORMAT_NV21,
2605 ++ DRM_FORMAT_NV61,
2606 ++ DRM_FORMAT_UYVY,
2607 ++ DRM_FORMAT_VYUY,
2608 ++ DRM_FORMAT_YUYV,
2609 ++ DRM_FORMAT_YVYU,
2610 ++ DRM_FORMAT_YUV411,
2611 ++ DRM_FORMAT_YUV420,
2612 ++ DRM_FORMAT_YUV422,
2613 ++ DRM_FORMAT_YVU411,
2614 ++ DRM_FORMAT_YVU420,
2615 ++ DRM_FORMAT_YVU422,
2616 ++};
2617 ++
2618 ++static const u32 sun8i_vi_layer_de3_formats[] = {
2619 + DRM_FORMAT_ABGR1555,
2620 ++ DRM_FORMAT_ABGR2101010,
2621 + DRM_FORMAT_ABGR4444,
2622 ++ DRM_FORMAT_ABGR8888,
2623 + DRM_FORMAT_ARGB1555,
2624 ++ DRM_FORMAT_ARGB2101010,
2625 + DRM_FORMAT_ARGB4444,
2626 ++ DRM_FORMAT_ARGB8888,
2627 + DRM_FORMAT_BGR565,
2628 + DRM_FORMAT_BGR888,
2629 ++ DRM_FORMAT_BGRA1010102,
2630 + DRM_FORMAT_BGRA5551,
2631 + DRM_FORMAT_BGRA4444,
2632 ++ DRM_FORMAT_BGRA8888,
2633 + DRM_FORMAT_BGRX8888,
2634 + DRM_FORMAT_RGB565,
2635 + DRM_FORMAT_RGB888,
2636 ++ DRM_FORMAT_RGBA1010102,
2637 + DRM_FORMAT_RGBA4444,
2638 + DRM_FORMAT_RGBA5551,
2639 ++ DRM_FORMAT_RGBA8888,
2640 + DRM_FORMAT_RGBX8888,
2641 + DRM_FORMAT_XBGR8888,
2642 + DRM_FORMAT_XRGB8888,
2643 +@@ -424,6 +466,8 @@ static const u32 sun8i_vi_layer_formats[] = {
2644 + DRM_FORMAT_NV12,
2645 + DRM_FORMAT_NV21,
2646 + DRM_FORMAT_NV61,
2647 ++ DRM_FORMAT_P010,
2648 ++ DRM_FORMAT_P210,
2649 + DRM_FORMAT_UYVY,
2650 + DRM_FORMAT_VYUY,
2651 + DRM_FORMAT_YUYV,
2652 +@@ -431,11 +475,9 @@ static const u32 sun8i_vi_layer_formats[] = {
2653 + DRM_FORMAT_YUV411,
2654 + DRM_FORMAT_YUV420,
2655 + DRM_FORMAT_YUV422,
2656 +- DRM_FORMAT_YUV444,
2657 + DRM_FORMAT_YVU411,
2658 + DRM_FORMAT_YVU420,
2659 + DRM_FORMAT_YVU422,
2660 +- DRM_FORMAT_YVU444,
2661 + };
2662 +
2663 + struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
2664 +@@ -443,19 +485,27 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
2665 + int index)
2666 + {
2667 + u32 supported_encodings, supported_ranges;
2668 ++ unsigned int plane_cnt, format_count;
2669 + struct sun8i_vi_layer *layer;
2670 +- unsigned int plane_cnt;
2671 ++ const u32 *formats;
2672 + int ret;
2673 +
2674 + layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
2675 + if (!layer)
2676 + return ERR_PTR(-ENOMEM);
2677 +
2678 ++ if (mixer->cfg->is_de3) {
2679 ++ formats = sun8i_vi_layer_de3_formats;
2680 ++ format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
2681 ++ } else {
2682 ++ formats = sun8i_vi_layer_formats;
2683 ++ format_count = ARRAY_SIZE(sun8i_vi_layer_formats);
2684 ++ }
2685 ++
2686 + /* possible crtcs are set later */
2687 + ret = drm_universal_plane_init(drm, &layer->plane, 0,
2688 + &sun8i_vi_layer_funcs,
2689 +- sun8i_vi_layer_formats,
2690 +- ARRAY_SIZE(sun8i_vi_layer_formats),
2691 ++ formats, format_count,
2692 + NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
2693 + if (ret) {
2694 + dev_err(drm->dev, "Couldn't initialize layer\n");
2695 +diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
2696 +index 6b0883a1776e..b40915638e13 100644
2697 +--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
2698 ++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
2699 +@@ -516,6 +516,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
2700 + fbo->base.base.resv = &fbo->base.base._resv;
2701 +
2702 + dma_resv_init(&fbo->base.base._resv);
2703 ++ fbo->base.base.dev = NULL;
2704 + ret = dma_resv_trylock(&fbo->base.base._resv);
2705 + WARN_ON(!ret);
2706 +
2707 +diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
2708 +index 017a9e0fc3bb..3af7ec80c7da 100644
2709 +--- a/drivers/gpu/drm/virtio/virtgpu_object.c
2710 ++++ b/drivers/gpu/drm/virtio/virtgpu_object.c
2711 +@@ -42,8 +42,8 @@ static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
2712 + * "f91a9dd35715 Fix unlinking resources from hash
2713 + * table." (Feb 2019) fixes the bug.
2714 + */
2715 +- static int handle;
2716 +- handle++;
2717 ++ static atomic_t seqno = ATOMIC_INIT(0);
2718 ++ int handle = atomic_inc_return(&seqno);
2719 + *resid = handle + 1;
2720 + } else {
2721 + int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
2722 +@@ -99,6 +99,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
2723 + return NULL;
2724 +
2725 + bo->base.base.funcs = &virtio_gpu_gem_funcs;
2726 ++ bo->base.map_cached = true;
2727 + return &bo->base.base;
2728 + }
2729 +
2730 +diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
2731 +index 9632e2e3c4bb..319a0519ebdb 100644
2732 +--- a/drivers/hwmon/adt7462.c
2733 ++++ b/drivers/hwmon/adt7462.c
2734 +@@ -413,7 +413,7 @@ static int ADT7462_REG_VOLT(struct adt7462_data *data, int which)
2735 + return 0x95;
2736 + break;
2737 + }
2738 +- return -ENODEV;
2739 ++ return 0;
2740 + }
2741 +
2742 + /* Provide labels for sysfs */
2743 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
2744 +index 455b3659d84b..4decc1d4cc99 100644
2745 +--- a/drivers/infiniband/core/cm.c
2746 ++++ b/drivers/infiniband/core/cm.c
2747 +@@ -1202,6 +1202,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
2748 + /* Sharing an ib_cm_id with different handlers is not
2749 + * supported */
2750 + spin_unlock_irqrestore(&cm.lock, flags);
2751 ++ ib_destroy_cm_id(cm_id);
2752 + return ERR_PTR(-EINVAL);
2753 + }
2754 + refcount_inc(&cm_id_priv->refcount);
2755 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
2756 +index af1afc17b8bd..0b530646f1e5 100644
2757 +--- a/drivers/infiniband/core/cma.c
2758 ++++ b/drivers/infiniband/core/cma.c
2759 +@@ -3182,19 +3182,26 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2760 + int ret;
2761 +
2762 + id_priv = container_of(id, struct rdma_id_private, id);
2763 ++ memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
2764 + if (id_priv->state == RDMA_CM_IDLE) {
2765 + ret = cma_bind_addr(id, src_addr, dst_addr);
2766 +- if (ret)
2767 ++ if (ret) {
2768 ++ memset(cma_dst_addr(id_priv), 0,
2769 ++ rdma_addr_size(dst_addr));
2770 + return ret;
2771 ++ }
2772 + }
2773 +
2774 +- if (cma_family(id_priv) != dst_addr->sa_family)
2775 ++ if (cma_family(id_priv) != dst_addr->sa_family) {
2776 ++ memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
2777 + return -EINVAL;
2778 ++ }
2779 +
2780 +- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
2781 ++ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
2782 ++ memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
2783 + return -EINVAL;
2784 ++ }
2785 +
2786 +- memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
2787 + if (cma_any_addr(dst_addr)) {
2788 + ret = cma_resolve_loopback(id_priv);
2789 + } else {
2790 +diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
2791 +index d657d90e618b..9dc7383eef49 100644
2792 +--- a/drivers/infiniband/core/core_priv.h
2793 ++++ b/drivers/infiniband/core/core_priv.h
2794 +@@ -338,6 +338,21 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
2795 + qp->pd = pd;
2796 + qp->uobject = uobj;
2797 + qp->real_qp = qp;
2798 ++
2799 ++ qp->qp_type = attr->qp_type;
2800 ++ qp->qp_context = attr->qp_context;
2801 ++ qp->rwq_ind_tbl = attr->rwq_ind_tbl;
2802 ++ qp->send_cq = attr->send_cq;
2803 ++ qp->recv_cq = attr->recv_cq;
2804 ++ qp->srq = attr->srq;
2805 ++ qp->rwq_ind_tbl = attr->rwq_ind_tbl;
2806 ++ qp->event_handler = attr->event_handler;
2807 ++
2808 ++ atomic_set(&qp->usecnt, 0);
2809 ++ spin_lock_init(&qp->mr_lock);
2810 ++ INIT_LIST_HEAD(&qp->rdma_mrs);
2811 ++ INIT_LIST_HEAD(&qp->sig_mrs);
2812 ++
2813 + /*
2814 + * We don't track XRC QPs for now, because they don't have PD
2815 + * and more importantly they are created internally by the driver,
2816 +diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
2817 +index ade71823370f..da8adadf4755 100644
2818 +--- a/drivers/infiniband/core/iwcm.c
2819 ++++ b/drivers/infiniband/core/iwcm.c
2820 +@@ -159,8 +159,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
2821 + {
2822 + struct list_head *e, *tmp;
2823 +
2824 +- list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
2825 ++ list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
2826 ++ list_del(e);
2827 + kfree(list_entry(e, struct iwcm_work, free_list));
2828 ++ }
2829 + }
2830 +
2831 + static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
2832 +diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
2833 +index cbf6041a5d4a..ba76709897bb 100644
2834 +--- a/drivers/infiniband/core/nldev.c
2835 ++++ b/drivers/infiniband/core/nldev.c
2836 +@@ -1756,6 +1756,8 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
2837 + if (ret)
2838 + goto err_msg;
2839 + } else {
2840 ++ if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
2841 ++ goto err_msg;
2842 + qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
2843 + if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
2844 + cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
2845 +diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
2846 +index 4fad732f9b3c..06e5b6787443 100644
2847 +--- a/drivers/infiniband/core/rw.c
2848 ++++ b/drivers/infiniband/core/rw.c
2849 +@@ -273,6 +273,23 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
2850 + return 1;
2851 + }
2852 +
2853 ++static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
2854 ++ u32 sg_cnt, enum dma_data_direction dir)
2855 ++{
2856 ++ if (is_pci_p2pdma_page(sg_page(sg)))
2857 ++ pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
2858 ++ else
2859 ++ ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
2860 ++}
2861 ++
2862 ++static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
2863 ++ u32 sg_cnt, enum dma_data_direction dir)
2864 ++{
2865 ++ if (is_pci_p2pdma_page(sg_page(sg)))
2866 ++ return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
2867 ++ return ib_dma_map_sg(dev, sg, sg_cnt, dir);
2868 ++}
2869 ++
2870 + /**
2871 + * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
2872 + * @ctx: context to initialize
2873 +@@ -295,11 +312,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
2874 + struct ib_device *dev = qp->pd->device;
2875 + int ret;
2876 +
2877 +- if (is_pci_p2pdma_page(sg_page(sg)))
2878 +- ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
2879 +- else
2880 +- ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
2881 +-
2882 ++ ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
2883 + if (!ret)
2884 + return -ENOMEM;
2885 + sg_cnt = ret;
2886 +@@ -338,7 +351,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
2887 + return ret;
2888 +
2889 + out_unmap_sg:
2890 +- ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
2891 ++ rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
2892 + return ret;
2893 + }
2894 + EXPORT_SYMBOL(rdma_rw_ctx_init);
2895 +@@ -588,11 +601,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
2896 + break;
2897 + }
2898 +
2899 +- if (is_pci_p2pdma_page(sg_page(sg)))
2900 +- pci_p2pdma_unmap_sg(qp->pd->device->dma_device, sg,
2901 +- sg_cnt, dir);
2902 +- else
2903 +- ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
2904 ++ rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
2905 + }
2906 + EXPORT_SYMBOL(rdma_rw_ctx_destroy);
2907 +
2908 +diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
2909 +index 2b4d80393bd0..2d5608315dc8 100644
2910 +--- a/drivers/infiniband/core/security.c
2911 ++++ b/drivers/infiniband/core/security.c
2912 +@@ -340,15 +340,19 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
2913 + return NULL;
2914 +
2915 + if (qp_attr_mask & IB_QP_PORT)
2916 +- new_pps->main.port_num =
2917 +- (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
2918 ++ new_pps->main.port_num = qp_attr->port_num;
2919 ++ else if (qp_pps)
2920 ++ new_pps->main.port_num = qp_pps->main.port_num;
2921 ++
2922 + if (qp_attr_mask & IB_QP_PKEY_INDEX)
2923 +- new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
2924 +- qp_attr->pkey_index;
2925 ++ new_pps->main.pkey_index = qp_attr->pkey_index;
2926 ++ else if (qp_pps)
2927 ++ new_pps->main.pkey_index = qp_pps->main.pkey_index;
2928 ++
2929 + if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
2930 + new_pps->main.state = IB_PORT_PKEY_VALID;
2931 +
2932 +- if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
2933 ++ if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
2934 + new_pps->main.port_num = qp_pps->main.port_num;
2935 + new_pps->main.pkey_index = qp_pps->main.pkey_index;
2936 + if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
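
The security.c hunk fixes a logical-vs-bitwise operator bug:
"IB_QP_PKEY_INDEX || IB_QP_PORT" evaluates to 1 because both constants are
non-zero, so the old test only ever examined bit 0 of qp_attr_mask; the
bitwise form builds the intended two-bit mask. A standalone demonstration
(the flag values below are illustrative, not the real enum values):

#include <stdio.h>

#define QP_PKEY_INDEX	(1u << 4)
#define QP_PORT		(1u << 5)

int main(void)
{
	unsigned int qp_attr_mask = QP_PORT;

	/* Buggy: (QP_PKEY_INDEX || QP_PORT) == 1, so this tests bit 0 only
	 * and wrongly reports "neither flag set" (prints 1). */
	printf("buggy:   %d\n", !(qp_attr_mask & (QP_PKEY_INDEX || QP_PORT)));

	/* Fixed: bitwise OR yields the real mask, and the set flag is seen
	 * (prints 0). */
	printf("correct: %d\n", !(qp_attr_mask & (QP_PKEY_INDEX | QP_PORT)));
	return 0;
}
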
2937 +diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
2938 +index b9baf7d0a5cb..eb22cb4f26b4 100644
2939 +--- a/drivers/infiniband/core/umem_odp.c
2940 ++++ b/drivers/infiniband/core/umem_odp.c
2941 +@@ -187,14 +187,28 @@ ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
2942 + odp_data->page_shift = PAGE_SHIFT;
2943 + odp_data->notifier.ops = ops;
2944 +
2945 ++ /*
2946 ++	 * An mmget must be held when registering a notifier; the owning_mm only
2947 ++	 * has an mm_grab at this point.
2948 ++ */
2949 ++ if (!mmget_not_zero(umem->owning_mm)) {
2950 ++ ret = -EFAULT;
2951 ++ goto out_free;
2952 ++ }
2953 ++
2954 + odp_data->tgid = get_pid(root->tgid);
2955 + ret = ib_init_umem_odp(odp_data, ops);
2956 +- if (ret) {
2957 +- put_pid(odp_data->tgid);
2958 +- kfree(odp_data);
2959 +- return ERR_PTR(ret);
2960 +- }
2961 ++ if (ret)
2962 ++ goto out_tgid;
2963 ++ mmput(umem->owning_mm);
2964 + return odp_data;
2965 ++
2966 ++out_tgid:
2967 ++ put_pid(odp_data->tgid);
2968 ++ mmput(umem->owning_mm);
2969 ++out_free:
2970 ++ kfree(odp_data);
2971 ++ return ERR_PTR(ret);
2972 + }
2973 + EXPORT_SYMBOL(ib_umem_odp_alloc_child);
2974 +
2975 +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
2976 +index 86e93ac46d75..c3a67ad82ddd 100644
2977 +--- a/drivers/infiniband/core/uverbs_cmd.c
2978 ++++ b/drivers/infiniband/core/uverbs_cmd.c
2979 +@@ -1433,17 +1433,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
2980 + if (ret)
2981 + goto err_cb;
2982 +
2983 +- qp->pd = pd;
2984 +- qp->send_cq = attr.send_cq;
2985 +- qp->recv_cq = attr.recv_cq;
2986 +- qp->srq = attr.srq;
2987 +- qp->rwq_ind_tbl = ind_tbl;
2988 +- qp->event_handler = attr.event_handler;
2989 +- qp->qp_context = attr.qp_context;
2990 +- qp->qp_type = attr.qp_type;
2991 +- atomic_set(&qp->usecnt, 0);
2992 + atomic_inc(&pd->usecnt);
2993 +- qp->port = 0;
2994 + if (attr.send_cq)
2995 + atomic_inc(&attr.send_cq->usecnt);
2996 + if (attr.recv_cq)
2997 +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
2998 +index dd765e176cdd..eb1c68311e52 100644
2999 +--- a/drivers/infiniband/core/verbs.c
3000 ++++ b/drivers/infiniband/core/verbs.c
3001 +@@ -1182,16 +1182,6 @@ struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
3002 + if (ret)
3003 + goto err;
3004 +
3005 +- qp->qp_type = qp_init_attr->qp_type;
3006 +- qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
3007 +-
3008 +- atomic_set(&qp->usecnt, 0);
3009 +- qp->mrs_used = 0;
3010 +- spin_lock_init(&qp->mr_lock);
3011 +- INIT_LIST_HEAD(&qp->rdma_mrs);
3012 +- INIT_LIST_HEAD(&qp->sig_mrs);
3013 +- qp->port = 0;
3014 +-
3015 + if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
3016 + struct ib_qp *xrc_qp =
3017 + create_xrc_qp_user(qp, qp_init_attr, udata);
3018 +diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
3019 +index 089e201d7550..2f6323ad9c59 100644
3020 +--- a/drivers/infiniband/hw/hfi1/verbs.c
3021 ++++ b/drivers/infiniband/hw/hfi1/verbs.c
3022 +@@ -515,10 +515,11 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
3023 + opa_get_lid(packet->dlid, 9B));
3024 + if (!mcast)
3025 + goto drop;
3026 ++ rcu_read_lock();
3027 + list_for_each_entry_rcu(p, &mcast->qp_list, list) {
3028 + packet->qp = p->qp;
3029 + if (hfi1_do_pkey_check(packet))
3030 +- goto drop;
3031 ++ goto unlock_drop;
3032 + spin_lock_irqsave(&packet->qp->r_lock, flags);
3033 + packet_handler = qp_ok(packet);
3034 + if (likely(packet_handler))
3035 +@@ -527,6 +528,7 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
3036 + ibp->rvp.n_pkt_drops++;
3037 + spin_unlock_irqrestore(&packet->qp->r_lock, flags);
3038 + }
3039 ++ rcu_read_unlock();
3040 + /*
3041 + * Notify rvt_multicast_detach() if it is waiting for us
3042 + * to finish.
3043 +diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
3044 +index b06f32ff5748..b3561e4c44e8 100644
3045 +--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
3046 ++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
3047 +@@ -629,6 +629,7 @@ struct mlx5_ib_mr {
3048 +
3049 + /* For ODP and implicit */
3050 + atomic_t num_deferred_work;
3051 ++ wait_queue_head_t q_deferred_work;
3052 + struct xarray implicit_children;
3053 + union {
3054 + struct rcu_head rcu;
3055 +diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
3056 +index 8247c26a1ce9..443de6fb578b 100644
3057 +--- a/drivers/infiniband/hw/mlx5/odp.c
3058 ++++ b/drivers/infiniband/hw/mlx5/odp.c
3059 +@@ -197,7 +197,8 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
3060 + mr->parent = NULL;
3061 + mlx5_mr_cache_free(mr->dev, mr);
3062 + ib_umem_odp_release(odp);
3063 +- atomic_dec(&imr->num_deferred_work);
3064 ++ if (atomic_dec_and_test(&imr->num_deferred_work))
3065 ++ wake_up(&imr->q_deferred_work);
3066 + }
3067 +
3068 + static void free_implicit_child_mr_work(struct work_struct *work)
3069 +@@ -516,6 +517,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
3070 + imr->umem = &umem_odp->umem;
3071 + imr->is_odp_implicit = true;
3072 + atomic_set(&imr->num_deferred_work, 0);
3073 ++ init_waitqueue_head(&imr->q_deferred_work);
3074 + xa_init(&imr->implicit_children);
3075 +
3076 + err = mlx5_ib_update_xlt(imr, 0,
3077 +@@ -573,10 +575,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
3078 + * under xa_lock while the child is in the xarray. Thus at this point
3079 + * it is only decreasing, and all work holding it is now on the wq.
3080 + */
3081 +- if (atomic_read(&imr->num_deferred_work)) {
3082 +- flush_workqueue(system_unbound_wq);
3083 +- WARN_ON(atomic_read(&imr->num_deferred_work));
3084 +- }
3085 ++ wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
3086 +
3087 + /*
3088 + * Fence the imr before we destroy the children. This allows us to
3089 +@@ -607,10 +606,7 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
3090 + /* Wait for all running page-fault handlers to finish. */
3091 + synchronize_srcu(&mr->dev->odp_srcu);
3092 +
3093 +- if (atomic_read(&mr->num_deferred_work)) {
3094 +- flush_workqueue(system_unbound_wq);
3095 +- WARN_ON(atomic_read(&mr->num_deferred_work));
3096 +- }
3097 ++ wait_event(mr->q_deferred_work, !atomic_read(&mr->num_deferred_work));
3098 +
3099 + dma_fence_odp_mr(mr);
3100 + }
3101 +@@ -1682,7 +1678,8 @@ static void destroy_prefetch_work(struct prefetch_mr_work *work)
3102 + u32 i;
3103 +
3104 + for (i = 0; i < work->num_sge; ++i)
3105 +- atomic_dec(&work->frags[i].mr->num_deferred_work);
3106 ++ if (atomic_dec_and_test(&work->frags[i].mr->num_deferred_work))
3107 ++ wake_up(&work->frags[i].mr->q_deferred_work);
3108 + kvfree(work);
3109 + }
3110 +
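The mlx5 ODP hunks replace "flush the whole system_unbound_wq and hope" with a counted-reference wait: the last deferred-work item to finish wakes a per-MR waitqueue that teardown sleeps on. A sketch of the pattern under illustrative names (demo_mr and friends are not the driver's types):

#include <linux/atomic.h>
#include <linux/wait.h>

struct demo_mr {
	atomic_t num_deferred_work;
	wait_queue_head_t q_deferred_work;
};

static void demo_mr_init(struct demo_mr *mr)
{
	atomic_set(&mr->num_deferred_work, 0);
	init_waitqueue_head(&mr->q_deferred_work);
}

/* each work item drops its count; only the final drop wakes the waiter */
static void demo_work_done(struct demo_mr *mr)
{
	if (atomic_dec_and_test(&mr->num_deferred_work))
		wake_up(&mr->q_deferred_work);
}

/* teardown waits for this MR's work only, not the whole shared queue */
static void demo_mr_fence(struct demo_mr *mr)
{
	wait_event(mr->q_deferred_work,
		   !atomic_read(&mr->num_deferred_work));
}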
3111 +diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
3112 +index 33778d451b82..5ef93f8f17a1 100644
3113 +--- a/drivers/infiniband/hw/qib/qib_verbs.c
3114 ++++ b/drivers/infiniband/hw/qib/qib_verbs.c
3115 +@@ -329,8 +329,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
3116 + if (mcast == NULL)
3117 + goto drop;
3118 + this_cpu_inc(ibp->pmastats->n_multicast_rcv);
3119 ++ rcu_read_lock();
3120 + list_for_each_entry_rcu(p, &mcast->qp_list, list)
3121 + qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
3122 ++ rcu_read_unlock();
3123 + /*
3124 + * Notify rvt_multicast_detach() if it is waiting for us
3125 + * to finish.
3126 +diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
3127 +index c147f0613d95..1e2fdd21ba6e 100644
3128 +--- a/drivers/infiniband/sw/siw/siw_main.c
3129 ++++ b/drivers/infiniband/sw/siw/siw_main.c
3130 +@@ -388,6 +388,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
3131 + { .max_segment_size = SZ_2G };
3132 + base_dev->num_comp_vectors = num_possible_cpus();
3133 +
3134 ++ xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
3135 ++ xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
3136 ++
3137 + ib_set_device_ops(base_dev, &siw_device_ops);
3138 + rv = ib_device_set_netdev(base_dev, netdev, 1);
3139 + if (rv)
3140 +@@ -415,9 +418,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
3141 + sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR;
3142 + sdev->attrs.max_srq_sge = SIW_MAX_SGE;
3143 +
3144 +- xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
3145 +- xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
3146 +-
3147 + INIT_LIST_HEAD(&sdev->cep_list);
3148 + INIT_LIST_HEAD(&sdev->qp_list);
3149 +
3150 +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
3151 +index d7cbca8bf2cd..b5ae9f7c0510 100644
3152 +--- a/drivers/iommu/amd_iommu_init.c
3153 ++++ b/drivers/iommu/amd_iommu_init.c
3154 +@@ -2533,6 +2533,7 @@ static int __init early_amd_iommu_init(void)
3155 + struct acpi_table_header *ivrs_base;
3156 + acpi_status status;
3157 + int i, remap_cache_sz, ret = 0;
3158 ++ u32 pci_id;
3159 +
3160 + if (!amd_iommu_detected)
3161 + return -ENODEV;
3162 +@@ -2620,6 +2621,16 @@ static int __init early_amd_iommu_init(void)
3163 + if (ret)
3164 + goto out;
3165 +
3166 ++ /* Disable IOMMU if there's Stoney Ridge graphics */
3167 ++ for (i = 0; i < 32; i++) {
3168 ++ pci_id = read_pci_config(0, i, 0, 0);
3169 ++ if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
3170 ++ pr_info("Disable IOMMU on Stoney Ridge\n");
3171 ++ amd_iommu_disabled = true;
3172 ++ break;
3173 ++ }
3174 ++ }
3175 ++
3176 + /* Disable any previously enabled IOMMUs */
3177 + if (!is_kdump_kernel() || amd_iommu_disabled)
3178 + disable_iommus();
3179 +@@ -2728,7 +2739,7 @@ static int __init state_next(void)
3180 + ret = early_amd_iommu_init();
3181 + init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
3182 + if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
3183 +- pr_info("AMD IOMMU disabled on kernel command-line\n");
3184 ++ pr_info("AMD IOMMU disabled\n");
3185 + init_state = IOMMU_CMDLINE_DISABLED;
3186 + ret = -EINVAL;
3187 + }
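The early_amd_iommu_init() hunk probes bus 0, slots 0-31, with the x86 early config accessor before the PCI core exists; dword 0 of config space packs the vendor ID in the low half and the device ID in the high half, which is what the 0x1002/0x98e4 masks unpack. A hedged sketch of that probe shape (demo helper, not the IOMMU code):

#include <linux/init.h>
#include <linux/types.h>
#include <asm/pci-direct.h>	/* read_pci_config(), x86 early PCI */

/* scan bus 0 for vendor:device, the way the Stoney Ridge check does */
static bool __init demo_early_pci_find(u16 vendor, u16 device)
{
	int slot;

	for (slot = 0; slot < 32; slot++) {
		u32 id = read_pci_config(0, slot, 0, 0);

		if ((id & 0xffff) == vendor && (id >> 16) == device)
			return true;
	}
	return false;
}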
3188 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
3189 +index 2d32821b3a5b..f4be63671233 100644
3190 +--- a/drivers/md/dm-cache-target.c
3191 ++++ b/drivers/md/dm-cache-target.c
3192 +@@ -2846,8 +2846,8 @@ static void cache_postsuspend(struct dm_target *ti)
3193 + prevent_background_work(cache);
3194 + BUG_ON(atomic_read(&cache->nr_io_migrations));
3195 +
3196 +- cancel_delayed_work(&cache->waker);
3197 +- flush_workqueue(cache->wq);
3198 ++ cancel_delayed_work_sync(&cache->waker);
3199 ++ drain_workqueue(cache->wq);
3200 + WARN_ON(cache->tracker.in_flight);
3201 +
3202 + /*
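The dm-cache postsuspend hunk tightens the quiescing order: cancel_delayed_work_sync() waits for a still-running waker instead of merely unqueueing it, and drain_workqueue() also waits for work that in-flight items requeue, where flush_workqueue() only covers items queued before the call. A small sketch of the resulting idiom (names are illustrative):

#include <linux/workqueue.h>

static void demo_quiesce(struct workqueue_struct *wq,
			 struct delayed_work *waker)
{
	/* _sync: the waker cannot still be running and re-arming itself */
	cancel_delayed_work_sync(waker);
	/* drain: also waits for work queued by the work items themselves */
	drain_workqueue(wq);
}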
3203 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
3204 +index b225b3e445fa..e1ad0b53f681 100644
3205 +--- a/drivers/md/dm-integrity.c
3206 ++++ b/drivers/md/dm-integrity.c
3207 +@@ -201,17 +201,19 @@ struct dm_integrity_c {
3208 + __u8 log2_blocks_per_bitmap_bit;
3209 +
3210 + unsigned char mode;
3211 +- int suspending;
3212 +
3213 + int failed;
3214 +
3215 + struct crypto_shash *internal_hash;
3216 +
3217 ++ struct dm_target *ti;
3218 ++
3219 + /* these variables are locked with endio_wait.lock */
3220 + struct rb_root in_progress;
3221 + struct list_head wait_list;
3222 + wait_queue_head_t endio_wait;
3223 + struct workqueue_struct *wait_wq;
3224 ++ struct workqueue_struct *offload_wq;
3225 +
3226 + unsigned char commit_seq;
3227 + commit_id_t commit_ids[N_COMMIT_IDS];
3228 +@@ -1439,7 +1441,7 @@ static void dec_in_flight(struct dm_integrity_io *dio)
3229 + dio->range.logical_sector += dio->range.n_sectors;
3230 + bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
3231 + INIT_WORK(&dio->work, integrity_bio_wait);
3232 +- queue_work(ic->wait_wq, &dio->work);
3233 ++ queue_work(ic->offload_wq, &dio->work);
3234 + return;
3235 + }
3236 + do_endio_flush(ic, dio);
3237 +@@ -1865,7 +1867,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
3238 +
3239 + if (need_sync_io && from_map) {
3240 + INIT_WORK(&dio->work, integrity_bio_wait);
3241 +- queue_work(ic->metadata_wq, &dio->work);
3242 ++ queue_work(ic->offload_wq, &dio->work);
3243 + return;
3244 + }
3245 +
3246 +@@ -2315,7 +2317,7 @@ static void integrity_writer(struct work_struct *w)
3247 + unsigned prev_free_sectors;
3248 +
3249 + /* the following test is not needed, but it tests the replay code */
3250 +- if (READ_ONCE(ic->suspending) && !ic->meta_dev)
3251 ++ if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
3252 + return;
3253 +
3254 + spin_lock_irq(&ic->endio_wait.lock);
3255 +@@ -2376,7 +2378,7 @@ static void integrity_recalc(struct work_struct *w)
3256 +
3257 + next_chunk:
3258 +
3259 +- if (unlikely(READ_ONCE(ic->suspending)))
3260 ++ if (unlikely(dm_suspended(ic->ti)))
3261 + goto unlock_ret;
3262 +
3263 + range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
3264 +@@ -2501,7 +2503,7 @@ static void bitmap_block_work(struct work_struct *w)
3265 + dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
3266 + remove_range(ic, &dio->range);
3267 + INIT_WORK(&dio->work, integrity_bio_wait);
3268 +- queue_work(ic->wait_wq, &dio->work);
3269 ++ queue_work(ic->offload_wq, &dio->work);
3270 + } else {
3271 + block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
3272 + dio->range.n_sectors, BITMAP_OP_SET);
3273 +@@ -2524,7 +2526,7 @@ static void bitmap_block_work(struct work_struct *w)
3274 +
3275 + remove_range(ic, &dio->range);
3276 + INIT_WORK(&dio->work, integrity_bio_wait);
3277 +- queue_work(ic->wait_wq, &dio->work);
3278 ++ queue_work(ic->offload_wq, &dio->work);
3279 + }
3280 +
3281 + queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
3282 +@@ -2804,8 +2806,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
3283 +
3284 + del_timer_sync(&ic->autocommit_timer);
3285 +
3286 +- WRITE_ONCE(ic->suspending, 1);
3287 +-
3288 + if (ic->recalc_wq)
3289 + drain_workqueue(ic->recalc_wq);
3290 +
3291 +@@ -2834,8 +2834,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
3292 + #endif
3293 + }
3294 +
3295 +- WRITE_ONCE(ic->suspending, 0);
3296 +-
3297 + BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3298 +
3299 + ic->journal_uptodate = true;
3300 +@@ -2888,17 +2886,24 @@ static void dm_integrity_resume(struct dm_target *ti)
3301 + } else {
3302 + replay_journal(ic);
3303 + if (ic->mode == 'B') {
3304 +- int mode;
3305 + ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3306 + ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3307 + r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3308 + if (unlikely(r))
3309 + dm_integrity_io_error(ic, "writing superblock", r);
3310 +
3311 +- mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR;
3312 +- block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode);
3313 +- block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode);
3314 +- block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode);
3315 ++ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3316 ++ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3317 ++ block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3318 ++ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3319 ++ le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3320 ++ block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3321 ++ ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3322 ++ block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3323 ++ ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3324 ++ block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3325 ++ ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3326 ++ }
3327 + rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3328 + ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3329 + }
3330 +@@ -2967,7 +2972,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3331 + DMEMIT(" meta_device:%s", ic->meta_dev->name);
3332 + if (ic->sectors_per_block != 1)
3333 + DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3334 +- if (ic->recalculate_flag)
3335 ++ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3336 + DMEMIT(" recalculate");
3337 + DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3338 + DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3339 +@@ -3623,6 +3628,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3340 + }
3341 + ti->private = ic;
3342 + ti->per_io_data_size = sizeof(struct dm_integrity_io);
3343 ++ ic->ti = ti;
3344 +
3345 + ic->in_progress = RB_ROOT;
3346 + INIT_LIST_HEAD(&ic->wait_list);
3347 +@@ -3836,6 +3842,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3348 + goto bad;
3349 + }
3350 +
3351 ++ ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
3352 ++ METADATA_WORKQUEUE_MAX_ACTIVE);
3353 ++ if (!ic->offload_wq) {
3354 ++ ti->error = "Cannot allocate workqueue";
3355 ++ r = -ENOMEM;
3356 ++ goto bad;
3357 ++ }
3358 ++
3359 + ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
3360 + if (!ic->commit_wq) {
3361 + ti->error = "Cannot allocate workqueue";
3362 +@@ -4140,6 +4154,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
3363 + destroy_workqueue(ic->metadata_wq);
3364 + if (ic->wait_wq)
3365 + destroy_workqueue(ic->wait_wq);
3366 ++ if (ic->offload_wq)
3367 ++ destroy_workqueue(ic->offload_wq);
3368 + if (ic->commit_wq)
3369 + destroy_workqueue(ic->commit_wq);
3370 + if (ic->writer_wq)
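Two threads run through the dm-integrity hunks: the ad-hoc "suspending" flag gives way to dm_suspended(ti) (hence the new ic->ti back-pointer), and deferred bios move off wait_wq/metadata_wq onto a new dedicated offload_wq, so a bio can never be requeued onto the very workqueue whose worker is blocked waiting for it. A sketch of the offload idea under assumed names:

#include <linux/workqueue.h>
#include <linux/errno.h>

struct demo_target {
	struct workqueue_struct *wait_wq;	/* workers may block on I/O */
	struct workqueue_struct *offload_wq;	/* re-issues deferred bios */
};

static int demo_target_init(struct demo_target *t)
{
	t->offload_wq = alloc_workqueue("demo-offload", WQ_MEM_RECLAIM, 2);
	return t->offload_wq ? 0 : -ENOMEM;
}

static void demo_defer_bio(struct demo_target *t, struct work_struct *w)
{
	/* was queue_work(t->wait_wq, w): deadlocks if the wait_wq
	 * worker is the one waiting on this bio */
	queue_work(t->offload_wq, w);
}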
3371 +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
3372 +index 8bb723f1a569..4cd8868f8004 100644
3373 +--- a/drivers/md/dm-thin-metadata.c
3374 ++++ b/drivers/md/dm-thin-metadata.c
3375 +@@ -960,9 +960,9 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
3376 + DMWARN("%s: __commit_transaction() failed, error = %d",
3377 + __func__, r);
3378 + }
3379 ++ pmd_write_unlock(pmd);
3380 + if (!pmd->fail_io)
3381 + __destroy_persistent_data_objects(pmd);
3382 +- pmd_write_unlock(pmd);
3383 +
3384 + kfree(pmd);
3385 + return 0;
3386 +diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
3387 +index 9b0a3bf6a4a1..cd9e4c8a023a 100644
3388 +--- a/drivers/md/dm-writecache.c
3389 ++++ b/drivers/md/dm-writecache.c
3390 +@@ -625,6 +625,12 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry
3391 + wc->freelist_size++;
3392 + }
3393 +
3394 ++static inline void writecache_verify_watermark(struct dm_writecache *wc)
3395 ++{
3396 ++ if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
3397 ++ queue_work(wc->writeback_wq, &wc->writeback_work);
3398 ++}
3399 ++
3400 + static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
3401 + {
3402 + struct wc_entry *e;
3403 +@@ -646,8 +652,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
3404 + list_del(&e->lru);
3405 + }
3406 + wc->freelist_size--;
3407 +- if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
3408 +- queue_work(wc->writeback_wq, &wc->writeback_work);
3409 ++
3410 ++ writecache_verify_watermark(wc);
3411 +
3412 + return e;
3413 + }
3414 +@@ -838,7 +844,7 @@ static void writecache_suspend(struct dm_target *ti)
3415 + }
3416 + wc_unlock(wc);
3417 +
3418 +- flush_workqueue(wc->writeback_wq);
3419 ++ drain_workqueue(wc->writeback_wq);
3420 +
3421 + wc_lock(wc);
3422 + if (flush_on_suspend)
3423 +@@ -961,6 +967,8 @@ erase_this:
3424 + writecache_commit_flushed(wc, false);
3425 + }
3426 +
3427 ++ writecache_verify_watermark(wc);
3428 ++
3429 + wc_unlock(wc);
3430 + }
3431 +
3432 +diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
3433 +index 70a1063161c0..b1e64cd31647 100644
3434 +--- a/drivers/md/dm-zoned-target.c
3435 ++++ b/drivers/md/dm-zoned-target.c
3436 +@@ -533,8 +533,9 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
3437 +
3438 + /* Get the BIO chunk work. If one is not active yet, create one */
3439 + cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
3440 +- if (!cw) {
3441 +-
3442 ++ if (cw) {
3443 ++ dmz_get_chunk_work(cw);
3444 ++ } else {
3445 + /* Create a new chunk work */
3446 + cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
3447 + if (unlikely(!cw)) {
3448 +@@ -543,7 +544,7 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
3449 + }
3450 +
3451 + INIT_WORK(&cw->work, dmz_chunk_work);
3452 +- refcount_set(&cw->refcount, 0);
3453 ++ refcount_set(&cw->refcount, 1);
3454 + cw->target = dmz;
3455 + cw->chunk = chunk;
3456 + bio_list_init(&cw->bio_list);
3457 +@@ -556,7 +557,6 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
3458 + }
3459 +
3460 + bio_list_add(&cw->bio_list, bio);
3461 +- dmz_get_chunk_work(cw);
3462 +
3463 + dmz_reclaim_bio_acc(dmz->reclaim);
3464 + if (queue_work(dmz->chunk_wq, &cw->work))
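The dm-zoned hunk is the standard refcount_t correction: a new object starts at 1 to account for the creator's reference, and a looked-up object takes its reference inside the same lookup, so no path ever increments from zero (which refcount_t deliberately rejects as a use-after-free signal). A sketch with demo names:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_work {
	refcount_t refcount;
};

static struct demo_work *demo_work_create(void)
{
	struct demo_work *cw = kmalloc(sizeof(*cw), GFP_NOIO);

	if (cw)
		refcount_set(&cw->refcount, 1);	/* creator's ref, not 0 */
	return cw;
}

static void demo_work_put(struct demo_work *cw)
{
	if (refcount_dec_and_test(&cw->refcount))
		kfree(cw);	/* last reference frees */
}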
3465 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3466 +index b89f07ee2eff..0413018c8305 100644
3467 +--- a/drivers/md/dm.c
3468 ++++ b/drivers/md/dm.c
3469 +@@ -1788,7 +1788,8 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
3470 + * With request-based DM we only need to check the
3471 + * top-level queue for congestion.
3472 + */
3473 +- r = md->queue->backing_dev_info->wb.state & bdi_bits;
3474 ++ struct backing_dev_info *bdi = md->queue->backing_dev_info;
3475 ++ r = bdi->wb.congested->state & bdi_bits;
3476 + } else {
3477 + map = dm_get_live_table_fast(md);
3478 + if (map)
3479 +@@ -1854,15 +1855,6 @@ static const struct dax_operations dm_dax_ops;
3480 +
3481 + static void dm_wq_work(struct work_struct *work);
3482 +
3483 +-static void dm_init_normal_md_queue(struct mapped_device *md)
3484 +-{
3485 +- /*
3486 +- * Initialize aspects of queue that aren't relevant for blk-mq
3487 +- */
3488 +- md->queue->backing_dev_info->congested_data = md;
3489 +- md->queue->backing_dev_info->congested_fn = dm_any_congested;
3490 +-}
3491 +-
3492 + static void cleanup_mapped_device(struct mapped_device *md)
3493 + {
3494 + if (md->wq)
3495 +@@ -2249,6 +2241,12 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
3496 + }
3497 + EXPORT_SYMBOL_GPL(dm_get_queue_limits);
3498 +
3499 ++static void dm_init_congested_fn(struct mapped_device *md)
3500 ++{
3501 ++ md->queue->backing_dev_info->congested_data = md;
3502 ++ md->queue->backing_dev_info->congested_fn = dm_any_congested;
3503 ++}
3504 ++
3505 + /*
3506 + * Setup the DM device's queue based on md's type
3507 + */
3508 +@@ -2265,11 +2263,12 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
3509 + DMERR("Cannot initialize queue for request-based dm-mq mapped device");
3510 + return r;
3511 + }
3512 ++ dm_init_congested_fn(md);
3513 + break;
3514 + case DM_TYPE_BIO_BASED:
3515 + case DM_TYPE_DAX_BIO_BASED:
3516 + case DM_TYPE_NVME_BIO_BASED:
3517 +- dm_init_normal_md_queue(md);
3518 ++ dm_init_congested_fn(md);
3519 + break;
3520 + case DM_TYPE_NONE:
3521 + WARN_ON_ONCE(true);
3522 +@@ -2368,6 +2367,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
3523 + map = dm_get_live_table(md, &srcu_idx);
3524 + if (!dm_suspended_md(md)) {
3525 + dm_table_presuspend_targets(map);
3526 ++ set_bit(DMF_SUSPENDED, &md->flags);
3527 + dm_table_postsuspend_targets(map);
3528 + }
3529 + /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
3530 +diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
3531 +index 7c429ce98bae..668770e9f609 100644
3532 +--- a/drivers/media/mc/mc-entity.c
3533 ++++ b/drivers/media/mc/mc-entity.c
3534 +@@ -639,9 +639,9 @@ int media_get_pad_index(struct media_entity *entity, bool is_sink,
3535 + return -EINVAL;
3536 +
3537 + for (i = 0; i < entity->num_pads; i++) {
3538 +- if (entity->pads[i].flags == MEDIA_PAD_FL_SINK)
3539 ++ if (entity->pads[i].flags & MEDIA_PAD_FL_SINK)
3540 + pad_is_sink = true;
3541 +- else if (entity->pads[i].flags == MEDIA_PAD_FL_SOURCE)
3542 ++ else if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE)
3543 + pad_is_sink = false;
3544 + else
3545 + continue; /* This is an error! */
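The mc-entity hunk swaps == for & because pad flags are a bitmask: a sink pad that also carries MEDIA_PAD_FL_MUST_CONNECT fails the equality test and was being skipped. A runnable userspace illustration (flag values match include/uapi/linux/media.h):

#include <stdio.h>

#define MEDIA_PAD_FL_SINK		(1 << 0)
#define MEDIA_PAD_FL_SOURCE		(1 << 1)
#define MEDIA_PAD_FL_MUST_CONNECT	(1 << 2)

int main(void)
{
	unsigned long flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;

	/* equality breaks as soon as any extra flag is set */
	printf("== sink: %d\n", flags == MEDIA_PAD_FL_SINK);	/* 0 */
	/* a bitwise test still recognizes the sink bit */
	printf("&  sink: %d\n", !!(flags & MEDIA_PAD_FL_SINK));	/* 1 */
	return 0;
}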
3546 +diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
3547 +index 3c93d9232c3c..b6e39fbd8ad5 100644
3548 +--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c
3549 ++++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
3550 +@@ -27,17 +27,17 @@ static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = {
3551 + { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3552 + { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3553 + { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
3554 +- { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3555 +- { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3556 ++ { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3557 ++ { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3558 + { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3559 +- { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3560 +- { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3561 ++ { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3562 ++ { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3563 + { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3564 +- { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3565 ++ { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3566 + { V4L2_PIX_FMT_BGRA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3567 +- { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3568 ++ { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3569 + { V4L2_PIX_FMT_RGBA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3570 +- { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
3571 ++ { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_HSV},
3572 + { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB},
3573 + };
3574 +
3575 +@@ -175,22 +175,14 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
3576 + case V4L2_PIX_FMT_RGB32:
3577 + case V4L2_PIX_FMT_XRGB32:
3578 + case V4L2_PIX_FMT_HSV32:
3579 +- rf->cr = rf->luma + 1;
3580 +- rf->cb = rf->cr + 2;
3581 +- rf->luma += 2;
3582 +- break;
3583 +- case V4L2_PIX_FMT_BGR32:
3584 +- case V4L2_PIX_FMT_XBGR32:
3585 +- rf->cb = rf->luma;
3586 +- rf->cr = rf->cb + 2;
3587 +- rf->luma++;
3588 +- break;
3589 + case V4L2_PIX_FMT_ARGB32:
3590 + rf->alpha = rf->luma;
3591 + rf->cr = rf->luma + 1;
3592 + rf->cb = rf->cr + 2;
3593 + rf->luma += 2;
3594 + break;
3595 ++ case V4L2_PIX_FMT_BGR32:
3596 ++ case V4L2_PIX_FMT_XBGR32:
3597 + case V4L2_PIX_FMT_ABGR32:
3598 + rf->cb = rf->luma;
3599 + rf->cr = rf->cb + 2;
3600 +@@ -198,10 +190,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
3601 + rf->alpha = rf->cr + 1;
3602 + break;
3603 + case V4L2_PIX_FMT_BGRX32:
3604 +- rf->cb = rf->luma + 1;
3605 +- rf->cr = rf->cb + 2;
3606 +- rf->luma += 2;
3607 +- break;
3608 + case V4L2_PIX_FMT_BGRA32:
3609 + rf->alpha = rf->luma;
3610 + rf->cb = rf->luma + 1;
3611 +@@ -209,10 +197,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
3612 + rf->luma += 2;
3613 + break;
3614 + case V4L2_PIX_FMT_RGBX32:
3615 +- rf->cr = rf->luma;
3616 +- rf->cb = rf->cr + 2;
3617 +- rf->luma++;
3618 +- break;
3619 + case V4L2_PIX_FMT_RGBA32:
3620 + rf->alpha = rf->luma + 3;
3621 + rf->cr = rf->luma;
3622 +diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
3623 +index 1afd9c6ad908..cc34c5ab7009 100644
3624 +--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
3625 ++++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
3626 +@@ -880,12 +880,12 @@ int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
3627 + goto err_rel_entity1;
3628 +
3629 + /* Connect the three entities */
3630 +- ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
3631 ++ ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
3632 + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
3633 + if (ret)
3634 + goto err_rel_entity2;
3635 +
3636 +- ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
3637 ++ ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
3638 + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
3639 + if (ret)
3640 + goto err_rm_links0;
3641 +diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
3642 +index b155e9549076..b680b0caa69b 100644
3643 +--- a/drivers/misc/habanalabs/device.c
3644 ++++ b/drivers/misc/habanalabs/device.c
3645 +@@ -598,7 +598,9 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
3646 + goto out;
3647 + }
3648 +
3649 +- hdev->asic_funcs->halt_coresight(hdev);
3650 ++ if (!hdev->hard_reset_pending)
3651 ++ hdev->asic_funcs->halt_coresight(hdev);
3652 ++
3653 + hdev->in_debug = 0;
3654 +
3655 + goto out;
3656 +@@ -1189,6 +1191,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
3657 + if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
3658 + dev_info(hdev->dev,
3659 + "H/W state is dirty, must reset before initializing\n");
3660 ++ hdev->asic_funcs->halt_engines(hdev, true);
3661 + hdev->asic_funcs->hw_fini(hdev, true);
3662 + }
3663 +
3664 +diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
3665 +index 7344e8a222ae..b8a8de24aaf7 100644
3666 +--- a/drivers/misc/habanalabs/goya/goya.c
3667 ++++ b/drivers/misc/habanalabs/goya/goya.c
3668 +@@ -895,6 +895,11 @@ void goya_init_dma_qmans(struct hl_device *hdev)
3669 + */
3670 + static void goya_disable_external_queues(struct hl_device *hdev)
3671 + {
3672 ++ struct goya_device *goya = hdev->asic_specific;
3673 ++
3674 ++ if (!(goya->hw_cap_initialized & HW_CAP_DMA))
3675 ++ return;
3676 ++
3677 + WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
3678 + WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
3679 + WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
3680 +@@ -956,6 +961,11 @@ static int goya_stop_external_queues(struct hl_device *hdev)
3681 + {
3682 + int rc, retval = 0;
3683 +
3684 ++ struct goya_device *goya = hdev->asic_specific;
3685 ++
3686 ++ if (!(goya->hw_cap_initialized & HW_CAP_DMA))
3687 ++ return retval;
3688 ++
3689 + rc = goya_stop_queue(hdev,
3690 + mmDMA_QM_0_GLBL_CFG1,
3691 + mmDMA_QM_0_CP_STS,
3692 +@@ -1744,9 +1754,18 @@ void goya_init_tpc_qmans(struct hl_device *hdev)
3693 + */
3694 + static void goya_disable_internal_queues(struct hl_device *hdev)
3695 + {
3696 ++ struct goya_device *goya = hdev->asic_specific;
3697 ++
3698 ++ if (!(goya->hw_cap_initialized & HW_CAP_MME))
3699 ++ goto disable_tpc;
3700 ++
3701 + WREG32(mmMME_QM_GLBL_CFG0, 0);
3702 + WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
3703 +
3704 ++disable_tpc:
3705 ++ if (!(goya->hw_cap_initialized & HW_CAP_TPC))
3706 ++ return;
3707 ++
3708 + WREG32(mmTPC0_QM_GLBL_CFG0, 0);
3709 + WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
3710 +
3711 +@@ -1782,8 +1801,12 @@ static void goya_disable_internal_queues(struct hl_device *hdev)
3712 + */
3713 + static int goya_stop_internal_queues(struct hl_device *hdev)
3714 + {
3715 ++ struct goya_device *goya = hdev->asic_specific;
3716 + int rc, retval = 0;
3717 +
3718 ++ if (!(goya->hw_cap_initialized & HW_CAP_MME))
3719 ++ goto stop_tpc;
3720 ++
3721 + /*
3722 + * Each queue (QMAN) is a separate H/W logic. That means that each
3723 + * QMAN can be stopped independently and failure to stop one does NOT
3724 +@@ -1810,6 +1833,10 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
3725 + retval = -EIO;
3726 + }
3727 +
3728 ++stop_tpc:
3729 ++ if (!(goya->hw_cap_initialized & HW_CAP_TPC))
3730 ++ return retval;
3731 ++
3732 + rc = goya_stop_queue(hdev,
3733 + mmTPC0_QM_GLBL_CFG1,
3734 + mmTPC0_QM_CP_STS,
3735 +@@ -1975,6 +2002,11 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
3736 +
3737 + static void goya_dma_stall(struct hl_device *hdev)
3738 + {
3739 ++ struct goya_device *goya = hdev->asic_specific;
3740 ++
3741 ++ if (!(goya->hw_cap_initialized & HW_CAP_DMA))
3742 ++ return;
3743 ++
3744 + WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
3745 + WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
3746 + WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
3747 +@@ -1984,6 +2016,11 @@ static void goya_dma_stall(struct hl_device *hdev)
3748 +
3749 + static void goya_tpc_stall(struct hl_device *hdev)
3750 + {
3751 ++ struct goya_device *goya = hdev->asic_specific;
3752 ++
3753 ++ if (!(goya->hw_cap_initialized & HW_CAP_TPC))
3754 ++ return;
3755 ++
3756 + WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
3757 + WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
3758 + WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
3759 +@@ -1996,6 +2033,11 @@ static void goya_tpc_stall(struct hl_device *hdev)
3760 +
3761 + static void goya_mme_stall(struct hl_device *hdev)
3762 + {
3763 ++ struct goya_device *goya = hdev->asic_specific;
3764 ++
3765 ++ if (!(goya->hw_cap_initialized & HW_CAP_MME))
3766 ++ return;
3767 ++
3768 + WREG32(mmMME_STALL, 0xFFFFFFFF);
3769 + }
3770 +
3771 +@@ -4648,8 +4690,6 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
3772 +
3773 + rc = goya_send_job_on_qman0(hdev, job);
3774 +
3775 +- hl_cb_put(job->patched_cb);
3776 +-
3777 + hl_debugfs_remove_job(hdev, job);
3778 + kfree(job);
3779 + cb->cs_cnt--;
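The goya hunks all apply one guard: every stop/stall helper first checks the capability bit recorded at bring-up and bails out if that engine block was never initialized, so the dirty-state reset added in device.c cannot write registers of blocks still held in reset. Shape of the guard, with invented names:

#define DEMO_CAP_DMA	(1 << 0)
#define DEMO_CAP_MME	(1 << 1)

struct demo_dev {
	unsigned long hw_cap_initialized;	/* bits set during init */
};

static void demo_dma_stall(struct demo_dev *d)
{
	if (!(d->hw_cap_initialized & DEMO_CAP_DMA))
		return;	/* block never came up: don't touch its registers */
	/* ... the WREG32() stall writes would follow here ... */
}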
3780 +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
3781 +index d1955543acd1..b0f5280a83cb 100644
3782 +--- a/drivers/net/dsa/bcm_sf2.c
3783 ++++ b/drivers/net/dsa/bcm_sf2.c
3784 +@@ -69,8 +69,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
3785 + /* Force link status for IMP port */
3786 + reg = core_readl(priv, offset);
3787 + reg |= (MII_SW_OR | LINK_STS);
3788 +- if (priv->type == BCM7278_DEVICE_ID)
3789 +- reg |= GMII_SPEED_UP_2G;
3790 ++ reg &= ~GMII_SPEED_UP_2G;
3791 + core_writel(priv, reg, offset);
3792 +
3793 + /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
3794 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
3795 +index cc70c606b6ef..251767c31f7e 100644
3796 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
3797 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
3798 +@@ -337,6 +337,8 @@ struct aq_fw_ops {
3799 +
3800 + void (*enable_ptp)(struct aq_hw_s *self, int enable);
3801 +
3802 ++ void (*adjust_ptp)(struct aq_hw_s *self, uint64_t adj);
3803 ++
3804 + int (*set_eee_rate)(struct aq_hw_s *self, u32 speed);
3805 +
3806 + int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
3807 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
3808 +index fce587aaba33..d20d91cdece8 100644
3809 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
3810 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
3811 +@@ -1165,6 +1165,8 @@ static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
3812 + {
3813 + self->ptp_clk_offset += delta;
3814 +
3815 ++ self->aq_fw_ops->adjust_ptp(self, self->ptp_clk_offset);
3816 ++
3817 + return 0;
3818 + }
3819 +
3820 +@@ -1215,7 +1217,7 @@ static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
3821 + fwreq.ptp_gpio_ctrl.index = index;
3822 + fwreq.ptp_gpio_ctrl.period = period;
3823 + /* Apply time offset */
3824 +- fwreq.ptp_gpio_ctrl.start = start - self->ptp_clk_offset;
3825 ++ fwreq.ptp_gpio_ctrl.start = start;
3826 +
3827 + size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
3828 + return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
3829 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
3830 +index f547baa6c954..354705f9bc49 100644
3831 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
3832 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
3833 +@@ -22,6 +22,7 @@
3834 + #define HW_ATL_MIF_ADDR 0x0208U
3835 + #define HW_ATL_MIF_VAL 0x020CU
3836 +
3837 ++#define HW_ATL_MPI_RPC_ADDR 0x0334U
3838 + #define HW_ATL_RPC_CONTROL_ADR 0x0338U
3839 + #define HW_ATL_RPC_STATE_ADR 0x033CU
3840 +
3841 +@@ -53,15 +54,14 @@ enum mcp_area {
3842 + };
3843 +
3844 + static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
3845 +-
3846 + static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
3847 + enum hal_atl_utils_fw_state_e state);
3848 +-
3849 + static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
3850 + static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self);
3851 + static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self);
3852 + static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self);
3853 + static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self);
3854 ++static u32 aq_fw1x_rpc_get(struct aq_hw_s *self);
3855 +
3856 + int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
3857 + {
3858 +@@ -476,6 +476,10 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
3859 + self, self->mbox_addr,
3860 + self->mbox_addr != 0U,
3861 + 1000U, 10000U);
3862 ++ err = readx_poll_timeout_atomic(aq_fw1x_rpc_get, self,
3863 ++ self->rpc_addr,
3864 ++ self->rpc_addr != 0U,
3865 ++ 1000U, 100000U);
3866 +
3867 + return err;
3868 + }
3869 +@@ -531,6 +535,12 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
3870 + self, fw.val,
3871 + sw.tid == fw.tid,
3872 + 1000U, 100000U);
3873 ++ if (err < 0)
3874 ++ goto err_exit;
3875 ++
3876 ++ err = aq_hw_err_from_flags(self);
3877 ++ if (err < 0)
3878 ++ goto err_exit;
3879 +
3880 + if (fw.len == 0xFFFFU) {
3881 + err = hw_atl_utils_fw_rpc_call(self, sw.len);
3882 +@@ -1025,6 +1035,11 @@ static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self)
3883 + return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR);
3884 + }
3885 +
3886 ++static u32 aq_fw1x_rpc_get(struct aq_hw_s *self)
3887 ++{
3888 ++ return aq_hw_read_reg(self, HW_ATL_MPI_RPC_ADDR);
3889 ++}
3890 ++
3891 + const struct aq_fw_ops aq_fw_1x_ops = {
3892 + .init = hw_atl_utils_mpi_create,
3893 + .deinit = hw_atl_fw1x_deinit,
3894 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
3895 +index 97ebf849695f..77a4ed64830f 100644
3896 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
3897 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
3898 +@@ -30,6 +30,9 @@
3899 + #define HW_ATL_FW3X_EXT_CONTROL_ADDR 0x378
3900 + #define HW_ATL_FW3X_EXT_STATE_ADDR 0x37c
3901 +
3902 ++#define HW_ATL_FW3X_PTP_ADJ_LSW_ADDR 0x50a0
3903 ++#define HW_ATL_FW3X_PTP_ADJ_MSW_ADDR 0x50a4
3904 ++
3905 + #define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
3906 + #define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
3907 + #define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
3908 +@@ -475,6 +478,14 @@ static void aq_fw3x_enable_ptp(struct aq_hw_s *self, int enable)
3909 + aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ptp_opts);
3910 + }
3911 +
3912 ++static void aq_fw3x_adjust_ptp(struct aq_hw_s *self, uint64_t adj)
3913 ++{
3914 ++ aq_hw_write_reg(self, HW_ATL_FW3X_PTP_ADJ_LSW_ADDR,
3915 ++ (adj >> 0) & 0xffffffff);
3916 ++ aq_hw_write_reg(self, HW_ATL_FW3X_PTP_ADJ_MSW_ADDR,
3917 ++ (adj >> 32) & 0xffffffff);
3918 ++}
3919 ++
3920 + static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode)
3921 + {
3922 + if (self->fw_ver_actual < HW_ATL_FW_VER_LED)
3923 +@@ -633,4 +644,5 @@ const struct aq_fw_ops aq_fw_2x_ops = {
3924 + .enable_ptp = aq_fw3x_enable_ptp,
3925 + .led_control = aq_fw2x_led_control,
3926 + .set_phyloopback = aq_fw2x_set_phyloopback,
3927 ++ .adjust_ptp = aq_fw3x_adjust_ptp,
3928 + };
3929 +diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
3930 +index c4f6ec0cd183..00751771f662 100644
3931 +--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
3932 ++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
3933 +@@ -410,10 +410,19 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
3934 + lmac = &bgx->lmac[lmacid];
3935 +
3936 + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
3937 +- if (enable)
3938 ++ if (enable) {
3939 + cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
3940 +- else
3941 ++
3942 ++ /* enable TX FIFO Underflow interrupt */
3943 ++ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S,
3944 ++ GMI_TXX_INT_UNDFLW);
3945 ++ } else {
3946 + cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
3947 ++
3948 ++ /* Disable TX FIFO Underflow interrupt */
3949 ++ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C,
3950 ++ GMI_TXX_INT_UNDFLW);
3951 ++ }
3952 + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
3953 +
3954 + if (bgx->is_rgx)
3955 +@@ -1535,6 +1544,48 @@ static int bgx_init_phy(struct bgx *bgx)
3956 + return bgx_init_of_phy(bgx);
3957 + }
3958 +
3959 ++static irqreturn_t bgx_intr_handler(int irq, void *data)
3960 ++{
3961 ++ struct bgx *bgx = (struct bgx *)data;
3962 ++ u64 status, val;
3963 ++ int lmac;
3964 ++
3965 ++ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
3966 ++ status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT);
3967 ++ if (status & GMI_TXX_INT_UNDFLW) {
3968 ++ pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n",
3969 ++ bgx->bgx_id, lmac);
3970 ++ val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG);
3971 ++ val &= ~CMR_EN;
3972 ++ bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
3973 ++ val |= CMR_EN;
3974 ++ bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
3975 ++ }
3976 ++ /* clear interrupts */
3977 ++ bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status);
3978 ++ }
3979 ++
3980 ++ return IRQ_HANDLED;
3981 ++}
3982 ++
3983 ++static void bgx_register_intr(struct pci_dev *pdev)
3984 ++{
3985 ++ struct bgx *bgx = pci_get_drvdata(pdev);
3986 ++ int ret;
3987 ++
3988 ++ ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET,
3989 ++ BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES);
3990 ++ if (ret < 0) {
3991 ++ pci_err(pdev, "Req for #%d msix vectors failed\n",
3992 ++ BGX_LMAC_VEC_OFFSET);
3993 ++ return;
3994 ++ }
3995 ++ ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL,
3996 ++ bgx, "BGX%d", bgx->bgx_id);
3997 ++ if (ret)
3998 ++ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
3999 ++}
4000 ++
4001 + static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4002 + {
4003 + int err;
4004 +@@ -1550,7 +1601,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4005 +
4006 + pci_set_drvdata(pdev, bgx);
4007 +
4008 +- err = pci_enable_device(pdev);
4009 ++ err = pcim_enable_device(pdev);
4010 + if (err) {
4011 + dev_err(dev, "Failed to enable PCI device\n");
4012 + pci_set_drvdata(pdev, NULL);
4013 +@@ -1604,6 +1655,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4014 +
4015 + bgx_init_hw(bgx);
4016 +
4017 ++ bgx_register_intr(pdev);
4018 ++
4019 + /* Enable all LMACs */
4020 + for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
4021 + err = bgx_lmac_enable(bgx, lmac);
4022 +@@ -1620,6 +1673,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4023 +
4024 + err_enable:
4025 + bgx_vnic[bgx->bgx_id] = NULL;
4026 ++ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
4027 + err_release_regions:
4028 + pci_release_regions(pdev);
4029 + err_disable_device:
4030 +@@ -1637,6 +1691,8 @@ static void bgx_remove(struct pci_dev *pdev)
4031 + for (lmac = 0; lmac < bgx->lmac_count; lmac++)
4032 + bgx_lmac_disable(bgx, lmac);
4033 +
4034 ++ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
4035 ++
4036 + bgx_vnic[bgx->bgx_id] = NULL;
4037 + pci_release_regions(pdev);
4038 + pci_disable_device(pdev);
4039 +diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
4040 +index 25888706bdcd..cdea49392185 100644
4041 +--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
4042 ++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
4043 +@@ -180,6 +180,15 @@
4044 + #define BGX_GMP_GMI_TXX_BURST 0x38228
4045 + #define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
4046 + #define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
4047 ++#define BGX_GMP_GMI_TXX_INT 0x38500
4048 ++#define BGX_GMP_GMI_TXX_INT_W1S 0x38508
4049 ++#define BGX_GMP_GMI_TXX_INT_ENA_W1C 0x38510
4050 ++#define BGX_GMP_GMI_TXX_INT_ENA_W1S 0x38518
4051 ++#define GMI_TXX_INT_PTP_LOST BIT_ULL(4)
4052 ++#define GMI_TXX_INT_LATE_COL BIT_ULL(3)
4053 ++#define GMI_TXX_INT_XSDEF BIT_ULL(2)
4054 ++#define GMI_TXX_INT_XSCOL BIT_ULL(1)
4055 ++#define GMI_TXX_INT_UNDFLW BIT_ULL(0)
4056 +
4057 + #define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
4058 + #define BGX_MSIX_VEC_0_29_CTL 0x400008
4059 +diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
4060 +index cce90b5925d9..70060c51854f 100644
4061 +--- a/drivers/net/ethernet/davicom/dm9000.c
4062 ++++ b/drivers/net/ethernet/davicom/dm9000.c
4063 +@@ -1405,6 +1405,8 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
4064 + mac_addr = of_get_mac_address(np);
4065 + if (!IS_ERR(mac_addr))
4066 + ether_addr_copy(pdata->dev_addr, mac_addr);
4067 ++ else if (PTR_ERR(mac_addr) == -EPROBE_DEFER)
4068 ++ return ERR_CAST(mac_addr);
4069 +
4070 + return pdata;
4071 + }
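The dm9000 hunk propagates -EPROBE_DEFER from of_get_mac_address(): when the MAC address lives in an NVMEM cell whose provider has not bound yet, the probe must be retried later rather than continuing without the address. A sketch of the 5.5-era calling convention (demo function; in this kernel of_get_mac_address() returns the address or an ERR_PTR):

#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/etherdevice.h>
#include <linux/err.h>
#include <linux/errno.h>

static int demo_get_mac(struct device_node *np, u8 *addr)
{
	const void *mac = of_get_mac_address(np);

	if (!IS_ERR(mac)) {
		ether_addr_copy(addr, mac);
		return 0;
	}
	if (PTR_ERR(mac) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* provider not ready: retry probe */
	return 0;	/* other errors: caller falls back elsewhere */
}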
4072 +diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
4073 +index 9bd166e3dff3..594f6dbb2110 100644
4074 +--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
4075 ++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
4076 +@@ -2977,13 +2977,6 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
4077 + else
4078 + return -EINVAL;
4079 +
4080 +- /* Tell the OS link is going down, the link will go back up when fw
4081 +- * says it is ready asynchronously
4082 +- */
4083 +- ice_print_link_msg(vsi, false);
4084 +- netif_carrier_off(netdev);
4085 +- netif_tx_stop_all_queues(netdev);
4086 +-
4087 + /* Set the FC mode and only restart AN if link is up */
4088 + status = ice_set_fc(pi, &aq_failures, link_up);
4089 +
4090 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
4091 +index c6c7d1defbd7..aade62a9ee5c 100644
4092 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
4093 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
4094 +@@ -2307,7 +2307,9 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
4095 + struct mlx5dr_cmd_vport_cap *vport_cap;
4096 + struct mlx5dr_domain *dmn = sb->dmn;
4097 + struct mlx5dr_cmd_caps *caps;
4098 ++ u8 *bit_mask = sb->bit_mask;
4099 + u8 *tag = hw_ste->tag;
4100 ++ bool source_gvmi_set;
4101 +
4102 + DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
4103 +
4104 +@@ -2328,7 +2330,8 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
4105 + if (!vport_cap)
4106 + return -EINVAL;
4107 +
4108 +- if (vport_cap->vport_gvmi)
4109 ++ source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
4110 ++ if (vport_cap->vport_gvmi && source_gvmi_set)
4111 + MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
4112 +
4113 + misc->source_eswitch_owner_vhca_id = 0;
4114 +diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
4115 +index a41a90c589db..1c9e70c8cc30 100644
4116 +--- a/drivers/net/ethernet/micrel/ks8851_mll.c
4117 ++++ b/drivers/net/ethernet/micrel/ks8851_mll.c
4118 +@@ -156,24 +156,6 @@ static int msg_enable;
4119 + * chip is busy transferring packet data (RX/TX FIFO accesses).
4120 + */
4121 +
4122 +-/**
4123 +- * ks_rdreg8 - read 8 bit register from device
4124 +- * @ks : The chip information
4125 +- * @offset: The register address
4126 +- *
4127 +- * Read a 8bit register from the chip, returning the result
4128 +- */
4129 +-static u8 ks_rdreg8(struct ks_net *ks, int offset)
4130 +-{
4131 +- u16 data;
4132 +- u8 shift_bit = offset & 0x03;
4133 +- u8 shift_data = (offset & 1) << 3;
4134 +- ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
4135 +- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
4136 +- data = ioread16(ks->hw_addr);
4137 +- return (u8)(data >> shift_data);
4138 +-}
4139 +-
4140 + /**
4141 + * ks_rdreg16 - read 16 bit register from device
4142 + * @ks : The chip information
4143 +@@ -184,27 +166,11 @@ static u8 ks_rdreg8(struct ks_net *ks, int offset)
4144 +
4145 + static u16 ks_rdreg16(struct ks_net *ks, int offset)
4146 + {
4147 +- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
4148 ++ ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
4149 + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
4150 + return ioread16(ks->hw_addr);
4151 + }
4152 +
4153 +-/**
4154 +- * ks_wrreg8 - write 8bit register value to chip
4155 +- * @ks: The chip information
4156 +- * @offset: The register address
4157 +- * @value: The value to write
4158 +- *
4159 +- */
4160 +-static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
4161 +-{
4162 +- u8 shift_bit = (offset & 0x03);
4163 +- u16 value_write = (u16)(value << ((offset & 1) << 3));
4164 +- ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
4165 +- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
4166 +- iowrite16(value_write, ks->hw_addr);
4167 +-}
4168 +-
4169 + /**
4170 + * ks_wrreg16 - write 16bit register value to chip
4171 + * @ks: The chip information
4172 +@@ -215,7 +181,7 @@ static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
4173 +
4174 + static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
4175 + {
4176 +- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
4177 ++ ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
4178 + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
4179 + iowrite16(value, ks->hw_addr);
4180 + }
4181 +@@ -231,7 +197,7 @@ static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
4182 + {
4183 + len >>= 1;
4184 + while (len--)
4185 +- *wptr++ = (u16)ioread16(ks->hw_addr);
4186 ++ *wptr++ = be16_to_cpu(ioread16(ks->hw_addr));
4187 + }
4188 +
4189 + /**
4190 +@@ -245,7 +211,7 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
4191 + {
4192 + len >>= 1;
4193 + while (len--)
4194 +- iowrite16(*wptr++, ks->hw_addr);
4195 ++ iowrite16(cpu_to_be16(*wptr++), ks->hw_addr);
4196 + }
4197 +
4198 + static void ks_disable_int(struct ks_net *ks)
4199 +@@ -324,8 +290,7 @@ static void ks_read_config(struct ks_net *ks)
4200 + u16 reg_data = 0;
4201 +
4202 + /* Regardless of bus width, 8 bit read should always work.*/
4203 +- reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
4204 +- reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
4205 ++ reg_data = ks_rdreg16(ks, KS_CCR);
4206 +
4207 + /* addr/data bus are multiplexed */
4208 + ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
4209 +@@ -429,7 +394,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
4210 +
4211 + /* 1. set sudo DMA mode */
4212 + ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
4213 +- ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
4214 ++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
4215 +
4216 + /* 2. read prepend data */
4217 + /**
4218 +@@ -446,7 +411,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
4219 + ks_inblk(ks, buf, ALIGN(len, 4));
4220 +
4221 + /* 4. reset sudo DMA Mode */
4222 +- ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
4223 ++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
4224 + }
4225 +
4226 + /**
4227 +@@ -679,13 +644,13 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
4228 + ks->txh.txw[1] = cpu_to_le16(len);
4229 +
4230 + /* 1. set sudo-DMA mode */
4231 +- ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
4232 ++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
4233 + /* 2. write status/lenth info */
4234 + ks_outblk(ks, ks->txh.txw, 4);
4235 + /* 3. write pkt data */
4236 + ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
4237 + /* 4. reset sudo-DMA mode */
4238 +- ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
4239 ++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
4240 + /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
4241 + ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
4242 + /* 6. wait until TXQCR_METFE is auto-cleared */
4243 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4244 +index 6bc1bdb137ae..caa4d4c687b9 100644
4245 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4246 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4247 +@@ -4289,6 +4289,8 @@ static void stmmac_init_fs(struct net_device *dev)
4248 + {
4249 + struct stmmac_priv *priv = netdev_priv(dev);
4250 +
4251 ++ rtnl_lock();
4252 ++
4253 + /* Create per netdev entries */
4254 + priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4255 +
4256 +@@ -4300,14 +4302,13 @@ static void stmmac_init_fs(struct net_device *dev)
4257 + debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
4258 + &stmmac_dma_cap_fops);
4259 +
4260 +- register_netdevice_notifier(&stmmac_notifier);
4261 ++ rtnl_unlock();
4262 + }
4263 +
4264 + static void stmmac_exit_fs(struct net_device *dev)
4265 + {
4266 + struct stmmac_priv *priv = netdev_priv(dev);
4267 +
4268 +- unregister_netdevice_notifier(&stmmac_notifier);
4269 + debugfs_remove_recursive(priv->dbgfs_dir);
4270 + }
4271 + #endif /* CONFIG_DEBUG_FS */
4272 +@@ -4825,14 +4826,14 @@ int stmmac_dvr_remove(struct device *dev)
4273 +
4274 + netdev_info(priv->dev, "%s: removing driver", __func__);
4275 +
4276 +-#ifdef CONFIG_DEBUG_FS
4277 +- stmmac_exit_fs(ndev);
4278 +-#endif
4279 + stmmac_stop_all_dma(priv);
4280 +
4281 + stmmac_mac_set(priv, priv->ioaddr, false);
4282 + netif_carrier_off(ndev);
4283 + unregister_netdev(ndev);
4284 ++#ifdef CONFIG_DEBUG_FS
4285 ++ stmmac_exit_fs(ndev);
4286 ++#endif
4287 + phylink_destroy(priv->phylink);
4288 + if (priv->plat->stmmac_rst)
4289 + reset_control_assert(priv->plat->stmmac_rst);
4290 +@@ -5052,6 +5053,7 @@ static int __init stmmac_init(void)
4291 + /* Create debugfs main directory if it doesn't exist yet */
4292 + if (!stmmac_fs_dir)
4293 + stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4294 ++ register_netdevice_notifier(&stmmac_notifier);
4295 + #endif
4296 +
4297 + return 0;
4298 +@@ -5060,6 +5062,7 @@ static int __init stmmac_init(void)
4299 + static void __exit stmmac_exit(void)
4300 + {
4301 + #ifdef CONFIG_DEBUG_FS
4302 ++ unregister_netdevice_notifier(&stmmac_notifier);
4303 + debugfs_remove_recursive(stmmac_fs_dir);
4304 + #endif
4305 + }
4306 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
4307 +index ada59df642d2..a4d8c90ee7cc 100644
4308 +--- a/drivers/nvme/host/core.c
4309 ++++ b/drivers/nvme/host/core.c
4310 +@@ -1165,8 +1165,8 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl,
4311 + static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
4312 + unsigned int dword11, void *buffer, size_t buflen, u32 *result)
4313 + {
4314 ++ union nvme_result res = { 0 };
4315 + struct nvme_command c;
4316 +- union nvme_result res;
4317 + int ret;
4318 +
4319 + memset(&c, 0, sizeof(c));
4320 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
4321 +index bb5e13ad1aff..d3f23d6254e4 100644
4322 +--- a/drivers/nvme/host/pci.c
4323 ++++ b/drivers/nvme/host/pci.c
4324 +@@ -2747,6 +2747,18 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
4325 + (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
4326 + dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
4327 + return NVME_QUIRK_NO_APST;
4328 ++ } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
4329 ++ pdev->device == 0xa808 || pdev->device == 0xa809)) ||
4330 ++ (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
4331 ++ /*
4332 ++ * Forcing to use host managed nvme power settings for
4333 ++ * lowest idle power with quick resume latency on
4334 ++ * Samsung and Toshiba SSDs based on suspend behavior
4335 ++ * on Coffee Lake board for LENOVO C640
4336 ++ */
4337 ++ if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
4338 ++ dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
4339 ++ return NVME_QUIRK_SIMPLE_SUSPEND;
4340 + }
4341 +
4342 + return 0;
4343 +@@ -3109,7 +3121,8 @@ static const struct pci_device_id nvme_id_table[] = {
4344 + .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
4345 + NVME_QUIRK_IGNORE_DEV_SUBNQN, },
4346 + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
4347 +- { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
4348 ++ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
4349 ++ .driver_data = NVME_QUIRK_SINGLE_VECTOR },
4350 + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
4351 + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
4352 + .driver_data = NVME_QUIRK_SINGLE_VECTOR |
4353 +diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c
4354 +index 1169f3e83a6f..b1c04f71a31d 100644
4355 +--- a/drivers/phy/allwinner/phy-sun50i-usb3.c
4356 ++++ b/drivers/phy/allwinner/phy-sun50i-usb3.c
4357 +@@ -49,7 +49,7 @@
4358 + #define SUNXI_LOS_BIAS(n) ((n) << 3)
4359 + #define SUNXI_LOS_BIAS_MASK GENMASK(5, 3)
4360 + #define SUNXI_TXVBOOSTLVL(n) ((n) << 0)
4361 +-#define SUNXI_TXVBOOSTLVL_MASK GENMASK(0, 2)
4362 ++#define SUNXI_TXVBOOSTLVL_MASK GENMASK(2, 0)
4363 +
4364 + struct sun50i_usb3_phy {
4365 + struct phy *phy;
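The sun50i hunk fixes a swapped GENMASK(): the kernel macro takes (high, low), so GENMASK(0, 2) yields a bogus (empty) mask while GENMASK(2, 0) is the intended bits 0-2. A runnable userspace check using an equivalent form of the macro:

#include <stdio.h>

/* equivalent of the kernel's GENMASK(h, l): bits l..h inclusive */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
	printf("GENMASK(2, 0) = %#lx\n", GENMASK(2, 0)); /* 0x7, bits 0-2 */
	printf("GENMASK(0, 2) = %#lx\n", GENMASK(0, 2)); /* 0x0, args swapped */
	return 0;
}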
4366 +diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
4367 +index f20524f0c21d..94a34cf75eb3 100644
4368 +--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
4369 ++++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
4370 +@@ -20,6 +20,7 @@
4371 +
4372 + #define PHY_MDM6600_PHY_DELAY_MS 4000 /* PHY enable 2.2s to 3.5s */
4373 + #define PHY_MDM6600_ENABLED_DELAY_MS 8000 /* 8s more total for MDM6600 */
4374 ++#define PHY_MDM6600_WAKE_KICK_MS 600 /* time on after GPIO toggle */
4375 + #define MDM6600_MODEM_IDLE_DELAY_MS 1000 /* modem after USB suspend */
4376 + #define MDM6600_MODEM_WAKE_DELAY_MS 200 /* modem response after idle */
4377 +
4378 +@@ -243,10 +244,24 @@ static irqreturn_t phy_mdm6600_wakeirq_thread(int irq, void *data)
4379 + {
4380 + struct phy_mdm6600 *ddata = data;
4381 + struct gpio_desc *mode_gpio1;
4382 ++ int error, wakeup;
4383 +
4384 + mode_gpio1 = ddata->mode_gpios->desc[PHY_MDM6600_MODE1];
4385 +- dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n",
4386 +- gpiod_get_value(mode_gpio1));
4387 ++ wakeup = gpiod_get_value(mode_gpio1);
4388 ++ if (!wakeup)
4389 ++ return IRQ_NONE;
4390 ++
4391 ++ dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n", wakeup);
4392 ++ error = pm_runtime_get_sync(ddata->dev);
4393 ++ if (error < 0) {
4394 ++ pm_runtime_put_noidle(ddata->dev);
4395 ++
4396 ++ return IRQ_NONE;
4397 ++ }
4398 ++
4399 ++ /* Just wake-up and kick the autosuspend timer */
4400 ++ pm_runtime_mark_last_busy(ddata->dev);
4401 ++ pm_runtime_put_autosuspend(ddata->dev);
4402 +
4403 + return IRQ_HANDLED;
4404 + }
4405 +@@ -496,8 +511,14 @@ static void phy_mdm6600_modem_wake(struct work_struct *work)
4406 +
4407 + ddata = container_of(work, struct phy_mdm6600, modem_wake_work.work);
4408 + phy_mdm6600_wake_modem(ddata);
4409 ++
4410 ++ /*
4411 ++ * The modem does not always stay awake 1.2 seconds after toggling
4412 ++ * the wake GPIO, and sometimes it idles after about some 600 ms
4413 ++ * making writes time out.
4414 ++ */
4415 + schedule_delayed_work(&ddata->modem_wake_work,
4416 +- msecs_to_jiffies(MDM6600_MODEM_IDLE_DELAY_MS));
4417 ++ msecs_to_jiffies(PHY_MDM6600_WAKE_KICK_MS));
4418 + }
4419 +
4420 + static int __maybe_unused phy_mdm6600_runtime_suspend(struct device *dev)
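The wakeirq hunk above is the usual "kick autosuspend from a wake interrupt" idiom: ignore spurious wakes (GPIO already low, IRQ_NONE), otherwise resume the device, restart its inactivity timer, and let runtime PM suspend it again after the autosuspend delay. A sketch of just that idiom, with the device pointer standing in for the driver data:

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static irqreturn_t demo_wakeirq_thread(int irq, void *data)
{
	struct device *dev = data;

	if (pm_runtime_get_sync(dev) < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return IRQ_NONE;
	}
	/* device resumed: restart the inactivity timer, then allow it
	 * to autosuspend again after the configured delay */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return IRQ_HANDLED;
}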
4421 +diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
4422 +index bdfaf7edb75a..992bc18101ef 100644
4423 +--- a/drivers/regulator/stm32-vrefbuf.c
4424 ++++ b/drivers/regulator/stm32-vrefbuf.c
4425 +@@ -88,7 +88,7 @@ static int stm32_vrefbuf_disable(struct regulator_dev *rdev)
4426 + }
4427 +
4428 + val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
4429 +- val = (val & ~STM32_ENVR) | STM32_HIZ;
4430 ++ val &= ~STM32_ENVR;
4431 + writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
4432 +
4433 + pm_runtime_mark_last_busy(priv->dev);
4434 +@@ -175,6 +175,7 @@ static const struct regulator_desc stm32_vrefbuf_regu = {
4435 + .volt_table = stm32_vrefbuf_voltages,
4436 + .n_voltages = ARRAY_SIZE(stm32_vrefbuf_voltages),
4437 + .ops = &stm32_vrefbuf_volt_ops,
4438 ++ .off_on_delay = 1000,
4439 + .type = REGULATOR_VOLTAGE,
4440 + .owner = THIS_MODULE,
4441 + };
4442 +diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
4443 +index 2a3f874a21d5..9cebff8e8d74 100644
4444 +--- a/drivers/s390/cio/blacklist.c
4445 ++++ b/drivers/s390/cio/blacklist.c
4446 +@@ -303,8 +303,10 @@ static void *
4447 + cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
4448 + {
4449 + struct ccwdev_iter *iter;
4450 ++ loff_t p = *offset;
4451 +
4452 +- if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
4453 ++ (*offset)++;
4454 ++ if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
4455 + return NULL;
4456 + iter = it;
4457 + if (iter->devno == __MAX_SUBCHANNEL) {
4458 +@@ -314,7 +316,6 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
4459 + return NULL;
4460 + } else
4461 + iter->devno++;
4462 +- (*offset)++;
4463 + return iter;
4464 + }
4465 +
4466 +diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
4467 +index dc430bd86ade..58eaac70dba7 100644
4468 +--- a/drivers/s390/cio/qdio_setup.c
4469 ++++ b/drivers/s390/cio/qdio_setup.c
4470 +@@ -8,6 +8,7 @@
4471 + #include <linux/kernel.h>
4472 + #include <linux/slab.h>
4473 + #include <linux/export.h>
4474 ++#include <linux/io.h>
4475 + #include <asm/qdio.h>
4476 +
4477 + #include "cio.h"
4478 +@@ -205,7 +206,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
4479 +
4480 + /* fill in sl */
4481 + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
4482 +- q->sl->element[j].sbal = (unsigned long)q->sbal[j];
4483 ++ q->sl->element[j].sbal = virt_to_phys(q->sbal[j]);
4484 + }
4485 +
4486 + static void setup_queues(struct qdio_irq *irq_ptr,
4487 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
4488 +index 10edfd6fc930..4fd7b0ceb4ff 100644
4489 +--- a/drivers/s390/net/qeth_core_main.c
4490 ++++ b/drivers/s390/net/qeth_core_main.c
4491 +@@ -4749,10 +4749,10 @@ static void qeth_qdio_establish_cq(struct qeth_card *card,
4492 + if (card->options.cq == QETH_CQ_ENABLED) {
4493 + int offset = QDIO_MAX_BUFFERS_PER_Q *
4494 + (card->qdio.no_in_queues - 1);
4495 +- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4496 +- in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
4497 +- virt_to_phys(card->qdio.c_q->bufs[i].buffer);
4498 +- }
4499 ++
4500 ++ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
4501 ++ in_sbal_ptrs[offset + i] =
4502 ++ card->qdio.c_q->bufs[i].buffer;
4503 +
4504 + queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
4505 + }
4506 +@@ -4786,10 +4786,9 @@ static int qeth_qdio_establish(struct qeth_card *card)
4507 + rc = -ENOMEM;
4508 + goto out_free_qib_param;
4509 + }
4510 +- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4511 +- in_sbal_ptrs[i] = (struct qdio_buffer *)
4512 +- virt_to_phys(card->qdio.in_q->bufs[i].buffer);
4513 +- }
4514 ++
4515 ++ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
4516 ++ in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;
4517 +
4518 + queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
4519 + GFP_KERNEL);
4520 +@@ -4810,11 +4809,11 @@ static int qeth_qdio_establish(struct qeth_card *card)
4521 + rc = -ENOMEM;
4522 + goto out_free_queue_start_poll;
4523 + }
4524 ++
4525 + for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
4526 +- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
4527 +- out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
4528 +- card->qdio.out_qs[i]->bufs[j]->buffer);
4529 +- }
4530 ++ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++)
4531 ++ out_sbal_ptrs[k] =
4532 ++ card->qdio.out_qs[i]->bufs[j]->buffer;
4533 +
4534 + memset(&init_data, 0, sizeof(struct qdio_initialize));
4535 + init_data.cdev = CARD_DDEV(card);
4536 +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
4537 +index 46bc062d873e..d86838801805 100644
4538 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
4539 ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
4540 +@@ -594,7 +594,8 @@ retry_alloc:
4541 +
4542 + fusion->io_request_frames =
4543 + dma_pool_alloc(fusion->io_request_frames_pool,
4544 +- GFP_KERNEL, &fusion->io_request_frames_phys);
4545 ++ GFP_KERNEL | __GFP_NOWARN,
4546 ++ &fusion->io_request_frames_phys);
4547 + if (!fusion->io_request_frames) {
4548 + if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
4549 + instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
4550 +@@ -632,7 +633,7 @@ retry_alloc:
4551 +
4552 + fusion->io_request_frames =
4553 + dma_pool_alloc(fusion->io_request_frames_pool,
4554 +- GFP_KERNEL,
4555 ++ GFP_KERNEL | __GFP_NOWARN,
4556 + &fusion->io_request_frames_phys);
4557 +
4558 + if (!fusion->io_request_frames) {
4559 +diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c
4560 +index fb70b8a3f7c5..20d37eaeb5f2 100644
4561 +--- a/drivers/soc/imx/soc-imx-scu.c
4562 ++++ b/drivers/soc/imx/soc-imx-scu.c
4563 +@@ -25,7 +25,7 @@ struct imx_sc_msg_misc_get_soc_id {
4564 + u32 id;
4565 + } resp;
4566 + } data;
4567 +-} __packed;
4568 ++} __packed __aligned(4);
4569 +
4570 + struct imx_sc_msg_misc_get_soc_uid {
4571 + struct imx_sc_rpc_msg hdr;
4572 +diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
4573 +index fd8007ebb145..13def7f78b9e 100644
4574 +--- a/drivers/spi/atmel-quadspi.c
4575 ++++ b/drivers/spi/atmel-quadspi.c
4576 +@@ -149,6 +149,7 @@ struct atmel_qspi {
4577 + struct clk *qspick;
4578 + struct platform_device *pdev;
4579 + const struct atmel_qspi_caps *caps;
4580 ++ resource_size_t mmap_size;
4581 + u32 pending;
4582 + u32 mr;
4583 + u32 scr;
4584 +@@ -329,6 +330,14 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
4585 + u32 sr, offset;
4586 + int err;
4587 +
4588 ++ /*
4589 ++ * Check if the address exceeds the MMIO window size. An improvement
4590 ++ * would be to add support for regular SPI mode and fall back to it
4591 ++ * when the flash memories overrun the controller's memory space.
4592 ++ */
4593 ++ if (op->addr.val + op->data.nbytes > aq->mmap_size)
4594 ++ return -ENOTSUPP;
4595 ++
4596 + err = atmel_qspi_set_cfg(aq, op, &offset);
4597 + if (err)
4598 + return err;
4599 +@@ -480,6 +489,8 @@ static int atmel_qspi_probe(struct platform_device *pdev)
4600 + goto exit;
4601 + }
4602 +
4603 ++ aq->mmap_size = resource_size(res);
4604 ++
4605 + /* Get the peripheral clock */
4606 + aq->pclk = devm_clk_get(&pdev->dev, "pclk");
4607 + if (IS_ERR(aq->pclk))
4608 +diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
4609 +index 7327309ea3d5..6c235306c0e4 100644
4610 +--- a/drivers/spi/spi-bcm63xx-hsspi.c
4611 ++++ b/drivers/spi/spi-bcm63xx-hsspi.c
4612 +@@ -366,7 +366,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
4613 + goto out_disable_clk;
4614 +
4615 + rate = clk_get_rate(pll_clk);
4616 +- clk_disable_unprepare(pll_clk);
4617 + if (!rate) {
4618 + ret = -EINVAL;
4619 + goto out_disable_pll_clk;
4620 +diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
4621 +index 1e217e3e9486..2ab6e782f14c 100644
4622 +--- a/drivers/spi/spidev.c
4623 ++++ b/drivers/spi/spidev.c
4624 +@@ -396,6 +396,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4625 + else
4626 + retval = get_user(tmp, (u32 __user *)arg);
4627 + if (retval == 0) {
4628 ++ struct spi_controller *ctlr = spi->controller;
4629 + u32 save = spi->mode;
4630 +
4631 + if (tmp & ~SPI_MODE_MASK) {
4632 +@@ -403,6 +404,10 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4633 + break;
4634 + }
4635 +
4636 ++ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
4637 ++ ctlr->cs_gpiods[spi->chip_select])
4638 ++ tmp |= SPI_CS_HIGH;
4639 ++
4640 + tmp |= spi->mode & ~SPI_MODE_MASK;
4641 + spi->mode = (u16)tmp;
4642 + retval = spi_setup(spi);
4643 +diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
4644 +index 26108c96b674..37c7cf6b7d8a 100644
4645 +--- a/drivers/staging/media/hantro/hantro_drv.c
4646 ++++ b/drivers/staging/media/hantro/hantro_drv.c
4647 +@@ -553,13 +553,13 @@ static int hantro_attach_func(struct hantro_dev *vpu,
4648 + goto err_rel_entity1;
4649 +
4650 + /* Connect the three entities */
4651 +- ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 1,
4652 ++ ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
4653 + MEDIA_LNK_FL_IMMUTABLE |
4654 + MEDIA_LNK_FL_ENABLED);
4655 + if (ret)
4656 + goto err_rel_entity2;
4657 +
4658 +- ret = media_create_pad_link(&func->proc, 0, &func->sink, 0,
4659 ++ ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
4660 + MEDIA_LNK_FL_IMMUTABLE |
4661 + MEDIA_LNK_FL_ENABLED);
4662 + if (ret)
4663 +diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
4664 +index a8b4d0c5ab7e..032f3264fba1 100644
4665 +--- a/drivers/staging/speakup/selection.c
4666 ++++ b/drivers/staging/speakup/selection.c
4667 +@@ -51,9 +51,7 @@ static void __speakup_set_selection(struct work_struct *work)
4668 + goto unref;
4669 + }
4670 +
4671 +- console_lock();
4672 + set_selection_kernel(&sel, tty);
4673 +- console_unlock();
4674 +
4675 + unref:
4676 + tty_kref_put(tty);
4677 +diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
4678 +index ce5309d00280..0f64a10ba51f 100644
4679 +--- a/drivers/tty/serdev/core.c
4680 ++++ b/drivers/tty/serdev/core.c
4681 +@@ -18,6 +18,7 @@
4682 + #include <linux/sched.h>
4683 + #include <linux/serdev.h>
4684 + #include <linux/slab.h>
4685 ++#include <linux/platform_data/x86/apple.h>
4686 +
4687 + static bool is_registered;
4688 + static DEFINE_IDA(ctrl_ida);
4689 +@@ -630,6 +631,15 @@ static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
4690 + if (ret)
4691 + return ret;
4692 +
4693 ++ /*
4694 ++ * Apple machines provide an empty resource template, so on those
4695 ++ * machines just look for immediate children with a "baud" property
4696 ++ * (from the _DSM method) instead.
4697 ++ */
4698 ++ if (!lookup.controller_handle && x86_apple_machine &&
4699 ++ !acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, NULL))
4700 ++ acpi_get_parent(adev->handle, &lookup.controller_handle);
4701 ++
4702 + /* Make sure controller and ResourceSource handle match */
4703 + if (ACPI_HANDLE(ctrl->dev.parent) != lookup.controller_handle)
4704 + return -ENODEV;
4705 +diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
4706 +index 108cd55f9c4d..405370c6eee5 100644
4707 +--- a/drivers/tty/serial/8250/8250_exar.c
4708 ++++ b/drivers/tty/serial/8250/8250_exar.c
4709 +@@ -25,6 +25,14 @@
4710 +
4711 + #include "8250.h"
4712 +
4713 ++#define PCI_DEVICE_ID_ACCES_COM_2S 0x1052
4714 ++#define PCI_DEVICE_ID_ACCES_COM_4S 0x105d
4715 ++#define PCI_DEVICE_ID_ACCES_COM_8S 0x106c
4716 ++#define PCI_DEVICE_ID_ACCES_COM232_8 0x10a8
4717 ++#define PCI_DEVICE_ID_ACCES_COM_2SM 0x10d2
4718 ++#define PCI_DEVICE_ID_ACCES_COM_4SM 0x10db
4719 ++#define PCI_DEVICE_ID_ACCES_COM_8SM 0x10ea
4720 ++
4721 + #define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
4722 + #define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
4723 + #define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a
4724 +@@ -677,6 +685,22 @@ static int __maybe_unused exar_resume(struct device *dev)
4725 +
4726 + static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume);
4727 +
4728 ++static const struct exar8250_board acces_com_2x = {
4729 ++ .num_ports = 2,
4730 ++ .setup = pci_xr17c154_setup,
4731 ++};
4732 ++
4733 ++static const struct exar8250_board acces_com_4x = {
4734 ++ .num_ports = 4,
4735 ++ .setup = pci_xr17c154_setup,
4736 ++};
4737 ++
4738 ++static const struct exar8250_board acces_com_8x = {
4739 ++ .num_ports = 8,
4740 ++ .setup = pci_xr17c154_setup,
4741 ++};
4742 ++
4744 + static const struct exar8250_board pbn_fastcom335_2 = {
4745 + .num_ports = 2,
4746 + .setup = pci_fastcom335_setup,
4747 +@@ -745,6 +769,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
4748 + }
4749 +
4750 + static const struct pci_device_id exar_pci_tbl[] = {
4751 ++ EXAR_DEVICE(ACCESSIO, ACCES_COM_2S, acces_com_2x),
4752 ++ EXAR_DEVICE(ACCESSIO, ACCES_COM_4S, acces_com_4x),
4753 ++ EXAR_DEVICE(ACCESSIO, ACCES_COM_8S, acces_com_8x),
4754 ++ EXAR_DEVICE(ACCESSIO, ACCES_COM232_8, acces_com_8x),
4755 ++ EXAR_DEVICE(ACCESSIO, ACCES_COM_2SM, acces_com_2x),
4756 ++ EXAR_DEVICE(ACCESSIO, ACCES_COM_4SM, acces_com_4x),
4757 ++ EXAR_DEVICE(ACCESSIO, ACCES_COM_8SM, acces_com_8x),
4758 ++
4760 + CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect),
4761 + CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect),
4762 + CONNECT_DEVICE(XR17C158, UART_8_232, pbn_connect),
4763 +diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
4764 +index 3bdd56a1021b..ea12f10610b6 100644
4765 +--- a/drivers/tty/serial/ar933x_uart.c
4766 ++++ b/drivers/tty/serial/ar933x_uart.c
4767 +@@ -286,6 +286,10 @@ static void ar933x_uart_set_termios(struct uart_port *port,
4768 + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
4769 + AR933X_UART_CS_HOST_INT_EN);
4770 +
4771 ++ /* enable RX and TX ready override */
4772 ++ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
4773 ++ AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
4774 ++
4775 + /* reenable the UART */
4776 + ar933x_uart_rmw(up, AR933X_UART_CS_REG,
4777 + AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
4778 +@@ -418,6 +422,10 @@ static int ar933x_uart_startup(struct uart_port *port)
4779 + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
4780 + AR933X_UART_CS_HOST_INT_EN);
4781 +
4782 ++ /* enable RX and TX ready override */
4783 ++ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
4784 ++ AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
4785 ++
4786 + /* Enable RX interrupts */
4787 + up->ier = AR933X_UART_INT_RX_VALID;
4788 + ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
4789 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
4790 +index 4e128d19e0ad..8a69ec282a43 100644
4791 +--- a/drivers/tty/serial/fsl_lpuart.c
4792 ++++ b/drivers/tty/serial/fsl_lpuart.c
4793 +@@ -268,6 +268,7 @@ struct lpuart_port {
4794 + int rx_dma_rng_buf_len;
4795 + unsigned int dma_tx_nents;
4796 + wait_queue_head_t dma_wait;
4797 ++ bool id_allocated;
4798 + };
4799 +
4800 + struct lpuart_soc_data {
4801 +@@ -2429,19 +2430,6 @@ static int lpuart_probe(struct platform_device *pdev)
4802 + if (!sport)
4803 + return -ENOMEM;
4804 +
4805 +- ret = of_alias_get_id(np, "serial");
4806 +- if (ret < 0) {
4807 +- ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
4808 +- if (ret < 0) {
4809 +- dev_err(&pdev->dev, "port line is full, add device failed\n");
4810 +- return ret;
4811 +- }
4812 +- }
4813 +- if (ret >= ARRAY_SIZE(lpuart_ports)) {
4814 +- dev_err(&pdev->dev, "serial%d out of range\n", ret);
4815 +- return -EINVAL;
4816 +- }
4817 +- sport->port.line = ret;
4818 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4819 + sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
4820 + if (IS_ERR(sport->port.membase))
4821 +@@ -2485,9 +2473,25 @@ static int lpuart_probe(struct platform_device *pdev)
4822 + }
4823 + }
4824 +
4825 ++ ret = of_alias_get_id(np, "serial");
4826 ++ if (ret < 0) {
4827 ++ ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
4828 ++ if (ret < 0) {
4829 ++ dev_err(&pdev->dev, "port line is full, add device failed\n");
4830 ++ return ret;
4831 ++ }
4832 ++ sport->id_allocated = true;
4833 ++ }
4834 ++ if (ret >= ARRAY_SIZE(lpuart_ports)) {
4835 ++ dev_err(&pdev->dev, "serial%d out of range\n", ret);
4836 ++ ret = -EINVAL;
4837 ++ goto failed_out_of_range;
4838 ++ }
4839 ++ sport->port.line = ret;
4840 ++
4841 + ret = lpuart_enable_clks(sport);
4842 + if (ret)
4843 +- return ret;
4844 ++ goto failed_clock_enable;
4845 + sport->port.uartclk = lpuart_get_baud_clk_rate(sport);
4846 +
4847 + lpuart_ports[sport->port.line] = sport;
4848 +@@ -2537,6 +2541,10 @@ static int lpuart_probe(struct platform_device *pdev)
4849 + failed_attach_port:
4850 + failed_irq_request:
4851 + lpuart_disable_clks(sport);
4852 ++failed_clock_enable:
4853 ++failed_out_of_range:
4854 ++ if (sport->id_allocated)
4855 ++ ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
4856 + return ret;
4857 + }
4858 +
4859 +@@ -2546,7 +2554,8 @@ static int lpuart_remove(struct platform_device *pdev)
4860 +
4861 + uart_remove_one_port(&lpuart_reg, &sport->port);
4862 +
4863 +- ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
4864 ++ if (sport->id_allocated)
4865 ++ ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
4866 +
4867 + lpuart_disable_clks(sport);
4868 +
4869 +diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
4870 +index c12a12556339..4e9a590712cb 100644
4871 +--- a/drivers/tty/serial/mvebu-uart.c
4872 ++++ b/drivers/tty/serial/mvebu-uart.c
4873 +@@ -851,7 +851,7 @@ static int mvebu_uart_probe(struct platform_device *pdev)
4874 +
4875 + port->membase = devm_ioremap_resource(&pdev->dev, reg);
4876 + if (IS_ERR(port->membase))
4877 +- return -PTR_ERR(port->membase);
4878 ++ return PTR_ERR(port->membase);
4879 +
4880 + mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
4881 + GFP_KERNEL);
4882 +diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
4883 +index 44d974d4159f..d7d2e4b844bc 100644
4884 +--- a/drivers/tty/vt/selection.c
4885 ++++ b/drivers/tty/vt/selection.c
4886 +@@ -16,6 +16,7 @@
4887 + #include <linux/tty.h>
4888 + #include <linux/sched.h>
4889 + #include <linux/mm.h>
4890 ++#include <linux/mutex.h>
4891 + #include <linux/slab.h>
4892 + #include <linux/types.h>
4893 +
4894 +@@ -45,6 +46,7 @@ static volatile int sel_start = -1; /* cleared by clear_selection */
4895 + static int sel_end;
4896 + static int sel_buffer_lth;
4897 + static char *sel_buffer;
4898 ++static DEFINE_MUTEX(sel_lock);
4899 +
4900 + /* clear_selection, highlight and highlight_pointer can be called
4901 + from interrupt (via scrollback/front) */
4902 +@@ -179,14 +181,14 @@ int set_selection_user(const struct tiocl_selection __user *sel,
4903 + return set_selection_kernel(&v, tty);
4904 + }
4905 +
4906 +-int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
4907 ++static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
4908 + {
4909 + struct vc_data *vc = vc_cons[fg_console].d;
4910 + int new_sel_start, new_sel_end, spc;
4911 + char *bp, *obp;
4912 + int i, ps, pe, multiplier;
4913 + u32 c;
4914 +- int mode;
4915 ++ int mode, ret = 0;
4916 +
4917 + poke_blanked_console();
4918 +
4919 +@@ -334,7 +336,21 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
4920 + }
4921 + }
4922 + sel_buffer_lth = bp - sel_buffer;
4923 +- return 0;
4924 ++
4925 ++ return ret;
4926 ++}
4927 ++
4928 ++int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
4929 ++{
4930 ++ int ret;
4931 ++
4932 ++ mutex_lock(&sel_lock);
4933 ++ console_lock();
4934 ++ ret = __set_selection_kernel(v, tty);
4935 ++ console_unlock();
4936 ++ mutex_unlock(&sel_lock);
4937 ++
4938 ++ return ret;
4939 + }
4940 + EXPORT_SYMBOL_GPL(set_selection_kernel);
4941 +
4942 +@@ -364,6 +380,7 @@ int paste_selection(struct tty_struct *tty)
4943 + tty_buffer_lock_exclusive(&vc->port);
4944 +
4945 + add_wait_queue(&vc->paste_wait, &wait);
4946 ++ mutex_lock(&sel_lock);
4947 + while (sel_buffer && sel_buffer_lth > pasted) {
4948 + set_current_state(TASK_INTERRUPTIBLE);
4949 + if (signal_pending(current)) {
4950 +@@ -371,7 +388,9 @@ int paste_selection(struct tty_struct *tty)
4951 + break;
4952 + }
4953 + if (tty_throttled(tty)) {
4954 ++ mutex_unlock(&sel_lock);
4955 + schedule();
4956 ++ mutex_lock(&sel_lock);
4957 + continue;
4958 + }
4959 + __set_current_state(TASK_RUNNING);
4960 +@@ -380,6 +399,7 @@ int paste_selection(struct tty_struct *tty)
4961 + count);
4962 + pasted += count;
4963 + }
4964 ++ mutex_unlock(&sel_lock);
4965 + remove_wait_queue(&vc->paste_wait, &wait);
4966 + __set_current_state(TASK_RUNNING);
4967 +
4968 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
4969 +index 3b4ccc2a30c1..e9e27ba69d5d 100644
4970 +--- a/drivers/tty/vt/vt.c
4971 ++++ b/drivers/tty/vt/vt.c
4972 +@@ -3046,10 +3046,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
4973 + switch (type)
4974 + {
4975 + case TIOCL_SETSEL:
4976 +- console_lock();
4977 + ret = set_selection_user((struct tiocl_selection
4978 + __user *)(p+1), tty);
4979 +- console_unlock();
4980 + break;
4981 + case TIOCL_PASTESEL:
4982 + ret = paste_selection(tty);
4983 +diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
4984 +index 02f6ca2cb1ba..f624cc87cbab 100644
4985 +--- a/drivers/usb/cdns3/gadget.c
4986 ++++ b/drivers/usb/cdns3/gadget.c
4987 +@@ -2107,7 +2107,7 @@ found:
4988 + /* Update ring only if removed request is on pending_req_list list */
4989 + if (req_on_hw_ring) {
4990 + link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
4991 +- (priv_req->start_trb * TRB_SIZE));
4992 ++ ((priv_req->end_trb + 1) * TRB_SIZE));
4993 + link_trb->control = (link_trb->control & TRB_CYCLE) |
4994 + TRB_TYPE(TRB_LINK) | TRB_CHAIN;
4995 +
4996 +@@ -2152,11 +2152,21 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
4997 + {
4998 + struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
4999 + struct usb_request *request;
5000 ++ struct cdns3_request *priv_req;
5001 ++ struct cdns3_trb *trb = NULL;
5002 + int ret;
5003 + int val;
5004 +
5005 + trace_cdns3_halt(priv_ep, 0, 0);
5006 +
5007 ++ request = cdns3_next_request(&priv_ep->pending_req_list);
5008 ++ if (request) {
5009 ++ priv_req = to_cdns3_request(request);
5010 ++ trb = priv_req->trb;
5011 ++ if (trb)
5012 ++ trb->control = trb->control ^ TRB_CYCLE;
5013 ++ }
5014 ++
5015 + writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
5016 +
5017 + /* wait for EPRST cleared */
5018 +@@ -2167,10 +2177,11 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
5019 +
5020 + priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);
5021 +
5022 +- request = cdns3_next_request(&priv_ep->pending_req_list);
5023 +-
5024 +- if (request)
5025 ++ if (request) {
5026 ++ if (trb)
5027 ++ trb->control = trb->control ^ TRB_CYCLE;
5028 + cdns3_rearm_transfer(priv_ep, 1);
5029 ++ }
5030 +
5031 + cdns3_start_all_request(priv_dev, priv_ep);
5032 + return ret;
5033 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
5034 +index 1d212f82c69b..54cd8ef795ec 100644
5035 +--- a/drivers/usb/core/hub.c
5036 ++++ b/drivers/usb/core/hub.c
5037 +@@ -988,13 +988,17 @@ int usb_remove_device(struct usb_device *udev)
5038 + {
5039 + struct usb_hub *hub;
5040 + struct usb_interface *intf;
5041 ++ int ret;
5042 +
5043 + if (!udev->parent) /* Can't remove a root hub */
5044 + return -EINVAL;
5045 + hub = usb_hub_to_struct_hub(udev->parent);
5046 + intf = to_usb_interface(hub->intfdev);
5047 +
5048 +- usb_autopm_get_interface(intf);
5049 ++ ret = usb_autopm_get_interface(intf);
5050 ++ if (ret < 0)
5051 ++ return ret;
5052 ++
5053 + set_bit(udev->portnum, hub->removed_bits);
5054 + hub_port_logical_disconnect(hub, udev->portnum);
5055 + usb_autopm_put_interface(intf);
5056 +@@ -1866,7 +1870,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
5057 +
5058 + if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
5059 + hub->quirk_disable_autosuspend = 1;
5060 +- usb_autopm_get_interface(intf);
5061 ++ usb_autopm_get_interface_no_resume(intf);
5062 + }
5063 +
5064 + if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
5065 +diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
5066 +index bbbb35fa639f..235a7c645503 100644
5067 +--- a/drivers/usb/core/port.c
5068 ++++ b/drivers/usb/core/port.c
5069 +@@ -213,7 +213,10 @@ static int usb_port_runtime_resume(struct device *dev)
5070 + if (!port_dev->is_superspeed && peer)
5071 + pm_runtime_get_sync(&peer->dev);
5072 +
5073 +- usb_autopm_get_interface(intf);
5074 ++ retval = usb_autopm_get_interface(intf);
5075 ++ if (retval < 0)
5076 ++ return retval;
5077 ++
5078 + retval = usb_hub_set_port_power(hdev, hub, port1, true);
5079 + msleep(hub_power_on_good_delay(hub));
5080 + if (udev && !retval) {
5081 +@@ -266,7 +269,10 @@ static int usb_port_runtime_suspend(struct device *dev)
5082 + if (usb_port_block_power_off)
5083 + return -EBUSY;
5084 +
5085 +- usb_autopm_get_interface(intf);
5086 ++ retval = usb_autopm_get_interface(intf);
5087 ++ if (retval < 0)
5088 ++ return retval;
5089 ++
5090 + retval = usb_hub_set_port_power(hdev, hub, port1, false);
5091 + usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
5092 + if (!port_dev->is_superspeed)
5093 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
5094 +index 2b24336a72e5..2dac3e7cdd97 100644
5095 +--- a/drivers/usb/core/quirks.c
5096 ++++ b/drivers/usb/core/quirks.c
5097 +@@ -231,6 +231,9 @@ static const struct usb_device_id usb_quirk_list[] = {
5098 + /* Logitech PTZ Pro Camera */
5099 + { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
5100 +
5101 ++ /* Logitech Screen Share */
5102 ++ { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM },
5103 ++
5104 + /* Logitech Quickcam Fusion */
5105 + { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
5106 +
5107 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
5108 +index e0cb1c2d5675..6ac02ba5e4a1 100644
5109 +--- a/drivers/usb/dwc3/gadget.c
5110 ++++ b/drivers/usb/dwc3/gadget.c
5111 +@@ -1068,7 +1068,14 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
5112 + unsigned int rem = length % maxp;
5113 + unsigned chain = true;
5114 +
5115 +- if (sg_is_last(s))
5116 ++ /*
5117 ++ * The IOMMU driver may coalesce sgs that share a page
5118 ++ * boundary into one before handing the list to the USB
5119 ++ * driver, so the number of mapped sgs can be smaller than
5120 ++ * the number of sgs passed in. Clear the chain bit on the
5121 ++ * last mapped sg.
5122 ++ */
5123 ++ if (i == remaining - 1)
5124 + chain = false;
5125 +
5126 + if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
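The comment above captures the whole bug: after DMA mapping, the usable number of sgs can be smaller than the number submitted, so sg_is_last() on the original list is the wrong termination test. A standalone sketch of keying the chain flag off the mapped count instead (plain userspace C, not the kernel code; the array stands in for the scatterlist):

#include <stdio.h>

/* Stand-in for walking a mapped scatterlist: 'submitted' entries were
 * queued, but the IOMMU coalesced them down to 'mapped'. The chain
 * flag must be cleared on the last *mapped* entry, not the last
 * submitted one. */
static void prepare_trbs(int submitted, int mapped)
{
	for (int i = 0; i < mapped; i++) {
		int chain = (i != mapped - 1);	/* mirrors i == remaining - 1 */

		printf("trb %d: chain=%d\n", i, chain);
	}
	(void)submitted;	/* sg_is_last() on this count would be wrong */
}

int main(void)
{
	prepare_trbs(4, 3);	/* 4 sgs submitted, 3 left after coalescing */
	return 0;
}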
5127 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
5128 +index cd303a3ea680..223f72d4d9ed 100644
5129 +--- a/drivers/usb/gadget/composite.c
5130 ++++ b/drivers/usb/gadget/composite.c
5131 +@@ -438,9 +438,13 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
5132 + if (!val)
5133 + return 0;
5134 + if (speed < USB_SPEED_SUPER)
5135 +- return DIV_ROUND_UP(val, 2);
5136 ++ return min(val, 500U) / 2;
5137 + else
5138 +- return DIV_ROUND_UP(val, 8);
5139 ++ /*
5140 ++ * USB 3.x supports up to 900mA, but since 900 isn't divisible
5141 ++ * by 8, integer division effectively caps it at 896mA.
5142 ++ */
5143 ++ return min(val, 900U) / 8;
5144 + }
5145 +
5146 + static int config_buf(struct usb_configuration *config,
5147 +@@ -852,6 +856,10 @@ static int set_config(struct usb_composite_dev *cdev,
5148 +
5149 + /* when we return, be sure our power usage is valid */
5150 + power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
5151 ++ if (gadget->speed < USB_SPEED_SUPER)
5152 ++ power = min(power, 500U);
5153 ++ else
5154 ++ power = min(power, 900U);
5155 + done:
5156 + usb_gadget_vbus_draw(gadget, power);
5157 + if (result >= 0 && cdev->delayed_status)
5158 +@@ -2278,7 +2286,7 @@ void composite_resume(struct usb_gadget *gadget)
5159 + {
5160 + struct usb_composite_dev *cdev = get_gadget_data(gadget);
5161 + struct usb_function *f;
5162 +- u16 maxpower;
5163 ++ unsigned maxpower;
5164 +
5165 + /* REVISIT: should we have config level
5166 + * suspend/resume callbacks?
5167 +@@ -2292,10 +2300,14 @@ void composite_resume(struct usb_gadget *gadget)
5168 + f->resume(f);
5169 + }
5170 +
5171 +- maxpower = cdev->config->MaxPower;
5172 ++ maxpower = cdev->config->MaxPower ?
5173 ++ cdev->config->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
5174 ++ if (gadget->speed < USB_SPEED_SUPER)
5175 ++ maxpower = min(maxpower, 500U);
5176 ++ else
5177 ++ maxpower = min(maxpower, 900U);
5178 +
5179 +- usb_gadget_vbus_draw(gadget, maxpower ?
5180 +- maxpower : CONFIG_USB_GADGET_VBUS_DRAW);
5181 ++ usb_gadget_vbus_draw(gadget, maxpower);
5182 + }
5183 +
5184 + cdev->suspended = 0;
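The encode_bMaxPower() change swaps DIV_ROUND_UP() for a capped integer division: bMaxPower is expressed in 2 mA units below SuperSpeed and 8 mA units at SuperSpeed, and the bus allows at most 500 mA / 900 mA respectively, so rounding up could advertise more current than the spec permits. A standalone sketch of the arithmetic (userspace C, not part of the patch; the helper name is made up):

#include <stdio.h>

/* Mirror of the patched encode_bMaxPower() arithmetic: val is the
 * configuration's MaxPower in mA; the descriptor field is in units
 * of 2 mA (below SuperSpeed) or 8 mA (SuperSpeed and up). */
static unsigned encode_bmaxpower(unsigned val, int superspeed)
{
	if (!val)
		return 0;
	if (!superspeed)
		return (val < 500 ? val : 500) / 2;
	/* 900 isn't divisible by 8, so this caps at 112 units = 896 mA */
	return (val < 900 ? val : 900) / 8;
}

int main(void)
{
	printf("500 mA @ HS  -> %u units (%u mA)\n",
	       encode_bmaxpower(500, 0), encode_bmaxpower(500, 0) * 2);
	printf("900 mA @ SS  -> %u units (%u mA)\n",
	       encode_bmaxpower(900, 1), encode_bmaxpower(900, 1) * 8);
	printf("1000 mA @ SS -> %u units (%u mA)\n",
	       encode_bmaxpower(1000, 1), encode_bmaxpower(1000, 1) * 8);
	return 0;
}

This prints 250 units (500 mA), 112 units (896 mA), and again 112 units for the out-of-spec 1000 mA request, matching the capping behavior the hunks above introduce.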
5185 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
5186 +index 6f8b67e61771..bdac92d3a8d0 100644
5187 +--- a/drivers/usb/gadget/function/f_fs.c
5188 ++++ b/drivers/usb/gadget/function/f_fs.c
5189 +@@ -1162,18 +1162,19 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
5190 + {
5191 + struct ffs_io_data *io_data = kiocb->private;
5192 + struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
5193 ++ unsigned long flags;
5194 + int value;
5195 +
5196 + ENTER();
5197 +
5198 +- spin_lock_irq(&epfile->ffs->eps_lock);
5199 ++ spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
5200 +
5201 + if (likely(io_data && io_data->ep && io_data->req))
5202 + value = usb_ep_dequeue(io_data->ep, io_data->req);
5203 + else
5204 + value = -EINVAL;
5205 +
5206 +- spin_unlock_irq(&epfile->ffs->eps_lock);
5207 ++ spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
5208 +
5209 + return value;
5210 + }
5211 +diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
5212 +index f986e5c55974..8167d379e115 100644
5213 +--- a/drivers/usb/gadget/function/u_serial.c
5214 ++++ b/drivers/usb/gadget/function/u_serial.c
5215 +@@ -561,8 +561,10 @@ static int gs_start_io(struct gs_port *port)
5216 + port->n_read = 0;
5217 + started = gs_start_rx(port);
5218 +
5219 +- /* unblock any pending writes into our circular buffer */
5220 + if (started) {
5221 ++ gs_start_tx(port);
5222 ++ /* Unblock any pending writes into our circular buffer, in case
5223 ++ * we didn't in gs_start_tx() */
5224 + tty_wakeup(port->port.tty);
5225 + } else {
5226 + gs_free_requests(ep, head, &port->read_allocated);
5227 +diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
5228 +index 10c9e7f6273e..29fe5771c21b 100644
5229 +--- a/drivers/usb/misc/usb251xb.c
5230 ++++ b/drivers/usb/misc/usb251xb.c
5231 +@@ -424,10 +424,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
5232 + return err;
5233 + }
5234 +
5235 +- hub->vdd = devm_regulator_get(dev, "vdd");
5236 +- if (IS_ERR(hub->vdd))
5237 +- return PTR_ERR(hub->vdd);
5238 +-
5239 + if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
5240 + hub->vendor_id = USB251XB_DEF_VENDOR_ID;
5241 +
5242 +@@ -640,6 +636,13 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
5243 + }
5244 + #endif /* CONFIG_OF */
5245 +
5246 ++static void usb251xb_regulator_disable_action(void *data)
5247 ++{
5248 ++ struct usb251xb *hub = data;
5249 ++
5250 ++ regulator_disable(hub->vdd);
5251 ++}
5252 ++
5253 + static int usb251xb_probe(struct usb251xb *hub)
5254 + {
5255 + struct device *dev = hub->dev;
5256 +@@ -676,10 +679,19 @@ static int usb251xb_probe(struct usb251xb *hub)
5257 + if (err)
5258 + return err;
5259 +
5260 ++ hub->vdd = devm_regulator_get(dev, "vdd");
5261 ++ if (IS_ERR(hub->vdd))
5262 ++ return PTR_ERR(hub->vdd);
5263 ++
5264 + err = regulator_enable(hub->vdd);
5265 + if (err)
5266 + return err;
5267 +
5268 ++ err = devm_add_action_or_reset(dev,
5269 ++ usb251xb_regulator_disable_action, hub);
5270 ++ if (err)
5271 ++ return err;
5272 ++
5273 + err = usb251xb_connect(hub);
5274 + if (err) {
5275 + dev_err(dev, "Failed to connect hub (%d)\n", err);
5276 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
5277 +index 1cd9b6305b06..1880f3e13f57 100644
5278 +--- a/drivers/usb/storage/unusual_devs.h
5279 ++++ b/drivers/usb/storage/unusual_devs.h
5280 +@@ -1258,6 +1258,12 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
5281 + USB_SC_RBC, USB_PR_BULK, NULL,
5282 + 0 ),
5283 +
5284 ++UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
5285 ++ "Samsung",
5286 ++ "Flash Drive FIT",
5287 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5288 ++ US_FL_MAX_SECTORS_64),
5289 ++
5290 + /* aeb */
5291 + UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
5292 + "Feiya",
5293 +diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
5294 +index de7b8382aba9..998b0de1812f 100644
5295 +--- a/drivers/video/console/vgacon.c
5296 ++++ b/drivers/video/console/vgacon.c
5297 +@@ -1316,6 +1316,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
5298 + static int vgacon_resize(struct vc_data *c, unsigned int width,
5299 + unsigned int height, unsigned int user)
5300 + {
5301 ++ if ((width << 1) * height > vga_vram_size)
5302 ++ return -EINVAL;
5303 ++
5304 + if (width % 2 || width > screen_info.orig_video_cols ||
5305 + height > (screen_info.orig_video_lines * vga_default_font_height)/
5306 + c->vc_font.height)
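The new vgacon_resize() check bounds the requested geometry against video memory before the per-dimension checks run: each VGA text cell is two bytes (character plus attribute byte), hence width << 1. A standalone illustration with a made-up VRAM size (not part of the patch):

#include <stdio.h>

/* A width x height text console needs (width << 1) * height bytes,
 * two bytes per cell. vram_size here is an example value. */
static int vga_resize_ok(unsigned width, unsigned height,
			 unsigned long vram_size)
{
	return (unsigned long)(width << 1) * height <= vram_size;
}

int main(void)
{
	unsigned long vram = 32768;	/* 32 KiB, a common VGA text window */

	printf("80x25   -> %s\n", vga_resize_ok(80, 25, vram) ? "ok" : "too big");
	printf("256x128 -> %s\n", vga_resize_ok(256, 128, vram) ? "ok" : "too big");
	return 0;
}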
5307 +diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
5308 +index e149e66a6ea9..e92f38fcb7a4 100644
5309 +--- a/drivers/watchdog/da9062_wdt.c
5310 ++++ b/drivers/watchdog/da9062_wdt.c
5311 +@@ -94,13 +94,6 @@ static int da9062_wdt_stop(struct watchdog_device *wdd)
5312 + struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
5313 + int ret;
5314 +
5315 +- ret = da9062_reset_watchdog_timer(wdt);
5316 +- if (ret) {
5317 +- dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n",
5318 +- ret);
5319 +- return ret;
5320 +- }
5321 +-
5322 + ret = regmap_update_bits(wdt->hw->regmap,
5323 + DA9062AA_CONTROL_D,
5324 + DA9062AA_TWDSCALE_MASK,
5325 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
5326 +index e6901744a5be..e47708a9bf8b 100644
5327 +--- a/fs/btrfs/inode.c
5328 ++++ b/fs/btrfs/inode.c
5329 +@@ -8444,6 +8444,7 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
5330 + {
5331 + struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
5332 + struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
5333 ++ u16 csum_size;
5334 + blk_status_t ret;
5335 +
5336 + /*
5337 +@@ -8463,7 +8464,8 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
5338 +
5339 + file_offset -= dip->logical_offset;
5340 + file_offset >>= inode->i_sb->s_blocksize_bits;
5341 +- io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
5342 ++ csum_size = btrfs_super_csum_size(btrfs_sb(inode->i_sb)->super_copy);
5343 ++ io_bio->csum = orig_io_bio->csum + csum_size * file_offset;
5344 +
5345 + return 0;
5346 + }
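The btrfs fix replaces pointer arithmetic that hardcoded a 4-byte (u32) checksum stride with a stride of the superblock's checksum size, which matters once checksums larger than crc32c are in use. A standalone sketch of the indexing difference (userspace C; the 32-byte size is illustrative):

#include <stdio.h>
#include <stdint.h>

/* Index into a packed array of per-block checksums. The buggy form
 * advanced by sizeof(u32) regardless of the actual checksum size;
 * the fixed form advances by csum_size. */
static uint8_t *csum_at(uint8_t *csums, uint16_t csum_size, uint64_t block)
{
	return csums + (uint64_t)csum_size * block;
}

int main(void)
{
	uint8_t buf[256] = { 0 };
	uint16_t csum_size = 32;	/* e.g. a 32-byte hash; crc32c would be 4 */

	/* Block 2's checksum starts 64 bytes in with 32-byte checksums;
	 * a fixed u32 stride would have pointed only 8 bytes in. */
	printf("fixed offset: %td\n", csum_at(buf, csum_size, 2) - buf);
	printf("buggy offset: %zu\n", 2 * sizeof(uint32_t));
	return 0;
}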
5347 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
5348 +index 239338d57086..af789aac8ef7 100644
5349 +--- a/fs/cifs/cifsglob.h
5350 ++++ b/fs/cifs/cifsglob.h
5351 +@@ -1277,6 +1277,7 @@ struct cifs_fid {
5352 + __u64 volatile_fid; /* volatile file id for smb2 */
5353 + __u8 lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for smb2 */
5354 + __u8 create_guid[16];
5355 ++ __u32 access;
5356 + struct cifs_pending_open *pending_open;
5357 + unsigned int epoch;
5358 + #ifdef CONFIG_CIFS_DEBUG2
5359 +@@ -1737,6 +1738,12 @@ static inline bool is_retryable_error(int error)
5360 + return false;
5361 + }
5362 +
5363 ++
5364 ++/* cifs_get_writable_file() flags */
5365 ++#define FIND_WR_ANY 0
5366 ++#define FIND_WR_FSUID_ONLY 1
5367 ++#define FIND_WR_WITH_DELETE 2
5368 ++
5369 + #define MID_FREE 0
5370 + #define MID_REQUEST_ALLOCATED 1
5371 + #define MID_REQUEST_SUBMITTED 2
5372 +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
5373 +index d6100299458a..3b583150bcd5 100644
5374 +--- a/fs/cifs/cifsproto.h
5375 ++++ b/fs/cifs/cifsproto.h
5376 +@@ -134,11 +134,12 @@ extern bool backup_cred(struct cifs_sb_info *);
5377 + extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
5378 + extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
5379 + unsigned int bytes_written);
5380 +-extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
5381 ++extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
5382 + extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
5383 +- bool fsuid_only,
5384 ++ int flags,
5385 + struct cifsFileInfo **ret_file);
5386 + extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
5387 ++ int flags,
5388 + struct cifsFileInfo **ret_file);
5389 + extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
5390 + extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
5391 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
5392 +index cc86a67225d1..69c38c379f33 100644
5393 +--- a/fs/cifs/cifssmb.c
5394 ++++ b/fs/cifs/cifssmb.c
5395 +@@ -1492,6 +1492,7 @@ openRetry:
5396 + *oplock = rsp->OplockLevel;
5397 + /* cifs fid stays in le */
5398 + oparms->fid->netfid = rsp->Fid;
5399 ++ oparms->fid->access = desired_access;
5400 +
5401 + /* Let caller know file was created so we can set the mode. */
5402 + /* Do we care about the CreateAction in any other cases? */
5403 +@@ -2115,7 +2116,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
5404 + wdata2->tailsz = tailsz;
5405 + wdata2->bytes = cur_len;
5406 +
5407 +- rc = cifs_get_writable_file(CIFS_I(inode), false,
5408 ++ rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
5409 + &wdata2->cfile);
5410 + if (!wdata2->cfile) {
5411 + cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
5412 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
5413 +index 043288b5c728..5b1460486535 100644
5414 +--- a/fs/cifs/file.c
5415 ++++ b/fs/cifs/file.c
5416 +@@ -1964,7 +1964,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
5417 +
5418 + /* Return -EBADF if no handle is found and general rc otherwise */
5419 + int
5420 +-cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
5421 ++cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
5422 + struct cifsFileInfo **ret_file)
5423 + {
5424 + struct cifsFileInfo *open_file, *inv_file = NULL;
5425 +@@ -1972,7 +1972,8 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
5426 + bool any_available = false;
5427 + int rc = -EBADF;
5428 + unsigned int refind = 0;
5429 +-
5430 ++ bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
5431 ++ bool with_delete = flags & FIND_WR_WITH_DELETE;
5432 + *ret_file = NULL;
5433 +
5434 + /*
5435 +@@ -2004,6 +2005,8 @@ refind_writable:
5436 + continue;
5437 + if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
5438 + continue;
5439 ++ if (with_delete && !(open_file->fid.access & DELETE))
5440 ++ continue;
5441 + if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
5442 + if (!open_file->invalidHandle) {
5443 + /* found a good writable file */
5444 +@@ -2051,12 +2054,12 @@ refind_writable:
5445 + }
5446 +
5447 + struct cifsFileInfo *
5448 +-find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
5449 ++find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
5450 + {
5451 + struct cifsFileInfo *cfile;
5452 + int rc;
5453 +
5454 +- rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile);
5455 ++ rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
5456 + if (rc)
5457 + cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
5458 +
5459 +@@ -2065,6 +2068,7 @@ find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
5460 +
5461 + int
5462 + cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
5463 ++ int flags,
5464 + struct cifsFileInfo **ret_file)
5465 + {
5466 + struct list_head *tmp;
5467 +@@ -2091,7 +2095,7 @@ cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
5468 + kfree(full_path);
5469 + cinode = CIFS_I(d_inode(cfile->dentry));
5470 + spin_unlock(&tcon->open_file_lock);
5471 +- return cifs_get_writable_file(cinode, 0, ret_file);
5472 ++ return cifs_get_writable_file(cinode, flags, ret_file);
5473 + }
5474 +
5475 + spin_unlock(&tcon->open_file_lock);
5476 +@@ -2168,7 +2172,8 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
5477 + if (mapping->host->i_size - offset < (loff_t)to)
5478 + to = (unsigned)(mapping->host->i_size - offset);
5479 +
5480 +- rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file);
5481 ++ rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
5482 ++ &open_file);
5483 + if (!rc) {
5484 + bytes_written = cifs_write(open_file, open_file->pid,
5485 + write_data, to - from, &offset);
5486 +@@ -2361,7 +2366,7 @@ retry:
5487 + if (cfile)
5488 + cifsFileInfo_put(cfile);
5489 +
5490 +- rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile);
5491 ++ rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
5492 +
5493 + /* in case of an error store it to return later */
5494 + if (rc)
5495 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
5496 +index b3f3675e1878..e9a7536c2a5e 100644
5497 +--- a/fs/cifs/inode.c
5498 ++++ b/fs/cifs/inode.c
5499 +@@ -2074,6 +2074,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
5500 + struct inode *inode = d_inode(dentry);
5501 + struct super_block *sb = dentry->d_sb;
5502 + char *full_path = NULL;
5503 ++ int count = 0;
5504 +
5505 + if (inode == NULL)
5506 + return -ENOENT;
5507 +@@ -2095,15 +2096,18 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
5508 + full_path, inode, inode->i_count.counter,
5509 + dentry, cifs_get_time(dentry), jiffies);
5510 +
5511 ++again:
5512 + if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
5513 + rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
5514 + else
5515 + rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
5516 + xid, NULL);
5517 +-
5518 ++ if (rc == -EAGAIN && count++ < 10)
5519 ++ goto again;
5520 + out:
5521 + kfree(full_path);
5522 + free_xid(xid);
5523 ++
5524 + return rc;
5525 + }
5526 +
5527 +@@ -2279,7 +2283,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
5528 + * writebehind data than the SMB timeout for the SetPathInfo
5529 + * request would allow
5530 + */
5531 +- open_file = find_writable_file(cifsInode, true);
5532 ++ open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
5533 + if (open_file) {
5534 + tcon = tlink_tcon(open_file->tlink);
5535 + server = tcon->ses->server;
5536 +@@ -2429,7 +2433,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
5537 + args->ctime = NO_CHANGE_64;
5538 +
5539 + args->device = 0;
5540 +- open_file = find_writable_file(cifsInode, true);
5541 ++ open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
5542 + if (open_file) {
5543 + u16 nfid = open_file->fid.netfid;
5544 + u32 npid = open_file->pid;
5545 +@@ -2532,7 +2536,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
5546 + rc = 0;
5547 +
5548 + if (attrs->ia_valid & ATTR_MTIME) {
5549 +- rc = cifs_get_writable_file(cifsInode, false, &wfile);
5550 ++ rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile);
5551 + if (!rc) {
5552 + tcon = tlink_tcon(wfile->tlink);
5553 + rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
5554 +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
5555 +index d70a2bb062df..e523c05a4487 100644
5556 +--- a/fs/cifs/smb1ops.c
5557 ++++ b/fs/cifs/smb1ops.c
5558 +@@ -765,7 +765,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
5559 + struct cifs_tcon *tcon;
5560 +
5561 + /* if the file is already open for write, just use that fileid */
5562 +- open_file = find_writable_file(cinode, true);
5563 ++ open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
5564 + if (open_file) {
5565 + fid.netfid = open_file->fid.netfid;
5566 + netpid = open_file->pid;
5567 +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
5568 +index 5ef5e97a6d13..bd3669532a09 100644
5569 +--- a/fs/cifs/smb2inode.c
5570 ++++ b/fs/cifs/smb2inode.c
5571 +@@ -526,7 +526,7 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
5572 + cifs_i = CIFS_I(inode);
5573 + dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
5574 + data.Attributes = cpu_to_le32(dosattrs);
5575 +- cifs_get_writable_path(tcon, name, &cfile);
5576 ++ cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile);
5577 + tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
5578 + FILE_WRITE_ATTRIBUTES, FILE_CREATE,
5579 + CREATE_NOT_FILE, ACL_NO_MODE,
5580 +@@ -582,7 +582,7 @@ smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
5581 + {
5582 + struct cifsFileInfo *cfile;
5583 +
5584 +- cifs_get_writable_path(tcon, from_name, &cfile);
5585 ++ cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
5586 +
5587 + return smb2_set_path_attr(xid, tcon, from_name, to_name,
5588 + cifs_sb, DELETE, SMB2_OP_RENAME, cfile);
5589 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
5590 +index 65f76be0f454..5b62840853ff 100644
5591 +--- a/fs/cifs/smb2ops.c
5592 ++++ b/fs/cifs/smb2ops.c
5593 +@@ -1366,6 +1366,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
5594 +
5595 + cfile->fid.persistent_fid = fid->persistent_fid;
5596 + cfile->fid.volatile_fid = fid->volatile_fid;
5597 ++ cfile->fid.access = fid->access;
5598 + #ifdef CONFIG_CIFS_DEBUG2
5599 + cfile->fid.mid = fid->mid;
5600 + #endif /* CIFS_DEBUG2 */
5601 +@@ -3225,7 +3226,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs
5602 + * some servers (Windows2016) will not reflect recent writes in
5603 + * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
5604 + */
5605 +- wrcfile = find_writable_file(cifsi, false);
5606 ++ wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
5607 + if (wrcfile) {
5608 + filemap_write_and_wait(inode->i_mapping);
5609 + smb2_flush_file(xid, tcon, &wrcfile->fid);
5610 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
5611 +index 6c9497c18f0b..fc32fe546c1a 100644
5612 +--- a/fs/cifs/smb2pdu.c
5613 ++++ b/fs/cifs/smb2pdu.c
5614 +@@ -2749,6 +2749,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
5615 + atomic_inc(&tcon->num_remote_opens);
5616 + oparms->fid->persistent_fid = rsp->PersistentFileId;
5617 + oparms->fid->volatile_fid = rsp->VolatileFileId;
5618 ++ oparms->fid->access = oparms->desired_access;
5619 + #ifdef CONFIG_CIFS_DEBUG2
5620 + oparms->fid->mid = le64_to_cpu(rsp->sync_hdr.MessageId);
5621 + #endif /* CIFS_DEBUG2 */
5622 +diff --git a/fs/fat/inode.c b/fs/fat/inode.c
5623 +index 5f04c5c810fb..d40cbad16659 100644
5624 +--- a/fs/fat/inode.c
5625 ++++ b/fs/fat/inode.c
5626 +@@ -749,6 +749,13 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
5627 + return NULL;
5628 +
5629 + init_rwsem(&ei->truncate_lock);
5630 ++ /* Zero these so iput() works even on a partially initialized inode. */
5631 ++ ei->mmu_private = 0;
5632 ++ ei->i_start = 0;
5633 ++ ei->i_logstart = 0;
5634 ++ ei->i_attrs = 0;
5635 ++ ei->i_pos = 0;
5636 ++
5637 + return &ei->vfs_inode;
5638 + }
5639 +
5640 +@@ -1373,16 +1380,6 @@ out:
5641 + return 0;
5642 + }
5643 +
5644 +-static void fat_dummy_inode_init(struct inode *inode)
5645 +-{
5646 +- /* Initialize this dummy inode to work as no-op. */
5647 +- MSDOS_I(inode)->mmu_private = 0;
5648 +- MSDOS_I(inode)->i_start = 0;
5649 +- MSDOS_I(inode)->i_logstart = 0;
5650 +- MSDOS_I(inode)->i_attrs = 0;
5651 +- MSDOS_I(inode)->i_pos = 0;
5652 +-}
5653 +-
5654 + static int fat_read_root(struct inode *inode)
5655 + {
5656 + struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
5657 +@@ -1843,13 +1840,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
5658 + fat_inode = new_inode(sb);
5659 + if (!fat_inode)
5660 + goto out_fail;
5661 +- fat_dummy_inode_init(fat_inode);
5662 + sbi->fat_inode = fat_inode;
5663 +
5664 + fsinfo_inode = new_inode(sb);
5665 + if (!fsinfo_inode)
5666 + goto out_fail;
5667 +- fat_dummy_inode_init(fsinfo_inode);
5668 + fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
5669 + sbi->fsinfo_inode = fsinfo_inode;
5670 + insert_inode_hash(fsinfo_inode);
5671 +diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
5672 +index e34a7b7f848a..294b2931c4cc 100644
5673 +--- a/include/drm/drm_gem_shmem_helper.h
5674 ++++ b/include/drm/drm_gem_shmem_helper.h
5675 +@@ -96,6 +96,11 @@ struct drm_gem_shmem_object {
5676 + * The address are un-mapped when the count reaches zero.
5677 + */
5678 + unsigned int vmap_use_count;
5679 ++
5680 ++ /**
5681 ++ * @map_cached: map object cached (instead of using writecombine).
5682 ++ */
5683 ++ bool map_cached;
5684 + };
5685 +
5686 + #define to_drm_gem_shmem_obj(obj) \
5687 +diff --git a/include/linux/mm.h b/include/linux/mm.h
5688 +index cfaa8feecfe8..70f4278bb193 100644
5689 +--- a/include/linux/mm.h
5690 ++++ b/include/linux/mm.h
5691 +@@ -2687,6 +2687,10 @@ static inline bool debug_pagealloc_enabled_static(void)
5692 + #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
5693 + extern void __kernel_map_pages(struct page *page, int numpages, int enable);
5694 +
5695 ++/*
5696 ++ * When called in DEBUG_PAGEALLOC context, the call should most likely be
5697 ++ * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
5698 ++ */
5699 + static inline void
5700 + kernel_map_pages(struct page *page, int numpages, int enable)
5701 + {
5702 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
5703 +index b0ee5eedeccd..0ff2f43ac9cd 100644
5704 +--- a/kernel/sched/fair.c
5705 ++++ b/kernel/sched/fair.c
5706 +@@ -8315,6 +8315,8 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
5707 +
5708 + sgs->group_capacity = group->sgc->capacity;
5709 +
5710 ++ sgs->group_weight = group->group_weight;
5711 ++
5712 + sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
5713 +
5714 + /*
5715 +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
5716 +index a6d3016410eb..840ef7af20e0 100644
5717 +--- a/kernel/trace/blktrace.c
5718 ++++ b/kernel/trace/blktrace.c
5719 +@@ -1896,8 +1896,11 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
5720 + }
5721 +
5722 + ret = 0;
5723 +- if (bt == NULL)
5724 ++ if (bt == NULL) {
5725 + ret = blk_trace_setup_queue(q, bdev);
5726 ++ bt = rcu_dereference_protected(q->blk_trace,
5727 ++ lockdep_is_held(&q->blk_trace_mutex));
5728 ++ }
5729 +
5730 + if (ret == 0) {
5731 + if (attr == &dev_attr_act_mask)
5732 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
5733 +index 54c106bdbafd..c9f8163bd5bf 100644
5734 +--- a/mm/huge_memory.c
5735 ++++ b/mm/huge_memory.c
5736 +@@ -3032,8 +3032,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
5737 + return;
5738 +
5739 + flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
5740 +- pmdval = *pvmw->pmd;
5741 +- pmdp_invalidate(vma, address, pvmw->pmd);
5742 ++ pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
5743 + if (pmd_dirty(pmdval))
5744 + set_page_dirty(page);
5745 + entry = make_migration_entry(page, pmd_write(pmdval));
5746 +diff --git a/mm/memory.c b/mm/memory.c
5747 +index 45442d9a4f52..0eba7af05777 100644
5748 +--- a/mm/memory.c
5749 ++++ b/mm/memory.c
5750 +@@ -2221,7 +2221,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
5751 + bool ret;
5752 + void *kaddr;
5753 + void __user *uaddr;
5754 +- bool force_mkyoung;
5755 ++ bool locked = false;
5756 + struct vm_area_struct *vma = vmf->vma;
5757 + struct mm_struct *mm = vma->vm_mm;
5758 + unsigned long addr = vmf->address;
5759 +@@ -2246,11 +2246,11 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
5760 + * On architectures with software "accessed" bits, we would
5761 + * take a double page fault, so mark it accessed here.
5762 + */
5763 +- force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte);
5764 +- if (force_mkyoung) {
5765 ++ if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
5766 + pte_t entry;
5767 +
5768 + vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
5769 ++ locked = true;
5770 + if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
5771 + /*
5772 + * Other thread has already handled the fault
5773 +@@ -2274,18 +2274,37 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
5774 + * zeroes.
5775 + */
5776 + if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
5777 ++ if (locked)
5778 ++ goto warn;
5779 ++
5780 ++ /* Re-validate under PTL if the page is still mapped */
5781 ++ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
5782 ++ locked = true;
5783 ++ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
5784 ++ /* The PTE changed under us. Retry page fault. */
5785 ++ ret = false;
5786 ++ goto pte_unlock;
5787 ++ }
5788 ++
5789 + /*
5790 +- * Give a warn in case there can be some obscure
5791 +- * use-case
5792 ++ * The same page may have been mapped back since the last copy attempt.
5793 ++ * Try to copy again under PTL.
5794 + */
5795 +- WARN_ON_ONCE(1);
5796 +- clear_page(kaddr);
5797 ++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
5798 ++ /*
5799 ++ * Warn in case there is some obscure
5800 ++ * use-case
5801 ++ */
5802 ++warn:
5803 ++ WARN_ON_ONCE(1);
5804 ++ clear_page(kaddr);
5805 ++ }
5806 + }
5807 +
5808 + ret = true;
5809 +
5810 + pte_unlock:
5811 +- if (force_mkyoung)
5812 ++ if (locked)
5813 + pte_unmap_unlock(vmf->pte, vmf->ptl);
5814 + kunmap_atomic(kaddr);
5815 + flush_dcache_page(dst);
5816 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
5817 +index 0ddff29079c3..673900faea76 100644
5818 +--- a/mm/memory_hotplug.c
5819 ++++ b/mm/memory_hotplug.c
5820 +@@ -599,7 +599,13 @@ EXPORT_SYMBOL_GPL(restore_online_page_callback);
5821 +
5822 + void generic_online_page(struct page *page, unsigned int order)
5823 + {
5824 +- kernel_map_pages(page, 1 << order, 1);
5825 ++ /*
5826 ++ * Freeing the page with debug_pagealloc enabled will try to unmap it,
5827 ++ * so we should map it first. This is better than introducing a special
5828 ++ * case in the page-freeing fast path.
5829 ++ */
5830 ++ if (debug_pagealloc_enabled_static())
5831 ++ kernel_map_pages(page, 1 << order, 1);
5832 + __free_pages_core(page, order);
5833 + totalram_pages_add(1UL << order);
5834 + #ifdef CONFIG_HIGHMEM
5835 +diff --git a/mm/mprotect.c b/mm/mprotect.c
5836 +index 7a8e84f86831..311c0dadf71c 100644
5837 +--- a/mm/mprotect.c
5838 ++++ b/mm/mprotect.c
5839 +@@ -161,6 +161,31 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
5840 + return pages;
5841 + }
5842 +
5843 ++/*
5844 ++ * Used when setting automatic NUMA hinting protection where it is
5845 ++ * critical that a numa hinting PMD is not confused with a bad PMD.
5846 ++ */
5847 ++static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
5848 ++{
5849 ++ pmd_t pmdval = pmd_read_atomic(pmd);
5850 ++
5851 ++ /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
5852 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5853 ++ barrier();
5854 ++#endif
5855 ++
5856 ++ if (pmd_none(pmdval))
5857 ++ return 1;
5858 ++ if (pmd_trans_huge(pmdval))
5859 ++ return 0;
5860 ++ if (unlikely(pmd_bad(pmdval))) {
5861 ++ pmd_clear_bad(pmd);
5862 ++ return 1;
5863 ++ }
5864 ++
5865 ++ return 0;
5866 ++}
5867 ++
5868 + static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
5869 + pud_t *pud, unsigned long addr, unsigned long end,
5870 + pgprot_t newprot, int dirty_accountable, int prot_numa)
5871 +@@ -178,8 +203,17 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
5872 + unsigned long this_pages;
5873 +
5874 + next = pmd_addr_end(addr, end);
5875 +- if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
5876 +- && pmd_none_or_clear_bad(pmd))
5877 ++
5878 ++ /*
5879 ++ * Automatic NUMA balancing walks the tables with mmap_sem
5880 ++ * held for read. It's possible for a parallel update to occur
5881 ++ * between pmd_trans_huge() and a pmd_none_or_clear_bad()
5882 ++ * check, leading to a false positive and the entry being cleared.
5883 ++ * Hence, it's necessary to atomically read the PMD value
5884 ++ * for all the checks.
5885 ++ */
5886 ++ if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
5887 ++ pmd_none_or_clear_bad_unless_trans_huge(pmd))
5888 + goto next;
5889 +
5890 + /* invoke the mmu notifier if the pmd is populated */
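pmd_none_or_clear_bad_unless_trans_huge() works by reading the PMD once (pmd_read_atomic()) and making every decision on that snapshot, so a racing transparent-hugepage update cannot make the none/bad checks disagree with the trans-huge check. The same snapshot-then-decide pattern in standalone C11 (a toy stand-in, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

/* Toy PMD states packed into one word, read with a single atomic
 * load so every subsequent check sees one consistent value. */
enum { PMD_NONE = 0, PMD_TRANS_HUGE = 1, PMD_NORMAL = 2 };

static _Atomic int pmd = PMD_NORMAL;

static int none_or_bad_unless_trans_huge(void)
{
	int snap = atomic_load(&pmd);	/* one snapshot, like pmd_read_atomic() */

	if (snap == PMD_NONE)
		return 1;		/* skip this range */
	if (snap == PMD_TRANS_HUGE)
		return 0;		/* handled by the huge-page path */
	return 0;			/* normal table, walk the PTEs */
}

int main(void)
{
	/* Re-reading 'pmd' between the checks instead of using 'snap'
	 * would reintroduce exactly the race the patch closes. */
	printf("skip=%d\n", none_or_bad_unless_trans_huge());
	return 0;
}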
5891 +diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
5892 +index 1b68a131083c..8c835ad63729 100644
5893 +--- a/net/netfilter/xt_hashlimit.c
5894 ++++ b/net/netfilter/xt_hashlimit.c
5895 +@@ -358,21 +358,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
5896 + return 0;
5897 + }
5898 +
5899 +-static bool select_all(const struct xt_hashlimit_htable *ht,
5900 +- const struct dsthash_ent *he)
5901 +-{
5902 +- return true;
5903 +-}
5904 +-
5905 +-static bool select_gc(const struct xt_hashlimit_htable *ht,
5906 +- const struct dsthash_ent *he)
5907 +-{
5908 +- return time_after_eq(jiffies, he->expires);
5909 +-}
5910 +-
5911 +-static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
5912 +- bool (*select)(const struct xt_hashlimit_htable *ht,
5913 +- const struct dsthash_ent *he))
5914 ++static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool select_all)
5915 + {
5916 + unsigned int i;
5917 +
5918 +@@ -382,7 +368,7 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
5919 +
5920 + spin_lock_bh(&ht->lock);
5921 + hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
5922 +- if ((*select)(ht, dh))
5923 ++ if (time_after_eq(jiffies, dh->expires) || select_all)
5924 + dsthash_free(ht, dh);
5925 + }
5926 + spin_unlock_bh(&ht->lock);
5927 +@@ -396,7 +382,7 @@ static void htable_gc(struct work_struct *work)
5928 +
5929 + ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
5930 +
5931 +- htable_selective_cleanup(ht, select_gc);
5932 ++ htable_selective_cleanup(ht, false);
5933 +
5934 + queue_delayed_work(system_power_efficient_wq,
5935 + &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
5936 +@@ -416,15 +402,6 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
5937 + remove_proc_entry(hinfo->name, parent);
5938 + }
5939 +
5940 +-static void htable_destroy(struct xt_hashlimit_htable *hinfo)
5941 +-{
5942 +- cancel_delayed_work_sync(&hinfo->gc_work);
5943 +- htable_remove_proc_entry(hinfo);
5944 +- htable_selective_cleanup(hinfo, select_all);
5945 +- kfree(hinfo->name);
5946 +- vfree(hinfo);
5947 +-}
5948 +-
5949 + static struct xt_hashlimit_htable *htable_find_get(struct net *net,
5950 + const char *name,
5951 + u_int8_t family)
5952 +@@ -446,8 +423,13 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
5953 + {
5954 + if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
5955 + hlist_del(&hinfo->node);
5956 ++ htable_remove_proc_entry(hinfo);
5957 + mutex_unlock(&hashlimit_mutex);
5958 +- htable_destroy(hinfo);
5959 ++
5960 ++ cancel_delayed_work_sync(&hinfo->gc_work);
5961 ++ htable_selective_cleanup(hinfo, true);
5962 ++ kfree(hinfo->name);
5963 ++ vfree(hinfo);
5964 + }
5965 + }
5966 +
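
Two things change in the hashlimit hunk above: the select_all/select_gc
callbacks collapse into a bool (false keeps unexpired entries, as in the
periodic gc; true drops everything, as at teardown), and htable_destroy() is
folded into htable_put() so the proc entry disappears while hashlimit_mutex is
still held, closing the window in which htable_find_get() could look up a
table that is being torn down. The resulting teardown order, annotated:

    if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
            hlist_del(&hinfo->node);          /* no longer findable        */
            htable_remove_proc_entry(hinfo);  /* still under the mutex     */
            mutex_unlock(&hashlimit_mutex);

            cancel_delayed_work_sync(&hinfo->gc_work); /* may sleep; mutex dropped */
            htable_selective_cleanup(hinfo, true);     /* true: free every entry   */
            kfree(hinfo->name);
            vfree(hinfo);
    }
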
5967 +diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
5968 +index 111898aad56e..f0c908241966 100644
5969 +--- a/security/integrity/platform_certs/load_uefi.c
5970 ++++ b/security/integrity/platform_certs/load_uefi.c
5971 +@@ -35,16 +35,18 @@ static __init bool uefi_check_ignore_db(void)
5972 + * Get a certificate list blob from the named EFI variable.
5973 + */
5974 + static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
5975 +- unsigned long *size)
5976 ++ unsigned long *size, efi_status_t *status)
5977 + {
5978 +- efi_status_t status;
5979 + unsigned long lsize = 4;
5980 + unsigned long tmpdb[4];
5981 + void *db;
5982 +
5983 +- status = efi.get_variable(name, guid, NULL, &lsize, &tmpdb);
5984 +- if (status != EFI_BUFFER_TOO_SMALL) {
5985 +- pr_err("Couldn't get size: 0x%lx\n", status);
5986 ++ *status = efi.get_variable(name, guid, NULL, &lsize, &tmpdb);
5987 ++ if (*status == EFI_NOT_FOUND)
5988 ++ return NULL;
5989 ++
5990 ++ if (*status != EFI_BUFFER_TOO_SMALL) {
5991 ++ pr_err("Couldn't get size: 0x%lx\n", *status);
5992 + return NULL;
5993 + }
5994 +
5995 +@@ -52,10 +54,10 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
5996 + if (!db)
5997 + return NULL;
5998 +
5999 +- status = efi.get_variable(name, guid, NULL, &lsize, db);
6000 +- if (status != EFI_SUCCESS) {
6001 ++ *status = efi.get_variable(name, guid, NULL, &lsize, db);
6002 ++ if (*status != EFI_SUCCESS) {
6003 + kfree(db);
6004 +- pr_err("Error reading db var: 0x%lx\n", status);
6005 ++ pr_err("Error reading db var: 0x%lx\n", *status);
6006 + return NULL;
6007 + }
6008 +
6009 +@@ -74,6 +76,7 @@ static int __init load_uefi_certs(void)
6010 + efi_guid_t mok_var = EFI_SHIM_LOCK_GUID;
6011 + void *db = NULL, *dbx = NULL, *mok = NULL;
6012 + unsigned long dbsize = 0, dbxsize = 0, moksize = 0;
6013 ++ efi_status_t status;
6014 + int rc = 0;
6015 +
6016 + if (!efi.get_variable)
6017 +@@ -83,9 +86,12 @@ static int __init load_uefi_certs(void)
6018 + * an error if we can't get them.
6019 + */
6020 + if (!uefi_check_ignore_db()) {
6021 +- db = get_cert_list(L"db", &secure_var, &dbsize);
6022 ++ db = get_cert_list(L"db", &secure_var, &dbsize, &status);
6023 + if (!db) {
6024 +- pr_err("MODSIGN: Couldn't get UEFI db list\n");
6025 ++ if (status == EFI_NOT_FOUND)
6026 ++ pr_debug("MODSIGN: db variable wasn't found\n");
6027 ++ else
6028 ++ pr_err("MODSIGN: Couldn't get UEFI db list\n");
6029 + } else {
6030 + rc = parse_efi_signature_list("UEFI:db",
6031 + db, dbsize, get_handler_for_db);
6032 +@@ -96,9 +102,12 @@ static int __init load_uefi_certs(void)
6033 + }
6034 + }
6035 +
6036 +- mok = get_cert_list(L"MokListRT", &mok_var, &moksize);
6037 ++ mok = get_cert_list(L"MokListRT", &mok_var, &moksize, &status);
6038 + if (!mok) {
6039 +- pr_info("Couldn't get UEFI MokListRT\n");
6040 ++ if (status == EFI_NOT_FOUND)
6041 ++ pr_debug("MokListRT variable wasn't found\n");
6042 ++ else
6043 ++ pr_info("Couldn't get UEFI MokListRT\n");
6044 + } else {
6045 + rc = parse_efi_signature_list("UEFI:MokListRT",
6046 + mok, moksize, get_handler_for_db);
6047 +@@ -107,9 +116,12 @@ static int __init load_uefi_certs(void)
6048 + kfree(mok);
6049 + }
6050 +
6051 +- dbx = get_cert_list(L"dbx", &secure_var, &dbxsize);
6052 ++ dbx = get_cert_list(L"dbx", &secure_var, &dbxsize, &status);
6053 + if (!dbx) {
6054 +- pr_info("Couldn't get UEFI dbx list\n");
6055 ++ if (status == EFI_NOT_FOUND)
6056 ++ pr_debug("dbx variable wasn't found\n");
6057 ++ else
6058 ++ pr_info("Couldn't get UEFI dbx list\n");
6059 + } else {
6060 + rc = parse_efi_signature_list("UEFI:dbx",
6061 + dbx, dbxsize,
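
The get_cert_list() signature change above is the usual out-parameter pattern
for error disambiguation: a NULL return alone cannot tell the caller whether
the EFI variable was simply absent (EFI_NOT_FOUND, expected on systems that
ship no db/dbx/MokListRT) or whether a real read failure occurred. A minimal
caller sketch, mirroring the db case from the hunk:

    efi_status_t status;
    unsigned long dbsize = 0;
    void *db = get_cert_list(L"db", &secure_var, &dbsize, &status);
    if (!db) {
            if (status == EFI_NOT_FOUND)
                    pr_debug("db variable wasn't found\n"); /* benign */
            else
                    pr_err("Couldn't get UEFI db list\n");  /* real failure */
    }
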
6062 +diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
6063 +index cfab60d88c92..09ff209df4a3 100644
6064 +--- a/sound/hda/ext/hdac_ext_controller.c
6065 ++++ b/sound/hda/ext/hdac_ext_controller.c
6066 +@@ -254,6 +254,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down_all);
6067 + int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
6068 + struct hdac_ext_link *link)
6069 + {
6070 ++ unsigned long codec_mask;
6071 + int ret = 0;
6072 +
6073 + mutex_lock(&bus->lock);
6074 +@@ -280,9 +281,11 @@ int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
6075 + * HDA spec section 4.3 - Codec Discovery
6076 + */
6077 + udelay(521);
6078 +- bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
6079 +- dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask);
6080 +- snd_hdac_chip_writew(bus, STATESTS, bus->codec_mask);
6081 ++ codec_mask = snd_hdac_chip_readw(bus, STATESTS);
6082 ++ dev_dbg(bus->dev, "codec_mask = 0x%lx\n", codec_mask);
6083 ++ snd_hdac_chip_writew(bus, STATESTS, codec_mask);
6084 ++ if (!bus->codec_mask)
6085 ++ bus->codec_mask = codec_mask;
6086 + }
6087 +
6088 + mutex_unlock(&bus->lock);
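
STATESTS latches which codecs asserted presence and is write-one-to-clear, so
the read-then-ack sequence above is destructive. The apparent intent of the
new guard is that only the first discovery seeds bus->codec_mask; a later
snd_hdac_ext_bus_link_get(), when codecs may no longer re-assert presence,
can then no longer overwrite a valid mask with zero. The core of the hunk:

    unsigned long codec_mask = snd_hdac_chip_readw(bus, STATESTS);
    snd_hdac_chip_writew(bus, STATESTS, codec_mask); /* W1C: ack the bits    */
    if (!bus->codec_mask)                            /* first discovery only */
            bus->codec_mask = codec_mask;
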
6089 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6090 +index 7ba3ef6b673d..4436ebbea108 100644
6091 +--- a/sound/pci/hda/patch_realtek.c
6092 ++++ b/sound/pci/hda/patch_realtek.c
6093 +@@ -2447,6 +2447,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
6094 + SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
6095 + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
6096 + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
6097 ++ SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
6098 + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
6099 + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
6100 + SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
6101 +@@ -5920,7 +5921,8 @@ enum {
6102 + ALC289_FIXUP_DUAL_SPK,
6103 + ALC294_FIXUP_SPK2_TO_DAC1,
6104 + ALC294_FIXUP_ASUS_DUAL_SPK,
6105 +-
6106 ++ ALC285_FIXUP_THINKPAD_HEADSET_JACK,
6107 ++ ALC294_FIXUP_ASUS_HPE,
6108 + };
6109 +
6110 + static const struct hda_fixup alc269_fixups[] = {
6111 +@@ -6684,6 +6686,8 @@ static const struct hda_fixup alc269_fixups[] = {
6112 + [ALC285_FIXUP_SPEAKER2_TO_DAC1] = {
6113 + .type = HDA_FIXUP_FUNC,
6114 + .v.func = alc285_fixup_speaker2_to_dac1,
6115 ++ .chained = true,
6116 ++ .chain_id = ALC269_FIXUP_THINKPAD_ACPI
6117 + },
6118 + [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
6119 + .type = HDA_FIXUP_PINS,
6120 +@@ -7040,7 +7044,23 @@ static const struct hda_fixup alc269_fixups[] = {
6121 + .chained = true,
6122 + .chain_id = ALC294_FIXUP_SPK2_TO_DAC1
6123 + },
6124 +-
6125 ++ [ALC285_FIXUP_THINKPAD_HEADSET_JACK] = {
6126 ++ .type = HDA_FIXUP_FUNC,
6127 ++ .v.func = alc_fixup_headset_jack,
6128 ++ .chained = true,
6129 ++ .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1
6130 ++ },
6131 ++ [ALC294_FIXUP_ASUS_HPE] = {
6132 ++ .type = HDA_FIXUP_VERBS,
6133 ++ .v.verbs = (const struct hda_verb[]) {
6134 ++ /* Set EAPD high */
6135 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f },
6136 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 },
6137 ++ { }
6138 ++ },
6139 ++ .chained = true,
6140 ++ .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
6141 ++ },
6142 + };
6143 +
6144 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6145 +@@ -7115,6 +7135,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6146 + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6147 + SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
6148 + SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
6149 ++ SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
6150 ++ SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
6151 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6152 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6153 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
6154 +@@ -7204,6 +7226,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6155 + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
6156 + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
6157 + SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
6158 ++ SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
6159 + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
6160 + SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
6161 + SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
6162 +@@ -7274,8 +7297,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6163 + SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6164 + SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6165 + SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
6166 +- SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
6167 +- SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
6168 ++ SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
6169 ++ SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
6170 + SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6171 + SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6172 + SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6173 +diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
6174 +index 861210f6bf4f..4cbef9affffd 100644
6175 +--- a/sound/soc/codecs/pcm512x.c
6176 ++++ b/sound/soc/codecs/pcm512x.c
6177 +@@ -1564,13 +1564,15 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
6178 + }
6179 +
6180 + pcm512x->sclk = devm_clk_get(dev, NULL);
6181 +- if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
6182 +- return -EPROBE_DEFER;
6183 ++ if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) {
6184 ++ ret = -EPROBE_DEFER;
6185 ++ goto err;
6186 ++ }
6187 + if (!IS_ERR(pcm512x->sclk)) {
6188 + ret = clk_prepare_enable(pcm512x->sclk);
6189 + if (ret != 0) {
6190 + dev_err(dev, "Failed to enable SCLK: %d\n", ret);
6191 +- return ret;
6192 ++ goto err;
6193 + }
6194 + }
6195 +
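
The pcm512x hunk converts two early returns into goto err so that probe
failures unwind through the function's single error label; a bare return after
the supplies have been enabled would leak them. A sketch of the pattern,
assuming the driver's existing err: label, whose body (shown here for
illustration) disables the bulk regulators enabled earlier in probe:

    pcm512x->sclk = devm_clk_get(dev, NULL);
    if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) {
            ret = -EPROBE_DEFER;
            goto err;                /* unwind before deferring */
    }
    /* ... rest of probe ... */
    err:
            regulator_bulk_disable(ARRAY_SIZE(pcm512x->supplies),
                                   pcm512x->supplies);
            return ret;
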
6196 +diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
6197 +index 4e45901e3a2f..11eaee9ae41f 100644
6198 +--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
6199 ++++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
6200 +@@ -100,6 +100,8 @@ static struct snd_soc_card hda_soc_card = {
6201 + .late_probe = skl_hda_card_late_probe,
6202 + };
6203 +
6204 ++static char hda_soc_components[30];
6205 ++
6206 + #define IDISP_DAI_COUNT 3
6207 + #define HDAC_DAI_COUNT 2
6208 + #define DMIC_DAI_COUNT 2
6209 +@@ -183,6 +185,12 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
6210 + hda_soc_card.dev = &pdev->dev;
6211 + snd_soc_card_set_drvdata(&hda_soc_card, ctx);
6212 +
6213 ++ if (mach->mach_params.dmic_num > 0) {
6214 ++ snprintf(hda_soc_components, sizeof(hda_soc_components),
6215 ++ "cfg-dmics:%d", mach->mach_params.dmic_num);
6216 ++ hda_soc_card.components = hda_soc_components;
6217 ++ }
6218 ++
6219 + return devm_snd_soc_register_card(&pdev->dev, &hda_soc_card);
6220 + }
6221 +
6222 +diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c
6223 +index 3466675f2678..a15aa2ffa681 100644
6224 +--- a/sound/soc/intel/skylake/skl-debug.c
6225 ++++ b/sound/soc/intel/skylake/skl-debug.c
6226 +@@ -34,8 +34,8 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
6227 + int i;
6228 + ssize_t ret = 0;
6229 +
6230 +- for (i = 0; i < max_pin; i++)
6231 +- ret += snprintf(buf + size, MOD_BUF - size,
6232 ++ for (i = 0; i < max_pin; i++) {
6233 ++ ret += scnprintf(buf + size, MOD_BUF - size,
6234 + "%s %d\n\tModule %d\n\tInstance %d\n\t"
6235 + "In-used %s\n\tType %s\n"
6236 + "\tState %d\n\tIndex %d\n",
6237 +@@ -45,13 +45,15 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
6238 + m_pin[i].in_use ? "Used" : "Unused",
6239 + m_pin[i].is_dynamic ? "Dynamic" : "Static",
6240 + m_pin[i].pin_state, i);
6241 ++ size += ret;
6242 ++ }
6243 + return ret;
6244 + }
6245 +
6246 + static ssize_t skl_print_fmt(struct skl_module_fmt *fmt, char *buf,
6247 + ssize_t size, bool direction)
6248 + {
6249 +- return snprintf(buf + size, MOD_BUF - size,
6250 ++ return scnprintf(buf + size, MOD_BUF - size,
6251 + "%s\n\tCh %d\n\tFreq %d\n\tBit depth %d\n\t"
6252 + "Valid bit depth %d\n\tCh config %#x\n\tInterleaving %d\n\t"
6253 + "Sample Type %d\n\tCh Map %#x\n",
6254 +@@ -75,16 +77,16 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
6255 + if (!buf)
6256 + return -ENOMEM;
6257 +
6258 +- ret = snprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
6259 ++ ret = scnprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
6260 + "\tInstance id %d\n\tPvt_id %d\n", mconfig->guid,
6261 + mconfig->id.module_id, mconfig->id.instance_id,
6262 + mconfig->id.pvt_id);
6263 +
6264 +- ret += snprintf(buf + ret, MOD_BUF - ret,
6265 ++ ret += scnprintf(buf + ret, MOD_BUF - ret,
6266 + "Resources:\n\tCPC %#x\n\tIBS %#x\n\tOBS %#x\t\n",
6267 + res->cpc, res->ibs, res->obs);
6268 +
6269 +- ret += snprintf(buf + ret, MOD_BUF - ret,
6270 ++ ret += scnprintf(buf + ret, MOD_BUF - ret,
6271 + "Module data:\n\tCore %d\n\tIn queue %d\n\t"
6272 + "Out queue %d\n\tType %s\n",
6273 + mconfig->core_id, mconfig->max_in_queue,
6274 +@@ -94,38 +96,38 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
6275 + ret += skl_print_fmt(mconfig->in_fmt, buf, ret, true);
6276 + ret += skl_print_fmt(mconfig->out_fmt, buf, ret, false);
6277 +
6278 +- ret += snprintf(buf + ret, MOD_BUF - ret,
6279 ++ ret += scnprintf(buf + ret, MOD_BUF - ret,
6280 + "Fixup:\n\tParams %#x\n\tConverter %#x\n",
6281 + mconfig->params_fixup, mconfig->converter);
6282 +
6283 +- ret += snprintf(buf + ret, MOD_BUF - ret,
6284 ++ ret += scnprintf(buf + ret, MOD_BUF - ret,
6285 + "Module Gateway:\n\tType %#x\n\tVbus %#x\n\tHW conn %#x\n\tSlot %#x\n",
6286 + mconfig->dev_type, mconfig->vbus_id,
6287 + mconfig->hw_conn_type, mconfig->time_slot);
6288 +
6289 +- ret += snprintf(buf + ret, MOD_BUF - ret,
6290 ++ ret += scnprintf(buf + ret, MOD_BUF - ret,
6291 + "Pipeline:\n\tID %d\n\tPriority %d\n\tConn Type %d\n\t"
6292 + "Pages %#x\n", mconfig->pipe->ppl_id,
6293 + mconfig->pipe->pipe_priority, mconfig->pipe->conn_type,
6294 + mconfig->pipe->memory_pages);
6295 +
6296 +- ret += snprintf(buf + ret, MOD_BUF - ret,
6297 ++ ret += scnprintf(buf + ret, MOD_BUF - ret,
6298 + "\tParams:\n\t\tHost DMA %d\n\t\tLink DMA %d\n",
6299 + mconfig->pipe->p_params->host_dma_id,
6300 + mconfig->pipe->p_params->link_dma_id);
6301 +
6302 +- ret += snprintf(buf + ret, MOD_BUF - ret,
6303 ++ ret += scnprintf(buf + ret, MOD_BUF - ret,
6304 + "\tPCM params:\n\t\tCh %d\n\t\tFreq %d\n\t\tFormat %d\n",
6305 + mconfig->pipe->p_params->ch,
6306 + mconfig->pipe->p_params->s_freq,
6307 + mconfig->pipe->p_params->s_fmt);
6308 +
6309 +- ret += snprintf(buf + ret, MOD_BUF - ret,
6310 ++ ret += scnprintf(buf + ret, MOD_BUF - ret,
6311 + "\tLink %#x\n\tStream %#x\n",
6312 + mconfig->pipe->p_params->linktype,
6313 + mconfig->pipe->p_params->stream);
6314 +
6315 +- ret += snprintf(buf + ret, MOD_BUF - ret,
6316 ++ ret += scnprintf(buf + ret, MOD_BUF - ret,
6317 + "\tState %d\n\tPassthru %s\n",
6318 + mconfig->pipe->state,
6319 + mconfig->pipe->passthru ? "true" : "false");
6320 +@@ -135,7 +137,7 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
6321 + ret += skl_print_pins(mconfig->m_out_pin, buf,
6322 + mconfig->max_out_queue, ret, false);
6323 +
6324 +- ret += snprintf(buf + ret, MOD_BUF - ret,
6325 ++ ret += scnprintf(buf + ret, MOD_BUF - ret,
6326 + "Other:\n\tDomain %d\n\tHomogeneous Input %s\n\t"
6327 + "Homogeneous Output %s\n\tIn Queue Mask %d\n\t"
6328 + "Out Queue Mask %d\n\tDMA ID %d\n\tMem Pages %d\n\t"
6329 +@@ -191,7 +193,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
6330 + __ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
6331 +
6332 + for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
6333 +- ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
6334 ++ ret += scnprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
6335 + hex_dump_to_buffer(d->fw_read_buff + offset, 16, 16, 4,
6336 + tmp + ret, FW_REG_BUF - ret, 0);
6337 + ret += strlen(tmp + ret);
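
The snprintf-to-scnprintf conversions above fix a classic offset-accumulation
bug: snprintf() returns the length the output would have had, even when it was
truncated, while the kernel's scnprintf() returns the bytes actually written
(excluding the trailing NUL). Accumulating with the former can push the offset
past the buffer, after which the unsigned size argument underflows and the
next call writes out of bounds:

    char buf[16];
    ssize_t ret = 0;

    /* Wants 20 bytes; only 15 + NUL fit, yet snprintf returns 20. */
    ret += snprintf(buf + ret, sizeof(buf) - ret, "01234567890123456789");

    /* sizeof(buf) - ret is now 16 - 20, a huge unsigned value, so the
     * next snprintf scribbles past buf.  scnprintf would have returned
     * 15 here, keeping ret a valid offset for the next call. */
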
6338 +diff --git a/sound/soc/intel/skylake/skl-ssp-clk.c b/sound/soc/intel/skylake/skl-ssp-clk.c
6339 +index 1c0e5226cb5b..bd43885f3805 100644
6340 +--- a/sound/soc/intel/skylake/skl-ssp-clk.c
6341 ++++ b/sound/soc/intel/skylake/skl-ssp-clk.c
6342 +@@ -384,9 +384,11 @@ static int skl_clk_dev_probe(struct platform_device *pdev)
6343 + &clks[i], clk_pdata, i);
6344 +
6345 + if (IS_ERR(data->clk[data->avail_clk_cnt])) {
6346 +- ret = PTR_ERR(data->clk[data->avail_clk_cnt++]);
6347 ++ ret = PTR_ERR(data->clk[data->avail_clk_cnt]);
6348 + goto err_unreg_skl_clk;
6349 + }
6350 ++
6351 ++ data->avail_clk_cnt++;
6352 + }
6353 +
6354 + platform_set_drvdata(pdev, data);
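
Moving the increment out of PTR_ERR() matters because the old post-increment
bumped avail_clk_cnt even for the failed slot, so the err_unreg_skl_clk unwind
path would also try to unregister a clock that was never registered. After the
hunk the counter only ever counts successes:

    if (IS_ERR(data->clk[data->avail_clk_cnt])) {
            ret = PTR_ERR(data->clk[data->avail_clk_cnt]);
            goto err_unreg_skl_clk;  /* counter == clocks actually registered */
    }
    data->avail_clk_cnt++;           /* increment only on success */
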
6355 +diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
6356 +index b94680fb26fa..a770e66b233a 100644
6357 +--- a/sound/soc/soc-component.c
6358 ++++ b/sound/soc/soc-component.c
6359 +@@ -452,7 +452,7 @@ int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream)
6360 + int ret;
6361 +
6362 + for_each_rtd_components(rtd, rtdcom, component) {
6363 +- if (component->driver->ioctl) {
6364 ++ if (component->driver->sync_stop) {
6365 + ret = component->driver->sync_stop(component,
6366 + substream);
6367 + if (ret < 0)
6368 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
6369 +index 935b5375ecc5..ebd785f9aa46 100644
6370 +--- a/sound/soc/soc-dapm.c
6371 ++++ b/sound/soc/soc-dapm.c
6372 +@@ -4749,7 +4749,7 @@ static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm)
6373 + continue;
6374 + if (w->power) {
6375 + dapm_seq_insert(w, &down_list, false);
6376 +- w->power = 0;
6377 ++ w->new_power = 0;
6378 + powerdown = 1;
6379 + }
6380 + }
6381 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
6382 +index 01e7bc03d92f..8de29f48442f 100644
6383 +--- a/sound/soc/soc-pcm.c
6384 ++++ b/sound/soc/soc-pcm.c
6385 +@@ -3192,16 +3192,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
6386 + unsigned long flags;
6387 +
6388 + /* FE state */
6389 +- offset += snprintf(buf + offset, size - offset,
6390 ++ offset += scnprintf(buf + offset, size - offset,
6391 + "[%s - %s]\n", fe->dai_link->name,
6392 + stream ? "Capture" : "Playback");
6393 +
6394 +- offset += snprintf(buf + offset, size - offset, "State: %s\n",
6395 ++ offset += scnprintf(buf + offset, size - offset, "State: %s\n",
6396 + dpcm_state_string(fe->dpcm[stream].state));
6397 +
6398 + if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
6399 + (fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
6400 +- offset += snprintf(buf + offset, size - offset,
6401 ++ offset += scnprintf(buf + offset, size - offset,
6402 + "Hardware Params: "
6403 + "Format = %s, Channels = %d, Rate = %d\n",
6404 + snd_pcm_format_name(params_format(params)),
6405 +@@ -3209,10 +3209,10 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
6406 + params_rate(params));
6407 +
6408 + /* BEs state */
6409 +- offset += snprintf(buf + offset, size - offset, "Backends:\n");
6410 ++ offset += scnprintf(buf + offset, size - offset, "Backends:\n");
6411 +
6412 + if (list_empty(&fe->dpcm[stream].be_clients)) {
6413 +- offset += snprintf(buf + offset, size - offset,
6414 ++ offset += scnprintf(buf + offset, size - offset,
6415 + " No active DSP links\n");
6416 + goto out;
6417 + }
6418 +@@ -3222,16 +3222,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
6419 + struct snd_soc_pcm_runtime *be = dpcm->be;
6420 + params = &dpcm->hw_params;
6421 +
6422 +- offset += snprintf(buf + offset, size - offset,
6423 ++ offset += scnprintf(buf + offset, size - offset,
6424 + "- %s\n", be->dai_link->name);
6425 +
6426 +- offset += snprintf(buf + offset, size - offset,
6427 ++ offset += scnprintf(buf + offset, size - offset,
6428 + " State: %s\n",
6429 + dpcm_state_string(be->dpcm[stream].state));
6430 +
6431 + if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
6432 + (be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
6433 +- offset += snprintf(buf + offset, size - offset,
6434 ++ offset += scnprintf(buf + offset, size - offset,
6435 + " Hardware Params: "
6436 + "Format = %s, Channels = %d, Rate = %d\n",
6437 + snd_pcm_format_name(params_format(params)),
6438 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
6439 +index 0119f07cece6..56a7142f15a0 100644
6440 +--- a/sound/soc/soc-topology.c
6441 ++++ b/sound/soc/soc-topology.c
6442 +@@ -2335,8 +2335,11 @@ static int soc_tplg_link_elems_load(struct soc_tplg *tplg,
6443 + }
6444 +
6445 + ret = soc_tplg_link_config(tplg, _link);
6446 +- if (ret < 0)
6447 ++ if (ret < 0) {
6448 ++ if (!abi_match)
6449 ++ kfree(_link);
6450 + return ret;
6451 ++ }
6452 +
6453 + /* offset by version-specific struct size and
6454 + * real priv data size
6455 +@@ -2500,7 +2503,7 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
6456 + {
6457 + struct snd_soc_tplg_manifest *manifest, *_manifest;
6458 + bool abi_match;
6459 +- int err;
6460 ++ int ret = 0;
6461 +
6462 + if (tplg->pass != SOC_TPLG_PASS_MANIFEST)
6463 + return 0;
6464 +@@ -2513,19 +2516,19 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
6465 + _manifest = manifest;
6466 + } else {
6467 + abi_match = false;
6468 +- err = manifest_new_ver(tplg, manifest, &_manifest);
6469 +- if (err < 0)
6470 +- return err;
6471 ++ ret = manifest_new_ver(tplg, manifest, &_manifest);
6472 ++ if (ret < 0)
6473 ++ return ret;
6474 + }
6475 +
6476 + /* pass control to component driver for optional further init */
6477 + if (tplg->comp && tplg->ops && tplg->ops->manifest)
6478 +- return tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
6479 ++ ret = tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
6480 +
6481 + if (!abi_match) /* free the duplicated one */
6482 + kfree(_manifest);
6483 +
6484 +- return 0;
6485 ++ return ret;
6486 + }
6487 +
6488 + /* validate header magic, size and type */
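
Both soc-topology fixes above follow the same ownership rule: whoever created
the version-converted duplicate (the !abi_match case) must free it on every
exit path, and the component callback's return value must survive that cleanup
instead of being replaced by a hard-coded 0:

    if (tplg->comp && tplg->ops && tplg->ops->manifest)
            ret = tplg->ops->manifest(tplg->comp, tplg->index, _manifest);

    if (!abi_match)         /* we allocated the converted copy */
            kfree(_manifest);

    return ret;             /* propagate the callback's result */
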
6489 +diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
6490 +index 82ecadda886c..a1780259292f 100644
6491 +--- a/sound/soc/sof/intel/hda.c
6492 ++++ b/sound/soc/sof/intel/hda.c
6493 +@@ -351,7 +351,7 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
6494 + const char *tplg_filename;
6495 + const char *idisp_str;
6496 + const char *dmic_str;
6497 +- int dmic_num;
6498 ++ int dmic_num = 0;
6499 + int codec_num = 0;
6500 + int i;
6501 + #endif
6502 +@@ -472,6 +472,7 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
6503 + mach_params->codec_mask = bus->codec_mask;
6504 + mach_params->platform = dev_name(sdev->dev);
6505 + mach_params->common_hdmi_codec_drv = hda_codec_use_common_hdmi;
6506 ++ mach_params->dmic_num = dmic_num;
6507 + }
6508 +
6509 + /* create codec instances */
6510 +diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
6511 +index dfe429f9e33f..c7a408f36733 100644
6512 +--- a/sound/soc/sof/ipc.c
6513 ++++ b/sound/soc/sof/ipc.c
6514 +@@ -495,7 +495,7 @@ int snd_sof_ipc_stream_posn(struct snd_sof_dev *sdev,
6515 +
6516 + /* send IPC to the DSP */
6517 + err = sof_ipc_tx_message(sdev->ipc,
6518 +- stream.hdr.cmd, &stream, sizeof(stream), &posn,
6519 ++ stream.hdr.cmd, &stream, sizeof(stream), posn,
6520 + sizeof(*posn));
6521 + if (err < 0) {
6522 + dev_err(sdev->dev, "error: failed to get stream %d position\n",
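
The one-character sof/ipc.c fix is a pointer-level bug: posn already points at
the caller's reply buffer, so passing &posn handed the IPC core the address of
the local pointer variable, and the sizeof(*posn)-byte reply was copied over
the stack instead of into the buffer. The corrected call, as a fragment:

    /* reply_data must be the buffer itself, not the address of the pointer */
    err = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd,
                             &stream, sizeof(stream),
                             posn, sizeof(*posn));
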
6523 +diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
6524 +index ede040cf82ad..20e9a189ad92 100644
6525 +--- a/tools/perf/arch/arm/util/cs-etm.c
6526 ++++ b/tools/perf/arch/arm/util/cs-etm.c
6527 +@@ -865,9 +865,12 @@ static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
6528 + struct evsel *evsel;
6529 +
6530 + evlist__for_each_entry(ptr->evlist, evsel) {
6531 +- if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
6532 ++ if (evsel->core.attr.type == ptr->cs_etm_pmu->type) {
6533 ++ if (evsel->disabled)
6534 ++ return 0;
6535 + return perf_evlist__enable_event_idx(ptr->evlist,
6536 + evsel, idx);
6537 ++ }
6538 + }
6539 +
6540 + return -EINVAL;
6541 +diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
6542 +index eba6541ec0f1..1d993c27242b 100644
6543 +--- a/tools/perf/arch/arm64/util/arm-spe.c
6544 ++++ b/tools/perf/arch/arm64/util/arm-spe.c
6545 +@@ -165,9 +165,12 @@ static int arm_spe_read_finish(struct auxtrace_record *itr, int idx)
6546 + struct evsel *evsel;
6547 +
6548 + evlist__for_each_entry(sper->evlist, evsel) {
6549 +- if (evsel->core.attr.type == sper->arm_spe_pmu->type)
6550 ++ if (evsel->core.attr.type == sper->arm_spe_pmu->type) {
6551 ++ if (evsel->disabled)
6552 ++ return 0;
6553 + return perf_evlist__enable_event_idx(sper->evlist,
6554 + evsel, idx);
6555 ++ }
6556 + }
6557 + return -EINVAL;
6558 + }
6559 +diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
6560 +index 27d9e214d068..39e363151ad7 100644
6561 +--- a/tools/perf/arch/x86/util/intel-bts.c
6562 ++++ b/tools/perf/arch/x86/util/intel-bts.c
6563 +@@ -420,9 +420,12 @@ static int intel_bts_read_finish(struct auxtrace_record *itr, int idx)
6564 + struct evsel *evsel;
6565 +
6566 + evlist__for_each_entry(btsr->evlist, evsel) {
6567 +- if (evsel->core.attr.type == btsr->intel_bts_pmu->type)
6568 ++ if (evsel->core.attr.type == btsr->intel_bts_pmu->type) {
6569 ++ if (evsel->disabled)
6570 ++ return 0;
6571 + return perf_evlist__enable_event_idx(btsr->evlist,
6572 + evsel, idx);
6573 ++ }
6574 + }
6575 + return -EINVAL;
6576 + }
6577 +diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
6578 +index 20df442fdf36..be07d6886256 100644
6579 +--- a/tools/perf/arch/x86/util/intel-pt.c
6580 ++++ b/tools/perf/arch/x86/util/intel-pt.c
6581 +@@ -1173,9 +1173,12 @@ static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
6582 + struct evsel *evsel;
6583 +
6584 + evlist__for_each_entry(ptr->evlist, evsel) {
6585 +- if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
6586 ++ if (evsel->core.attr.type == ptr->intel_pt_pmu->type) {
6587 ++ if (evsel->disabled)
6588 ++ return 0;
6589 + return perf_evlist__enable_event_idx(ptr->evlist, evsel,
6590 + idx);
6591 ++ }
6592 + }
6593 + return -EINVAL;
6594 + }
6595 +diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
6596 +index 1c8a1963d03f..3ed0134a764d 100644
6597 +--- a/tools/testing/selftests/lib.mk
6598 ++++ b/tools/testing/selftests/lib.mk
6599 +@@ -83,17 +83,20 @@ else
6600 + $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS))
6601 + endif
6602 +
6603 ++define INSTALL_SINGLE_RULE
6604 ++ $(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
6605 ++ $(if $(INSTALL_LIST),@echo rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/)
6606 ++ $(if $(INSTALL_LIST),@rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/)
6607 ++endef
6608 ++
6609 + define INSTALL_RULE
6610 +- @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
6611 +- mkdir -p ${INSTALL_PATH}; \
6612 +- echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \
6613 +- rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \
6614 +- fi
6615 +- @if [ "X$(TEST_GEN_PROGS)$(TEST_CUSTOM_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then \
6616 +- mkdir -p ${INSTALL_PATH}; \
6617 +- echo "rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/"; \
6618 +- rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/; \
6619 +- fi
6620 ++ $(eval INSTALL_LIST = $(TEST_PROGS)) $(INSTALL_SINGLE_RULE)
6621 ++ $(eval INSTALL_LIST = $(TEST_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
6622 ++ $(eval INSTALL_LIST = $(TEST_FILES)) $(INSTALL_SINGLE_RULE)
6623 ++ $(eval INSTALL_LIST = $(TEST_GEN_PROGS)) $(INSTALL_SINGLE_RULE)
6624 ++ $(eval INSTALL_LIST = $(TEST_CUSTOM_PROGS)) $(INSTALL_SINGLE_RULE)
6625 ++ $(eval INSTALL_LIST = $(TEST_GEN_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
6626 ++ $(eval INSTALL_LIST = $(TEST_GEN_FILES)) $(INSTALL_SINGLE_RULE)
6627 + endef
6628 +
6629 + install: all
6630 +diff --git a/tools/testing/selftests/net/forwarding/mirror_gre.sh b/tools/testing/selftests/net/forwarding/mirror_gre.sh
6631 +index e6fd7a18c655..0266443601bc 100755
6632 +--- a/tools/testing/selftests/net/forwarding/mirror_gre.sh
6633 ++++ b/tools/testing/selftests/net/forwarding/mirror_gre.sh
6634 +@@ -63,22 +63,23 @@ test_span_gre_mac()
6635 + {
6636 + local tundev=$1; shift
6637 + local direction=$1; shift
6638 +- local prot=$1; shift
6639 + local what=$1; shift
6640 +
6641 +- local swp3mac=$(mac_get $swp3)
6642 +- local h3mac=$(mac_get $h3)
6643 ++ case "$direction" in
6644 ++ ingress) local src_mac=$(mac_get $h1); local dst_mac=$(mac_get $h2)
6645 ++ ;;
6646 ++ egress) local src_mac=$(mac_get $h2); local dst_mac=$(mac_get $h1)
6647 ++ ;;
6648 ++ esac
6649 +
6650 + RET=0
6651 +
6652 + mirror_install $swp1 $direction $tundev "matchall $tcflags"
6653 +- tc filter add dev $h3 ingress pref 77 prot $prot \
6654 +- flower ip_proto 0x2f src_mac $swp3mac dst_mac $h3mac \
6655 +- action pass
6656 ++ icmp_capture_install h3-${tundev} "src_mac $src_mac dst_mac $dst_mac"
6657 +
6658 +- mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10
6659 ++ mirror_test v$h1 192.0.2.1 192.0.2.2 h3-${tundev} 100 10
6660 +
6661 +- tc filter del dev $h3 ingress pref 77
6662 ++ icmp_capture_uninstall h3-${tundev}
6663 + mirror_uninstall $swp1 $direction
6664 +
6665 + log_test "$direction $what: envelope MAC ($tcflags)"
6666 +@@ -120,14 +121,14 @@ test_ip6gretap()
6667 +
6668 + test_gretap_mac()
6669 + {
6670 +- test_span_gre_mac gt4 ingress ip "mirror to gretap"
6671 +- test_span_gre_mac gt4 egress ip "mirror to gretap"
6672 ++ test_span_gre_mac gt4 ingress "mirror to gretap"
6673 ++ test_span_gre_mac gt4 egress "mirror to gretap"
6674 + }
6675 +
6676 + test_ip6gretap_mac()
6677 + {
6678 +- test_span_gre_mac gt6 ingress ipv6 "mirror to ip6gretap"
6679 +- test_span_gre_mac gt6 egress ipv6 "mirror to ip6gretap"
6680 ++ test_span_gre_mac gt6 ingress "mirror to ip6gretap"
6681 ++ test_span_gre_mac gt6 egress "mirror to ip6gretap"
6682 + }
6683 +
6684 + test_all()
6685 +diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
6686 +index bb10e33690b2..ce6bea9675c0 100755
6687 +--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
6688 ++++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
6689 +@@ -516,9 +516,9 @@ test_tos()
6690 + RET=0
6691 +
6692 + tc filter add dev v1 egress pref 77 prot ip \
6693 +- flower ip_tos 0x40 action pass
6694 +- vxlan_ping_test $h1 192.0.2.3 "-Q 0x40" v1 egress 77 10
6695 +- vxlan_ping_test $h1 192.0.2.3 "-Q 0x30" v1 egress 77 0
6696 ++ flower ip_tos 0x14 action pass
6697 ++ vxlan_ping_test $h1 192.0.2.3 "-Q 0x14" v1 egress 77 10
6698 ++ vxlan_ping_test $h1 192.0.2.3 "-Q 0x18" v1 egress 77 0
6699 + tc filter del dev v1 egress pref 77 prot ip
6700 +
6701 + log_test "VXLAN: envelope TOS inheritance"
6702 +diff --git a/tools/testing/selftests/pidfd/.gitignore b/tools/testing/selftests/pidfd/.gitignore
6703 +index 8d069490e17b..161facf28d6a 100644
6704 +--- a/tools/testing/selftests/pidfd/.gitignore
6705 ++++ b/tools/testing/selftests/pidfd/.gitignore
6706 +@@ -2,3 +2,4 @@ pidfd_open_test
6707 + pidfd_poll_test
6708 + pidfd_test
6709 + pidfd_wait
6710 ++pidfd_fdinfo_test
6711 +diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh
6712 +index 8155c2ea7ccb..b630c7b5950a 100755
6713 +--- a/tools/testing/selftests/tpm2/test_smoke.sh
6714 ++++ b/tools/testing/selftests/tpm2/test_smoke.sh
6715 +@@ -1,8 +1,16 @@
6716 + #!/bin/bash
6717 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
6719 +
6720 +-python -m unittest -v tpm2_tests.SmokeTest
6721 +-python -m unittest -v tpm2_tests.AsyncTest
6722 ++# Kselftest framework requirement - SKIP code is 4.
6723 ++ksft_skip=4
6724 ++
6725 ++
6726 ++if [ -e /dev/tpm0 ] ; then
6727 ++ python -m unittest -v tpm2_tests.SmokeTest
6728 ++ python -m unittest -v tpm2_tests.AsyncTest
6729 ++else
6730 ++ exit $ksft_skip
6731 ++fi
6732 +
6733 + CLEAR_CMD=$(which tpm2_clear)
6734 + if [ -n $CLEAR_CMD ]; then
6735 +diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh
6736 +index a6f5e346635e..180b469c53b4 100755
6737 +--- a/tools/testing/selftests/tpm2/test_space.sh
6738 ++++ b/tools/testing/selftests/tpm2/test_space.sh
6739 +@@ -1,4 +1,11 @@
6740 + #!/bin/bash
6741 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
6742 +
6743 +-python -m unittest -v tpm2_tests.SpaceTest
6744 ++# Kselftest framework requirement - SKIP code is 4.
6745 ++ksft_skip=4
6746 ++
6747 ++if [ -e /dev/tpmrm0 ] ; then
6748 ++ python -m unittest -v tpm2_tests.SpaceTest
6749 ++else
6750 ++ exit $ksft_skip
6751 ++fi