Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.9 commit in: /
Date: Thu, 29 Oct 2020 11:21:34
Message-Id: 1603970475.1f892fe0eac7c43a6fbec5d2af76a4ef1465cce1.mpagano@gentoo
1 commit: 1f892fe0eac7c43a6fbec5d2af76a4ef1465cce1
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Oct 29 11:21:15 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Oct 29 11:21:15 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1f892fe0
7
8 Linux patch 5.9.2
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1001_linux-5.9.2.patch | 29846 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 29850 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index f7f6e8d..73a1979 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -47,6 +47,10 @@ Patch: 1000_linux-5.9.1.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.9.1
23
24 +Patch: 1001_linux-5.9.2.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.9.2
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1001_linux-5.9.2.patch b/1001_linux-5.9.2.patch
33 new file mode 100644
34 index 0000000..61d927f
35 --- /dev/null
36 +++ b/1001_linux-5.9.2.patch
37 @@ -0,0 +1,29846 @@
38 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
39 +index a1068742a6df1..ffe864390c5ac 100644
40 +--- a/Documentation/admin-guide/kernel-parameters.txt
41 ++++ b/Documentation/admin-guide/kernel-parameters.txt
42 +@@ -577,7 +577,7 @@
43 + loops can be debugged more effectively on production
44 + systems.
45 +
46 +- clearcpuid=BITNUM [X86]
47 ++ clearcpuid=BITNUM[,BITNUM...] [X86]
48 + Disable CPUID feature X for the kernel. See
49 + arch/x86/include/asm/cpufeatures.h for the valid bit
50 + numbers. Note the Linux specific bits are not necessarily
51 +diff --git a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
52 +index fc823572bcff2..90c6d039b91b0 100644
53 +--- a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
54 ++++ b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
55 +@@ -23,8 +23,7 @@ properties:
56 + - items:
57 + - const: allwinner,sun7i-a20-crypto
58 + - const: allwinner,sun4i-a10-crypto
59 +- - items:
60 +- - const: allwinner,sun8i-a33-crypto
61 ++ - const: allwinner,sun8i-a33-crypto
62 +
63 + reg:
64 + maxItems: 1
65 +@@ -59,7 +58,9 @@ if:
66 + properties:
67 + compatible:
68 + contains:
69 +- const: allwinner,sun6i-a31-crypto
70 ++ enum:
71 ++ - allwinner,sun6i-a31-crypto
72 ++ - allwinner,sun8i-a33-crypto
73 +
74 + then:
75 + required:
76 +diff --git a/Documentation/devicetree/bindings/net/socionext-netsec.txt b/Documentation/devicetree/bindings/net/socionext-netsec.txt
77 +index 9d6c9feb12ff1..a3c1dffaa4bb4 100644
78 +--- a/Documentation/devicetree/bindings/net/socionext-netsec.txt
79 ++++ b/Documentation/devicetree/bindings/net/socionext-netsec.txt
80 +@@ -30,7 +30,9 @@ Optional properties: (See ethernet.txt file in the same directory)
81 + - max-frame-size: See ethernet.txt in the same directory.
82 +
83 + The MAC address will be determined using the optional properties
84 +-defined in ethernet.txt.
85 ++defined in ethernet.txt. The 'phy-mode' property is required, but may
86 ++be set to the empty string if the PHY configuration is programmed by
87 ++the firmware or set by hardware straps, and needs to be preserved.
88 +
89 + Example:
90 + eth0: ethernet@522d0000 {
91 +diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
92 +index 837d51f9e1fab..25e6673a085a0 100644
93 +--- a/Documentation/networking/ip-sysctl.rst
94 ++++ b/Documentation/networking/ip-sysctl.rst
95 +@@ -1142,13 +1142,15 @@ icmp_ratelimit - INTEGER
96 + icmp_msgs_per_sec - INTEGER
97 + Limit maximal number of ICMP packets sent per second from this host.
98 + Only messages whose type matches icmp_ratemask (see below) are
99 +- controlled by this limit.
100 ++ controlled by this limit. For security reasons, the precise count
101 ++ of messages per second is randomized.
102 +
103 + Default: 1000
104 +
105 + icmp_msgs_burst - INTEGER
106 + icmp_msgs_per_sec controls number of ICMP packets sent per second,
107 + while icmp_msgs_burst controls the burst size of these packets.
108 ++ For security reasons, the precise burst size is randomized.
109 +
110 + Default: 50
111 +
112 +diff --git a/Makefile b/Makefile
113 +index d600b38144f42..53e7f4ee2557e 100644
114 +--- a/Makefile
115 ++++ b/Makefile
116 +@@ -1,7 +1,7 @@
117 + # SPDX-License-Identifier: GPL-2.0
118 + VERSION = 5
119 + PATCHLEVEL = 9
120 +-SUBLEVEL = 1
121 ++SUBLEVEL = 2
122 + EXTRAVERSION =
123 + NAME = Kleptomaniac Octopus
124 +
125 +diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
126 +index ce81018345184..6b5c54576f54d 100644
127 +--- a/arch/arc/plat-hsdk/Kconfig
128 ++++ b/arch/arc/plat-hsdk/Kconfig
129 +@@ -8,5 +8,6 @@ menuconfig ARC_SOC_HSDK
130 + select ARC_HAS_ACCL_REGS
131 + select ARC_IRQ_NO_AUTOSAVE
132 + select CLK_HSDK
133 ++ select RESET_CONTROLLER
134 + select RESET_HSDK
135 + select HAVE_PCI
136 +diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
137 +index 1c7180f285393..91a8c54d5e113 100644
138 +--- a/arch/arm/boot/dts/imx6sl.dtsi
139 ++++ b/arch/arm/boot/dts/imx6sl.dtsi
140 +@@ -939,8 +939,10 @@
141 + };
142 +
143 + rngb: rngb@21b4000 {
144 ++ compatible = "fsl,imx6sl-rngb", "fsl,imx25-rngb";
145 + reg = <0x021b4000 0x4000>;
146 + interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
147 ++ clocks = <&clks IMX6SL_CLK_DUMMY>;
148 + };
149 +
150 + weim: weim@21b8000 {
151 +diff --git a/arch/arm/boot/dts/iwg20d-q7-common.dtsi b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
152 +index ebbe1518ef8a6..63cafd220dba1 100644
153 +--- a/arch/arm/boot/dts/iwg20d-q7-common.dtsi
154 ++++ b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
155 +@@ -57,7 +57,7 @@
156 +
157 + lvds-receiver {
158 + compatible = "ti,ds90cf384a", "lvds-decoder";
159 +- powerdown-gpios = <&gpio7 25 GPIO_ACTIVE_LOW>;
160 ++ power-supply = <&vcc_3v3_tft1>;
161 +
162 + ports {
163 + #address-cells = <1>;
164 +@@ -81,6 +81,7 @@
165 + panel {
166 + compatible = "edt,etm0700g0dh6";
167 + backlight = <&lcd_backlight>;
168 ++ power-supply = <&vcc_3v3_tft1>;
169 +
170 + port {
171 + panel_in: endpoint {
172 +@@ -113,6 +114,17 @@
173 + };
174 + };
175 +
176 ++ vcc_3v3_tft1: regulator-panel {
177 ++ compatible = "regulator-fixed";
178 ++
179 ++ regulator-name = "vcc-3v3-tft1";
180 ++ regulator-min-microvolt = <3300000>;
181 ++ regulator-max-microvolt = <3300000>;
182 ++ enable-active-high;
183 ++ startup-delay-us = <500>;
184 ++ gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>;
185 ++ };
186 ++
187 + vcc_sdhi1: regulator-vcc-sdhi1 {
188 + compatible = "regulator-fixed";
189 +
190 +@@ -207,6 +219,7 @@
191 + reg = <0x38>;
192 + interrupt-parent = <&gpio2>;
193 + interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
194 ++ vcc-supply = <&vcc_3v3_tft1>;
195 + };
196 + };
197 +
198 +diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
199 +index 277c0bb104534..04688e8abce2c 100644
200 +--- a/arch/arm/boot/dts/meson8.dtsi
201 ++++ b/arch/arm/boot/dts/meson8.dtsi
202 +@@ -240,8 +240,6 @@
203 + <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
204 + <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
205 + <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
206 +- <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
207 +- <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
208 + <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
209 + <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
210 + <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
211 +diff --git a/arch/arm/boot/dts/owl-s500.dtsi b/arch/arm/boot/dts/owl-s500.dtsi
212 +index 5ceb6cc4451d2..1dbe4e8b38ac7 100644
213 +--- a/arch/arm/boot/dts/owl-s500.dtsi
214 ++++ b/arch/arm/boot/dts/owl-s500.dtsi
215 +@@ -84,21 +84,21 @@
216 + global_timer: timer@b0020200 {
217 + compatible = "arm,cortex-a9-global-timer";
218 + reg = <0xb0020200 0x100>;
219 +- interrupts = <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
220 ++ interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
221 + status = "disabled";
222 + };
223 +
224 + twd_timer: timer@b0020600 {
225 + compatible = "arm,cortex-a9-twd-timer";
226 + reg = <0xb0020600 0x20>;
227 +- interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
228 ++ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
229 + status = "disabled";
230 + };
231 +
232 + twd_wdt: wdt@b0020620 {
233 + compatible = "arm,cortex-a9-twd-wdt";
234 + reg = <0xb0020620 0xe0>;
235 +- interrupts = <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
236 ++ interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
237 + status = "disabled";
238 + };
239 +
240 +diff --git a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
241 +index 5700e6b700d36..b85025d009437 100644
242 +--- a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
243 ++++ b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
244 +@@ -121,8 +121,6 @@
245 + reset-gpios = <&gpiog 0 GPIO_ACTIVE_LOW>; /* ETH_RST# */
246 + interrupt-parent = <&gpioa>;
247 + interrupts = <6 IRQ_TYPE_EDGE_FALLING>; /* ETH_MDINT# */
248 +- rxc-skew-ps = <1860>;
249 +- txc-skew-ps = <1860>;
250 + reset-assert-us = <10000>;
251 + reset-deassert-us = <300>;
252 + micrel,force-master;
253 +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
254 +index 7c4bd615b3115..e4e3c92eb30d3 100644
255 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
256 ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
257 +@@ -11,7 +11,6 @@
258 + serial0 = &uart4;
259 + serial1 = &usart3;
260 + serial2 = &uart8;
261 +- ethernet0 = &ethernet0;
262 + };
263 +
264 + chosen {
265 +@@ -26,23 +25,13 @@
266 +
267 + display_bl: display-bl {
268 + compatible = "pwm-backlight";
269 +- pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
270 ++ pwms = <&pwm2 3 500000 PWM_POLARITY_INVERTED>;
271 + brightness-levels = <0 16 22 30 40 55 75 102 138 188 255>;
272 + default-brightness-level = <8>;
273 + enable-gpios = <&gpioi 0 GPIO_ACTIVE_HIGH>;
274 + status = "okay";
275 + };
276 +
277 +- ethernet_vio: vioregulator {
278 +- compatible = "regulator-fixed";
279 +- regulator-name = "vio";
280 +- regulator-min-microvolt = <3300000>;
281 +- regulator-max-microvolt = <3300000>;
282 +- gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
283 +- regulator-always-on;
284 +- regulator-boot-on;
285 +- };
286 +-
287 + gpio-keys-polled {
288 + compatible = "gpio-keys-polled";
289 + #size-cells = <0>;
290 +@@ -141,28 +130,6 @@
291 + status = "okay";
292 + };
293 +
294 +-&ethernet0 {
295 +- status = "okay";
296 +- pinctrl-0 = <&ethernet0_rmii_pins_a>;
297 +- pinctrl-1 = <&ethernet0_rmii_sleep_pins_a>;
298 +- pinctrl-names = "default", "sleep";
299 +- phy-mode = "rmii";
300 +- max-speed = <100>;
301 +- phy-handle = <&phy0>;
302 +- st,eth-ref-clk-sel;
303 +- phy-reset-gpios = <&gpioh 15 GPIO_ACTIVE_LOW>;
304 +-
305 +- mdio0 {
306 +- #address-cells = <1>;
307 +- #size-cells = <0>;
308 +- compatible = "snps,dwmac-mdio";
309 +-
310 +- phy0: ethernet-phy@1 {
311 +- reg = <1>;
312 +- };
313 +- };
314 +-};
315 +-
316 + &i2c2 { /* Header X22 */
317 + pinctrl-names = "default";
318 + pinctrl-0 = <&i2c2_pins_a>;
319 +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
320 +index ba905196fb549..a87ebc4843963 100644
321 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
322 ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
323 +@@ -9,6 +9,10 @@
324 + #include <dt-bindings/mfd/st,stpmic1.h>
325 +
326 + / {
327 ++ aliases {
328 ++ ethernet0 = &ethernet0;
329 ++ };
330 ++
331 + memory@c0000000 {
332 + device_type = "memory";
333 + reg = <0xC0000000 0x40000000>;
334 +@@ -55,6 +59,16 @@
335 + no-map;
336 + };
337 + };
338 ++
339 ++ ethernet_vio: vioregulator {
340 ++ compatible = "regulator-fixed";
341 ++ regulator-name = "vio";
342 ++ regulator-min-microvolt = <3300000>;
343 ++ regulator-max-microvolt = <3300000>;
344 ++ gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
345 ++ regulator-always-on;
346 ++ regulator-boot-on;
347 ++ };
348 + };
349 +
350 + &adc {
351 +@@ -94,6 +108,28 @@
352 + status = "okay";
353 + };
354 +
355 ++&ethernet0 {
356 ++ status = "okay";
357 ++ pinctrl-0 = <&ethernet0_rmii_pins_a>;
358 ++ pinctrl-1 = <&ethernet0_rmii_sleep_pins_a>;
359 ++ pinctrl-names = "default", "sleep";
360 ++ phy-mode = "rmii";
361 ++ max-speed = <100>;
362 ++ phy-handle = <&phy0>;
363 ++ st,eth-ref-clk-sel;
364 ++ phy-reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>;
365 ++
366 ++ mdio0 {
367 ++ #address-cells = <1>;
368 ++ #size-cells = <0>;
369 ++ compatible = "snps,dwmac-mdio";
370 ++
371 ++ phy0: ethernet-phy@1 {
372 ++ reg = <1>;
373 ++ };
374 ++ };
375 ++};
376 ++
377 + &i2c4 {
378 + pinctrl-names = "default";
379 + pinctrl-0 = <&i2c4_pins_a>;
380 +@@ -249,7 +285,7 @@
381 + compatible = "ti,tsc2004";
382 + reg = <0x49>;
383 + vio-supply = <&v3v3>;
384 +- interrupts-extended = <&gpioh 3 IRQ_TYPE_EDGE_FALLING>;
385 ++ interrupts-extended = <&gpioh 15 IRQ_TYPE_EDGE_FALLING>;
386 + };
387 +
388 + eeprom@50 {
389 +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
390 +index 930202742a3f6..905cd7bb98cf0 100644
391 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
392 ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
393 +@@ -295,9 +295,9 @@
394 +
395 + &sdmmc2 {
396 + pinctrl-names = "default", "opendrain", "sleep";
397 +- pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_b>;
398 +- pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_b>;
399 +- pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_b>;
400 ++ pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_c>;
401 ++ pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_c>;
402 ++ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_c>;
403 + bus-width = <8>;
404 + mmc-ddr-1_8v;
405 + no-sd;
406 +diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
407 +index 42d62d1ba1dc7..ea15073f0c79c 100644
408 +--- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
409 ++++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
410 +@@ -223,16 +223,16 @@
411 + };
412 +
413 + &reg_dc1sw {
414 +- regulator-min-microvolt = <3000000>;
415 +- regulator-max-microvolt = <3000000>;
416 ++ regulator-min-microvolt = <3300000>;
417 ++ regulator-max-microvolt = <3300000>;
418 + regulator-name = "vcc-gmac-phy";
419 + };
420 +
421 + &reg_dcdc1 {
422 + regulator-always-on;
423 +- regulator-min-microvolt = <3000000>;
424 +- regulator-max-microvolt = <3000000>;
425 +- regulator-name = "vcc-3v0";
426 ++ regulator-min-microvolt = <3300000>;
427 ++ regulator-max-microvolt = <3300000>;
428 ++ regulator-name = "vcc-3v3";
429 + };
430 +
431 + &reg_dcdc2 {
432 +diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
433 +index 2aab043441e8f..eae8aaaadc3bf 100644
434 +--- a/arch/arm/mach-at91/pm.c
435 ++++ b/arch/arm/mach-at91/pm.c
436 +@@ -800,6 +800,7 @@ static void __init at91_pm_init(void (*pm_idle)(void))
437 +
438 + pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
439 + soc_pm.data.pmc = of_iomap(pmc_np, 0);
440 ++ of_node_put(pmc_np);
441 + if (!soc_pm.data.pmc) {
442 + pr_err("AT91: PM not supported, PMC not found\n");
443 + return;
444 +diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
445 +index 6f5f89711f256..a92d277f81a08 100644
446 +--- a/arch/arm/mach-omap2/cpuidle44xx.c
447 ++++ b/arch/arm/mach-omap2/cpuidle44xx.c
448 +@@ -174,8 +174,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
449 + */
450 + if (mpuss_can_lose_context) {
451 + error = cpu_cluster_pm_enter();
452 +- if (error)
453 ++ if (error) {
454 ++ omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
455 + goto cpu_cluster_pm_out;
456 ++ }
457 + }
458 + }
459 +
460 +diff --git a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c
461 +index 58c5ef3cf1d7e..2d370f7f75fa2 100644
462 +--- a/arch/arm/mach-s3c24xx/mach-at2440evb.c
463 ++++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c
464 +@@ -143,7 +143,7 @@ static struct gpiod_lookup_table at2440evb_mci_gpio_table = {
465 + .dev_id = "s3c2410-sdi",
466 + .table = {
467 + /* Card detect S3C2410_GPG(10) */
468 +- GPIO_LOOKUP("GPG", 10, "cd", GPIO_ACTIVE_LOW),
469 ++ GPIO_LOOKUP("GPIOG", 10, "cd", GPIO_ACTIVE_LOW),
470 + { },
471 + },
472 + };
473 +diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c
474 +index f4710052843ac..3601c7abe69dc 100644
475 +--- a/arch/arm/mach-s3c24xx/mach-h1940.c
476 ++++ b/arch/arm/mach-s3c24xx/mach-h1940.c
477 +@@ -468,9 +468,9 @@ static struct gpiod_lookup_table h1940_mmc_gpio_table = {
478 + .dev_id = "s3c2410-sdi",
479 + .table = {
480 + /* Card detect S3C2410_GPF(5) */
481 +- GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
482 ++ GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
483 + /* Write protect S3C2410_GPH(8) */
484 +- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
485 ++ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
486 + { },
487 + },
488 + };
489 +diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c
490 +index 2357494483118..5729bf07a6232 100644
491 +--- a/arch/arm/mach-s3c24xx/mach-mini2440.c
492 ++++ b/arch/arm/mach-s3c24xx/mach-mini2440.c
493 +@@ -244,9 +244,9 @@ static struct gpiod_lookup_table mini2440_mmc_gpio_table = {
494 + .dev_id = "s3c2410-sdi",
495 + .table = {
496 + /* Card detect S3C2410_GPG(8) */
497 +- GPIO_LOOKUP("GPG", 8, "cd", GPIO_ACTIVE_LOW),
498 ++ GPIO_LOOKUP("GPIOG", 8, "cd", GPIO_ACTIVE_LOW),
499 + /* Write protect S3C2410_GPH(8) */
500 +- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_HIGH),
501 ++ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_HIGH),
502 + { },
503 + },
504 + };
505 +diff --git a/arch/arm/mach-s3c24xx/mach-n30.c b/arch/arm/mach-s3c24xx/mach-n30.c
506 +index 998ccff3c174b..ed993bc666351 100644
507 +--- a/arch/arm/mach-s3c24xx/mach-n30.c
508 ++++ b/arch/arm/mach-s3c24xx/mach-n30.c
509 +@@ -389,9 +389,9 @@ static struct gpiod_lookup_table n30_mci_gpio_table = {
510 + .dev_id = "s3c2410-sdi",
511 + .table = {
512 + /* Card detect S3C2410_GPF(1) */
513 +- GPIO_LOOKUP("GPF", 1, "cd", GPIO_ACTIVE_LOW),
514 ++ GPIO_LOOKUP("GPIOF", 1, "cd", GPIO_ACTIVE_LOW),
515 + /* Write protect S3C2410_GPG(10) */
516 +- GPIO_LOOKUP("GPG", 10, "wp", GPIO_ACTIVE_LOW),
517 ++ GPIO_LOOKUP("GPIOG", 10, "wp", GPIO_ACTIVE_LOW),
518 + { },
519 + },
520 + };
521 +diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
522 +index fde98b175c752..c0a06f123cfea 100644
523 +--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
524 ++++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
525 +@@ -571,9 +571,9 @@ static struct gpiod_lookup_table rx1950_mmc_gpio_table = {
526 + .dev_id = "s3c2410-sdi",
527 + .table = {
528 + /* Card detect S3C2410_GPF(5) */
529 +- GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
530 ++ GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
531 + /* Write protect S3C2410_GPH(8) */
532 +- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
533 ++ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
534 + { },
535 + },
536 + };
537 +diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
538 +index 12c26eb88afbc..43d91bfd23600 100644
539 +--- a/arch/arm/mm/cache-l2x0.c
540 ++++ b/arch/arm/mm/cache-l2x0.c
541 +@@ -1249,20 +1249,28 @@ static void __init l2c310_of_parse(const struct device_node *np,
542 +
543 + ret = of_property_read_u32(np, "prefetch-data", &val);
544 + if (ret == 0) {
545 +- if (val)
546 ++ if (val) {
547 + prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
548 +- else
549 ++ *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
550 ++ } else {
551 + prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
552 ++ *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
553 ++ }
554 ++ *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
555 + } else if (ret != -EINVAL) {
556 + pr_err("L2C-310 OF prefetch-data property value is missing\n");
557 + }
558 +
559 + ret = of_property_read_u32(np, "prefetch-instr", &val);
560 + if (ret == 0) {
561 +- if (val)
562 ++ if (val) {
563 + prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
564 +- else
565 ++ *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
566 ++ } else {
567 + prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
568 ++ *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
569 ++ }
570 ++ *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
571 + } else if (ret != -EINVAL) {
572 + pr_err("L2C-310 OF prefetch-instr property value is missing\n");
573 + }
574 +diff --git a/arch/arm64/boot/dts/actions/s700.dtsi b/arch/arm64/boot/dts/actions/s700.dtsi
575 +index 2006ad5424fa6..f8eb72bb41254 100644
576 +--- a/arch/arm64/boot/dts/actions/s700.dtsi
577 ++++ b/arch/arm64/boot/dts/actions/s700.dtsi
578 +@@ -231,7 +231,7 @@
579 +
580 + pinctrl: pinctrl@e01b0000 {
581 + compatible = "actions,s700-pinctrl";
582 +- reg = <0x0 0xe01b0000 0x0 0x1000>;
583 ++ reg = <0x0 0xe01b0000 0x0 0x100>;
584 + clocks = <&cmu CLK_GPIO>;
585 + gpio-controller;
586 + gpio-ranges = <&pinctrl 0 0 136>;
587 +diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
588 +index 6735e316a39c3..6c6053a18413d 100644
589 +--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
590 ++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
591 +@@ -139,8 +139,7 @@
592 + <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
593 + <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
594 + <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
595 +- <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
596 +- <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
597 ++ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
598 + interrupt-names = "gp",
599 + "gpmmu",
600 + "pp",
601 +@@ -151,8 +150,7 @@
602 + "pp2",
603 + "ppmmu2",
604 + "pp3",
605 +- "ppmmu3",
606 +- "pmu";
607 ++ "ppmmu3";
608 + clocks = <&ccu CLK_BUS_GPU>, <&ccu CLK_GPU>;
609 + clock-names = "bus", "core";
610 + resets = <&ccu RST_BUS_GPU>;
611 +diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
612 +index 94f75b4465044..73783692e30ee 100644
613 +--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
614 ++++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
615 +@@ -41,13 +41,13 @@
616 +
617 + led-white {
618 + label = "vim3:white:sys";
619 +- gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>;
620 ++ gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_HIGH>;
621 + linux,default-trigger = "heartbeat";
622 + };
623 +
624 + led-red {
625 + label = "vim3:red";
626 +- gpios = <&gpio_expander 5 GPIO_ACTIVE_LOW>;
627 ++ gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>;
628 + };
629 + };
630 +
631 +diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
632 +index 561fa792fe5a9..58c08398d4ba7 100644
633 +--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
634 ++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
635 +@@ -617,6 +617,7 @@
636 + gpc: gpc@303a0000 {
637 + compatible = "fsl,imx8mq-gpc";
638 + reg = <0x303a0000 0x10000>;
639 ++ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
640 + interrupt-parent = <&gic>;
641 + interrupt-controller;
642 + #interrupt-cells = <3>;
643 +diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
644 +index a5a12b2599a4a..44a0346133cde 100644
645 +--- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
646 ++++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
647 +@@ -5,6 +5,7 @@
648 +
649 + #include <dt-bindings/input/input.h>
650 + #include <dt-bindings/input/linux-event-codes.h>
651 ++#include <dt-bindings/regulator/dlg,da9211-regulator.h>
652 + #include <dt-bindings/gpio/gpio.h>
653 + #include "mt8173.dtsi"
654 +
655 +@@ -294,7 +295,8 @@
656 + regulator-max-microamp = <4400000>;
657 + regulator-ramp-delay = <10000>;
658 + regulator-always-on;
659 +- regulator-allowed-modes = <0 1>;
660 ++ regulator-allowed-modes = <DA9211_BUCK_MODE_SYNC
661 ++ DA9211_BUCK_MODE_AUTO>;
662 + };
663 +
664 + da9211_vgpu_reg: BUCKB {
665 +@@ -431,12 +433,11 @@
666 + status = "okay";
667 + pinctrl-names = "default";
668 + pinctrl-0 = <&nor_gpio1_pins>;
669 +- bus-width = <8>;
670 +- max-frequency = <50000000>;
671 +- non-removable;
672 ++
673 + flash@0 {
674 + compatible = "jedec,spi-nor";
675 + reg = <0>;
676 ++ spi-max-frequency = <50000000>;
677 + };
678 + };
679 +
680 +diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
681 +index 67cae5f9e47e6..75687442d5827 100644
682 +--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
683 ++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
684 +@@ -229,14 +229,14 @@
685 + };
686 +
687 + thermal-zones {
688 +- cpu0_1-thermal {
689 ++ cpu0-1-thermal {
690 + polling-delay-passive = <250>;
691 + polling-delay = <1000>;
692 +
693 + thermal-sensors = <&tsens 5>;
694 +
695 + trips {
696 +- cpu0_1_alert0: trip-point@0 {
697 ++ cpu0_1_alert0: trip-point0 {
698 + temperature = <75000>;
699 + hysteresis = <2000>;
700 + type = "passive";
701 +@@ -259,7 +259,7 @@
702 + };
703 + };
704 +
705 +- cpu2_3-thermal {
706 ++ cpu2-3-thermal {
707 + polling-delay-passive = <250>;
708 + polling-delay = <1000>;
709 +
710 +@@ -1052,7 +1052,7 @@
711 + reg-names = "mdp_phys";
712 +
713 + interrupt-parent = <&mdss>;
714 +- interrupts = <0 0>;
715 ++ interrupts = <0>;
716 +
717 + clocks = <&gcc GCC_MDSS_AHB_CLK>,
718 + <&gcc GCC_MDSS_AXI_CLK>,
719 +@@ -1084,7 +1084,7 @@
720 + reg-names = "dsi_ctrl";
721 +
722 + interrupt-parent = <&mdss>;
723 +- interrupts = <4 0>;
724 ++ interrupts = <4>;
725 +
726 + assigned-clocks = <&gcc BYTE0_CLK_SRC>,
727 + <&gcc PCLK0_CLK_SRC>;
728 +diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi
729 +index 188fff2095f11..8626b3a50eda7 100644
730 +--- a/arch/arm64/boot/dts/qcom/msm8992.dtsi
731 ++++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi
732 +@@ -335,7 +335,7 @@
733 + blsp2_uart2: serial@f995e000 {
734 + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
735 + reg = <0xf995e000 0x1000>;
736 +- interrupt = <GIC_SPI 146 IRQ_TYPE_LEVEL_LOW>;
737 ++ interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_LOW>;
738 + clock-names = "core", "iface";
739 + clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
740 + <&gcc GCC_BLSP2_AHB_CLK>;
741 +diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
742 +index 0bcdf04711079..adf9a5988cdc2 100644
743 +--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
744 ++++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
745 +@@ -119,7 +119,7 @@
746 +
747 + wcd_codec: codec@f000 {
748 + compatible = "qcom,pm8916-wcd-analog-codec";
749 +- reg = <0xf000 0x200>;
750 ++ reg = <0xf000>;
751 + reg-names = "pmic-codec-core";
752 + clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
753 + clock-names = "mclk";
754 +diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
755 +index d46b3833e52fd..a6be72d8f6fde 100644
756 +--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
757 ++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
758 +@@ -2618,7 +2618,7 @@
759 +
760 + system-cache-controller@9200000 {
761 + compatible = "qcom,sc7180-llcc";
762 +- reg = <0 0x09200000 0 0x200000>, <0 0x09600000 0 0x50000>;
763 ++ reg = <0 0x09200000 0 0x50000>, <0 0x09600000 0 0x50000>;
764 + reg-names = "llcc_base", "llcc_broadcast_base";
765 + interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
766 + };
767 +@@ -2785,7 +2785,7 @@
768 + power-domains = <&rpmhpd SC7180_CX>;
769 +
770 + interrupt-parent = <&mdss>;
771 +- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
772 ++ interrupts = <0>;
773 +
774 + status = "disabled";
775 +
776 +@@ -2833,7 +2833,7 @@
777 + reg-names = "dsi_ctrl";
778 +
779 + interrupt-parent = <&mdss>;
780 +- interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
781 ++ interrupts = <4>;
782 +
783 + clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
784 + <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
785 +diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
786 +index a2a98680ccf53..99d33955270ec 100644
787 +--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
788 ++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
789 +@@ -451,16 +451,16 @@
790 + port@0 {
791 + reg = <0>;
792 +
793 +- lt9611_out: endpoint {
794 +- remote-endpoint = <&hdmi_con>;
795 ++ lt9611_a: endpoint {
796 ++ remote-endpoint = <&dsi0_out>;
797 + };
798 + };
799 +
800 +- port@1 {
801 +- reg = <1>;
802 ++ port@2 {
803 ++ reg = <2>;
804 +
805 +- lt9611_a: endpoint {
806 +- remote-endpoint = <&dsi0_out>;
807 ++ lt9611_out: endpoint {
808 ++ remote-endpoint = <&hdmi_con>;
809 + };
810 + };
811 + };
812 +diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
813 +index 2884577dcb777..eca81cffd2c19 100644
814 +--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
815 ++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
816 +@@ -1093,8 +1093,8 @@
817 + qup_opp_table: qup-opp-table {
818 + compatible = "operating-points-v2";
819 +
820 +- opp-19200000 {
821 +- opp-hz = /bits/ 64 <19200000>;
822 ++ opp-50000000 {
823 ++ opp-hz = /bits/ 64 <50000000>;
824 + required-opps = <&rpmhpd_opp_min_svs>;
825 + };
826 +
827 +@@ -1107,6 +1107,11 @@
828 + opp-hz = /bits/ 64 <100000000>;
829 + required-opps = <&rpmhpd_opp_svs>;
830 + };
831 ++
832 ++ opp-128000000 {
833 ++ opp-hz = /bits/ 64 <128000000>;
834 ++ required-opps = <&rpmhpd_opp_nom>;
835 ++ };
836 + };
837 +
838 + qupv3_id_0: geniqup@8c0000 {
839 +diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
840 +index b86a7ead30067..ab8680c6672e4 100644
841 +--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
842 ++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
843 +@@ -767,7 +767,7 @@
844 +
845 + usb_1_hsphy: phy@88e2000 {
846 + compatible = "qcom,sm8150-usb-hs-phy",
847 +- "qcom,usb-snps-hs-7nm-phy";
848 ++ "qcom,usb-snps-hs-7nm-phy";
849 + reg = <0 0x088e2000 0 0x400>;
850 + status = "disabled";
851 + #phy-cells = <0>;
852 +@@ -833,7 +833,7 @@
853 +
854 + assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
855 + <&gcc GCC_USB30_PRIM_MASTER_CLK>;
856 +- assigned-clock-rates = <19200000>, <150000000>;
857 ++ assigned-clock-rates = <19200000>, <200000000>;
858 +
859 + interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
860 + <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
861 +diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
862 +index 6894f8490dae7..6e2f7ae1d6211 100644
863 +--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
864 ++++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
865 +@@ -17,7 +17,7 @@
866 + compatible = "qcom,sm8250-mtp";
867 +
868 + aliases {
869 +- serial0 = &uart2;
870 ++ serial0 = &uart12;
871 + };
872 +
873 + chosen {
874 +@@ -371,7 +371,7 @@
875 + gpio-reserved-ranges = <28 4>, <40 4>;
876 + };
877 +
878 +-&uart2 {
879 ++&uart12 {
880 + status = "okay";
881 + };
882 +
883 +diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
884 +index 377172e8967b7..e7d139e1a6cec 100644
885 +--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
886 ++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
887 +@@ -935,11 +935,13 @@
888 + status = "disabled";
889 + };
890 +
891 +- uart2: serial@a90000 {
892 ++ uart12: serial@a90000 {
893 + compatible = "qcom,geni-debug-uart";
894 + reg = <0x0 0x00a90000 0x0 0x4000>;
895 + clock-names = "se";
896 + clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
897 ++ pinctrl-names = "default";
898 ++ pinctrl-0 = <&qup_uart12_default>;
899 + interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
900 + status = "disabled";
901 + };
902 +@@ -1880,6 +1882,13 @@
903 + bias-disable;
904 + };
905 + };
906 ++
907 ++ qup_uart12_default: qup-uart12-default {
908 ++ mux {
909 ++ pins = "gpio34", "gpio35";
910 ++ function = "qup12";
911 ++ };
912 ++ };
913 + };
914 +
915 + adsp: remoteproc@17300000 {
916 +diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
917 +index 42171190cce46..065e8fe3a071c 100644
918 +--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
919 ++++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
920 +@@ -1214,9 +1214,8 @@
921 + reg = <0 0xe6ea0000 0 0x0064>;
922 + interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
923 + clocks = <&cpg CPG_MOD 210>;
924 +- dmas = <&dmac1 0x43>, <&dmac1 0x42>,
925 +- <&dmac2 0x43>, <&dmac2 0x42>;
926 +- dma-names = "tx", "rx", "tx", "rx";
927 ++ dmas = <&dmac0 0x43>, <&dmac0 0x42>;
928 ++ dma-names = "tx", "rx";
929 + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
930 + resets = <&cpg 210>;
931 + #address-cells = <1>;
932 +diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
933 +index 1991bdc36792f..27f74df8efbde 100644
934 +--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
935 ++++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
936 +@@ -1192,9 +1192,8 @@
937 + reg = <0 0xe6ea0000 0 0x0064>;
938 + interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
939 + clocks = <&cpg CPG_MOD 210>;
940 +- dmas = <&dmac1 0x43>, <&dmac1 0x42>,
941 +- <&dmac2 0x43>, <&dmac2 0x42>;
942 +- dma-names = "tx", "rx", "tx", "rx";
943 ++ dmas = <&dmac0 0x43>, <&dmac0 0x42>;
944 ++ dma-names = "tx", "rx";
945 + power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
946 + resets = <&cpg 210>;
947 + #address-cells = <1>;
948 +diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
949 +index e8fc01d97adad..6f7490efc438b 100644
950 +--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
951 ++++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
952 +@@ -404,11 +404,12 @@
953 + };
954 +
955 + &serdes_ln_ctrl {
956 +- idle-states = <SERDES0_LANE0_PCIE0_LANE0>, <SERDES0_LANE1_PCIE0_LANE1>,
957 +- <SERDES1_LANE0_PCIE1_LANE0>, <SERDES1_LANE1_PCIE1_LANE1>,
958 +- <SERDES2_LANE0_PCIE2_LANE0>, <SERDES2_LANE1_PCIE2_LANE1>,
959 +- <SERDES3_LANE0_USB3_0_SWAP>, <SERDES3_LANE1_USB3_0>,
960 +- <SERDES4_LANE0_EDP_LANE0>, <SERDES4_LANE1_EDP_LANE1>, <SERDES4_LANE2_EDP_LANE2>, <SERDES4_LANE3_EDP_LANE3>;
961 ++ idle-states = <J721E_SERDES0_LANE0_PCIE0_LANE0>, <J721E_SERDES0_LANE1_PCIE0_LANE1>,
962 ++ <J721E_SERDES1_LANE0_PCIE1_LANE0>, <J721E_SERDES1_LANE1_PCIE1_LANE1>,
963 ++ <J721E_SERDES2_LANE0_PCIE2_LANE0>, <J721E_SERDES2_LANE1_PCIE2_LANE1>,
964 ++ <J721E_SERDES3_LANE0_USB3_0_SWAP>, <J721E_SERDES3_LANE1_USB3_0>,
965 ++ <J721E_SERDES4_LANE0_EDP_LANE0>, <J721E_SERDES4_LANE1_EDP_LANE1>,
966 ++ <J721E_SERDES4_LANE2_EDP_LANE2>, <J721E_SERDES4_LANE3_EDP_LANE3>;
967 + };
968 +
969 + &serdes_wiz3 {
970 +diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
971 +index 12ceea9b3c9ae..63d221aee9bc0 100644
972 +--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
973 ++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
974 +@@ -6,7 +6,7 @@
975 + */
976 + #include <dt-bindings/phy/phy.h>
977 + #include <dt-bindings/mux/mux.h>
978 +-#include <dt-bindings/mux/mux-j721e-wiz.h>
979 ++#include <dt-bindings/mux/ti-serdes.h>
980 +
981 + &cbass_main {
982 + msmc_ram: sram@70000000 {
983 +@@ -38,11 +38,12 @@
984 + <0x40b0 0x3>, <0x40b4 0x3>, /* SERDES3 lane0/1 select */
985 + <0x40c0 0x3>, <0x40c4 0x3>, <0x40c8 0x3>, <0x40cc 0x3>;
986 + /* SERDES4 lane0/1/2/3 select */
987 +- idle-states = <SERDES0_LANE0_PCIE0_LANE0>, <SERDES0_LANE1_PCIE0_LANE1>,
988 +- <SERDES1_LANE0_PCIE1_LANE0>, <SERDES1_LANE1_PCIE1_LANE1>,
989 +- <SERDES2_LANE0_PCIE2_LANE0>, <SERDES2_LANE1_PCIE2_LANE1>,
990 +- <MUX_IDLE_AS_IS>, <SERDES3_LANE1_USB3_0>,
991 +- <SERDES4_LANE0_EDP_LANE0>, <SERDES4_LANE1_EDP_LANE1>, <SERDES4_LANE2_EDP_LANE2>, <SERDES4_LANE3_EDP_LANE3>;
992 ++ idle-states = <J721E_SERDES0_LANE0_PCIE0_LANE0>, <J721E_SERDES0_LANE1_PCIE0_LANE1>,
993 ++ <J721E_SERDES1_LANE0_PCIE1_LANE0>, <J721E_SERDES1_LANE1_PCIE1_LANE1>,
994 ++ <J721E_SERDES2_LANE0_PCIE2_LANE0>, <J721E_SERDES2_LANE1_PCIE2_LANE1>,
995 ++ <MUX_IDLE_AS_IS>, <J721E_SERDES3_LANE1_USB3_0>,
996 ++ <J721E_SERDES4_LANE0_EDP_LANE0>, <J721E_SERDES4_LANE1_EDP_LANE1>,
997 ++ <J721E_SERDES4_LANE2_EDP_LANE2>, <J721E_SERDES4_LANE3_EDP_LANE3>;
998 + };
999 +
1000 + usb_serdes_mux: mux-controller@4000 {
1001 +diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
1002 +index 3ec99f13c259e..a6d869727a92e 100644
1003 +--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
1004 ++++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
1005 +@@ -501,7 +501,7 @@
1006 + };
1007 +
1008 + i2c0: i2c@ff020000 {
1009 +- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
1010 ++ compatible = "cdns,i2c-r1p14";
1011 + status = "disabled";
1012 + interrupt-parent = <&gic>;
1013 + interrupts = <0 17 4>;
1014 +@@ -512,7 +512,7 @@
1015 + };
1016 +
1017 + i2c1: i2c@ff030000 {
1018 +- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
1019 ++ compatible = "cdns,i2c-r1p14";
1020 + status = "disabled";
1021 + interrupt-parent = <&gic>;
1022 + interrupts = <0 18 4>;
1023 +diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
1024 +index 0bc46149e4917..4b39293d0f72d 100644
1025 +--- a/arch/arm64/include/asm/insn.h
1026 ++++ b/arch/arm64/include/asm/insn.h
1027 +@@ -359,9 +359,13 @@ __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
1028 + __AARCH64_INSN_FUNCS(exception, 0xFF000000, 0xD4000000)
1029 + __AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
1030 + __AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000)
1031 ++__AARCH64_INSN_FUNCS(br_auth, 0xFEFFF800, 0xD61F0800)
1032 + __AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000)
1033 ++__AARCH64_INSN_FUNCS(blr_auth, 0xFEFFF800, 0xD63F0800)
1034 + __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
1035 ++__AARCH64_INSN_FUNCS(ret_auth, 0xFFFFFBFF, 0xD65F0BFF)
1036 + __AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0)
1037 ++__AARCH64_INSN_FUNCS(eret_auth, 0xFFFFFBFF, 0xD69F0BFF)
1038 + __AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000)
1039 + __AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F)
1040 + __AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)
1041 +diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
1042 +index afa722504bfde..1ded73189874d 100644
1043 +--- a/arch/arm64/include/asm/memory.h
1044 ++++ b/arch/arm64/include/asm/memory.h
1045 +@@ -164,7 +164,6 @@
1046 + extern u64 vabits_actual;
1047 + #define PAGE_END (_PAGE_END(vabits_actual))
1048 +
1049 +-extern s64 physvirt_offset;
1050 + extern s64 memstart_addr;
1051 + /* PHYS_OFFSET - the physical address of the start of memory. */
1052 + #define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
1053 +@@ -240,7 +239,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
1054 + */
1055 + #define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
1056 +
1057 +-#define __lm_to_phys(addr) (((addr) + physvirt_offset))
1058 ++#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
1059 + #define __kimg_to_phys(addr) ((addr) - kimage_voffset)
1060 +
1061 + #define __virt_to_phys_nodebug(x) ({ \
1062 +@@ -258,7 +257,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
1063 + #define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
1064 + #endif /* CONFIG_DEBUG_VIRTUAL */
1065 +
1066 +-#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset))
1067 ++#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
1068 + #define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
1069 +
1070 + /*
1071 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
1072 +index d5d3fbe739534..88233d42d9c29 100644
1073 +--- a/arch/arm64/include/asm/pgtable.h
1074 ++++ b/arch/arm64/include/asm/pgtable.h
1075 +@@ -23,6 +23,8 @@
1076 + #define VMALLOC_START (MODULES_END)
1077 + #define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
1078 +
1079 ++#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
1080 ++
1081 + #define FIRST_USER_ADDRESS 0UL
1082 +
1083 + #ifndef __ASSEMBLY__
1084 +@@ -33,8 +35,6 @@
1085 + #include <linux/mm_types.h>
1086 + #include <linux/sched.h>
1087 +
1088 +-extern struct page *vmemmap;
1089 +-
1090 + extern void __pte_error(const char *file, int line, unsigned long val);
1091 + extern void __pmd_error(const char *file, int line, unsigned long val);
1092 + extern void __pud_error(const char *file, int line, unsigned long val);
1093 +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
1094 +index 560ba69e13c11..fe3a7695a4202 100644
1095 +--- a/arch/arm64/kernel/cpu_errata.c
1096 ++++ b/arch/arm64/kernel/cpu_errata.c
1097 +@@ -234,14 +234,17 @@ static int detect_harden_bp_fw(void)
1098 + smccc_end = NULL;
1099 + break;
1100 +
1101 +-#if IS_ENABLED(CONFIG_KVM)
1102 + case SMCCC_CONDUIT_SMC:
1103 + cb = call_smc_arch_workaround_1;
1104 ++#if IS_ENABLED(CONFIG_KVM)
1105 + smccc_start = __smccc_workaround_1_smc;
1106 + smccc_end = __smccc_workaround_1_smc +
1107 + __SMCCC_WORKAROUND_1_SMC_SZ;
1108 +- break;
1109 ++#else
1110 ++ smccc_start = NULL;
1111 ++ smccc_end = NULL;
1112 + #endif
1113 ++ break;
1114 +
1115 + default:
1116 + return -1;
1117 +diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
1118 +index a107375005bc9..ccc8c9e22b258 100644
1119 +--- a/arch/arm64/kernel/insn.c
1120 ++++ b/arch/arm64/kernel/insn.c
1121 +@@ -176,7 +176,7 @@ bool __kprobes aarch64_insn_uses_literal(u32 insn)
1122 +
1123 + bool __kprobes aarch64_insn_is_branch(u32 insn)
1124 + {
1125 +- /* b, bl, cb*, tb*, b.cond, br, blr */
1126 ++ /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */
1127 +
1128 + return aarch64_insn_is_b(insn) ||
1129 + aarch64_insn_is_bl(insn) ||
1130 +@@ -185,8 +185,11 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
1131 + aarch64_insn_is_tbz(insn) ||
1132 + aarch64_insn_is_tbnz(insn) ||
1133 + aarch64_insn_is_ret(insn) ||
1134 ++ aarch64_insn_is_ret_auth(insn) ||
1135 + aarch64_insn_is_br(insn) ||
1136 ++ aarch64_insn_is_br_auth(insn) ||
1137 + aarch64_insn_is_blr(insn) ||
1138 ++ aarch64_insn_is_blr_auth(insn) ||
1139 + aarch64_insn_is_bcond(insn);
1140 + }
1141 +
1142 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
1143 +index 462f9a9cc44be..481d48e3872b8 100644
1144 +--- a/arch/arm64/kernel/perf_event.c
1145 ++++ b/arch/arm64/kernel/perf_event.c
1146 +@@ -532,6 +532,11 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
1147 +
1148 + static inline void armv8pmu_enable_counter(u32 mask)
1149 + {
1150 ++ /*
1151 ++ * Make sure event configuration register writes are visible before we
1152 ++ * enable the counter.
1153 ++ * */
1154 ++ isb();
1155 + write_sysreg(mask, pmcntenset_el0);
1156 + }
1157 +
1158 +diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
1159 +index 263d5fba4c8a3..c541fb48886e3 100644
1160 +--- a/arch/arm64/kernel/probes/decode-insn.c
1161 ++++ b/arch/arm64/kernel/probes/decode-insn.c
1162 +@@ -29,7 +29,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
1163 + aarch64_insn_is_msr_imm(insn) ||
1164 + aarch64_insn_is_msr_reg(insn) ||
1165 + aarch64_insn_is_exception(insn) ||
1166 +- aarch64_insn_is_eret(insn))
1167 ++ aarch64_insn_is_eret(insn) ||
1168 ++ aarch64_insn_is_eret_auth(insn))
1169 + return false;
1170 +
1171 + /*
1172 +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
1173 +index 481d22c32a2e7..324f0e0894f6e 100644
1174 +--- a/arch/arm64/mm/init.c
1175 ++++ b/arch/arm64/mm/init.c
1176 +@@ -54,12 +54,6 @@
1177 + s64 memstart_addr __ro_after_init = -1;
1178 + EXPORT_SYMBOL(memstart_addr);
1179 +
1180 +-s64 physvirt_offset __ro_after_init;
1181 +-EXPORT_SYMBOL(physvirt_offset);
1182 +-
1183 +-struct page *vmemmap __ro_after_init;
1184 +-EXPORT_SYMBOL(vmemmap);
1185 +-
1186 + /*
1187 + * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
1188 + * memory as some devices, namely the Raspberry Pi 4, have peripherals with
1189 +@@ -290,20 +284,6 @@ void __init arm64_memblock_init(void)
1190 + memstart_addr = round_down(memblock_start_of_DRAM(),
1191 + ARM64_MEMSTART_ALIGN);
1192 +
1193 +- physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
1194 +-
1195 +- vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
1196 +-
1197 +- /*
1198 +- * If we are running with a 52-bit kernel VA config on a system that
1199 +- * does not support it, we have to offset our vmemmap and physvirt_offset
1200 +- * s.t. we avoid the 52-bit portion of the direct linear map
1201 +- */
1202 +- if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
1203 +- vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
1204 +- physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
1205 +- }
1206 +-
1207 + /*
1208 + * Remove the memory that we will not be able to cover with the
1209 + * linear mapping. Take care not to clip the kernel which may be
1210 +@@ -318,6 +298,16 @@ void __init arm64_memblock_init(void)
1211 + memblock_remove(0, memstart_addr);
1212 + }
1213 +
1214 ++ /*
1215 ++ * If we are running with a 52-bit kernel VA config on a system that
1216 ++ * does not support it, we have to place the available physical
1217 ++ * memory in the 48-bit addressable part of the linear region, i.e.,
1218 ++ * we have to move it upward. Since memstart_addr represents the
1219 ++ * physical address of PAGE_OFFSET, we have to *subtract* from it.
1220 ++ */
1221 ++ if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
1222 ++ memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
1223 ++
1224 + /*
1225 + * Apply the memory limit if it was set. Since the kernel may be loaded
1226 + * high up in memory, add back the kernel region that must be accessible
1227 +diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
1228 +index 9ef4ec0aea008..59f7dfe50a4d0 100644
1229 +--- a/arch/m68k/coldfire/device.c
1230 ++++ b/arch/m68k/coldfire/device.c
1231 +@@ -554,7 +554,7 @@ static struct platform_device mcf_edma = {
1232 + };
1233 + #endif /* IS_ENABLED(CONFIG_MCF_EDMA) */
1234 +
1235 +-#if IS_ENABLED(CONFIG_MMC)
1236 ++#ifdef MCFSDHC_BASE
1237 + static struct mcf_esdhc_platform_data mcf_esdhc_data = {
1238 + .max_bus_width = 4,
1239 + .cd_type = ESDHC_CD_NONE,
1240 +@@ -579,7 +579,7 @@ static struct platform_device mcf_esdhc = {
1241 + .resource = mcf_esdhc_resources,
1242 + .dev.platform_data = &mcf_esdhc_data,
1243 + };
1244 +-#endif /* IS_ENABLED(CONFIG_MMC) */
1245 ++#endif /* MCFSDHC_BASE */
1246 +
1247 + static struct platform_device *mcf_devices[] __initdata = {
1248 + &mcf_uart,
1249 +@@ -613,7 +613,7 @@ static struct platform_device *mcf_devices[] __initdata = {
1250 + #if IS_ENABLED(CONFIG_MCF_EDMA)
1251 + &mcf_edma,
1252 + #endif
1253 +-#if IS_ENABLED(CONFIG_MMC)
1254 ++#ifdef MCFSDHC_BASE
1255 + &mcf_esdhc,
1256 + #endif
1257 + };
1258 +diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
1259 +index 2e87a9b6d312f..63bce836b9f10 100644
1260 +--- a/arch/microblaze/include/asm/Kbuild
1261 ++++ b/arch/microblaze/include/asm/Kbuild
1262 +@@ -1,7 +1,6 @@
1263 + # SPDX-License-Identifier: GPL-2.0
1264 + generated-y += syscall_table.h
1265 + generic-y += extable.h
1266 +-generic-y += hw_irq.h
1267 + generic-y += kvm_para.h
1268 + generic-y += local64.h
1269 + generic-y += mcs_spinlock.h
1270 +diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
1271 +index 787e829b6f25c..997da0221780b 100644
1272 +--- a/arch/powerpc/Kconfig
1273 ++++ b/arch/powerpc/Kconfig
1274 +@@ -980,7 +980,7 @@ config PPC_MEM_KEYS
1275 + config PPC_SECURE_BOOT
1276 + prompt "Enable secure boot support"
1277 + bool
1278 +- depends on PPC_POWERNV
1279 ++ depends on PPC_POWERNV || PPC_PSERIES
1280 + depends on IMA_ARCH_POLICY
1281 + imply IMA_SECURE_AND_OR_TRUSTED_BOOT
1282 + help
1283 +diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
1284 +index de14b1a34d568..9652756b0694c 100644
1285 +--- a/arch/powerpc/include/asm/asm-prototypes.h
1286 ++++ b/arch/powerpc/include/asm/asm-prototypes.h
1287 +@@ -144,7 +144,9 @@ void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
1288 + void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
1289 +
1290 + /* Patch sites */
1291 +-extern s32 patch__call_flush_branch_caches;
1292 ++extern s32 patch__call_flush_branch_caches1;
1293 ++extern s32 patch__call_flush_branch_caches2;
1294 ++extern s32 patch__call_flush_branch_caches3;
1295 + extern s32 patch__flush_count_cache_return;
1296 + extern s32 patch__flush_link_stack_return;
1297 + extern s32 patch__call_kvm_flush_link_stack;
1298 +diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
1299 +index 082b988087011..b3ca542f871ec 100644
1300 +--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
1301 ++++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
1302 +@@ -13,20 +13,19 @@
1303 + */
1304 + #define MAX_EA_BITS_PER_CONTEXT 46
1305 +
1306 +-#define REGION_SHIFT (MAX_EA_BITS_PER_CONTEXT - 2)
1307 +
1308 + /*
1309 +- * Our page table limit us to 64TB. Hence for the kernel mapping,
1310 +- * each MAP area is limited to 16 TB.
1311 +- * The four map areas are: linear mapping, vmap, IO and vmemmap
1312 ++ * Our page table limit us to 64TB. For 64TB physical memory, we only need 64GB
1313 ++ * of vmemmap space. To better support sparse memory layout, we use 61TB
1314 ++ * linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmememmap.
1315 + */
1316 ++#define REGION_SHIFT (40)
1317 + #define H_KERN_MAP_SIZE (ASM_CONST(1) << REGION_SHIFT)
1318 +
1319 + /*
1320 +- * Define the address range of the kernel non-linear virtual area
1321 +- * 16TB
1322 ++ * Define the address range of the kernel non-linear virtual area (61TB)
1323 + */
1324 +-#define H_KERN_VIRT_START ASM_CONST(0xc000100000000000)
1325 ++#define H_KERN_VIRT_START ASM_CONST(0xc0003d0000000000)
1326 +
1327 + #ifndef __ASSEMBLY__
1328 + #define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
1329 +diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
1330 +index b392384a3b150..86173bfc39feb 100644
1331 +--- a/arch/powerpc/include/asm/book3s/64/mmu.h
1332 ++++ b/arch/powerpc/include/asm/book3s/64/mmu.h
1333 +@@ -85,7 +85,7 @@ extern unsigned int mmu_base_pid;
1334 + /*
1335 + * memory block size used with radix translation.
1336 + */
1337 +-extern unsigned int __ro_after_init radix_mem_block_size;
1338 ++extern unsigned long __ro_after_init radix_mem_block_size;
1339 +
1340 + #define PRTB_SIZE_SHIFT (mmu_pid_bits + 4)
1341 + #define PRTB_ENTRIES (1ul << mmu_pid_bits)
1342 +diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
1343 +index 32a15dc49e8ca..ade681c1d4095 100644
1344 +--- a/arch/powerpc/include/asm/cputable.h
1345 ++++ b/arch/powerpc/include/asm/cputable.h
1346 +@@ -483,7 +483,7 @@ static inline void cpu_feature_keys_init(void) { }
1347 + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
1348 + CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
1349 + CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
1350 +- CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31 | \
1351 ++ CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31 | \
1352 + CPU_FTR_DAWR | CPU_FTR_DAWR1)
1353 + #define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \
1354 + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
1355 +diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
1356 +index 17ccc6474ab6f..030a19d922132 100644
1357 +--- a/arch/powerpc/include/asm/drmem.h
1358 ++++ b/arch/powerpc/include/asm/drmem.h
1359 +@@ -8,14 +8,13 @@
1360 + #ifndef _ASM_POWERPC_LMB_H
1361 + #define _ASM_POWERPC_LMB_H
1362 +
1363 ++#include <linux/sched.h>
1364 ++
1365 + struct drmem_lmb {
1366 + u64 base_addr;
1367 + u32 drc_index;
1368 + u32 aa_index;
1369 + u32 flags;
1370 +-#ifdef CONFIG_MEMORY_HOTPLUG
1371 +- int nid;
1372 +-#endif
1373 + };
1374 +
1375 + struct drmem_lmb_info {
1376 +@@ -26,8 +25,22 @@ struct drmem_lmb_info {
1377 +
1378 + extern struct drmem_lmb_info *drmem_info;
1379 +
1380 ++static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
1381 ++ const struct drmem_lmb *start)
1382 ++{
1383 ++ /*
1384 ++ * DLPAR code paths can take several milliseconds per element
1385 ++ * when interacting with firmware. Ensure that we don't
1386 ++ * unfairly monopolize the CPU.
1387 ++ */
1388 ++ if (((++lmb - start) % 16) == 0)
1389 ++ cond_resched();
1390 ++
1391 ++ return lmb;
1392 ++}
1393 ++
1394 + #define for_each_drmem_lmb_in_range(lmb, start, end) \
1395 +- for ((lmb) = (start); (lmb) < (end); (lmb)++)
1396 ++ for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
1397 +
1398 + #define for_each_drmem_lmb(lmb) \
1399 + for_each_drmem_lmb_in_range((lmb), \
1400 +@@ -105,22 +118,4 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
1401 + lmb->aa_index = 0xffffffff;
1402 + }
1403 +
1404 +-#ifdef CONFIG_MEMORY_HOTPLUG
1405 +-static inline void lmb_set_nid(struct drmem_lmb *lmb)
1406 +-{
1407 +- lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr);
1408 +-}
1409 +-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
1410 +-{
1411 +- lmb->nid = -1;
1412 +-}
1413 +-#else
1414 +-static inline void lmb_set_nid(struct drmem_lmb *lmb)
1415 +-{
1416 +-}
1417 +-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
1418 +-{
1419 +-}
1420 +-#endif
1421 +-
1422 + #endif /* _ASM_POWERPC_LMB_H */
1423 +diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
1424 +index db206a7f38e24..9b68eafebf439 100644
1425 +--- a/arch/powerpc/include/asm/hw_breakpoint.h
1426 ++++ b/arch/powerpc/include/asm/hw_breakpoint.h
1427 +@@ -42,6 +42,7 @@ struct arch_hw_breakpoint {
1428 + #else
1429 + #define HW_BREAKPOINT_SIZE 0x8
1430 + #endif
1431 ++#define HW_BREAKPOINT_SIZE_QUADWORD 0x10
1432 +
1433 + #define DABR_MAX_LEN 8
1434 + #define DAWR_MAX_LEN 512
1435 +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
1436 +index 88fb88491fe9f..5647006ed373e 100644
1437 +--- a/arch/powerpc/include/asm/reg.h
1438 ++++ b/arch/powerpc/include/asm/reg.h
1439 +@@ -817,7 +817,7 @@
1440 + #define THRM1_TIN (1 << 31)
1441 + #define THRM1_TIV (1 << 30)
1442 + #define THRM1_THRES(x) ((x&0x7f)<<23)
1443 +-#define THRM3_SITV(x) ((x&0x3fff)<<1)
1444 ++#define THRM3_SITV(x) ((x & 0x1fff) << 1)
1445 + #define THRM1_TID (1<<2)
1446 + #define THRM1_TIE (1<<1)
1447 + #define THRM1_V (1<<0)
1448 +diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h
1449 +index 85580b30aba48..7546402d796af 100644
1450 +--- a/arch/powerpc/include/asm/svm.h
1451 ++++ b/arch/powerpc/include/asm/svm.h
1452 +@@ -15,6 +15,8 @@ static inline bool is_secure_guest(void)
1453 + return mfmsr() & MSR_S;
1454 + }
1455 +
1456 ++void __init svm_swiotlb_init(void);
1457 ++
1458 + void dtl_cache_ctor(void *addr);
1459 + #define get_dtl_cache_ctor() (is_secure_guest() ? dtl_cache_ctor : NULL)
1460 +
1461 +@@ -25,6 +27,8 @@ static inline bool is_secure_guest(void)
1462 + return false;
1463 + }
1464 +
1465 ++static inline void svm_swiotlb_init(void) {}
1466 ++
1467 + #define get_dtl_cache_ctor() NULL
1468 +
1469 + #endif /* CONFIG_PPC_SVM */
1470 +diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
1471 +index fbc6f3002f236..d97f061fecac0 100644
1472 +--- a/arch/powerpc/include/asm/tlb.h
1473 ++++ b/arch/powerpc/include/asm/tlb.h
1474 +@@ -66,19 +66,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
1475 + return false;
1476 + return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
1477 + }
1478 +-static inline void mm_reset_thread_local(struct mm_struct *mm)
1479 +-{
1480 +- WARN_ON(atomic_read(&mm->context.copros) > 0);
1481 +- /*
1482 +- * It's possible for mm_access to take a reference on mm_users to
1483 +- * access the remote mm from another thread, but it's not allowed
1484 +- * to set mm_cpumask, so mm_users may be > 1 here.
1485 +- */
1486 +- WARN_ON(current->mm != mm);
1487 +- atomic_set(&mm->context.active_cpus, 1);
1488 +- cpumask_clear(mm_cpumask(mm));
1489 +- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
1490 +-}
1491 + #else /* CONFIG_PPC_BOOK3S_64 */
1492 + static inline int mm_is_thread_local(struct mm_struct *mm)
1493 + {
1494 +diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
1495 +index 2aa89c6b28967..0d704f1e07739 100644
1496 +--- a/arch/powerpc/kernel/cputable.c
1497 ++++ b/arch/powerpc/kernel/cputable.c
1498 +@@ -120,9 +120,16 @@ extern void __restore_cpu_e6500(void);
1499 + PPC_FEATURE2_DARN | \
1500 + PPC_FEATURE2_SCV)
1501 + #define COMMON_USER_POWER10 COMMON_USER_POWER9
1502 +-#define COMMON_USER2_POWER10 (COMMON_USER2_POWER9 | \
1503 +- PPC_FEATURE2_ARCH_3_1 | \
1504 +- PPC_FEATURE2_MMA)
1505 ++#define COMMON_USER2_POWER10 (PPC_FEATURE2_ARCH_3_1 | \
1506 ++ PPC_FEATURE2_MMA | \
1507 ++ PPC_FEATURE2_ARCH_3_00 | \
1508 ++ PPC_FEATURE2_HAS_IEEE128 | \
1509 ++ PPC_FEATURE2_DARN | \
1510 ++ PPC_FEATURE2_SCV | \
1511 ++ PPC_FEATURE2_ARCH_2_07 | \
1512 ++ PPC_FEATURE2_DSCR | \
1513 ++ PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
1514 ++ PPC_FEATURE2_VEC_CRYPTO)
1515 +
1516 + #ifdef CONFIG_PPC_BOOK3E_64
1517 + #define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
1518 +diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
1519 +index 733e40eba4ebe..2f3846192ec7d 100644
1520 +--- a/arch/powerpc/kernel/entry_64.S
1521 ++++ b/arch/powerpc/kernel/entry_64.S
1522 +@@ -430,7 +430,11 @@ _ASM_NOKPROBE_SYMBOL(save_nvgprs);
1523 +
1524 + #define FLUSH_COUNT_CACHE \
1525 + 1: nop; \
1526 +- patch_site 1b, patch__call_flush_branch_caches
1527 ++ patch_site 1b, patch__call_flush_branch_caches1; \
1528 ++1: nop; \
1529 ++ patch_site 1b, patch__call_flush_branch_caches2; \
1530 ++1: nop; \
1531 ++ patch_site 1b, patch__call_flush_branch_caches3
1532 +
1533 + .macro nops number
1534 + .rept \number
1535 +@@ -512,7 +516,7 @@ _GLOBAL(_switch)
1536 +
1537 + kuap_check_amr r9, r10
1538 +
1539 +- FLUSH_COUNT_CACHE
1540 ++ FLUSH_COUNT_CACHE /* Clobbers r9, ctr */
1541 +
1542 + /*
1543 + * On SMP kernels, care must be taken because a task may be
1544 +diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
1545 +index 1f4a1efa00744..f6b24838ca3c0 100644
1546 +--- a/arch/powerpc/kernel/hw_breakpoint.c
1547 ++++ b/arch/powerpc/kernel/hw_breakpoint.c
1548 +@@ -520,9 +520,17 @@ static bool ea_hw_range_overlaps(unsigned long ea, int size,
1549 + struct arch_hw_breakpoint *info)
1550 + {
1551 + unsigned long hw_start_addr, hw_end_addr;
1552 ++ unsigned long align_size = HW_BREAKPOINT_SIZE;
1553 +
1554 +- hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
1555 +- hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
1556 ++ /*
1557 ++ * On p10 predecessors, quadword is handled differently than
1558 ++ * other instructions.
1559 ++ */
1560 ++ if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16)
1561 ++ align_size = HW_BREAKPOINT_SIZE_QUADWORD;
1562 ++
1563 ++ hw_start_addr = ALIGN_DOWN(info->address, align_size);
1564 ++ hw_end_addr = ALIGN(info->address + info->len, align_size);
1565 +
1566 + return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
1567 + }
1568 +@@ -636,6 +644,8 @@ static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
1569 + if (*type == CACHEOP) {
1570 + *size = cache_op_size();
1571 + *ea &= ~(*size - 1);
1572 ++ } else if (*type == LOAD_VMX || *type == STORE_VMX) {
1573 ++ *ea &= ~(*size - 1);
1574 + }
1575 + }
1576 +
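
The hunk above widens the watched range to quadword (16-byte) alignment when checking a 16-byte access on pre-POWER10 CPUs. A minimal user-space sketch of the same overlap test; ea_overlaps and the sample addresses are made up for illustration, and the ALIGN macros are reimplemented from their kernel definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define ALIGN_DOWN(x, a) ((x) & ~((unsigned long)(a) - 1))
    #define ALIGN(x, a)      (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    /* Mirrors ea_hw_range_overlaps(): widen the watched range to
     * align_size, then intersect it with [ea, ea + size). */
    static bool ea_overlaps(unsigned long ea, int size,
                            unsigned long addr, int len, int align_size)
    {
            unsigned long hw_start = ALIGN_DOWN(addr, align_size);
            unsigned long hw_end = ALIGN(addr + len, align_size);

            return ea < hw_end && ea + size > hw_start;
    }

    int main(void)
    {
            /* A quadword access at 0x1008 misses an 8-byte watchpoint at
             * 0x1018 under 8-byte alignment (prints 0), but hits once the
             * range is widened to 16 bytes (prints 1). */
            printf("%d\n", ea_overlaps(0x1008, 16, 0x1018, 8, 8));
            printf("%d\n", ea_overlaps(0x1008, 16, 0x1018, 8, 16));
            return 0;
    }
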
1577 +diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
1578 +index bf21ebd361900..3fdad93368858 100644
1579 +--- a/arch/powerpc/kernel/irq.c
1580 ++++ b/arch/powerpc/kernel/irq.c
1581 +@@ -214,7 +214,7 @@ void replay_soft_interrupts(void)
1582 + struct pt_regs regs;
1583 +
1584 + ppc_save_regs(&regs);
1585 +- regs.softe = IRQS_ALL_DISABLED;
1586 ++ regs.softe = IRQS_ENABLED;
1587 +
1588 + again:
1589 + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
1590 +@@ -368,6 +368,12 @@ notrace void arch_local_irq_restore(unsigned long mask)
1591 + }
1592 + }
1593 +
1594 ++ /*
1595 ++ * Disable preempt here, so that the preempt_enable below will
1596 ++ * perform resched if required (a replayed interrupt may set
1597 ++ * need_resched).
1598 ++ */
1599 ++ preempt_disable();
1600 + irq_soft_mask_set(IRQS_ALL_DISABLED);
1601 + trace_hardirqs_off();
1602 +
1603 +@@ -377,6 +383,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
1604 + trace_hardirqs_on();
1605 + irq_soft_mask_set(IRQS_ENABLED);
1606 + __hard_irq_enable();
1607 ++ preempt_enable();
1608 + }
1609 + EXPORT_SYMBOL(arch_local_irq_restore);
1610 +
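
The preempt_disable()/preempt_enable() pair added above brackets the soft-interrupt replay: a replayed interrupt may set need_resched, and it is the closing preempt_enable() that performs the deferred reschedule. A condensed kernel-style sketch of the resulting tail of arch_local_irq_restore() (not standalone code; the intermediate replay logic is elided):

            preempt_disable();              /* hold any resched back */
            irq_soft_mask_set(IRQS_ALL_DISABLED);
            trace_hardirqs_off();
            replay_soft_interrupts();       /* may set need_resched */
            trace_hardirqs_on();
            irq_soft_mask_set(IRQS_ENABLED);
            __hard_irq_enable();
            preempt_enable();               /* reschedules if required */
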
1611 +diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
1612 +index 697c7e4b5877f..8bd8d8de5c40b 100644
1613 +--- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c
1614 ++++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
1615 +@@ -219,6 +219,7 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf
1616 + brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE);
1617 + brk.type = HW_BRK_TYPE_TRANSLATE;
1618 + brk.len = DABR_MAX_LEN;
1619 ++ brk.hw_len = DABR_MAX_LEN;
1620 + if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
1621 + brk.type |= HW_BRK_TYPE_READ;
1622 + if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
1623 +diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
1624 +index c9876aab31421..e4e1a94ccf6a6 100644
1625 +--- a/arch/powerpc/kernel/security.c
1626 ++++ b/arch/powerpc/kernel/security.c
1627 +@@ -430,30 +430,44 @@ device_initcall(stf_barrier_debugfs_init);
1628 +
1629 + static void update_branch_cache_flush(void)
1630 + {
1631 ++ u32 *site;
1632 ++
1633 + #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1634 ++ site = &patch__call_kvm_flush_link_stack;
1635 + // This controls the branch from guest_exit_cont to kvm_flush_link_stack
1636 + if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
1637 +- patch_instruction_site(&patch__call_kvm_flush_link_stack,
1638 +- ppc_inst(PPC_INST_NOP));
1639 ++ patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
1640 + } else {
1641 + // Could use HW flush, but that could also flush count cache
1642 +- patch_branch_site(&patch__call_kvm_flush_link_stack,
1643 +- (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
1644 ++ patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
1645 + }
1646 + #endif
1647 +
1648 ++ // Patch out the bcctr first, then nop the rest
1649 ++ site = &patch__call_flush_branch_caches3;
1650 ++ patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
1651 ++ site = &patch__call_flush_branch_caches2;
1652 ++ patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
1653 ++ site = &patch__call_flush_branch_caches1;
1654 ++ patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
1655 ++
1656 + // This controls the branch from _switch to flush_branch_caches
1657 + if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
1658 + link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
1659 +- patch_instruction_site(&patch__call_flush_branch_caches,
1660 +- ppc_inst(PPC_INST_NOP));
1661 ++ // Nothing to be done
1662 ++
1663 + } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
1664 + link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) {
1665 +- patch_instruction_site(&patch__call_flush_branch_caches,
1666 +- ppc_inst(PPC_INST_BCCTR_FLUSH));
1667 ++ // Patch in the bcctr last
1668 ++ site = &patch__call_flush_branch_caches1;
1669 ++ patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
1670 ++ site = &patch__call_flush_branch_caches2;
1671 ++ patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
1672 ++ site = &patch__call_flush_branch_caches3;
1673 ++ patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));
1674 ++
1675 + } else {
1676 +- patch_branch_site(&patch__call_flush_branch_caches,
1677 +- (u64)&flush_branch_caches, BRANCH_SET_LINK);
1678 ++ patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK);
1679 +
1680 + // If we just need to flush the link stack, early return
1681 + if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
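
The two magic words patched in above are raw Power instruction encodings. Assuming the standard D-form layout for addi (li is addi with RA=0) and the XFX-form layout for mtspr (CTR is SPR 9, extended opcode 467), a small self-checking user-space program reproduces both constants:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ppc_addi(int rt, int ra, uint16_t si)
    {
            return (14u << 26) | ((uint32_t)rt << 21) | ((uint32_t)ra << 16) | si;
    }

    static uint32_t ppc_mtctr(int rs)
    {
            int spr = 9; /* CTR */
            uint32_t spr_field = ((spr & 0x1f) << 16) | ((spr >> 5) << 11);

            return (31u << 26) | ((uint32_t)rs << 21) | spr_field | (467u << 1);
    }

    int main(void)
    {
            assert(ppc_addi(9, 0, 0x7fff) == 0x39207fff); /* li r9,0x7fff */
            assert(ppc_mtctr(9) == 0x7d2903a6);           /* mtctr r9 */
            puts("encodings match");
            return 0;
    }

Nopping the bcctr site first and writing it back last, as the comments above note, keeps the three-instruction window safe for CPUs executing it while it is being rewritten.
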
1682 +diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
1683 +index e2ab8a111b693..0b4694b8d2482 100644
1684 +--- a/arch/powerpc/kernel/tau_6xx.c
1685 ++++ b/arch/powerpc/kernel/tau_6xx.c
1686 +@@ -13,13 +13,14 @@
1687 + */
1688 +
1689 + #include <linux/errno.h>
1690 +-#include <linux/jiffies.h>
1691 + #include <linux/kernel.h>
1692 + #include <linux/param.h>
1693 + #include <linux/string.h>
1694 + #include <linux/mm.h>
1695 + #include <linux/interrupt.h>
1696 + #include <linux/init.h>
1697 ++#include <linux/delay.h>
1698 ++#include <linux/workqueue.h>
1699 +
1700 + #include <asm/io.h>
1701 + #include <asm/reg.h>
1702 +@@ -39,9 +40,7 @@ static struct tau_temp
1703 + unsigned char grew;
1704 + } tau[NR_CPUS];
1705 +
1706 +-struct timer_list tau_timer;
1707 +-
1708 +-#undef DEBUG
1709 ++static bool tau_int_enable;
1710 +
1711 + /* TODO: put these in a /proc interface, with some sanity checks, and maybe
1712 + * dynamic adjustment to minimize # of interrupts */
1713 +@@ -50,72 +49,49 @@ struct timer_list tau_timer;
1714 + #define step_size 2 /* step size when temp goes out of range */
1715 + #define window_expand 1 /* expand the window by this much */
1716 + /* configurable values for shrinking the window */
1717 +-#define shrink_timer 2*HZ /* period between shrinking the window */
1718 ++#define shrink_timer 2000 /* period between shrinking the window */
1719 + #define min_window 2 /* minimum window size, degrees C */
1720 +
1721 + static void set_thresholds(unsigned long cpu)
1722 + {
1723 +-#ifdef CONFIG_TAU_INT
1724 +- /*
1725 +- * setup THRM1,
1726 +- * threshold, valid bit, enable interrupts, interrupt when below threshold
1727 +- */
1728 +- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
1729 ++ u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0;
1730 +
1731 +- /* setup THRM2,
1732 +- * threshold, valid bit, enable interrupts, interrupt when above threshold
1733 +- */
1734 +- mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
1735 +-#else
1736 +- /* same thing but don't enable interrupts */
1737 +- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
1738 +- mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
1739 +-#endif
1740 ++ /* setup THRM1, threshold, valid bit, interrupt when below threshold */
1741 ++ mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
1742 ++
1743 ++ /* setup THRM2, threshold, valid bit, interrupt when above threshold */
1744 ++ mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
1745 + }
1746 +
1747 + static void TAUupdate(int cpu)
1748 + {
1749 +- unsigned thrm;
1750 +-
1751 +-#ifdef DEBUG
1752 +- printk("TAUupdate ");
1753 +-#endif
1754 ++ u32 thrm;
1755 ++ u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V;
1756 +
1757 + /* if both thresholds are crossed, the step_sizes cancel out
1758 + * and the window winds up getting expanded twice. */
1759 +- if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
1760 +- if(thrm & THRM1_TIN){ /* crossed low threshold */
1761 +- if (tau[cpu].low >= step_size){
1762 +- tau[cpu].low -= step_size;
1763 +- tau[cpu].high -= (step_size - window_expand);
1764 +- }
1765 +- tau[cpu].grew = 1;
1766 +-#ifdef DEBUG
1767 +- printk("low threshold crossed ");
1768 +-#endif
1769 ++ thrm = mfspr(SPRN_THRM1);
1770 ++ if ((thrm & bits) == bits) {
1771 ++ mtspr(SPRN_THRM1, 0);
1772 ++
1773 ++ if (tau[cpu].low >= step_size) {
1774 ++ tau[cpu].low -= step_size;
1775 ++ tau[cpu].high -= (step_size - window_expand);
1776 + }
1777 ++ tau[cpu].grew = 1;
1778 ++ pr_debug("%s: low threshold crossed\n", __func__);
1779 + }
1780 +- if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
1781 +- if(thrm & THRM1_TIN){ /* crossed high threshold */
1782 +- if (tau[cpu].high <= 127-step_size){
1783 +- tau[cpu].low += (step_size - window_expand);
1784 +- tau[cpu].high += step_size;
1785 +- }
1786 +- tau[cpu].grew = 1;
1787 +-#ifdef DEBUG
1788 +- printk("high threshold crossed ");
1789 +-#endif
1790 ++ thrm = mfspr(SPRN_THRM2);
1791 ++ if ((thrm & bits) == bits) {
1792 ++ mtspr(SPRN_THRM2, 0);
1793 ++
1794 ++ if (tau[cpu].high <= 127 - step_size) {
1795 ++ tau[cpu].low += (step_size - window_expand);
1796 ++ tau[cpu].high += step_size;
1797 + }
1798 ++ tau[cpu].grew = 1;
1799 ++ pr_debug("%s: high threshold crossed\n", __func__);
1800 + }
1801 +-
1802 +-#ifdef DEBUG
1803 +- printk("grew = %d\n", tau[cpu].grew);
1804 +-#endif
1805 +-
1806 +-#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
1807 +- set_thresholds(cpu);
1808 +-#endif
1809 +-
1810 + }
1811 +
1812 + #ifdef CONFIG_TAU_INT
1813 +@@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs)
1814 + static void tau_timeout(void * info)
1815 + {
1816 + int cpu;
1817 +- unsigned long flags;
1818 + int size;
1819 + int shrink;
1820 +
1821 +- /* disabling interrupts *should* be okay */
1822 +- local_irq_save(flags);
1823 + cpu = smp_processor_id();
1824 +
1825 +-#ifndef CONFIG_TAU_INT
1826 +- TAUupdate(cpu);
1827 +-#endif
1828 ++ if (!tau_int_enable)
1829 ++ TAUupdate(cpu);
1830 ++
1831 ++ /* Stop thermal sensor comparisons and interrupts */
1832 ++ mtspr(SPRN_THRM3, 0);
1833 +
1834 + size = tau[cpu].high - tau[cpu].low;
1835 + if (size > min_window && ! tau[cpu].grew) {
1836 +@@ -173,32 +148,26 @@ static void tau_timeout(void * info)
1837 +
1838 + set_thresholds(cpu);
1839 +
1840 +- /*
1841 +- * Do the enable every time, since otherwise a bunch of (relatively)
1842 +- * complex sleep code needs to be added. One mtspr every time
1843 +- * tau_timeout is called is probably not a big deal.
1844 +- *
1845 +- * Enable thermal sensor and set up sample interval timer
1846 +- * need 20 us to do the compare.. until a nice 'cpu_speed' function
1847 +- * call is implemented, just assume a 500 mhz clock. It doesn't really
1848 +- * matter if we take too long for a compare since it's all interrupt
1849 +- * driven anyway.
1850 +- *
1851 +- * use a extra long time.. (60 us @ 500 mhz)
1852 ++ /* Restart thermal sensor comparisons and interrupts.
1853 ++ * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet"
1854 ++ * recommends that "the maximum value be set in THRM3 under all
1855 ++ * conditions."
1856 + */
1857 +- mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
1858 +-
1859 +- local_irq_restore(flags);
1860 ++ mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
1861 + }
1862 +
1863 +-static void tau_timeout_smp(struct timer_list *unused)
1864 +-{
1865 ++static struct workqueue_struct *tau_workq;
1866 +
1867 +- /* schedule ourselves to be run again */
1868 +- mod_timer(&tau_timer, jiffies + shrink_timer) ;
1869 ++static void tau_work_func(struct work_struct *work)
1870 ++{
1871 ++ msleep(shrink_timer);
1872 + on_each_cpu(tau_timeout, NULL, 0);
1873 ++ /* schedule ourselves to be run again */
1874 ++ queue_work(tau_workq, work);
1875 + }
1876 +
1877 ++DECLARE_WORK(tau_work, tau_work_func);
1878 ++
1879 + /*
1880 + * setup the TAU
1881 + *
1882 +@@ -231,21 +200,19 @@ static int __init TAU_init(void)
1883 + return 1;
1884 + }
1885 +
1886 ++ tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
1887 ++ !strcmp(cur_cpu_spec->platform, "ppc750");
1888 +
1889 +- /* first, set up the window shrinking timer */
1890 +- timer_setup(&tau_timer, tau_timeout_smp, 0);
1891 +- tau_timer.expires = jiffies + shrink_timer;
1892 +- add_timer(&tau_timer);
1893 ++ tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0);
1894 ++ if (!tau_workq)
1895 ++ return -ENOMEM;
1896 +
1897 + on_each_cpu(TAU_init_smp, NULL, 0);
1898 +
1899 +- printk("Thermal assist unit ");
1900 +-#ifdef CONFIG_TAU_INT
1901 +- printk("using interrupts, ");
1902 +-#else
1903 +- printk("using timers, ");
1904 +-#endif
1905 +- printk("shrink_timer: %d jiffies\n", shrink_timer);
1906 ++ queue_work(tau_workq, &tau_work);
1907 ++
1908 ++ pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n",
1909 ++ tau_int_enable ? "interrupts" : "workqueue", shrink_timer);
1910 + tau_initialized = 1;
1911 +
1912 + return 0;
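
The conversion above trades the timer for a self-requeueing work item: work runs in process context, where msleep() and the synchronous on_each_cpu() call are permitted, unlike in a timer callback. Reduced to its kernel-style skeleton (names shortened, not standalone code):

            static struct workqueue_struct *wq;

            static void poll_fn(struct work_struct *work)
            {
                    msleep(2000);             /* shrink_timer, now in ms */
                    on_each_cpu(tau_timeout, NULL, 0);
                    queue_work(wq, work);     /* re-arm ourselves */
            }
            static DECLARE_WORK(poll_work, poll_fn);

An alloc_workqueue() plus one initial queue_work() then starts the loop, exactly as TAU_init() does above.
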
1913 +diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
1914 +index d5f0c10d752a3..aae8550379bae 100644
1915 +--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
1916 ++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
1917 +@@ -34,7 +34,7 @@
1918 +
1919 + unsigned int mmu_pid_bits;
1920 + unsigned int mmu_base_pid;
1921 +-unsigned int radix_mem_block_size __ro_after_init;
1922 ++unsigned long radix_mem_block_size __ro_after_init;
1923 +
1924 + static __ref void *early_alloc_pgtable(unsigned long size, int nid,
1925 + unsigned long region_start, unsigned long region_end)
1926 +diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
1927 +index 0d233763441fd..143b4fd396f08 100644
1928 +--- a/arch/powerpc/mm/book3s64/radix_tlb.c
1929 ++++ b/arch/powerpc/mm/book3s64/radix_tlb.c
1930 +@@ -645,19 +645,29 @@ static void do_exit_flush_lazy_tlb(void *arg)
1931 + struct mm_struct *mm = arg;
1932 + unsigned long pid = mm->context.id;
1933 +
1934 ++ /*
1935 ++ * A kthread could have done a mmget_not_zero() after the flushing CPU
1936 ++ * checked mm_is_singlethreaded, and be in the process of
1937 ++ * kthread_use_mm when interrupted here. In that case, current->mm will
1938 ++ * be set to mm, because kthread_use_mm() setting ->mm and switching to
1939 ++ * the mm is done with interrupts off.
1940 ++ */
1941 + if (current->mm == mm)
1942 +- return; /* Local CPU */
1943 ++ goto out_flush;
1944 +
1945 + if (current->active_mm == mm) {
1946 +- /*
1947 +- * Must be a kernel thread because sender is single-threaded.
1948 +- */
1949 +- BUG_ON(current->mm);
1950 ++ WARN_ON_ONCE(current->mm != NULL);
1951 ++ /* Is a kernel thread and is using mm as the lazy tlb */
1952 + mmgrab(&init_mm);
1953 +- switch_mm(mm, &init_mm, current);
1954 + current->active_mm = &init_mm;
1955 ++ switch_mm_irqs_off(mm, &init_mm, current);
1956 + mmdrop(mm);
1957 + }
1958 ++
1959 ++ atomic_dec(&mm->context.active_cpus);
1960 ++ cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
1961 ++
1962 ++out_flush:
1963 + _tlbiel_pid(pid, RIC_FLUSH_ALL);
1964 + }
1965 +
1966 +@@ -672,7 +682,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
1967 + */
1968 + smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
1969 + (void *)mm, 1);
1970 +- mm_reset_thread_local(mm);
1971 + }
1972 +
1973 + void radix__flush_tlb_mm(struct mm_struct *mm)
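
The rework above moves the active_cpus and cpumask bookkeeping into the IPI handler itself, since the sending CPU can no longer assume the mm stayed single-threaded by the time each target runs. The per-CPU handoff, in kernel-style outline (simplified from do_exit_flush_lazy_tlb() above):

            if (current->active_mm == mm) {   /* lazy-tlb kernel thread */
                    mmgrab(&init_mm);
                    current->active_mm = &init_mm;
                    switch_mm_irqs_off(mm, &init_mm, current);
                    mmdrop(mm);
            }
            atomic_dec(&mm->context.active_cpus);
            cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
            _tlbiel_pid(pid, RIC_FLUSH_ALL);  /* flush this CPU */
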
1974 +diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
1975 +index b2eeea39684ca..9af3832c9d8dc 100644
1976 +--- a/arch/powerpc/mm/drmem.c
1977 ++++ b/arch/powerpc/mm/drmem.c
1978 +@@ -389,10 +389,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
1979 + if (!drmem_info->lmbs)
1980 + return;
1981 +
1982 +- for_each_drmem_lmb(lmb) {
1983 ++ for_each_drmem_lmb(lmb)
1984 + read_drconf_v1_cell(lmb, &prop);
1985 +- lmb_set_nid(lmb);
1986 +- }
1987 + }
1988 +
1989 + static void __init init_drmem_v2_lmbs(const __be32 *prop)
1990 +@@ -437,8 +435,6 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
1991 +
1992 + lmb->aa_index = dr_cell.aa_index;
1993 + lmb->flags = dr_cell.flags;
1994 +-
1995 +- lmb_set_nid(lmb);
1996 + }
1997 + }
1998 + }
1999 +diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
2000 +index fb294046e00e4..929716ea21e9c 100644
2001 +--- a/arch/powerpc/mm/kasan/kasan_init_32.c
2002 ++++ b/arch/powerpc/mm/kasan/kasan_init_32.c
2003 +@@ -127,8 +127,7 @@ void __init kasan_mmu_init(void)
2004 + {
2005 + int ret;
2006 +
2007 +- if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
2008 +- IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
2009 ++ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
2010 + ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
2011 +
2012 + if (ret)
2013 +@@ -139,11 +138,11 @@ void __init kasan_mmu_init(void)
2014 + void __init kasan_init(void)
2015 + {
2016 + struct memblock_region *reg;
2017 ++ int ret;
2018 +
2019 + for_each_memblock(memory, reg) {
2020 + phys_addr_t base = reg->base;
2021 + phys_addr_t top = min(base + reg->size, total_lowmem);
2022 +- int ret;
2023 +
2024 + if (base >= top)
2025 + continue;
2026 +@@ -153,6 +152,13 @@ void __init kasan_init(void)
2027 + panic("kasan: kasan_init_region() failed");
2028 + }
2029 +
2030 ++ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
2031 ++ ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
2032 ++
2033 ++ if (ret)
2034 ++ panic("kasan: kasan_init_shadow_page_tables() failed");
2035 ++ }
2036 ++
2037 + kasan_remap_early_shadow_ro();
2038 +
2039 + clear_page(kasan_early_shadow_page);
2040 +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
2041 +index 42e25874f5a8f..ddc32cc1b6cfc 100644
2042 +--- a/arch/powerpc/mm/mem.c
2043 ++++ b/arch/powerpc/mm/mem.c
2044 +@@ -49,6 +49,7 @@
2045 + #include <asm/swiotlb.h>
2046 + #include <asm/rtas.h>
2047 + #include <asm/kasan.h>
2048 ++#include <asm/svm.h>
2049 +
2050 + #include <mm/mmu_decl.h>
2051 +
2052 +@@ -282,7 +283,10 @@ void __init mem_init(void)
2053 + * back to top-down.
2054 + */
2055 + memblock_set_bottom_up(true);
2056 +- swiotlb_init(0);
2057 ++ if (is_secure_guest())
2058 ++ svm_swiotlb_init();
2059 ++ else
2060 ++ swiotlb_init(0);
2061 + #endif
2062 +
2063 + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
2064 +diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
2065 +index e608f9db12ddc..8965b4463d433 100644
2066 +--- a/arch/powerpc/perf/hv-gpci-requests.h
2067 ++++ b/arch/powerpc/perf/hv-gpci-requests.h
2068 +@@ -95,7 +95,7 @@ REQUEST(__field(0, 8, partition_id)
2069 +
2070 + #define REQUEST_NAME system_performance_capabilities
2071 + #define REQUEST_NUM 0x40
2072 +-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
2073 ++#define REQUEST_IDX_KIND "starting_index=0xffffffff"
2074 + #include I(REQUEST_BEGIN)
2075 + REQUEST(__field(0, 1, perf_collect_privileged)
2076 + __field(0x1, 1, capability_mask)
2077 +@@ -223,7 +223,7 @@ REQUEST(__field(0, 2, partition_id)
2078 +
2079 + #define REQUEST_NAME system_hypervisor_times
2080 + #define REQUEST_NUM 0xF0
2081 +-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
2082 ++#define REQUEST_IDX_KIND "starting_index=0xffffffff"
2083 + #include I(REQUEST_BEGIN)
2084 + REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
2085 + __count(0x8, 8, time_spent_processing_virtual_processor_timers)
2086 +@@ -234,7 +234,7 @@ REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
2087 +
2088 + #define REQUEST_NAME system_tlbie_count_and_time
2089 + #define REQUEST_NUM 0xF4
2090 +-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
2091 ++#define REQUEST_IDX_KIND "starting_index=0xffffffff"
2092 + #include I(REQUEST_BEGIN)
2093 + REQUEST(__count(0, 8, tlbie_instructions_issued)
2094 + /*
2095 +diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
2096 +index 964437adec185..2848904df6383 100644
2097 +--- a/arch/powerpc/perf/isa207-common.c
2098 ++++ b/arch/powerpc/perf/isa207-common.c
2099 +@@ -288,6 +288,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
2100 +
2101 + mask |= CNST_PMC_MASK(pmc);
2102 + value |= CNST_PMC_VAL(pmc);
2103 ++
2104 ++ /*
2105 ++ * PMC5 and PMC6 are used to count cycles and instructions and
2106 ++ * they do not support most of the constraint bits. Add a check
2107 ++ * to exclude PMC5/6 from most of the constraints except for
2108 ++ * EBB/BHRB.
2109 ++ */
2110 ++ if (pmc >= 5)
2111 ++ goto ebb_bhrb;
2112 + }
2113 +
2114 + if (pmc <= 4) {
2115 +@@ -357,6 +366,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
2116 + }
2117 + }
2118 +
2119 ++ebb_bhrb:
2120 + if (!pmc && ebb)
2121 + /* EBB events must specify the PMC */
2122 + return -1;
2123 +diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
2124 +index fb7515b4fa9c6..b439b027a42f1 100644
2125 +--- a/arch/powerpc/platforms/Kconfig
2126 ++++ b/arch/powerpc/platforms/Kconfig
2127 +@@ -223,12 +223,11 @@ config TAU
2128 + temperature within 2-4 degrees Celsius. This option shows the current
2129 + on-die temperature in /proc/cpuinfo if the cpu supports it.
2130 +
2131 +- Unfortunately, on some chip revisions, this sensor is very inaccurate
2132 +- and in many cases, does not work at all, so don't assume the cpu
2133 +- temp is actually what /proc/cpuinfo says it is.
2134 ++ Unfortunately, this sensor is very inaccurate when uncalibrated, so
2135 ++ don't assume the cpu temp is actually what /proc/cpuinfo says it is.
2136 +
2137 + config TAU_INT
2138 +- bool "Interrupt driven TAU driver (DANGEROUS)"
2139 ++ bool "Interrupt driven TAU driver (EXPERIMENTAL)"
2140 + depends on TAU
2141 + help
2142 + The TAU supports an interrupt driven mode which causes an interrupt
2143 +@@ -236,12 +235,7 @@ config TAU_INT
2144 + to get notified the temp has exceeded a range. With this option off,
2145 + a timer is used to re-check the temperature periodically.
2146 +
2147 +- However, on some cpus it appears that the TAU interrupt hardware
2148 +- is buggy and can cause a situation which would lead unexplained hard
2149 +- lockups.
2150 +-
2151 +- Unless you are extending the TAU driver, or enjoy kernel/hardware
2152 +- debugging, leave this option off.
2153 ++ If in doubt, say N here.
2154 +
2155 + config TAU_AVERAGE
2156 + bool "Average high and low temp"
2157 +diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
2158 +index 543c816fa99ef..0e6693bacb7e7 100644
2159 +--- a/arch/powerpc/platforms/powernv/opal-dump.c
2160 ++++ b/arch/powerpc/platforms/powernv/opal-dump.c
2161 +@@ -318,15 +318,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
2162 + return count;
2163 + }
2164 +
2165 +-static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
2166 +- uint32_t type)
2167 ++static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
2168 + {
2169 + struct dump_obj *dump;
2170 + int rc;
2171 +
2172 + dump = kzalloc(sizeof(*dump), GFP_KERNEL);
2173 + if (!dump)
2174 +- return NULL;
2175 ++ return;
2176 +
2177 + dump->kobj.kset = dump_kset;
2178 +
2179 +@@ -346,21 +345,39 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
2180 + rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
2181 + if (rc) {
2182 + kobject_put(&dump->kobj);
2183 +- return NULL;
2184 ++ return;
2185 + }
2186 +
2187 ++ /*
2188 ++ * As soon as the sysfs file for this dump is created/activated there is
2189 ++ * a chance the opal_errd daemon (or any userspace) might read and
2190 ++ * acknowledge the dump before kobject_uevent() is called. If that
2191 ++ * happens then there is a potential race between
2192 ++ * dump_ack_store->kobject_put() and kobject_uevent() which leads to a
2193 ++ * use-after-free of a kernfs object resulting in a kernel crash.
2194 ++ *
2195 ++ * To avoid that, we need to take a reference on behalf of the bin file,
2196 ++ * so that our reference remains valid while we call kobject_uevent().
2197 ++ * We then drop our reference before exiting the function, leaving the
2198 ++ * bin file to drop the last reference (if it hasn't already).
2199 ++ */
2200 ++
2201 ++ /* Take a reference for the bin file */
2202 ++ kobject_get(&dump->kobj);
2203 + rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
2204 +- if (rc) {
2205 ++ if (rc == 0) {
2206 ++ kobject_uevent(&dump->kobj, KOBJ_ADD);
2207 ++
2208 ++ pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
2209 ++ __func__, dump->id, dump->size);
2210 ++ } else {
2211 ++ /* Drop reference count taken for bin file */
2212 + kobject_put(&dump->kobj);
2213 +- return NULL;
2214 + }
2215 +
2216 +- pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
2217 +- __func__, dump->id, dump->size);
2218 +-
2219 +- kobject_uevent(&dump->kobj, KOBJ_ADD);
2220 +-
2221 +- return dump;
2222 ++ /* Drop our reference */
2223 ++ kobject_put(&dump->kobj);
2224 ++ return;
2225 + }
2226 +
2227 + static irqreturn_t process_dump(int irq, void *data)
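
The long comment above describes the race; the fix amounts to one extra reference held across the uevent. In kernel-style outline (simplified from create_dump_obj() above; the function enters holding the reference from kobject_init_and_add()):

            kobject_get(&dump->kobj);         /* ref owned by the bin file */
            if (sysfs_create_bin_file(&dump->kobj, &dump->dump_attr) == 0)
                    kobject_uevent(&dump->kobj, KOBJ_ADD);
            else
                    kobject_put(&dump->kobj); /* bin file never existed */
            kobject_put(&dump->kobj);         /* drop our own ref */

On success the sysfs ack path later drops the surviving bin-file reference; on failure both puts run and the kobject is freed here.
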
2228 +diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
2229 +index 5d545b78111f9..0ea976d1cac47 100644
2230 +--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
2231 ++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
2232 +@@ -354,25 +354,32 @@ static int dlpar_add_lmb(struct drmem_lmb *);
2233 +
2234 + static int dlpar_remove_lmb(struct drmem_lmb *lmb)
2235 + {
2236 ++ struct memory_block *mem_block;
2237 + unsigned long block_sz;
2238 + int rc;
2239 +
2240 + if (!lmb_is_removable(lmb))
2241 + return -EINVAL;
2242 +
2243 ++ mem_block = lmb_to_memblock(lmb);
2244 ++ if (mem_block == NULL)
2245 ++ return -EINVAL;
2246 ++
2247 + rc = dlpar_offline_lmb(lmb);
2248 +- if (rc)
2249 ++ if (rc) {
2250 ++ put_device(&mem_block->dev);
2251 + return rc;
2252 ++ }
2253 +
2254 + block_sz = pseries_memory_block_size();
2255 +
2256 +- __remove_memory(lmb->nid, lmb->base_addr, block_sz);
2257 ++ __remove_memory(mem_block->nid, lmb->base_addr, block_sz);
2258 ++ put_device(&mem_block->dev);
2259 +
2260 + /* Update memory regions for memory remove */
2261 + memblock_remove(lmb->base_addr, block_sz);
2262 +
2263 + invalidate_lmb_associativity_index(lmb);
2264 +- lmb_clear_nid(lmb);
2265 + lmb->flags &= ~DRCONF_MEM_ASSIGNED;
2266 +
2267 + return 0;
2268 +@@ -591,7 +598,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
2269 + static int dlpar_add_lmb(struct drmem_lmb *lmb)
2270 + {
2271 + unsigned long block_sz;
2272 +- int rc;
2273 ++ int nid, rc;
2274 +
2275 + if (lmb->flags & DRCONF_MEM_ASSIGNED)
2276 + return -EINVAL;
2277 +@@ -602,11 +609,13 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
2278 + return rc;
2279 + }
2280 +
2281 +- lmb_set_nid(lmb);
2282 + block_sz = memory_block_size_bytes();
2283 +
2284 ++ /* Find the node id for this address. */
2285 ++ nid = memory_add_physaddr_to_nid(lmb->base_addr);
2286 ++
2287 + /* Add the memory */
2288 +- rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
2289 ++ rc = __add_memory(nid, lmb->base_addr, block_sz);
2290 + if (rc) {
2291 + invalidate_lmb_associativity_index(lmb);
2292 + return rc;
2293 +@@ -614,9 +623,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
2294 +
2295 + rc = dlpar_online_lmb(lmb);
2296 + if (rc) {
2297 +- __remove_memory(lmb->nid, lmb->base_addr, block_sz);
2298 ++ __remove_memory(nid, lmb->base_addr, block_sz);
2299 + invalidate_lmb_associativity_index(lmb);
2300 +- lmb_clear_nid(lmb);
2301 + } else {
2302 + lmb->flags |= DRCONF_MEM_ASSIGNED;
2303 + }
2304 +diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
2305 +index a88a707a608aa..27268370dee00 100644
2306 +--- a/arch/powerpc/platforms/pseries/papr_scm.c
2307 ++++ b/arch/powerpc/platforms/pseries/papr_scm.c
2308 +@@ -785,7 +785,8 @@ static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
2309 + static ssize_t perf_stats_show(struct device *dev,
2310 + struct device_attribute *attr, char *buf)
2311 + {
2312 +- int index, rc;
2313 ++ int index;
2314 ++ ssize_t rc;
2315 + struct seq_buf s;
2316 + struct papr_scm_perf_stat *stat;
2317 + struct papr_scm_perf_stats *stats;
2318 +@@ -820,7 +821,7 @@ static ssize_t perf_stats_show(struct device *dev,
2319 +
2320 + free_stats:
2321 + kfree(stats);
2322 +- return rc ? rc : seq_buf_used(&s);
2323 ++ return rc ? rc : (ssize_t)seq_buf_used(&s);
2324 + }
2325 + DEVICE_ATTR_ADMIN_RO(perf_stats);
2326 +
2327 +@@ -897,6 +898,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
2328 + p->bus_desc.of_node = p->pdev->dev.of_node;
2329 + p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);
2330 +
2331 ++ /* Set the dimm command family mask to accept PDSMs */
2332 ++ set_bit(NVDIMM_FAMILY_PAPR, &p->bus_desc.dimm_family_mask);
2333 ++
2334 + if (!p->bus_desc.provider_name)
2335 + return -ENOMEM;
2336 +
2337 +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
2338 +index 13c86a292c6d7..b2b245b25edba 100644
2339 +--- a/arch/powerpc/platforms/pseries/ras.c
2340 ++++ b/arch/powerpc/platforms/pseries/ras.c
2341 +@@ -521,18 +521,55 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
2342 + return 0; /* need to perform reset */
2343 + }
2344 +
2345 ++static int mce_handle_err_realmode(int disposition, u8 error_type)
2346 ++{
2347 ++#ifdef CONFIG_PPC_BOOK3S_64
2348 ++ if (disposition == RTAS_DISP_NOT_RECOVERED) {
2349 ++ switch (error_type) {
2350 ++ case MC_ERROR_TYPE_SLB:
2351 ++ case MC_ERROR_TYPE_ERAT:
2352 ++ /*
2353 ++ * Store the old slb content in paca before flushing.
2354 ++ * Print this when we go to virtual mode.
2355 ++ * There are chances that we may hit MCE again if there
2356 ++ * is a parity error on the SLB entry we are trying to
2357 ++ * read for saving. Hence limit the slb saving to a
2358 ++ * single level of recursion.
2359 ++ */
2360 ++ if (local_paca->in_mce == 1)
2361 ++ slb_save_contents(local_paca->mce_faulty_slbs);
2362 ++ flush_and_reload_slb();
2363 ++ disposition = RTAS_DISP_FULLY_RECOVERED;
2364 ++ break;
2365 ++ default:
2366 ++ break;
2367 ++ }
2368 ++ } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
2369 ++ /* Platform corrected itself but could be degraded */
2370 ++ pr_err("MCE: limited recovery, system may be degraded\n");
2371 ++ disposition = RTAS_DISP_FULLY_RECOVERED;
2372 ++ }
2373 ++#endif
2374 ++ return disposition;
2375 ++}
2376 +
2377 +-static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
2378 ++static int mce_handle_err_virtmode(struct pt_regs *regs,
2379 ++ struct rtas_error_log *errp,
2380 ++ struct pseries_mc_errorlog *mce_log,
2381 ++ int disposition)
2382 + {
2383 + struct mce_error_info mce_err = { 0 };
2384 +- unsigned long eaddr = 0, paddr = 0;
2385 +- struct pseries_errorlog *pseries_log;
2386 +- struct pseries_mc_errorlog *mce_log;
2387 +- int disposition = rtas_error_disposition(errp);
2388 + int initiator = rtas_error_initiator(errp);
2389 + int severity = rtas_error_severity(errp);
2390 ++ unsigned long eaddr = 0, paddr = 0;
2391 + u8 error_type, err_sub_type;
2392 +
2393 ++ if (!mce_log)
2394 ++ goto out;
2395 ++
2396 ++ error_type = mce_log->error_type;
2397 ++ err_sub_type = rtas_mc_error_sub_type(mce_log);
2398 ++
2399 + if (initiator == RTAS_INITIATOR_UNKNOWN)
2400 + mce_err.initiator = MCE_INITIATOR_UNKNOWN;
2401 + else if (initiator == RTAS_INITIATOR_CPU)
2402 +@@ -571,18 +608,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
2403 + mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
2404 + mce_err.error_class = MCE_ECLASS_UNKNOWN;
2405 +
2406 +- if (!rtas_error_extended(errp))
2407 +- goto out;
2408 +-
2409 +- pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
2410 +- if (pseries_log == NULL)
2411 +- goto out;
2412 +-
2413 +- mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
2414 +- error_type = mce_log->error_type;
2415 +- err_sub_type = rtas_mc_error_sub_type(mce_log);
2416 +-
2417 +- switch (mce_log->error_type) {
2418 ++ switch (error_type) {
2419 + case MC_ERROR_TYPE_UE:
2420 + mce_err.error_type = MCE_ERROR_TYPE_UE;
2421 + mce_common_process_ue(regs, &mce_err);
2422 +@@ -682,37 +708,31 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
2423 + mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
2424 + break;
2425 + }
2426 ++out:
2427 ++ save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
2428 ++ &mce_err, regs->nip, eaddr, paddr);
2429 ++ return disposition;
2430 ++}
2431 +
2432 +-#ifdef CONFIG_PPC_BOOK3S_64
2433 +- if (disposition == RTAS_DISP_NOT_RECOVERED) {
2434 +- switch (error_type) {
2435 +- case MC_ERROR_TYPE_SLB:
2436 +- case MC_ERROR_TYPE_ERAT:
2437 +- /*
2438 +- * Store the old slb content in paca before flushing.
2439 +- * Print this when we go to virtual mode.
2440 +- * There are chances that we may hit MCE again if there
2441 +- * is a parity error on the SLB entry we trying to read
2442 +- * for saving. Hence limit the slb saving to single
2443 +- * level of recursion.
2444 +- */
2445 +- if (local_paca->in_mce == 1)
2446 +- slb_save_contents(local_paca->mce_faulty_slbs);
2447 +- flush_and_reload_slb();
2448 +- disposition = RTAS_DISP_FULLY_RECOVERED;
2449 +- break;
2450 +- default:
2451 +- break;
2452 +- }
2453 +- } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
2454 +- /* Platform corrected itself but could be degraded */
2455 +- printk(KERN_ERR "MCE: limited recovery, system may "
2456 +- "be degraded\n");
2457 +- disposition = RTAS_DISP_FULLY_RECOVERED;
2458 +- }
2459 +-#endif
2460 ++static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
2461 ++{
2462 ++ struct pseries_errorlog *pseries_log;
2463 ++ struct pseries_mc_errorlog *mce_log = NULL;
2464 ++ int disposition = rtas_error_disposition(errp);
2465 ++ u8 error_type;
2466 ++
2467 ++ if (!rtas_error_extended(errp))
2468 ++ goto out;
2469 ++
2470 ++ pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
2471 ++ if (!pseries_log)
2472 ++ goto out;
2473 ++
2474 ++ mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
2475 ++ error_type = mce_log->error_type;
2476 ++
2477 ++ disposition = mce_handle_err_realmode(disposition, error_type);
2478 +
2479 +-out:
2480 + /*
2481 + * Enable translation as we will be accessing per-cpu variables
2482 + * in save_mce_event() which may fall outside RMO region, also
2483 +@@ -723,10 +743,10 @@ out:
2484 + * Note: All the realmode handling like flushing SLB entries for
2485 + * SLB multihit is done by now.
2486 + */
2487 ++out:
2488 + mtmsr(mfmsr() | MSR_IR | MSR_DR);
2489 +- save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
2490 +- &mce_err, regs->nip, eaddr, paddr);
2491 +-
2492 ++ disposition = mce_handle_err_virtmode(regs, errp, mce_log,
2493 ++ disposition);
2494 + return disposition;
2495 + }
2496 +
2497 +diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
2498 +index bbb97169bf63e..6268545947b83 100644
2499 +--- a/arch/powerpc/platforms/pseries/rng.c
2500 ++++ b/arch/powerpc/platforms/pseries/rng.c
2501 +@@ -36,6 +36,7 @@ static __init int rng_init(void)
2502 +
2503 + ppc_md.get_random_seed = pseries_get_random_long;
2504 +
2505 ++ of_node_put(dn);
2506 + return 0;
2507 + }
2508 + machine_subsys_initcall(pseries, rng_init);
2509 +diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
2510 +index e6d7a344d9f22..7b739cc7a8a93 100644
2511 +--- a/arch/powerpc/platforms/pseries/svm.c
2512 ++++ b/arch/powerpc/platforms/pseries/svm.c
2513 +@@ -7,6 +7,7 @@
2514 + */
2515 +
2516 + #include <linux/mm.h>
2517 ++#include <linux/memblock.h>
2518 + #include <asm/machdep.h>
2519 + #include <asm/svm.h>
2520 + #include <asm/swiotlb.h>
2521 +@@ -35,6 +36,31 @@ static int __init init_svm(void)
2522 + }
2523 + machine_early_initcall(pseries, init_svm);
2524 +
2525 ++/*
2526 ++ * Initialize SWIOTLB. Essentially the same as swiotlb_init(), except that it
2527 ++ * can allocate the buffer anywhere in memory. Since the hypervisor doesn't have
2528 ++ * any addressing limitation, we don't need to allocate it in low addresses.
2529 ++ */
2530 ++void __init svm_swiotlb_init(void)
2531 ++{
2532 ++ unsigned char *vstart;
2533 ++ unsigned long bytes, io_tlb_nslabs;
2534 ++
2535 ++ io_tlb_nslabs = (swiotlb_size_or_default() >> IO_TLB_SHIFT);
2536 ++ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
2537 ++
2538 ++ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
2539 ++
2540 ++ vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
2541 ++ if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false))
2542 ++ return;
2543 ++
2544 ++ if (io_tlb_start)
2545 ++ memblock_free_early(io_tlb_start,
2546 ++ PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
2547 ++ panic("SVM: Cannot allocate SWIOTLB buffer");
2548 ++}
2549 ++
2550 + int set_memory_encrypted(unsigned long addr, int numpages)
2551 + {
2552 + if (!PAGE_ALIGNED(addr))
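
svm_swiotlb_init() above sizes the bounce buffer exactly as swiotlb_init() would. A user-space sketch of the arithmetic, assuming this kernel's constants (IO_TLB_SHIFT = 11, i.e. 2 KiB slabs; IO_TLB_SEGSIZE = 128; 64 MiB default pool):

    #include <stdio.h>

    #define IO_TLB_SHIFT   11
    #define IO_TLB_SEGSIZE 128
    #define ALIGN(x, a)    (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long def_size = 64UL << 20; /* swiotlb_size_or_default() */
            unsigned long nslabs = ALIGN(def_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);

            printf("%lu slabs, %lu bytes\n", nslabs, nslabs << IO_TLB_SHIFT);
            return 0;
    }

For the default this prints 32768 slabs, 67108864 bytes; the only real difference from swiotlb_init() is that memblock_alloc() may place the buffer anywhere, since the secure-VM hypervisor imposes no addressing limit.
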
2553 +diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
2554 +index ad8117148ea3b..21b9d1bf39ff6 100644
2555 +--- a/arch/powerpc/sysdev/xics/icp-hv.c
2556 ++++ b/arch/powerpc/sysdev/xics/icp-hv.c
2557 +@@ -174,6 +174,7 @@ int icp_hv_init(void)
2558 +
2559 + icp_ops = &icp_hv_ops;
2560 +
2561 ++ of_node_put(np);
2562 + return 0;
2563 + }
2564 +
2565 +diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
2566 +index df7bca00f5ec9..55c43a6c91112 100644
2567 +--- a/arch/powerpc/xmon/xmon.c
2568 ++++ b/arch/powerpc/xmon/xmon.c
2569 +@@ -969,6 +969,7 @@ static void insert_cpu_bpts(void)
2570 + brk.address = dabr[i].address;
2571 + brk.type = (dabr[i].enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2572 + brk.len = 8;
2573 ++ brk.hw_len = 8;
2574 + __set_breakpoint(i, &brk);
2575 + }
2576 + }
2577 +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
2578 +index be4b8532dd3c4..0a41827928769 100644
2579 +--- a/arch/s390/net/bpf_jit_comp.c
2580 ++++ b/arch/s390/net/bpf_jit_comp.c
2581 +@@ -50,7 +50,6 @@ struct bpf_jit {
2582 + int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
2583 + int tail_call_start; /* Tail call start offset */
2584 + int excnt; /* Number of exception table entries */
2585 +- int labels[1]; /* Labels for local jumps */
2586 + };
2587 +
2588 + #define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
2589 +@@ -229,18 +228,18 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
2590 + REG_SET_SEEN(b3); \
2591 + })
2592 +
2593 +-#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \
2594 ++#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \
2595 + ({ \
2596 +- int rel = (jit->labels[label] - jit->prg) >> 1; \
2597 ++ unsigned int rel = (int)((target) - jit->prg) / 2; \
2598 + _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \
2599 + (op2) | (mask) << 12); \
2600 + REG_SET_SEEN(b1); \
2601 + REG_SET_SEEN(b2); \
2602 + })
2603 +
2604 +-#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \
2605 ++#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \
2606 + ({ \
2607 +- int rel = (jit->labels[label] - jit->prg) >> 1; \
2608 ++ unsigned int rel = (int)((target) - jit->prg) / 2; \
2609 + _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \
2610 + (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \
2611 + REG_SET_SEEN(b1); \
2612 +@@ -1282,7 +1281,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
2613 + EMIT4(0xb9040000, BPF_REG_0, REG_2);
2614 + break;
2615 + }
2616 +- case BPF_JMP | BPF_TAIL_CALL:
2617 ++ case BPF_JMP | BPF_TAIL_CALL: {
2618 ++ int patch_1_clrj, patch_2_clij, patch_3_brc;
2619 ++
2620 + /*
2621 + * Implicit input:
2622 + * B1: pointer to ctx
2623 +@@ -1300,16 +1301,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
2624 + EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
2625 + offsetof(struct bpf_array, map.max_entries));
2626 + /* if ((u32)%b3 >= (u32)%w1) goto out; */
2627 +- if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
2628 +- /* clrj %b3,%w1,0xa,label0 */
2629 +- EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
2630 +- REG_W1, 0, 0xa);
2631 +- } else {
2632 +- /* clr %b3,%w1 */
2633 +- EMIT2(0x1500, BPF_REG_3, REG_W1);
2634 +- /* brcl 0xa,label0 */
2635 +- EMIT6_PCREL_RILC(0xc0040000, 0xa, jit->labels[0]);
2636 +- }
2637 ++ /* clrj %b3,%w1,0xa,out */
2638 ++ patch_1_clrj = jit->prg;
2639 ++ EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
2640 ++ jit->prg);
2641 +
2642 + /*
2643 + * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
2644 +@@ -1324,16 +1319,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
2645 + EMIT4_IMM(0xa7080000, REG_W0, 1);
2646 + /* laal %w1,%w0,off(%r15) */
2647 + EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
2648 +- if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
2649 +- /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
2650 +- EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
2651 +- MAX_TAIL_CALL_CNT, 0, 0x2);
2652 +- } else {
2653 +- /* clfi %w1,MAX_TAIL_CALL_CNT */
2654 +- EMIT6_IMM(0xc20f0000, REG_W1, MAX_TAIL_CALL_CNT);
2655 +- /* brcl 0x2,label0 */
2656 +- EMIT6_PCREL_RILC(0xc0040000, 0x2, jit->labels[0]);
2657 +- }
2658 ++ /* clij %w1,MAX_TAIL_CALL_CNT,0x2,out */
2659 ++ patch_2_clij = jit->prg;
2660 ++ EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT,
2661 ++ 2, jit->prg);
2662 +
2663 + /*
2664 + * prog = array->ptrs[index];
2665 +@@ -1348,13 +1337,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
2666 + /* ltg %r1,prog(%b2,%r1) */
2667 + EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
2668 + REG_1, offsetof(struct bpf_array, ptrs));
2669 +- if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
2670 +- /* brc 0x8,label0 */
2671 +- EMIT4_PCREL_RIC(0xa7040000, 0x8, jit->labels[0]);
2672 +- } else {
2673 +- /* brcl 0x8,label0 */
2674 +- EMIT6_PCREL_RILC(0xc0040000, 0x8, jit->labels[0]);
2675 +- }
2676 ++ /* brc 0x8,out */
2677 ++ patch_3_brc = jit->prg;
2678 ++ EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
2679 +
2680 + /*
2681 + * Restore registers before calling function
2682 +@@ -1371,8 +1356,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
2683 + /* bc 0xf,tail_call_start(%r1) */
2684 + _EMIT4(0x47f01000 + jit->tail_call_start);
2685 + /* out: */
2686 +- jit->labels[0] = jit->prg;
2687 ++ if (jit->prg_buf) {
2688 ++ *(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
2689 ++ (jit->prg - patch_1_clrj) >> 1;
2690 ++ *(u16 *)(jit->prg_buf + patch_2_clij + 2) =
2691 ++ (jit->prg - patch_2_clij) >> 1;
2692 ++ *(u16 *)(jit->prg_buf + patch_3_brc + 2) =
2693 ++ (jit->prg - patch_3_brc) >> 1;
2694 ++ }
2695 + break;
2696 ++ }
2697 + case BPF_JMP | BPF_EXIT: /* return b0 */
2698 + last = (i == fp->len - 1) ? 1 : 0;
2699 + if (last)
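
The rewrite above drops the shared labels[] slot in favour of recording each branch's program offset and backpatching its 16-bit displacement once the out: position is known. A simplified user-space model of that scheme (the real JIT stores the displacement big-endian inside 6-byte RIE instructions; this sketch uses host byte order and a dummy buffer):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t buf[64];
    static unsigned int prg;  /* current emit offset, as in struct bpf_jit */

    static unsigned int emit_branch_placeholder(void)
    {
            unsigned int site = prg;

            memset(buf + prg, 0, 6); /* opcode bytes; displacement at +2 */
            prg += 6;
            return site;
    }

    static void backpatch(unsigned int site, unsigned int target)
    {
            uint16_t rel = (target - site) >> 1; /* branches count halfwords */

            memcpy(buf + site + 2, &rel, sizeof(rel));
    }

    int main(void)
    {
            unsigned int site = emit_branch_placeholder();

            prg += 18;            /* pretend to emit more instructions */
            backpatch(site, prg); /* the "out:" label is reached */
            printf("site %u -> rel %u halfwords\n", site, (prg - site) >> 1);
            return 0;
    }

This also removes the first-pass guesswork: the displacement is written in place once known, and the out: label is close enough that the short clrj/clij/brc forms always reach.
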
2700 +diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
2701 +index 5967f30141563..c93486a9989bc 100644
2702 +--- a/arch/s390/pci/pci_bus.c
2703 ++++ b/arch/s390/pci/pci_bus.c
2704 +@@ -197,9 +197,10 @@ void pcibios_bus_add_device(struct pci_dev *pdev)
2705 + * With pdev->no_vf_scan the common PCI probing code does not
2706 + * perform PF/VF linking.
2707 + */
2708 +- if (zdev->vfn)
2709 ++ if (zdev->vfn) {
2710 + zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
2711 +-
2712 ++ pdev->no_command_memory = 1;
2713 ++ }
2714 + }
2715 +
2716 + static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
2717 +diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
2718 +index 8735c468230a5..555203e3e7b45 100644
2719 +--- a/arch/um/drivers/vector_kern.c
2720 ++++ b/arch/um/drivers/vector_kern.c
2721 +@@ -1403,7 +1403,7 @@ static int vector_net_load_bpf_flash(struct net_device *dev,
2722 + kfree(vp->bpf->filter);
2723 + vp->bpf->filter = NULL;
2724 + } else {
2725 +- vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
2726 ++ vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
2727 + if (vp->bpf == NULL) {
2728 + netdev_err(dev, "failed to allocate memory for firmware\n");
2729 + goto flash_fail;
2730 +@@ -1415,7 +1415,7 @@ static int vector_net_load_bpf_flash(struct net_device *dev,
2731 + if (request_firmware(&fw, efl->data, &vdevice->pdev.dev))
2732 + goto flash_fail;
2733 +
2734 +- vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_KERNEL);
2735 ++ vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
2736 + if (!vp->bpf->filter)
2737 + goto free_buffer;
2738 +
2739 +diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
2740 +index 25eaa6a0c6583..c07436e89e599 100644
2741 +--- a/arch/um/kernel/time.c
2742 ++++ b/arch/um/kernel/time.c
2743 +@@ -70,13 +70,17 @@ static void time_travel_handle_message(struct um_timetravel_msg *msg,
2744 + * read of the message and write of the ACK.
2745 + */
2746 + if (mode != TTMH_READ) {
2747 ++ bool disabled = irqs_disabled();
2748 ++
2749 ++ BUG_ON(mode == TTMH_IDLE && !disabled);
2750 ++
2751 ++ if (disabled)
2752 ++ local_irq_enable();
2753 + while (os_poll(1, &time_travel_ext_fd) != 0) {
2754 +- if (mode == TTMH_IDLE) {
2755 +- BUG_ON(!irqs_disabled());
2756 +- local_irq_enable();
2757 +- local_irq_disable();
2758 +- }
2759 ++ /* nothing */
2760 + }
2761 ++ if (disabled)
2762 ++ local_irq_disable();
2763 + }
2764 +
2765 + ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
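
The restructuring above hoists the irqs_disabled() check out of the poll loop: interrupts are enabled once for the whole wait, and only when the caller had them disabled (which TTMH_IDLE callers must, per the BUG_ON). The pattern in isolation, kernel-style (not standalone code):

            bool disabled = irqs_disabled();

            BUG_ON(mode == TTMH_IDLE && !disabled);
            if (disabled)
                    local_irq_enable();
            while (os_poll(1, &time_travel_ext_fd) != 0)
                    ;               /* wait for the fd to become readable */
            if (disabled)
                    local_irq_disable();
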
2766 +diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
2767 +index c8862696a47b9..7d0394f4ebf97 100644
2768 +--- a/arch/x86/boot/compressed/pgtable_64.c
2769 ++++ b/arch/x86/boot/compressed/pgtable_64.c
2770 +@@ -5,15 +5,6 @@
2771 + #include "pgtable.h"
2772 + #include "../string.h"
2773 +
2774 +-/*
2775 +- * __force_order is used by special_insns.h asm code to force instruction
2776 +- * serialization.
2777 +- *
2778 +- * It is not referenced from the code, but GCC < 5 with -fPIE would fail
2779 +- * due to an undefined symbol. Define it to make these ancient GCCs work.
2780 +- */
2781 +-unsigned long __force_order;
2782 +-
2783 + #define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */
2784 + #define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */
2785 +
2786 +diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
2787 +index fb616203ce427..be50ef8572cce 100644
2788 +--- a/arch/x86/events/amd/iommu.c
2789 ++++ b/arch/x86/events/amd/iommu.c
2790 +@@ -379,7 +379,7 @@ static __init int _init_events_attrs(void)
2791 + while (amd_iommu_v2_event_descs[i].attr.attr.name)
2792 + i++;
2793 +
2794 +- attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
2795 ++ attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
2796 + if (!attrs)
2797 + return -ENOMEM;
2798 +
2799 +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
2800 +index 1cbf57dc2ac89..11bbc6590f904 100644
2801 +--- a/arch/x86/events/core.c
2802 ++++ b/arch/x86/events/core.c
2803 +@@ -1087,8 +1087,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
2804 +
2805 + cpuc->event_list[n] = event;
2806 + n++;
2807 +- if (is_counter_pair(&event->hw))
2808 ++ if (is_counter_pair(&event->hw)) {
2809 + cpuc->n_pair++;
2810 ++ cpuc->n_txn_pair++;
2811 ++ }
2812 + }
2813 + return n;
2814 + }
2815 +@@ -1962,6 +1964,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
2816 +
2817 + perf_pmu_disable(pmu);
2818 + __this_cpu_write(cpu_hw_events.n_txn, 0);
2819 ++ __this_cpu_write(cpu_hw_events.n_txn_pair, 0);
2820 + }
2821 +
2822 + /*
2823 +@@ -1987,6 +1990,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
2824 + */
2825 + __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
2826 + __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
2827 ++ __this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
2828 + perf_pmu_enable(pmu);
2829 + }
2830 +
2831 +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
2832 +index 86848c57b55ed..404315df1e167 100644
2833 +--- a/arch/x86/events/intel/ds.c
2834 ++++ b/arch/x86/events/intel/ds.c
2835 +@@ -670,9 +670,7 @@ unlock:
2836 +
2837 + static inline void intel_pmu_drain_pebs_buffer(void)
2838 + {
2839 +- struct pt_regs regs;
2840 +-
2841 +- x86_pmu.drain_pebs(&regs);
2842 ++ x86_pmu.drain_pebs(NULL);
2843 + }
2844 +
2845 + /*
2846 +@@ -1737,6 +1735,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
2847 + struct x86_perf_regs perf_regs;
2848 + struct pt_regs *regs = &perf_regs.regs;
2849 + void *at = get_next_pebs_record_by_bit(base, top, bit);
2850 ++ struct pt_regs dummy_iregs;
2851 +
2852 + if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
2853 + /*
2854 +@@ -1749,6 +1748,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
2855 + } else if (!intel_pmu_save_and_restart(event))
2856 + return;
2857 +
2858 ++ if (!iregs)
2859 ++ iregs = &dummy_iregs;
2860 ++
2861 + while (count > 1) {
2862 + setup_sample(event, iregs, at, &data, regs);
2863 + perf_event_output(event, &data, regs);
2864 +@@ -1758,16 +1760,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
2865 + }
2866 +
2867 + setup_sample(event, iregs, at, &data, regs);
2868 +-
2869 +- /*
2870 +- * All but the last records are processed.
2871 +- * The last one is left to be able to call the overflow handler.
2872 +- */
2873 +- if (perf_event_overflow(event, &data, regs)) {
2874 +- x86_pmu_stop(event, 0);
2875 +- return;
2876 ++ if (iregs == &dummy_iregs) {
2877 ++ /*
2878 ++ * The PEBS records may be drained in the non-overflow context,
2879 ++ * e.g., large PEBS + context switch. Perf should treat the
2880 ++ * last record the same as other PEBS records, and not
2881 ++ * invoke the generic overflow handler.
2882 ++ */
2883 ++ perf_event_output(event, &data, regs);
2884 ++ } else {
2885 ++ /*
2886 ++ * All but the last records are processed.
2887 ++ * The last one is left to be able to call the overflow handler.
2888 ++ */
2889 ++ if (perf_event_overflow(event, &data, regs))
2890 ++ x86_pmu_stop(event, 0);
2891 + }
2892 +-
2893 + }
2894 +
2895 + static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
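
With the change above, drain callers pass NULL instead of an uninitialized on-stack pt_regs, and __intel_pmu_pebs_event() substitutes a local dummy; pointer identity then distinguishes an overflow-driven drain from, say, a large-PEBS context-switch drain. The convention in kernel-style outline:

            struct pt_regs dummy_iregs;

            if (!iregs)                     /* non-overflow drain */
                    iregs = &dummy_iregs;
            /* ... emit all but the last record ... */
            if (iregs == &dummy_iregs)
                    perf_event_output(event, &data, regs);
            else if (perf_event_overflow(event, &data, regs))
                    x86_pmu_stop(event, 0);
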
2896 +diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
2897 +index 6a4ca27b2c9e1..4aa735694e030 100644
2898 +--- a/arch/x86/events/intel/uncore_snb.c
2899 ++++ b/arch/x86/events/intel/uncore_snb.c
2900 +@@ -126,6 +126,10 @@
2901 + #define ICL_UNC_CBO_0_PER_CTR0 0x702
2902 + #define ICL_UNC_CBO_MSR_OFFSET 0x8
2903 +
2904 ++/* ICL ARB register */
2905 ++#define ICL_UNC_ARB_PER_CTR 0x3b1
2906 ++#define ICL_UNC_ARB_PERFEVTSEL 0x3b3
2907 ++
2908 + DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
2909 + DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
2910 + DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
2911 +@@ -313,15 +317,21 @@ void skl_uncore_cpu_init(void)
2912 + snb_uncore_arb.ops = &skl_uncore_msr_ops;
2913 + }
2914 +
2915 ++static struct intel_uncore_ops icl_uncore_msr_ops = {
2916 ++ .disable_event = snb_uncore_msr_disable_event,
2917 ++ .enable_event = snb_uncore_msr_enable_event,
2918 ++ .read_counter = uncore_msr_read_counter,
2919 ++};
2920 ++
2921 + static struct intel_uncore_type icl_uncore_cbox = {
2922 + .name = "cbox",
2923 +- .num_counters = 4,
2924 ++ .num_counters = 2,
2925 + .perf_ctr_bits = 44,
2926 + .perf_ctr = ICL_UNC_CBO_0_PER_CTR0,
2927 + .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
2928 + .event_mask = SNB_UNC_RAW_EVENT_MASK,
2929 + .msr_offset = ICL_UNC_CBO_MSR_OFFSET,
2930 +- .ops = &skl_uncore_msr_ops,
2931 ++ .ops = &icl_uncore_msr_ops,
2932 + .format_group = &snb_uncore_format_group,
2933 + };
2934 +
2935 +@@ -350,13 +360,25 @@ static struct intel_uncore_type icl_uncore_clockbox = {
2936 + .single_fixed = 1,
2937 + .event_mask = SNB_UNC_CTL_EV_SEL_MASK,
2938 + .format_group = &icl_uncore_clock_format_group,
2939 +- .ops = &skl_uncore_msr_ops,
2940 ++ .ops = &icl_uncore_msr_ops,
2941 + .event_descs = icl_uncore_events,
2942 + };
2943 +
2944 ++static struct intel_uncore_type icl_uncore_arb = {
2945 ++ .name = "arb",
2946 ++ .num_counters = 1,
2947 ++ .num_boxes = 1,
2948 ++ .perf_ctr_bits = 44,
2949 ++ .perf_ctr = ICL_UNC_ARB_PER_CTR,
2950 ++ .event_ctl = ICL_UNC_ARB_PERFEVTSEL,
2951 ++ .event_mask = SNB_UNC_RAW_EVENT_MASK,
2952 ++ .ops = &icl_uncore_msr_ops,
2953 ++ .format_group = &snb_uncore_format_group,
2954 ++};
2955 ++
2956 + static struct intel_uncore_type *icl_msr_uncores[] = {
2957 + &icl_uncore_cbox,
2958 +- &snb_uncore_arb,
2959 ++ &icl_uncore_arb,
2960 + &icl_uncore_clockbox,
2961 + NULL,
2962 + };
2963 +@@ -374,7 +396,6 @@ void icl_uncore_cpu_init(void)
2964 + {
2965 + uncore_msr_uncores = icl_msr_uncores;
2966 + icl_uncore_cbox.num_boxes = icl_get_cbox_num();
2967 +- snb_uncore_arb.ops = &skl_uncore_msr_ops;
2968 + }
2969 +
2970 + enum {
2971 +diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
2972 +index 62e88ad919ffc..4f5e78a4003be 100644
2973 +--- a/arch/x86/events/intel/uncore_snbep.c
2974 ++++ b/arch/x86/events/intel/uncore_snbep.c
2975 +@@ -3749,7 +3749,9 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type)
2976 +
2977 + ret = skx_iio_get_topology(type);
2978 + if (ret)
2979 +- return ret;
2980 ++ goto clear_attr_update;
2981 ++
2982 ++ ret = -ENOMEM;
2983 +
2984 + /* One more for NULL. */
2985 + attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
2986 +@@ -3781,8 +3783,9 @@ err:
2987 + kfree(eas);
2988 + kfree(attrs);
2989 + kfree(type->topology);
2990 ++clear_attr_update:
2991 + type->attr_update = NULL;
2992 +- return -ENOMEM;
2993 ++ return ret;
2994 + }
2995 +
2996 + static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
2997 +@@ -4751,10 +4754,10 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
2998 + INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
2999 +
3000 + INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
3001 +- INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"),
3002 ++ INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
3003 + INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
3004 + INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
3005 +- INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"),
3006 ++ INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
3007 + INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
3008 + { /* end: all zeroes */ },
3009 + };
3010 +@@ -5212,17 +5215,17 @@ static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
3011 + INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
3012 +
3013 + INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
3014 +- INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"),
3015 ++ INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
3016 + INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
3017 + INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
3018 +- INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"),
3019 ++ INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
3020 + INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
3021 +
3022 + INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"),
3023 +- INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "3.814697266e-6"),
3024 ++ INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"),
3025 + INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"),
3026 + INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"),
3027 +- INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "3.814697266e-6"),
3028 ++ INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"),
3029 + INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"),
3030 + { /* end: all zeroes */ },
3031 + };
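The scale change above is pure arithmetic: 3.814697266e-6 is 4/2^20 and 6.103515625e-5 is 64/2^20, so the fix amounts to treating each free-running IMC count as a 64-byte cache line rather than a 4-byte word when converting raw counts to MiB. A two-line check of the constants:

    #include <stdio.h>

    int main(void)
    {
        /* Old scale assumed 4-byte units, new scale assumes 64-byte
         * cache lines; both convert raw counts to MiB (2^20 bytes). */
        printf("old: %.9e\n", 4.0 / (1 << 20));   /* 3.814697266e-06 */
        printf("new: %.9e\n", 64.0 / (1 << 20));  /* 6.103515625e-05 */
        return 0;
    }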
3032 +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
3033 +index 7b68ab5f19e76..0e74235cdac9e 100644
3034 +--- a/arch/x86/events/perf_event.h
3035 ++++ b/arch/x86/events/perf_event.h
3036 +@@ -210,6 +210,7 @@ struct cpu_hw_events {
3037 + they've never been enabled yet */
3038 + int n_txn; /* the # last events in the below arrays;
3039 + added in the current transaction */
3040 ++ int n_txn_pair;
3041 + int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
3042 + u64 tags[X86_PMC_IDX_MAX];
3043 +
3044 +diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
3045 +index 59a3e13204c34..d6e3bb9363d22 100644
3046 +--- a/arch/x86/include/asm/special_insns.h
3047 ++++ b/arch/x86/include/asm/special_insns.h
3048 +@@ -11,45 +11,47 @@
3049 + #include <linux/jump_label.h>
3050 +
3051 + /*
3052 +- * Volatile isn't enough to prevent the compiler from reordering the
3053 +- * read/write functions for the control registers and messing everything up.
3054 +- * A memory clobber would solve the problem, but would prevent reordering of
3055 +- * all loads stores around it, which can hurt performance. Solution is to
3056 +- * use a variable and mimic reads and writes to it to enforce serialization
3057 ++ * The compiler should not reorder volatile asm statements with respect to each
3058 ++ * other: they should execute in program order. However GCC 4.9.x and 5.x have
3059 ++ * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
3060 ++ * volatile asm. The write functions are not affected since they have memory
3061 ++ * clobbers preventing reordering. To prevent reads from being reordered with
3062 ++ * respect to writes, use a dummy memory operand.
3063 + */
3064 +-extern unsigned long __force_order;
3065 ++
3066 ++#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
3067 +
3068 + void native_write_cr0(unsigned long val);
3069 +
3070 + static inline unsigned long native_read_cr0(void)
3071 + {
3072 + unsigned long val;
3073 +- asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
3074 ++ asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
3075 + return val;
3076 + }
3077 +
3078 + static __always_inline unsigned long native_read_cr2(void)
3079 + {
3080 + unsigned long val;
3081 +- asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
3082 ++ asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
3083 + return val;
3084 + }
3085 +
3086 + static __always_inline void native_write_cr2(unsigned long val)
3087 + {
3088 +- asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
3089 ++ asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
3090 + }
3091 +
3092 + static inline unsigned long __native_read_cr3(void)
3093 + {
3094 + unsigned long val;
3095 +- asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
3096 ++ asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
3097 + return val;
3098 + }
3099 +
3100 + static inline void native_write_cr3(unsigned long val)
3101 + {
3102 +- asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
3103 ++ asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
3104 + }
3105 +
3106 + static inline unsigned long native_read_cr4(void)
3107 +@@ -64,10 +66,10 @@ static inline unsigned long native_read_cr4(void)
3108 + asm volatile("1: mov %%cr4, %0\n"
3109 + "2:\n"
3110 + _ASM_EXTABLE(1b, 2b)
3111 +- : "=r" (val), "=m" (__force_order) : "0" (0));
3112 ++ : "=r" (val) : "0" (0), __FORCE_ORDER);
3113 + #else
3114 + /* CR4 always exists on x86_64. */
3115 +- asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
3116 ++ asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
3117 + #endif
3118 + return val;
3119 + }
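The __FORCE_ORDER macro above is a dummy "m" input operand: because GCC must assume the asm reads that memory, it will not reorder the read above earlier stores, yet there is no full "memory" clobber cost and the address (0x1000) is never actually dereferenced. A standalone sketch of the idiom, using rdtsc so it runs unprivileged (the kernel applies it to control-register reads):

    #include <stdint.h>
    #include <stdio.h>

    /* Same idiom as __FORCE_ORDER: a dummy "m" input operand. The
     * compiler must assume the asm reads this memory, so it cannot
     * hoist the asm above preceding stores; the CPU never actually
     * touches the address. */
    #define FORCE_ORDER "m"(*(unsigned int *)0x1000UL)

    static uint64_t read_tsc_ordered(void)
    {
        uint32_t lo, hi;
        asm volatile("rdtsc" : "=a"(lo), "=d"(hi) : FORCE_ORDER);
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        printf("tsc=%llu\n", (unsigned long long)read_tsc_ordered());
        return 0;
    }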
3120 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
3121 +index c5d6f17d9b9d3..178499f903661 100644
3122 +--- a/arch/x86/kernel/cpu/common.c
3123 ++++ b/arch/x86/kernel/cpu/common.c
3124 +@@ -359,7 +359,7 @@ void native_write_cr0(unsigned long val)
3125 + unsigned long bits_missing = 0;
3126 +
3127 + set_register:
3128 +- asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));
3129 ++ asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
3130 +
3131 + if (static_branch_likely(&cr_pinning)) {
3132 + if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
3133 +@@ -378,7 +378,7 @@ void native_write_cr4(unsigned long val)
3134 + unsigned long bits_changed = 0;
3135 +
3136 + set_register:
3137 +- asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
3138 ++ asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
3139 +
3140 + if (static_branch_likely(&cr_pinning)) {
3141 + if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
3142 +diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
3143 +index fc4f8c04bdb56..84eef4fa95990 100644
3144 +--- a/arch/x86/kernel/cpu/mce/core.c
3145 ++++ b/arch/x86/kernel/cpu/mce/core.c
3146 +@@ -373,42 +373,105 @@ static int msr_to_offset(u32 msr)
3147 + return -1;
3148 + }
3149 +
3150 ++__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
3151 ++ struct pt_regs *regs, int trapnr,
3152 ++ unsigned long error_code,
3153 ++ unsigned long fault_addr)
3154 ++{
3155 ++ pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
3156 ++ (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
3157 ++
3158 ++ show_stack_regs(regs);
3159 ++
3160 ++ panic("MCA architectural violation!\n");
3161 ++
3162 ++ while (true)
3163 ++ cpu_relax();
3164 ++
3165 ++ return true;
3166 ++}
3167 ++
3168 + /* MSR access wrappers used for error injection */
3169 +-static u64 mce_rdmsrl(u32 msr)
3170 ++static noinstr u64 mce_rdmsrl(u32 msr)
3171 + {
3172 +- u64 v;
3173 ++ DECLARE_ARGS(val, low, high);
3174 +
3175 + if (__this_cpu_read(injectm.finished)) {
3176 +- int offset = msr_to_offset(msr);
3177 ++ int offset;
3178 ++ u64 ret;
3179 +
3180 ++ instrumentation_begin();
3181 ++
3182 ++ offset = msr_to_offset(msr);
3183 + if (offset < 0)
3184 +- return 0;
3185 +- return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
3186 +- }
3187 ++ ret = 0;
3188 ++ else
3189 ++ ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
3190 +
3191 +- if (rdmsrl_safe(msr, &v)) {
3192 +- WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
3193 +- /*
3194 +- * Return zero in case the access faulted. This should
3195 +- * not happen normally but can happen if the CPU does
3196 +- * something weird, or if the code is buggy.
3197 +- */
3198 +- v = 0;
3199 ++ instrumentation_end();
3200 ++
3201 ++ return ret;
3202 + }
3203 +
3204 +- return v;
3205 ++ /*
3206 ++ * RDMSR on MCA MSRs should not fault. If they do, this is very much an
3207 ++ * architectural violation and needs to be reported to hw vendor. Panic
3208 ++ * the box to not allow any further progress.
3209 ++ */
3210 ++ asm volatile("1: rdmsr\n"
3211 ++ "2:\n"
3212 ++ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
3213 ++ : EAX_EDX_RET(val, low, high) : "c" (msr));
3214 ++
3215 ++
3216 ++ return EAX_EDX_VAL(val, low, high);
3217 ++}
3218 ++
3219 ++__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
3220 ++ struct pt_regs *regs, int trapnr,
3221 ++ unsigned long error_code,
3222 ++ unsigned long fault_addr)
3223 ++{
3224 ++ pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
3225 ++ (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
3226 ++ regs->ip, (void *)regs->ip);
3227 ++
3228 ++ show_stack_regs(regs);
3229 ++
3230 ++ panic("MCA architectural violation!\n");
3231 ++
3232 ++ while (true)
3233 ++ cpu_relax();
3234 ++
3235 ++ return true;
3236 + }
3237 +
3238 +-static void mce_wrmsrl(u32 msr, u64 v)
3239 ++static noinstr void mce_wrmsrl(u32 msr, u64 v)
3240 + {
3241 ++ u32 low, high;
3242 ++
3243 + if (__this_cpu_read(injectm.finished)) {
3244 +- int offset = msr_to_offset(msr);
3245 ++ int offset;
3246 ++
3247 ++ instrumentation_begin();
3248 +
3249 ++ offset = msr_to_offset(msr);
3250 + if (offset >= 0)
3251 + *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
3252 ++
3253 ++ instrumentation_end();
3254 ++
3255 + return;
3256 + }
3257 +- wrmsrl(msr, v);
3258 ++
3259 ++ low = (u32)v;
3260 ++ high = (u32)(v >> 32);
3261 ++
3262 ++ /* See comment in mce_rdmsrl() */
3263 ++ asm volatile("1: wrmsr\n"
3264 ++ "2:\n"
3265 ++ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
3266 ++ : : "c" (msr), "a"(low), "d" (high) : "memory");
3267 + }
3268 +
3269 + /*
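The open-coded WRMSR above needs the 64-bit value split across EDX:EAX (high:low), with a custom exception-table handler attached so a faulting access panics with a readable report instead of resuming silently. The register split in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* WRMSR consumes EDX:EAX; mce_wrmsrl() derives them like this. */
        uint64_t v = 0x123456789abcdef0ULL;
        uint32_t low  = (uint32_t)v;          /* -> EAX: 0x9abcdef0 */
        uint32_t high = (uint32_t)(v >> 32);  /* -> EDX: 0x12345678 */

        printf("low=0x%08x high=0x%08x\n", low, high);
        return 0;
    }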
3270 +diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
3271 +index 6473070b5da49..b122610e9046a 100644
3272 +--- a/arch/x86/kernel/cpu/mce/internal.h
3273 ++++ b/arch/x86/kernel/cpu/mce/internal.h
3274 +@@ -185,4 +185,14 @@ extern bool amd_filter_mce(struct mce *m);
3275 + static inline bool amd_filter_mce(struct mce *m) { return false; };
3276 + #endif
3277 +
3278 ++__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
3279 ++ struct pt_regs *regs, int trapnr,
3280 ++ unsigned long error_code,
3281 ++ unsigned long fault_addr);
3282 ++
3283 ++__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
3284 ++ struct pt_regs *regs, int trapnr,
3285 ++ unsigned long error_code,
3286 ++ unsigned long fault_addr);
3287 ++
3288 + #endif /* __X86_MCE_INTERNAL_H__ */
3289 +diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
3290 +index e1da619add192..567ce09a02868 100644
3291 +--- a/arch/x86/kernel/cpu/mce/severity.c
3292 ++++ b/arch/x86/kernel/cpu/mce/severity.c
3293 +@@ -9,9 +9,11 @@
3294 + #include <linux/seq_file.h>
3295 + #include <linux/init.h>
3296 + #include <linux/debugfs.h>
3297 +-#include <asm/mce.h>
3298 + #include <linux/uaccess.h>
3299 +
3300 ++#include <asm/mce.h>
3301 ++#include <asm/intel-family.h>
3302 ++
3303 + #include "internal.h"
3304 +
3305 + /*
3306 +@@ -40,9 +42,14 @@ static struct severity {
3307 + unsigned char context;
3308 + unsigned char excp;
3309 + unsigned char covered;
3310 ++ unsigned char cpu_model;
3311 ++ unsigned char cpu_minstepping;
3312 ++ unsigned char bank_lo, bank_hi;
3313 + char *msg;
3314 + } severities[] = {
3315 + #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
3316 ++#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
3317 ++#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
3318 + #define KERNEL .context = IN_KERNEL
3319 + #define USER .context = IN_USER
3320 + #define KERNEL_RECOV .context = IN_KERNEL_RECOV
3321 +@@ -97,7 +104,6 @@ static struct severity {
3322 + KEEP, "Corrected error",
3323 + NOSER, BITCLR(MCI_STATUS_UC)
3324 + ),
3325 +-
3326 + /*
3327 + * known AO MCACODs reported via MCE or CMC:
3328 + *
3329 +@@ -113,6 +119,18 @@ static struct severity {
3330 + AO, "Action optional: last level cache writeback error",
3331 + SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
3332 + ),
3333 ++ /*
3334 ++ * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured
3335 ++ * to report uncorrected errors using CMCI with a special signature.
3336 ++ * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported
3337 ++ * in one of the memory controller banks.
3338 ++ * Set severity to "AO" for same action as normal patrol scrub error.
3339 ++ */
3340 ++ MCESEV(
3341 ++ AO, "Uncorrected Patrol Scrub Error",
3342 ++ SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
3343 ++ MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
3344 ++ ),
3345 +
3346 + /* ignore OVER for UCNA */
3347 + MCESEV(
3348 +@@ -324,6 +342,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
3349 + continue;
3350 + if (s->excp && excp != s->excp)
3351 + continue;
3352 ++ if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
3353 ++ continue;
3354 ++ if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
3355 ++ continue;
3356 ++ if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi))
3357 ++ continue;
3358 + if (msg)
3359 + *msg = s->msg;
3360 + s->covered = 1;
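The quirk entry works through the severity table's mask/result matching, i.e. an entry applies when (status & mask) == result. Since the mask 0xffffeff0 leaves out bit 12 and the low nibble of MCACOD, the wanted low bits 0x001000c0 match exactly the 000X 0000 1100 XXXX pattern described in the comment. A small matcher over just the low 32 bits (the UC and ADDR status bits are omitted for brevity):

    #include <stdint.h>
    #include <stdio.h>

    /* Severity-style matcher: an entry fires when the selected bits of
     * MCi_STATUS equal the wanted pattern. The values below mirror the
     * quirk's MSCOD/MCACOD bits only. */
    static int matches(uint64_t status, uint64_t mask, uint64_t result)
    {
        return (status & mask) == result;
    }

    int main(void)
    {
        uint64_t mask = 0xffffeff0, want = 0x001000c0;

        /* MSCOD=0x0010, MCACOD=0x00c3: low nibble is don't-care -> match */
        printf("%d\n", matches(0x001000c3, mask, want)); /* 1 */
        /* MCACOD=0x10c0: bit 12 is don't-care -> still a match */
        printf("%d\n", matches(0x001010c0, mask, want)); /* 1 */
        /* wrong MSCOD -> no match */
        printf("%d\n", matches(0x002000c0, mask, want)); /* 0 */
        return 0;
    }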
3361 +diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
3362 +index 48ce44576947c..ea8d51ec251bb 100644
3363 +--- a/arch/x86/kernel/dumpstack.c
3364 ++++ b/arch/x86/kernel/dumpstack.c
3365 +@@ -115,7 +115,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
3366 + unsigned long prologue = regs->ip - PROLOGUE_SIZE;
3367 +
3368 + if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
3369 +- printk("%sCode: Bad RIP value.\n", loglvl);
3370 ++ printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
3371 ++ loglvl, prologue);
3372 + } else {
3373 + printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
3374 + __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
3375 +diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
3376 +index 61ddc3a5e5c2b..f8ff895aaf7e1 100644
3377 +--- a/arch/x86/kernel/fpu/init.c
3378 ++++ b/arch/x86/kernel/fpu/init.c
3379 +@@ -243,9 +243,9 @@ static void __init fpu__init_system_ctx_switch(void)
3380 + */
3381 + static void __init fpu__init_parse_early_param(void)
3382 + {
3383 +- char arg[32];
3384 ++ char arg[128];
3385 + char *argptr = arg;
3386 +- int bit;
3387 ++ int arglen, res, bit;
3388 +
3389 + #ifdef CONFIG_X86_32
3390 + if (cmdline_find_option_bool(boot_command_line, "no387"))
3391 +@@ -268,12 +268,26 @@ static void __init fpu__init_parse_early_param(void)
3392 + if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
3393 + setup_clear_cpu_cap(X86_FEATURE_XSAVES);
3394 +
3395 +- if (cmdline_find_option(boot_command_line, "clearcpuid", arg,
3396 +- sizeof(arg)) &&
3397 +- get_option(&argptr, &bit) &&
3398 +- bit >= 0 &&
3399 +- bit < NCAPINTS * 32)
3400 +- setup_clear_cpu_cap(bit);
3401 ++ arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
3402 ++ if (arglen <= 0)
3403 ++ return;
3404 ++
3405 ++ pr_info("Clearing CPUID bits:");
3406 ++ do {
3407 ++ res = get_option(&argptr, &bit);
3408 ++ if (res == 0 || res == 3)
3409 ++ break;
3410 ++
3411 ++ /* If the argument was too long, the last bit may be cut off */
3412 ++ if (res == 1 && arglen >= sizeof(arg))
3413 ++ break;
3414 ++
3415 ++ if (bit >= 0 && bit < NCAPINTS * 32) {
3416 ++ pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
3417 ++ setup_clear_cpu_cap(bit);
3418 ++ }
3419 ++ } while (res == 2);
3420 ++ pr_cont("\n");
3421 + }
3422 +
3423 + /*
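The loop above leans on get_option()'s return convention: 0 means no integer was found, 1 means an integer ended the string, 2 means an integer followed by a comma, and 3 a range; it also bails out when a full buffer may have truncated the last number. A userspace sketch of the same loop shape (parse_int_opt() below is a simplified stand-in, not the kernel API, and ranges are ignored):

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for the kernel's get_option(): returns 0 if
     * no integer was found, 1 if an integer ended the string, 2 if it
     * was followed by a comma. */
    static int parse_int_opt(char **s, int *val)
    {
        char *end;
        long v = strtol(*s, &end, 0);
        if (end == *s)
            return 0;
        *val = (int)v;
        if (*end == ',') {
            *s = end + 1;
            return 2;
        }
        *s = end;
        return 1;
    }

    int main(void)
    {
        char arg[] = "120,125,4";   /* e.g. clearcpuid=120,125,4 */
        char *p = arg;
        int bit, res;

        do {
            res = parse_int_opt(&p, &bit);
            if (res == 0)
                break;
            printf("clearing bit %d\n", bit);
        } while (res == 2);
        return 0;
    }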
3424 +diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
3425 +index 4fc9954a95600..47381666d6a55 100644
3426 +--- a/arch/x86/kernel/nmi.c
3427 ++++ b/arch/x86/kernel/nmi.c
3428 +@@ -102,7 +102,6 @@ fs_initcall(nmi_warning_debugfs);
3429 +
3430 + static void nmi_check_duration(struct nmiaction *action, u64 duration)
3431 + {
3432 +- u64 whole_msecs = READ_ONCE(action->max_duration);
3433 + int remainder_ns, decimal_msecs;
3434 +
3435 + if (duration < nmi_longest_ns || duration < action->max_duration)
3436 +@@ -110,12 +109,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
3437 +
3438 + action->max_duration = duration;
3439 +
3440 +- remainder_ns = do_div(whole_msecs, (1000 * 1000));
3441 ++ remainder_ns = do_div(duration, (1000 * 1000));
3442 + decimal_msecs = remainder_ns / 1000;
3443 +
3444 + printk_ratelimited(KERN_INFO
3445 + "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
3446 +- action->handler, whole_msecs, decimal_msecs);
3447 ++ action->handler, duration, decimal_msecs);
3448 + }
3449 +
3450 + static int nmi_handle(unsigned int type, struct pt_regs *regs)
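The bug fixed here: whole_msecs was sampled from action->max_duration before it was updated, so the message printed the previous maximum rather than the handler run that tripped the check. With the fix, duration itself is divided; do_div() divides the 64-bit value in place and returns the remainder. The same split in plain C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Same math as the fixed nmi_check_duration(): split nanoseconds
         * into whole milliseconds plus three decimal places. */
        uint64_t duration = 12345678;              /* ns */
        uint32_t remainder_ns = duration % 1000000;
        uint32_t decimal_msecs = remainder_ns / 1000;

        duration /= 1000000;                       /* do_div() analogue */
        printf("took %llu.%03u msecs\n",
               (unsigned long long)duration, decimal_msecs);  /* 12.345 */
        return 0;
    }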
3451 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
3452 +index 2f6510de6b0c0..85111cd0adcd0 100644
3453 +--- a/arch/x86/kvm/emulate.c
3454 ++++ b/arch/x86/kvm/emulate.c
3455 +@@ -3606,7 +3606,7 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3456 + u64 tsc_aux = 0;
3457 +
3458 + if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3459 +- return emulate_gp(ctxt, 0);
3460 ++ return emulate_ud(ctxt);
3461 + ctxt->dst.val = tsc_aux;
3462 + return X86EMUL_CONTINUE;
3463 + }
3464 +diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
3465 +index d057376bd3d33..698969e18fe35 100644
3466 +--- a/arch/x86/kvm/ioapic.c
3467 ++++ b/arch/x86/kvm/ioapic.c
3468 +@@ -197,12 +197,9 @@ static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
3469 +
3470 + /*
3471 + * If no longer has pending EOI in LAPICs, update
3472 +- * EOI for this vetor.
3473 ++ * EOI for this vector.
3474 + */
3475 + rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
3476 +- kvm_ioapic_update_eoi_one(vcpu, ioapic,
3477 +- entry->fields.trig_mode,
3478 +- irq);
3479 + break;
3480 + }
3481 + }
3482 +diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
3483 +index cfe83d4ae6252..ca0781b41df9d 100644
3484 +--- a/arch/x86/kvm/kvm_cache_regs.h
3485 ++++ b/arch/x86/kvm/kvm_cache_regs.h
3486 +@@ -7,7 +7,7 @@
3487 + #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
3488 + #define KVM_POSSIBLE_CR4_GUEST_BITS \
3489 + (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
3490 +- | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
3491 ++ | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD)
3492 +
3493 + #define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
3494 + static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
3495 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
3496 +index 35cca2e0c8026..8055a486d843d 100644
3497 +--- a/arch/x86/kvm/lapic.c
3498 ++++ b/arch/x86/kvm/lapic.c
3499 +@@ -488,6 +488,12 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
3500 + }
3501 + }
3502 +
3503 ++void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
3504 ++{
3505 ++ apic_clear_irr(vec, vcpu->arch.apic);
3506 ++}
3507 ++EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
3508 ++
3509 + static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
3510 + {
3511 + struct kvm_vcpu *vcpu;
3512 +@@ -2461,6 +2467,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
3513 + __apic_update_ppr(apic, &ppr);
3514 + return apic_has_interrupt_for_ppr(apic, ppr);
3515 + }
3516 ++EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
3517 +
3518 + int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
3519 + {
3520 +diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
3521 +index 754f29beb83e3..4fb86e3a9dd3d 100644
3522 +--- a/arch/x86/kvm/lapic.h
3523 ++++ b/arch/x86/kvm/lapic.h
3524 +@@ -89,6 +89,7 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
3525 + bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
3526 + int shorthand, unsigned int dest, int dest_mode);
3527 + int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
3528 ++void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec);
3529 + bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr);
3530 + bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr);
3531 + void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
3532 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
3533 +index 71aa3da2a0b7b..d0ca3ab389520 100644
3534 +--- a/arch/x86/kvm/mmu/mmu.c
3535 ++++ b/arch/x86/kvm/mmu/mmu.c
3536 +@@ -6376,6 +6376,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
3537 + cond_resched_lock(&kvm->mmu_lock);
3538 + }
3539 + }
3540 ++ kvm_mmu_commit_zap_page(kvm, &invalid_list);
3541 +
3542 + spin_unlock(&kvm->mmu_lock);
3543 + srcu_read_unlock(&kvm->srcu, rcu_idx);
3544 +diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
3545 +index ac830cd508305..381d22daa4acd 100644
3546 +--- a/arch/x86/kvm/svm/avic.c
3547 ++++ b/arch/x86/kvm/svm/avic.c
3548 +@@ -868,6 +868,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
3549 + * - Tell IOMMU to use legacy mode for this interrupt.
3550 + * - Retrieve ga_tag of prior interrupt remapping data.
3551 + */
3552 ++ pi.prev_ga_tag = 0;
3553 + pi.is_guest_mode = false;
3554 + ret = irq_set_vcpu_affinity(host_irq, &pi);
3555 +
3556 +diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
3557 +index e90bc436f5849..27042c9ea40d6 100644
3558 +--- a/arch/x86/kvm/svm/nested.c
3559 ++++ b/arch/x86/kvm/svm/nested.c
3560 +@@ -243,7 +243,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
3561 + } else {
3562 + if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
3563 + !(vmcb->save.cr0 & X86_CR0_PE) ||
3564 +- (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
3565 ++ (vmcb->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
3566 + return false;
3567 + }
3568 + if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
3569 +diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
3570 +index a798e17317094..c0d75b1e06645 100644
3571 +--- a/arch/x86/kvm/svm/svm.h
3572 ++++ b/arch/x86/kvm/svm/svm.h
3573 +@@ -345,7 +345,7 @@ static inline bool gif_set(struct vcpu_svm *svm)
3574 + /* svm.c */
3575 + #define MSR_CR3_LEGACY_RESERVED_MASK 0xfe7U
3576 + #define MSR_CR3_LEGACY_PAE_RESERVED_MASK 0x7U
3577 +-#define MSR_CR3_LONG_RESERVED_MASK 0xfff0000000000fe7U
3578 ++#define MSR_CR3_LONG_MBZ_MASK 0xfff0000000000000U
3579 + #define MSR_INVALID 0xffffffffU
3580 +
3581 + u32 svm_msrpm_offset(u32 msr);
3582 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
3583 +index 1bb6b31eb6466..76ee5553b9d6c 100644
3584 +--- a/arch/x86/kvm/vmx/nested.c
3585 ++++ b/arch/x86/kvm/vmx/nested.c
3586 +@@ -2408,6 +2408,8 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
3587 + vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
3588 + vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
3589 + vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
3590 ++
3591 ++ vmx->segment_cache.bitmask = 0;
3592 + }
3593 +
3594 + if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
3595 +@@ -3344,8 +3346,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
3596 + prepare_vmcs02_early(vmx, vmcs12);
3597 +
3598 + if (from_vmentry) {
3599 +- if (unlikely(!nested_get_vmcs12_pages(vcpu)))
3600 ++ if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
3601 ++ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3602 + return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
3603 ++ }
3604 +
3605 + if (nested_vmx_check_vmentry_hw(vcpu)) {
3606 + vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3607 +@@ -3528,6 +3532,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3608 + if (unlikely(status != NVMX_VMENTRY_SUCCESS))
3609 + goto vmentry_failed;
3610 +
3611 ++ /* Emulate processing of posted interrupts on VM-Enter. */
3612 ++ if (nested_cpu_has_posted_intr(vmcs12) &&
3613 ++ kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
3614 ++ vmx->nested.pi_pending = true;
3615 ++ kvm_make_request(KVM_REQ_EVENT, vcpu);
3616 ++ kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
3617 ++ }
3618 ++
3619 + /* Hide L1D cache contents from the nested guest. */
3620 + vmx->vcpu.arch.l1tf_flush_l1d = true;
3621 +
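The added VM-Enter code consumes L1's posted-interrupt notification vector: when the highest pending vector in the L1 APIC IRR equals vmcs12's notification vector, it is cleared from the IRR and re-delivered through the nested posted-interrupt path instead of as a plain injected interrupt. A toy model of that IRR comparison (find_highest_irr() is illustrative, not KVM's helper):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy APIC IRR: 256 vectors as a bitmap. */
    static uint64_t irr[4];

    static void irr_set(int vec)   { irr[vec / 64] |=  (1ULL << (vec % 64)); }
    static void irr_clear(int vec) { irr[vec / 64] &= ~(1ULL << (vec % 64)); }

    /* Illustrative stand-in for finding the highest pending vector. */
    static int find_highest_irr(void)
    {
        for (int i = 3; i >= 0; i--)
            if (irr[i])
                return i * 64 + 63 - __builtin_clzll(irr[i]);
        return -1;
    }

    int main(void)
    {
        int posted_intr_nv = 0xf2;  /* example notification vector */

        irr_set(0x30);
        irr_set(posted_intr_nv);

        if (find_highest_irr() == posted_intr_nv) {
            /* deliver as a posted interrupt, not a normal injection */
            irr_clear(posted_intr_nv);
            printf("posted interrupt consumed\n");
        }
        printf("next pending: 0x%x\n", find_highest_irr()); /* 0x30 */
        return 0;
    }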
3622 +diff --git a/block/blk-core.c b/block/blk-core.c
3623 +index 10c08ac506978..0014e7caae3d2 100644
3624 +--- a/block/blk-core.c
3625 ++++ b/block/blk-core.c
3626 +@@ -803,11 +803,10 @@ static void handle_bad_sector(struct bio *bio, sector_t maxsector)
3627 + {
3628 + char b[BDEVNAME_SIZE];
3629 +
3630 +- printk(KERN_INFO "attempt to access beyond end of device\n");
3631 +- printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
3632 +- bio_devname(bio, b), bio->bi_opf,
3633 +- (unsigned long long)bio_end_sector(bio),
3634 +- (long long)maxsector);
3635 ++ pr_info_ratelimited("attempt to access beyond end of device\n"
3636 ++ "%s: rw=%d, want=%llu, limit=%llu\n",
3637 ++ bio_devname(bio, b), bio->bi_opf,
3638 ++ bio_end_sector(bio), maxsector);
3639 + }
3640 +
3641 + #ifdef CONFIG_FAIL_MAKE_REQUEST
3642 +diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
3643 +index 062229395a507..7b52e7657b2d1 100644
3644 +--- a/block/blk-mq-sysfs.c
3645 ++++ b/block/blk-mq-sysfs.c
3646 +@@ -36,8 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
3647 + struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
3648 + kobj);
3649 +
3650 +- cancel_delayed_work_sync(&hctx->run_work);
3651 +-
3652 + if (hctx->flags & BLK_MQ_F_BLOCKING)
3653 + cleanup_srcu_struct(hctx->srcu);
3654 + blk_free_flush_queue(hctx->fq);
3655 +diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
3656 +index 32d82e23b0953..a1c1e7c611f7b 100644
3657 +--- a/block/blk-mq-tag.c
3658 ++++ b/block/blk-mq-tag.c
3659 +@@ -59,7 +59,8 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
3660 + static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
3661 + struct sbitmap_queue *bt)
3662 + {
3663 +- if (!data->q->elevator && !hctx_may_queue(data->hctx, bt))
3664 ++ if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
3665 ++ !hctx_may_queue(data->hctx, bt))
3666 + return BLK_MQ_NO_TAG;
3667 +
3668 + if (data->shallow_depth)
3669 +diff --git a/block/blk-mq.c b/block/blk-mq.c
3670 +index cdced4aca2e81..94a53d779c12b 100644
3671 +--- a/block/blk-mq.c
3672 ++++ b/block/blk-mq.c
3673 +@@ -1105,10 +1105,11 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
3674 + if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
3675 + bt = &rq->mq_hctx->tags->breserved_tags;
3676 + tag_offset = 0;
3677 ++ } else {
3678 ++ if (!hctx_may_queue(rq->mq_hctx, bt))
3679 ++ return false;
3680 + }
3681 +
3682 +- if (!hctx_may_queue(rq->mq_hctx, bt))
3683 +- return false;
3684 + tag = __sbitmap_queue_get(bt);
3685 + if (tag == BLK_MQ_NO_TAG)
3686 + return false;
3687 +@@ -2264,7 +2265,6 @@ queue_exit:
3688 + blk_queue_exit(q);
3689 + return BLK_QC_T_NONE;
3690 + }
3691 +-EXPORT_SYMBOL_GPL(blk_mq_submit_bio); /* only for request based dm */
3692 +
3693 + void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
3694 + unsigned int hctx_idx)
3695 +diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
3696 +index 7dda709f3ccb6..8c6bafc801dd9 100644
3697 +--- a/block/blk-sysfs.c
3698 ++++ b/block/blk-sysfs.c
3699 +@@ -934,9 +934,16 @@ static void blk_release_queue(struct kobject *kobj)
3700 +
3701 + blk_free_queue_stats(q->stats);
3702 +
3703 +- if (queue_is_mq(q))
3704 ++ if (queue_is_mq(q)) {
3705 ++ struct blk_mq_hw_ctx *hctx;
3706 ++ int i;
3707 ++
3708 + cancel_delayed_work_sync(&q->requeue_work);
3709 +
3710 ++ queue_for_each_hw_ctx(q, hctx, i)
3711 ++ cancel_delayed_work_sync(&hctx->run_work);
3712 ++ }
3713 ++
3714 + blk_exit_queue(q);
3715 +
3716 + blk_queue_free_zone_bitmaps(q);
3717 +diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
3718 +index 21efa786f09c9..002edfdbb0937 100644
3719 +--- a/crypto/algif_aead.c
3720 ++++ b/crypto/algif_aead.c
3721 +@@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
3722 + SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
3723 +
3724 + skcipher_request_set_sync_tfm(skreq, null_tfm);
3725 +- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
3726 ++ skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
3727 + NULL, NULL);
3728 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
3729 +
3730 +@@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
3731 + areq->outlen = outlen;
3732 +
3733 + aead_request_set_callback(&areq->cra_u.aead_req,
3734 +- CRYPTO_TFM_REQ_MAY_BACKLOG,
3735 ++ CRYPTO_TFM_REQ_MAY_SLEEP,
3736 + af_alg_async_cb, areq);
3737 + err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
3738 + crypto_aead_decrypt(&areq->cra_u.aead_req);
3739 +
3740 + /* AIO operation in progress */
3741 +- if (err == -EINPROGRESS || err == -EBUSY)
3742 ++ if (err == -EINPROGRESS)
3743 + return -EIOCBQUEUED;
3744 +
3745 + sock_put(sk);
3746 + } else {
3747 + /* Synchronous operation */
3748 + aead_request_set_callback(&areq->cra_u.aead_req,
3749 ++ CRYPTO_TFM_REQ_MAY_SLEEP |
3750 + CRYPTO_TFM_REQ_MAY_BACKLOG,
3751 + crypto_req_done, &ctx->wait);
3752 + err = crypto_wait_req(ctx->enc ?
3753 +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
3754 +index 478f3b8f5bd52..ee8890ee8f332 100644
3755 +--- a/crypto/algif_skcipher.c
3756 ++++ b/crypto/algif_skcipher.c
3757 +@@ -123,7 +123,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
3758 + crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
3759 +
3760 + /* AIO operation in progress */
3761 +- if (err == -EINPROGRESS || err == -EBUSY)
3762 ++ if (err == -EINPROGRESS)
3763 + return -EIOCBQUEUED;
3764 +
3765 + sock_put(sk);
3766 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
3767 +index f936530a19b0e..b27b6bf0c1186 100644
3768 +--- a/drivers/android/binder.c
3769 ++++ b/drivers/android/binder.c
3770 +@@ -223,7 +223,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
3771 + struct binder_work {
3772 + struct list_head entry;
3773 +
3774 +- enum {
3775 ++ enum binder_work_type {
3776 + BINDER_WORK_TRANSACTION = 1,
3777 + BINDER_WORK_TRANSACTION_COMPLETE,
3778 + BINDER_WORK_RETURN_ERROR,
3779 +@@ -885,27 +885,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked(
3780 + return w;
3781 + }
3782 +
3783 +-/**
3784 +- * binder_dequeue_work_head() - Dequeues the item at head of list
3785 +- * @proc: binder_proc associated with list
3786 +- * @list: list to dequeue head
3787 +- *
3788 +- * Removes the head of the list if there are items on the list
3789 +- *
3790 +- * Return: pointer dequeued binder_work, NULL if list was empty
3791 +- */
3792 +-static struct binder_work *binder_dequeue_work_head(
3793 +- struct binder_proc *proc,
3794 +- struct list_head *list)
3795 +-{
3796 +- struct binder_work *w;
3797 +-
3798 +- binder_inner_proc_lock(proc);
3799 +- w = binder_dequeue_work_head_ilocked(list);
3800 +- binder_inner_proc_unlock(proc);
3801 +- return w;
3802 +-}
3803 +-
3804 + static void
3805 + binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
3806 + static void binder_free_thread(struct binder_thread *thread);
3807 +@@ -2344,8 +2323,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
3808 + * file is done when the transaction is torn
3809 + * down.
3810 + */
3811 +- WARN_ON(failed_at &&
3812 +- proc->tsk == current->group_leader);
3813 + } break;
3814 + case BINDER_TYPE_PTR:
3815 + /*
3816 +@@ -4587,13 +4564,17 @@ static void binder_release_work(struct binder_proc *proc,
3817 + struct list_head *list)
3818 + {
3819 + struct binder_work *w;
3820 ++ enum binder_work_type wtype;
3821 +
3822 + while (1) {
3823 +- w = binder_dequeue_work_head(proc, list);
3824 ++ binder_inner_proc_lock(proc);
3825 ++ w = binder_dequeue_work_head_ilocked(list);
3826 ++ wtype = w ? w->type : 0;
3827 ++ binder_inner_proc_unlock(proc);
3828 + if (!w)
3829 + return;
3830 +
3831 +- switch (w->type) {
3832 ++ switch (wtype) {
3833 + case BINDER_WORK_TRANSACTION: {
3834 + struct binder_transaction *t;
3835 +
3836 +@@ -4627,9 +4608,11 @@ static void binder_release_work(struct binder_proc *proc,
3837 + kfree(death);
3838 + binder_stats_deleted(BINDER_STAT_DEATH);
3839 + } break;
3840 ++ case BINDER_WORK_NODE:
3841 ++ break;
3842 + default:
3843 + pr_err("unexpected work type, %d, not freed\n",
3844 +- w->type);
3845 ++ wtype);
3846 + break;
3847 + }
3848 + }
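The binder change closes a use-after-free window: the old helper dropped the lock before the caller read w->type, by which time another thread could have freed the work item; now the type is captured while the lock is still held. A generic pthread sketch of that pattern (the list and field names are invented for illustration):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct work {
        struct work *next;
        int type;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct work *head;

    /* Dequeue the head and capture every field we need *inside* the
     * critical section; after unlock the item may be freed by others. */
    static struct work *dequeue_head(int *type_out)
    {
        pthread_mutex_lock(&lock);
        struct work *w = head;
        if (w) {
            head = w->next;
            *type_out = w->type;
        }
        pthread_mutex_unlock(&lock);
        return w;
    }

    int main(void)
    {
        struct work *w = malloc(sizeof(*w));
        w->next = NULL;
        w->type = 1;
        head = w;

        int type = 0;
        struct work *got = dequeue_head(&type);
        if (got)
            printf("dequeued work of type %d\n", type);
        free(got);
        return 0;
    }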
3849 +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
3850 +index b71f9ecddff5d..fff0547c26c53 100644
3851 +--- a/drivers/base/regmap/regmap.c
3852 ++++ b/drivers/base/regmap/regmap.c
3853 +@@ -711,6 +711,8 @@ struct regmap *__regmap_init(struct device *dev,
3854 + if (ret)
3855 + goto err_map;
3856 +
3857 ++ ret = -EINVAL; /* Later error paths rely on this */
3858 ++
3859 + if (config->disable_locking) {
3860 + map->lock = map->unlock = regmap_lock_unlock_none;
3861 + regmap_debugfs_disable(map);
3862 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
3863 +index 8d2608ddfd087..f88968bcdd6a8 100644
3864 +--- a/drivers/bluetooth/btusb.c
3865 ++++ b/drivers/bluetooth/btusb.c
3866 +@@ -2896,6 +2896,7 @@ static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
3867 + buf = kmalloc(size, GFP_KERNEL);
3868 + if (!buf) {
3869 + kfree(dr);
3870 ++ usb_free_urb(urb);
3871 + return -ENOMEM;
3872 + }
3873 +
3874 +diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
3875 +index 85a30fb9177bb..f83d67eafc9f0 100644
3876 +--- a/drivers/bluetooth/hci_ldisc.c
3877 ++++ b/drivers/bluetooth/hci_ldisc.c
3878 +@@ -538,6 +538,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
3879 + clear_bit(HCI_UART_PROTO_READY, &hu->flags);
3880 + percpu_up_write(&hu->proto_lock);
3881 +
3882 ++ cancel_work_sync(&hu->init_ready);
3883 + cancel_work_sync(&hu->write_work);
3884 +
3885 + if (hdev) {
3886 +diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
3887 +index 7b233312e723f..3977bba485c22 100644
3888 +--- a/drivers/bluetooth/hci_serdev.c
3889 ++++ b/drivers/bluetooth/hci_serdev.c
3890 +@@ -355,6 +355,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
3891 + struct hci_dev *hdev = hu->hdev;
3892 +
3893 + clear_bit(HCI_UART_PROTO_READY, &hu->flags);
3894 ++
3895 ++ cancel_work_sync(&hu->init_ready);
3896 + if (test_bit(HCI_UART_REGISTERED, &hu->flags))
3897 + hci_unregister_dev(hdev);
3898 + hci_free_dev(hdev);
3899 +diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile
3900 +index 66e2700c9032a..bc1469778cf87 100644
3901 +--- a/drivers/bus/mhi/core/Makefile
3902 ++++ b/drivers/bus/mhi/core/Makefile
3903 +@@ -1,3 +1,3 @@
3904 +-obj-$(CONFIG_MHI_BUS) := mhi.o
3905 ++obj-$(CONFIG_MHI_BUS) += mhi.o
3906 +
3907 + mhi-y := init.o main.o pm.o boot.o
3908 +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
3909 +index 77b8d551ae7fe..dd559661c15b3 100644
3910 +--- a/drivers/char/ipmi/ipmi_si_intf.c
3911 ++++ b/drivers/char/ipmi/ipmi_si_intf.c
3912 +@@ -1963,7 +1963,7 @@ static int try_smi_init(struct smi_info *new_smi)
3913 + /* Do this early so it's available for logs. */
3914 + if (!new_smi->io.dev) {
3915 + pr_err("IPMI interface added with no device\n");
3916 +- rv = EIO;
3917 ++ rv = -EIO;
3918 + goto out_err;
3919 + }
3920 +
3921 +diff --git a/drivers/char/random.c b/drivers/char/random.c
3922 +index d20ba1b104ca3..2a41b21623ae4 100644
3923 +--- a/drivers/char/random.c
3924 ++++ b/drivers/char/random.c
3925 +@@ -1277,7 +1277,6 @@ void add_interrupt_randomness(int irq, int irq_flags)
3926 +
3927 + fast_mix(fast_pool);
3928 + add_interrupt_bench(cycles);
3929 +- this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
3930 +
3931 + if (unlikely(crng_init == 0)) {
3932 + if ((fast_pool->count >= 64) &&
3933 +diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
3934 +index 5c83e899084ff..cfae2f59df665 100644
3935 +--- a/drivers/clk/at91/clk-main.c
3936 ++++ b/drivers/clk/at91/clk-main.c
3937 +@@ -437,12 +437,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
3938 + return -EINVAL;
3939 +
3940 + regmap_read(regmap, AT91_CKGR_MOR, &tmp);
3941 +- tmp &= ~MOR_KEY_MASK;
3942 +
3943 + if (index && !(tmp & AT91_PMC_MOSCSEL))
3944 +- regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
3945 ++ tmp = AT91_PMC_MOSCSEL;
3946 + else if (!index && (tmp & AT91_PMC_MOSCSEL))
3947 +- regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
3948 ++ tmp = 0;
3949 ++ else
3950 ++ return 0;
3951 ++
3952 ++ regmap_update_bits(regmap, AT91_CKGR_MOR,
3953 ++ AT91_PMC_MOSCSEL | MOR_KEY_MASK,
3954 ++ tmp | AT91_PMC_KEY);
3955 +
3956 + while (!clk_sam9x5_main_ready(regmap))
3957 + cpu_relax();
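regmap_update_bits(map, reg, mask, val) performs a locked read-modify-write, new = (old & ~mask) | (val & mask), so the fix lands the MOSCSEL flip and the MOR write-protect key in the register with a single write instead of the earlier sequence that had already stripped the key bits. The mask math in isolation (bit positions below are illustrative, not checked against the AT91 datasheet):

    #include <stdint.h>
    #include <stdio.h>

    /* What regmap_update_bits() computes before writing back. */
    static uint32_t update_bits(uint32_t old, uint32_t mask, uint32_t val)
    {
        return (old & ~mask) | (val & mask);
    }

    int main(void)
    {
        /* Illustrative layout only: say MOSCSEL is bit 24 and the key
         * field occupies bits 16-23. */
        uint32_t MOSCSEL = 1u << 24, KEY_MASK = 0xffu << 16;
        uint32_t KEY = 0x37u << 16;

        uint32_t mor = 0x0000000f;  /* MOSCSEL currently clear */
        mor = update_bits(mor, MOSCSEL | KEY_MASK, MOSCSEL | KEY);
        printf("MOR=0x%08x\n", mor);  /* 0x0137000f */
        return 0;
    }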
3958 +diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
3959 +index ab6318c0589e9..3c4c956035954 100644
3960 +--- a/drivers/clk/at91/sam9x60.c
3961 ++++ b/drivers/clk/at91/sam9x60.c
3962 +@@ -279,7 +279,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
3963 + parent_names[3] = "masterck";
3964 + parent_names[4] = "pllack_divck";
3965 + parent_names[5] = "upllck_divck";
3966 +- for (i = 0; i < 8; i++) {
3967 ++ for (i = 0; i < 2; i++) {
3968 + char name[6];
3969 +
3970 + snprintf(name, sizeof(name), "prog%d", i);
3971 +diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
3972 +index 3439bc65bb4e3..1ac803e14fa3e 100644
3973 +--- a/drivers/clk/bcm/clk-bcm2835.c
3974 ++++ b/drivers/clk/bcm/clk-bcm2835.c
3975 +@@ -1338,8 +1338,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
3976 + pll->hw.init = &init;
3977 +
3978 + ret = devm_clk_hw_register(cprman->dev, &pll->hw);
3979 +- if (ret)
3980 ++ if (ret) {
3981 ++ kfree(pll);
3982 + return NULL;
3983 ++ }
3984 + return &pll->hw;
3985 + }
3986 +
3987 +diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
3988 +index a64aace213c27..7762c5825e77d 100644
3989 +--- a/drivers/clk/imx/clk-imx8mq.c
3990 ++++ b/drivers/clk/imx/clk-imx8mq.c
3991 +@@ -157,10 +157,10 @@ static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys
3992 + "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
3993 +
3994 + static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
3995 +- "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
3996 ++ "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
3997 +
3998 + static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
3999 +- "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
4000 ++ "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
4001 +
4002 + static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out",
4003 + "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
4004 +diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
4005 +index 2ad26cb927fdb..f126b6045afa7 100644
4006 +--- a/drivers/clk/keystone/sci-clk.c
4007 ++++ b/drivers/clk/keystone/sci-clk.c
4008 +@@ -522,7 +522,7 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
4009 + np = of_find_node_with_property(np, *clk_name);
4010 + if (!np) {
4011 + clk_name++;
4012 +- break;
4013 ++ continue;
4014 + }
4015 +
4016 + if (!of_device_is_available(np))
4017 +diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
4018 +index 9766cccf5844c..6e0d3a1667291 100644
4019 +--- a/drivers/clk/mediatek/clk-mt6779.c
4020 ++++ b/drivers/clk/mediatek/clk-mt6779.c
4021 +@@ -919,6 +919,8 @@ static const struct mtk_gate infra_clks[] = {
4022 + "pwm_sel", 19),
4023 + GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
4024 + "pwm_sel", 21),
4025 ++ GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
4026 ++ "uart_sel", 22),
4027 + GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
4028 + "uart_sel", 23),
4029 + GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",
4030 +diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
4031 +index 53715e36326c6..9918cb375de30 100644
4032 +--- a/drivers/clk/meson/axg-audio.c
4033 ++++ b/drivers/clk/meson/axg-audio.c
4034 +@@ -1209,13 +1209,132 @@ static struct clk_hw_onecell_data sm1_audio_hw_onecell_data = {
4035 + };
4036 +
4037 +
4038 +-/* Convenience table to populate regmap in .probe()
4039 +- * Note that this table is shared between both AXG and G12A,
4040 +- * with spdifout_b clocks being exclusive to G12A. Since those
4041 +- * clocks are not declared within the AXG onecell table, we do not
4042 +- * feel the need to have separate AXG/G12A regmap tables.
4043 +- */
4044 ++/* Convenience table to populate regmap in .probe(). */
4045 + static struct clk_regmap *const axg_clk_regmaps[] = {
4046 ++ &ddr_arb,
4047 ++ &pdm,
4048 ++ &tdmin_a,
4049 ++ &tdmin_b,
4050 ++ &tdmin_c,
4051 ++ &tdmin_lb,
4052 ++ &tdmout_a,
4053 ++ &tdmout_b,
4054 ++ &tdmout_c,
4055 ++ &frddr_a,
4056 ++ &frddr_b,
4057 ++ &frddr_c,
4058 ++ &toddr_a,
4059 ++ &toddr_b,
4060 ++ &toddr_c,
4061 ++ &loopback,
4062 ++ &spdifin,
4063 ++ &spdifout,
4064 ++ &resample,
4065 ++ &power_detect,
4066 ++ &mst_a_mclk_sel,
4067 ++ &mst_b_mclk_sel,
4068 ++ &mst_c_mclk_sel,
4069 ++ &mst_d_mclk_sel,
4070 ++ &mst_e_mclk_sel,
4071 ++ &mst_f_mclk_sel,
4072 ++ &mst_a_mclk_div,
4073 ++ &mst_b_mclk_div,
4074 ++ &mst_c_mclk_div,
4075 ++ &mst_d_mclk_div,
4076 ++ &mst_e_mclk_div,
4077 ++ &mst_f_mclk_div,
4078 ++ &mst_a_mclk,
4079 ++ &mst_b_mclk,
4080 ++ &mst_c_mclk,
4081 ++ &mst_d_mclk,
4082 ++ &mst_e_mclk,
4083 ++ &mst_f_mclk,
4084 ++ &spdifout_clk_sel,
4085 ++ &spdifout_clk_div,
4086 ++ &spdifout_clk,
4087 ++ &spdifin_clk_sel,
4088 ++ &spdifin_clk_div,
4089 ++ &spdifin_clk,
4090 ++ &pdm_dclk_sel,
4091 ++ &pdm_dclk_div,
4092 ++ &pdm_dclk,
4093 ++ &pdm_sysclk_sel,
4094 ++ &pdm_sysclk_div,
4095 ++ &pdm_sysclk,
4096 ++ &mst_a_sclk_pre_en,
4097 ++ &mst_b_sclk_pre_en,
4098 ++ &mst_c_sclk_pre_en,
4099 ++ &mst_d_sclk_pre_en,
4100 ++ &mst_e_sclk_pre_en,
4101 ++ &mst_f_sclk_pre_en,
4102 ++ &mst_a_sclk_div,
4103 ++ &mst_b_sclk_div,
4104 ++ &mst_c_sclk_div,
4105 ++ &mst_d_sclk_div,
4106 ++ &mst_e_sclk_div,
4107 ++ &mst_f_sclk_div,
4108 ++ &mst_a_sclk_post_en,
4109 ++ &mst_b_sclk_post_en,
4110 ++ &mst_c_sclk_post_en,
4111 ++ &mst_d_sclk_post_en,
4112 ++ &mst_e_sclk_post_en,
4113 ++ &mst_f_sclk_post_en,
4114 ++ &mst_a_sclk,
4115 ++ &mst_b_sclk,
4116 ++ &mst_c_sclk,
4117 ++ &mst_d_sclk,
4118 ++ &mst_e_sclk,
4119 ++ &mst_f_sclk,
4120 ++ &mst_a_lrclk_div,
4121 ++ &mst_b_lrclk_div,
4122 ++ &mst_c_lrclk_div,
4123 ++ &mst_d_lrclk_div,
4124 ++ &mst_e_lrclk_div,
4125 ++ &mst_f_lrclk_div,
4126 ++ &mst_a_lrclk,
4127 ++ &mst_b_lrclk,
4128 ++ &mst_c_lrclk,
4129 ++ &mst_d_lrclk,
4130 ++ &mst_e_lrclk,
4131 ++ &mst_f_lrclk,
4132 ++ &tdmin_a_sclk_sel,
4133 ++ &tdmin_b_sclk_sel,
4134 ++ &tdmin_c_sclk_sel,
4135 ++ &tdmin_lb_sclk_sel,
4136 ++ &tdmout_a_sclk_sel,
4137 ++ &tdmout_b_sclk_sel,
4138 ++ &tdmout_c_sclk_sel,
4139 ++ &tdmin_a_sclk_pre_en,
4140 ++ &tdmin_b_sclk_pre_en,
4141 ++ &tdmin_c_sclk_pre_en,
4142 ++ &tdmin_lb_sclk_pre_en,
4143 ++ &tdmout_a_sclk_pre_en,
4144 ++ &tdmout_b_sclk_pre_en,
4145 ++ &tdmout_c_sclk_pre_en,
4146 ++ &tdmin_a_sclk_post_en,
4147 ++ &tdmin_b_sclk_post_en,
4148 ++ &tdmin_c_sclk_post_en,
4149 ++ &tdmin_lb_sclk_post_en,
4150 ++ &tdmout_a_sclk_post_en,
4151 ++ &tdmout_b_sclk_post_en,
4152 ++ &tdmout_c_sclk_post_en,
4153 ++ &tdmin_a_sclk,
4154 ++ &tdmin_b_sclk,
4155 ++ &tdmin_c_sclk,
4156 ++ &tdmin_lb_sclk,
4157 ++ &tdmout_a_sclk,
4158 ++ &tdmout_b_sclk,
4159 ++ &tdmout_c_sclk,
4160 ++ &tdmin_a_lrclk,
4161 ++ &tdmin_b_lrclk,
4162 ++ &tdmin_c_lrclk,
4163 ++ &tdmin_lb_lrclk,
4164 ++ &tdmout_a_lrclk,
4165 ++ &tdmout_b_lrclk,
4166 ++ &tdmout_c_lrclk,
4167 ++};
4168 ++
4169 ++static struct clk_regmap *const g12a_clk_regmaps[] = {
4170 + &ddr_arb,
4171 + &pdm,
4172 + &tdmin_a,
4173 +@@ -1713,8 +1832,8 @@ static const struct audioclk_data axg_audioclk_data = {
4174 + };
4175 +
4176 + static const struct audioclk_data g12a_audioclk_data = {
4177 +- .regmap_clks = axg_clk_regmaps,
4178 +- .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
4179 ++ .regmap_clks = g12a_clk_regmaps,
4180 ++ .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
4181 + .hw_onecell_data = &g12a_audio_hw_onecell_data,
4182 + .reset_offset = AUDIO_SW_RESET,
4183 + .reset_num = 26,
4184 +diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
4185 +index 9803d44bb1578..b814d44917a5d 100644
4186 +--- a/drivers/clk/meson/g12a.c
4187 ++++ b/drivers/clk/meson/g12a.c
4188 +@@ -298,6 +298,17 @@ static struct clk_regmap g12a_fclk_div2 = {
4189 + &g12a_fclk_div2_div.hw
4190 + },
4191 + .num_parents = 1,
4192 ++ /*
4193 ++ * Similar to fclk_div3, it seems that this clock is used by
4194 ++ * the resident firmware and is required by the platform to
4195 ++ * operate correctly.
4196 ++ * Until the following condition are met, we need this clock to
4197 ++ * be marked as critical:
4198 ++ * a) Mark the clock used by a firmware resource, if possible
4199 ++ * b) CCF has a clock hand-off mechanism to make the sure the
4200 ++ * clock stays on until the proper driver comes along
4201 ++ */
4202 ++ .flags = CLK_IS_CRITICAL,
4203 + },
4204 + };
4205 +
4206 +diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
4207 +index f0b47b7d50ca6..31258795e7b86 100644
4208 +--- a/drivers/clk/qcom/gcc-sdm660.c
4209 ++++ b/drivers/clk/qcom/gcc-sdm660.c
4210 +@@ -666,7 +666,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
4211 + .cmd_rcgr = 0x48044,
4212 + .mnd_width = 0,
4213 + .hid_width = 5,
4214 +- .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
4215 ++ .parent_map = gcc_parent_map_xo_gpll0,
4216 + .freq_tbl = ftbl_hmss_rbcpr_clk_src,
4217 + .clkr.hw.init = &(struct clk_init_data){
4218 + .name = "hmss_rbcpr_clk_src",
4219 +diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
4220 +index bfc4ac02f9ea2..af26e0695b866 100644
4221 +--- a/drivers/clk/qcom/gdsc.c
4222 ++++ b/drivers/clk/qcom/gdsc.c
4223 +@@ -358,6 +358,14 @@ static int gdsc_init(struct gdsc *sc)
4224 + if ((sc->flags & VOTABLE) && on)
4225 + gdsc_enable(&sc->pd);
4226 +
4227 ++ /*
4228 ++ * Make sure the retain bit is set if the GDSC is already on, otherwise
4229 ++ * we end up turning off the GDSC and destroying all the register
4230 ++ * contents that we thought we were saving.
4231 ++ */
4232 ++ if ((sc->flags & RETAIN_FF_ENABLE) && on)
4233 ++ gdsc_retain_ff_on(sc);
4234 ++
4235 + /* If ALWAYS_ON GDSCs are not ON, turn them ON */
4236 + if (sc->flags & ALWAYS_ON) {
4237 + if (!on)
4238 +diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
4239 +index b333fc28c94b6..37c858d689e0d 100644
4240 +--- a/drivers/clk/rockchip/clk-half-divider.c
4241 ++++ b/drivers/clk/rockchip/clk-half-divider.c
4242 +@@ -166,7 +166,7 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
4243 + unsigned long flags,
4244 + spinlock_t *lock)
4245 + {
4246 +- struct clk *clk;
4247 ++ struct clk *clk = ERR_PTR(-ENOMEM);
4248 + struct clk_mux *mux = NULL;
4249 + struct clk_gate *gate = NULL;
4250 + struct clk_divider *div = NULL;
4251 +diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
4252 +index 09aa44cb8a91d..ba04cb381cd3f 100644
4253 +--- a/drivers/clocksource/hyperv_timer.c
4254 ++++ b/drivers/clocksource/hyperv_timer.c
4255 +@@ -341,7 +341,7 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
4256 + return read_hv_clock_tsc();
4257 + }
4258 +
4259 +-static u64 read_hv_sched_clock_tsc(void)
4260 ++static u64 notrace read_hv_sched_clock_tsc(void)
4261 + {
4262 + return (read_hv_clock_tsc() - hv_sched_clock_offset) *
4263 + (NSEC_PER_SEC / HV_CLOCK_HZ);
4264 +@@ -404,7 +404,7 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
4265 + return read_hv_clock_msr();
4266 + }
4267 +
4268 +-static u64 read_hv_sched_clock_msr(void)
4269 ++static u64 notrace read_hv_sched_clock_msr(void)
4270 + {
4271 + return (read_hv_clock_msr() - hv_sched_clock_offset) *
4272 + (NSEC_PER_SEC / HV_CLOCK_HZ);
4273 +diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
4274 +index df1c941260d14..b4af4094309b0 100644
4275 +--- a/drivers/cpufreq/armada-37xx-cpufreq.c
4276 ++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
4277 +@@ -484,6 +484,12 @@ remove_opp:
4278 + /* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
4279 + late_initcall(armada37xx_cpufreq_driver_init);
4280 +
4281 ++static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
4282 ++ { .compatible = "marvell,armada-3700-nb-pm" },
4283 ++ { },
4284 ++};
4285 ++MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
4286 ++
4287 + MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@××××××××××××××.com>");
4288 + MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
4289 + MODULE_LICENSE("GPL");
4290 +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
4291 +index a9af15e994ccf..e439b43c19ebe 100644
4292 +--- a/drivers/cpufreq/powernv-cpufreq.c
4293 ++++ b/drivers/cpufreq/powernv-cpufreq.c
4294 +@@ -885,12 +885,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
4295 + unsigned long action, void *unused)
4296 + {
4297 + int cpu;
4298 +- struct cpufreq_policy cpu_policy;
4299 ++ struct cpufreq_policy *cpu_policy;
4300 +
4301 + rebooting = true;
4302 + for_each_online_cpu(cpu) {
4303 +- cpufreq_get_policy(&cpu_policy, cpu);
4304 +- powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
4305 ++ cpu_policy = cpufreq_cpu_get(cpu);
4306 ++ if (!cpu_policy)
4307 ++ continue;
4308 ++ powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
4309 ++ cpufreq_cpu_put(cpu_policy);
4310 + }
4311 +
4312 + return NOTIFY_DONE;
4313 +diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
4314 +index 3fb044b907a83..47b7d394d2abb 100644
4315 +--- a/drivers/cpufreq/qcom-cpufreq-hw.c
4316 ++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
4317 +@@ -177,10 +177,15 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
4318 + freq = cpu_hw_rate / 1000;
4319 +
4320 + if (freq != prev_freq && core_count != LUT_TURBO_IND) {
4321 +- table[i].frequency = freq;
4322 +- qcom_cpufreq_update_opp(cpu_dev, freq, volt);
4323 +- dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
4324 ++ if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
4325 ++ table[i].frequency = freq;
4326 ++ dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
4327 + freq, core_count);
4328 ++ } else {
4329 ++ dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
4330 ++ table[i].frequency = CPUFREQ_ENTRY_INVALID;
4331 ++ }
4332 ++
4333 + } else if (core_count == LUT_TURBO_IND) {
4334 + table[i].frequency = CPUFREQ_ENTRY_INVALID;
4335 + }
4336 +@@ -197,9 +202,13 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
4337 + * as the boost frequency
4338 + */
4339 + if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
4340 +- prev->frequency = prev_freq;
4341 +- prev->flags = CPUFREQ_BOOST_FREQ;
4342 +- qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt);
4343 ++ if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
4344 ++ prev->frequency = prev_freq;
4345 ++ prev->flags = CPUFREQ_BOOST_FREQ;
4346 ++ } else {
4347 ++ dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
4348 ++ freq);
4349 ++ }
4350 + }
4351 +
4352 + break;
4353 +diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
4354 +index 52a9b7cf6576f..ab941cfd27a88 100644
4355 +--- a/drivers/crypto/Kconfig
4356 ++++ b/drivers/crypto/Kconfig
4357 +@@ -876,6 +876,7 @@ config CRYPTO_DEV_SA2UL
4358 + select CRYPTO_SHA1
4359 + select CRYPTO_SHA256
4360 + select CRYPTO_SHA512
4361 ++ select CRYPTO_AUTHENC
4362 + select HW_RANDOM
4363 + select SG_SPLIT
4364 + help
4365 +diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
4366 +index 138759dc8190e..08ed1ca12baf9 100644
4367 +--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
4368 ++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
4369 +@@ -120,7 +120,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
4370 + /* Be sure all data is written before enabling the task */
4371 + wmb();
4372 +
4373 +- v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
4374 ++ /* Only H6 needs to write a part of t_common_ctl along with "1", but since it is ignored
4375 ++ * on older SoCs, we have no reason to complicate things.
4376 ++ */
4377 ++ v = 1 | ((le32_to_cpu(ce->chanlist[flow].tl->t_common_ctl) & 0x7F) << 8);
4378 + writel(v, ce->base + CE_TLR);
4379 + mutex_unlock(&ce->mlock);
4380 +
4381 +diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
4382 +index bc35aa0ec07ae..84ea7cba5ee5b 100644
4383 +--- a/drivers/crypto/caam/Kconfig
4384 ++++ b/drivers/crypto/caam/Kconfig
4385 +@@ -101,6 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
4386 + select CRYPTO_AUTHENC
4387 + select CRYPTO_SKCIPHER
4388 + select CRYPTO_LIB_DES
4389 ++ select CRYPTO_XTS
4390 + help
4391 + Selecting this will offload crypto for users of the
4392 + scatterlist crypto API (such as the linux native IPSec
4393 +@@ -114,6 +115,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
4394 + select CRYPTO_AUTHENC
4395 + select CRYPTO_SKCIPHER
4396 + select CRYPTO_DES
4397 ++ select CRYPTO_XTS
4398 + help
4399 + Selecting this will use CAAM Queue Interface (QI) for sending
4400 + & receiving crypto jobs to/from CAAM. This gives better performance
4401 +@@ -165,6 +167,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
4402 + select CRYPTO_AEAD
4403 + select CRYPTO_HASH
4404 + select CRYPTO_DES
4405 ++ select CRYPTO_XTS
4406 + help
4407 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
4408 + It handles DPSECI DPAA2 objects that sit on the Management Complex
4409 +diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
4410 +index 91feda5b63f65..e72aa3e2e0656 100644
4411 +--- a/drivers/crypto/caam/caamalg.c
4412 ++++ b/drivers/crypto/caam/caamalg.c
4413 +@@ -57,6 +57,8 @@
4414 + #include "key_gen.h"
4415 + #include "caamalg_desc.h"
4416 + #include <crypto/engine.h>
4417 ++#include <crypto/xts.h>
4418 ++#include <asm/unaligned.h>
4419 +
4420 + /*
4421 + * crypto alg
4422 +@@ -114,10 +116,13 @@ struct caam_ctx {
4423 + struct alginfo adata;
4424 + struct alginfo cdata;
4425 + unsigned int authsize;
4426 ++ bool xts_key_fallback;
4427 ++ struct crypto_skcipher *fallback;
4428 + };
4429 +
4430 + struct caam_skcipher_req_ctx {
4431 + struct skcipher_edesc *edesc;
4432 ++ struct skcipher_request fallback_req;
4433 + };
4434 +
4435 + struct caam_aead_req_ctx {
4436 +@@ -830,12 +835,21 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
4437 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
4438 + struct device *jrdev = ctx->jrdev;
4439 + u32 *desc;
4440 ++ int err;
4441 +
4442 +- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
4443 ++ err = xts_verify_key(skcipher, key, keylen);
4444 ++ if (err) {
4445 + dev_dbg(jrdev, "key size mismatch\n");
4446 +- return -EINVAL;
4447 ++ return err;
4448 + }
4449 +
4450 ++ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
4451 ++ ctx->xts_key_fallback = true;
4452 ++
4453 ++ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
4454 ++ if (err)
4455 ++ return err;
4456 ++
4457 + ctx->cdata.keylen = keylen;
4458 + ctx->cdata.key_virt = key;
4459 + ctx->cdata.key_inline = true;
4460 +@@ -1755,6 +1769,14 @@ static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
4461 + return ret;
4462 + }
4463 +
4464 ++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
4465 ++{
4466 ++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
4467 ++ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
4468 ++
4469 ++ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
4470 ++}
4471 ++
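xts_skcipher_ivsize() above returns true when the upper 8 bytes of the 16-byte XTS IV are non-zero; skcipher_crypt() then routes the request to the software fallback, which this series also uses for key lengths the accelerator cannot take. The unaligned upper-half read in plain C:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Equivalent of get_unaligned((u64 *)(iv + ivsize / 2)) != 0 for a
     * 16-byte XTS IV: true when the upper 8 bytes carry anything. */
    static int xts_iv_needs_fallback(const uint8_t *iv, unsigned int ivsize)
    {
        uint64_t hi;
        memcpy(&hi, iv + ivsize / 2, sizeof(hi));  /* safe unaligned read */
        return hi != 0;
    }

    int main(void)
    {
        uint8_t iv_lo[16] = { 1, 2, 3, 4 };            /* upper half zero */
        uint8_t iv_hi[16] = { 0 };
        iv_hi[12] = 0xff;                              /* upper half set */

        printf("%d %d\n", xts_iv_needs_fallback(iv_lo, 16),
                          xts_iv_needs_fallback(iv_hi, 16)); /* 0 1 */
        return 0;
    }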
4472 + static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
4473 + {
4474 + struct skcipher_edesc *edesc;
4475 +@@ -1765,9 +1787,30 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
4476 + u32 *desc;
4477 + int ret = 0;
4478 +
4479 +- if (!req->cryptlen)
4480 ++ /*
4481 ++ * XTS is expected to return an error even for input length = 0
4482 ++ * Note that the input length < block size case is caught during
4483 ++ * HW offloading and results in an error.
4484 ++ */
4485 ++ if (!req->cryptlen && !ctx->fallback)
4486 + return 0;
4487 +
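++	/*
++	 * Route the request to the software fallback: fallback_req lives in
++	 * the request context (reqsize was enlarged in caam_cra_init), so no
++	 * extra allocation is needed on this path.
++	 */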
4488 ++ if (ctx->fallback && (xts_skcipher_ivsize(req) ||
4489 ++ ctx->xts_key_fallback)) {
4490 ++ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
4491 ++
4492 ++ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
4493 ++ skcipher_request_set_callback(&rctx->fallback_req,
4494 ++ req->base.flags,
4495 ++ req->base.complete,
4496 ++ req->base.data);
4497 ++ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
4498 ++ req->dst, req->cryptlen, req->iv);
4499 ++
4500 ++ return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
4501 ++ crypto_skcipher_decrypt(&rctx->fallback_req);
4502 ++ }
4503 ++
4504 + /* allocate extended descriptor */
4505 + edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
4506 + if (IS_ERR(edesc))
4507 +@@ -1905,6 +1948,7 @@ static struct caam_skcipher_alg driver_algs[] = {
4508 + .base = {
4509 + .cra_name = "xts(aes)",
4510 + .cra_driver_name = "xts-aes-caam",
4511 ++ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
4512 + .cra_blocksize = AES_BLOCK_SIZE,
4513 + },
4514 + .setkey = xts_skcipher_setkey,
4515 +@@ -3344,13 +3388,35 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
4516 + struct caam_skcipher_alg *caam_alg =
4517 + container_of(alg, typeof(*caam_alg), skcipher);
4518 + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
4519 +-
4520 +- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
4521 ++ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
4522 ++ int ret = 0;
4523 +
4524 + ctx->enginectx.op.do_one_request = skcipher_do_one_req;
4525 +
4526 +- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
4527 +- false);
4528 ++ if (alg_aai == OP_ALG_AAI_XTS) {
4529 ++ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
4530 ++ struct crypto_skcipher *fallback;
4531 ++
4532 ++ fallback = crypto_alloc_skcipher(tfm_name, 0,
4533 ++ CRYPTO_ALG_NEED_FALLBACK);
4534 ++ if (IS_ERR(fallback)) {
4535 ++ dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
4536 ++ tfm_name, PTR_ERR(fallback));
4537 ++ return PTR_ERR(fallback);
4538 ++ }
4539 ++
4540 ++ ctx->fallback = fallback;
4541 ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
4542 ++ crypto_skcipher_reqsize(fallback));
4543 ++ } else {
4544 ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
4545 ++ }
4546 ++
4547 ++ ret = caam_init_common(ctx, &caam_alg->caam, false);
4548 ++ if (ret && ctx->fallback)
4549 ++ crypto_free_skcipher(ctx->fallback);
4550 ++
4551 ++ return ret;
4552 + }
4553 +
4554 + static int caam_aead_init(struct crypto_aead *tfm)
4555 +@@ -3378,7 +3444,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
4556 +
4557 + static void caam_cra_exit(struct crypto_skcipher *tfm)
4558 + {
4559 +- caam_exit_common(crypto_skcipher_ctx(tfm));
4560 ++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
4561 ++
4562 ++ if (ctx->fallback)
4563 ++ crypto_free_skcipher(ctx->fallback);
4564 ++ caam_exit_common(ctx);
4565 + }
4566 +
4567 + static void caam_aead_exit(struct crypto_aead *tfm)
4568 +@@ -3412,8 +3482,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
4569 + alg->base.cra_module = THIS_MODULE;
4570 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
4571 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
4572 +- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
4573 +- CRYPTO_ALG_KERN_DRIVER_ONLY;
4574 ++ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
4575 ++ CRYPTO_ALG_KERN_DRIVER_ONLY);
4576 +
4577 + alg->init = caam_cra_init;
4578 + alg->exit = caam_cra_exit;
4579 +diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
4580 +index bb1c0106a95c3..efcc7cb050fc7 100644
4581 +--- a/drivers/crypto/caam/caamalg_qi.c
4582 ++++ b/drivers/crypto/caam/caamalg_qi.c
4583 +@@ -18,6 +18,8 @@
4584 + #include "qi.h"
4585 + #include "jr.h"
4586 + #include "caamalg_desc.h"
4587 ++#include <crypto/xts.h>
4588 ++#include <asm/unaligned.h>
4589 +
4590 + /*
4591 + * crypto alg
4592 +@@ -67,6 +69,12 @@ struct caam_ctx {
4593 + struct device *qidev;
4594 + spinlock_t lock; /* Protects multiple init of driver context */
4595 + struct caam_drv_ctx *drv_ctx[NUM_OP];
4596 ++ bool xts_key_fallback;
4597 ++ struct crypto_skcipher *fallback;
4598 ++};
4599 ++
4600 ++struct caam_skcipher_req_ctx {
4601 ++ struct skcipher_request fallback_req;
4602 + };
4603 +
4604 + static int aead_set_sh_desc(struct crypto_aead *aead)
4605 +@@ -726,12 +734,21 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
4606 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
4607 + struct device *jrdev = ctx->jrdev;
4608 + int ret = 0;
4609 ++ int err;
4610 +
4611 +- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
4612 ++ err = xts_verify_key(skcipher, key, keylen);
4613 ++ if (err) {
4614 + dev_dbg(jrdev, "key size mismatch\n");
4615 +- return -EINVAL;
4616 ++ return err;
4617 + }
4618 +
4619 ++ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
4620 ++ ctx->xts_key_fallback = true;
4621 ++
4622 ++ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
4623 ++ if (err)
4624 ++ return err;
4625 ++
4626 + ctx->cdata.keylen = keylen;
4627 + ctx->cdata.key_virt = key;
4628 + ctx->cdata.key_inline = true;
4629 +@@ -1373,6 +1390,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
4630 + return edesc;
4631 + }
4632 +
4633 ++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
4634 ++{
4635 ++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
4636 ++ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
4637 ++
4638 ++ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
4639 ++}
4640 ++
4641 + static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
4642 + {
4643 + struct skcipher_edesc *edesc;
4644 +@@ -1380,9 +1405,30 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
4645 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
4646 + int ret;
4647 +
4648 +- if (!req->cryptlen)
4649 ++ /*
4650 ++ * XTS is expected to return an error even for input length = 0
4651 ++ * Note that the input length < block size case is caught during
4652 ++ * HW offloading and results in an error.
4653 ++ */
4654 ++ if (!req->cryptlen && !ctx->fallback)
4655 + return 0;
4656 +
4657 ++ if (ctx->fallback && (xts_skcipher_ivsize(req) ||
4658 ++ ctx->xts_key_fallback)) {
4659 ++ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
4660 ++
4661 ++ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
4662 ++ skcipher_request_set_callback(&rctx->fallback_req,
4663 ++ req->base.flags,
4664 ++ req->base.complete,
4665 ++ req->base.data);
4666 ++ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
4667 ++ req->dst, req->cryptlen, req->iv);
4668 ++
4669 ++ return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
4670 ++ crypto_skcipher_decrypt(&rctx->fallback_req);
4671 ++ }
4672 ++
4673 + if (unlikely(caam_congested))
4674 + return -EAGAIN;
4675 +
4676 +@@ -1507,6 +1553,7 @@ static struct caam_skcipher_alg driver_algs[] = {
4677 + .base = {
4678 + .cra_name = "xts(aes)",
4679 + .cra_driver_name = "xts-aes-caam-qi",
4680 ++ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
4681 + .cra_blocksize = AES_BLOCK_SIZE,
4682 + },
4683 + .setkey = xts_skcipher_setkey,
4684 +@@ -2440,9 +2487,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
4685 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
4686 + struct caam_skcipher_alg *caam_alg =
4687 + container_of(alg, typeof(*caam_alg), skcipher);
4688 ++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
4689 ++ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
4690 ++ int ret = 0;
4691 ++
4692 ++ if (alg_aai == OP_ALG_AAI_XTS) {
4693 ++ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
4694 ++ struct crypto_skcipher *fallback;
4695 ++
4696 ++ fallback = crypto_alloc_skcipher(tfm_name, 0,
4697 ++ CRYPTO_ALG_NEED_FALLBACK);
4698 ++ if (IS_ERR(fallback)) {
4699 ++ dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
4700 ++ tfm_name, PTR_ERR(fallback));
4701 ++ return PTR_ERR(fallback);
4702 ++ }
4703 ++
4704 ++ ctx->fallback = fallback;
4705 ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
4706 ++ crypto_skcipher_reqsize(fallback));
4707 ++ }
4708 ++
4709 ++ ret = caam_init_common(ctx, &caam_alg->caam, false);
4710 ++ if (ret && ctx->fallback)
4711 ++ crypto_free_skcipher(ctx->fallback);
4712 +
4713 +- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
4714 +- false);
4715 ++ return ret;
4716 + }
4717 +
4718 + static int caam_aead_init(struct crypto_aead *tfm)
4719 +@@ -2468,7 +2538,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
4720 +
4721 + static void caam_cra_exit(struct crypto_skcipher *tfm)
4722 + {
4723 +- caam_exit_common(crypto_skcipher_ctx(tfm));
4724 ++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
4725 ++
4726 ++ if (ctx->fallback)
4727 ++ crypto_free_skcipher(ctx->fallback);
4728 ++ caam_exit_common(ctx);
4729 + }
4730 +
4731 + static void caam_aead_exit(struct crypto_aead *tfm)
4732 +@@ -2502,8 +2576,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
4733 + alg->base.cra_module = THIS_MODULE;
4734 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
4735 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
4736 +- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
4737 +- CRYPTO_ALG_KERN_DRIVER_ONLY;
4738 ++ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
4739 ++ CRYPTO_ALG_KERN_DRIVER_ONLY);
4740 +
4741 + alg->init = caam_cra_init;
4742 + alg->exit = caam_cra_exit;
4743 +diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
4744 +index 66ae1d5811689..d7622edb31610 100644
4745 +--- a/drivers/crypto/caam/caamalg_qi2.c
4746 ++++ b/drivers/crypto/caam/caamalg_qi2.c
4747 +@@ -19,6 +19,8 @@
4748 + #include <linux/fsl/mc.h>
4749 + #include <soc/fsl/dpaa2-io.h>
4750 + #include <soc/fsl/dpaa2-fd.h>
4751 ++#include <crypto/xts.h>
4752 ++#include <asm/unaligned.h>
4753 +
4754 + #define CAAM_CRA_PRIORITY 2000
4755 +
4756 +@@ -80,6 +82,8 @@ struct caam_ctx {
4757 + struct alginfo adata;
4758 + struct alginfo cdata;
4759 + unsigned int authsize;
4760 ++ bool xts_key_fallback;
4761 ++ struct crypto_skcipher *fallback;
4762 + };
4763 +
4764 + static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
4765 +@@ -1056,12 +1060,21 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
4766 + struct device *dev = ctx->dev;
4767 + struct caam_flc *flc;
4768 + u32 *desc;
4769 ++ int err;
4770 +
4771 +- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
4772 ++ err = xts_verify_key(skcipher, key, keylen);
4773 ++ if (err) {
4774 + dev_dbg(dev, "key size mismatch\n");
4775 +- return -EINVAL;
4776 ++ return err;
4777 + }
4778 +
4779 ++ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
4780 ++ ctx->xts_key_fallback = true;
4781 ++
4782 ++ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
4783 ++ if (err)
4784 ++ return err;
4785 ++
4786 + ctx->cdata.keylen = keylen;
4787 + ctx->cdata.key_virt = key;
4788 + ctx->cdata.key_inline = true;
4789 +@@ -1443,6 +1456,14 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
4790 + skcipher_request_complete(req, ecode);
4791 + }
4792 +
4793 ++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
4794 ++{
4795 ++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
4796 ++ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
4797 ++
4798 ++ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
4799 ++}
4800 ++
4801 + static int skcipher_encrypt(struct skcipher_request *req)
4802 + {
4803 + struct skcipher_edesc *edesc;
4804 +@@ -1451,9 +1472,27 @@ static int skcipher_encrypt(struct skcipher_request *req)
4805 + struct caam_request *caam_req = skcipher_request_ctx(req);
4806 + int ret;
4807 +
4808 +- if (!req->cryptlen)
4809 ++ /*
4810 ++ * XTS is expected to return an error even for input length = 0
4811 ++ * Note that the input length < block size case is caught during
4812 ++ * HW offloading and results in an error.
4813 ++ */
4814 ++ if (!req->cryptlen && !ctx->fallback)
4815 + return 0;
4816 +
4817 ++ if (ctx->fallback && (xts_skcipher_ivsize(req) ||
4818 ++ ctx->xts_key_fallback)) {
4819 ++ skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
4820 ++ skcipher_request_set_callback(&caam_req->fallback_req,
4821 ++ req->base.flags,
4822 ++ req->base.complete,
4823 ++ req->base.data);
4824 ++ skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
4825 ++ req->dst, req->cryptlen, req->iv);
4826 ++
4827 ++ return crypto_skcipher_encrypt(&caam_req->fallback_req);
4828 ++ }
4829 ++
4830 + /* allocate extended descriptor */
4831 + edesc = skcipher_edesc_alloc(req);
4832 + if (IS_ERR(edesc))
4833 +@@ -1482,8 +1521,27 @@ static int skcipher_decrypt(struct skcipher_request *req)
4834 + struct caam_request *caam_req = skcipher_request_ctx(req);
4835 + int ret;
4836 +
4837 +- if (!req->cryptlen)
4838 ++ /*
4839 ++ * XTS is expected to return an error even for input length = 0
4840 ++ * Note that the input length < block size case is caught during
4841 ++ * HW offloading and results in an error.
4842 ++ */
4843 ++ if (!req->cryptlen && !ctx->fallback)
4844 + return 0;
4845 ++
4846 ++ if (ctx->fallback && (xts_skcipher_ivsize(req) ||
4847 ++ ctx->xts_key_fallback)) {
4848 ++ skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
4849 ++ skcipher_request_set_callback(&caam_req->fallback_req,
4850 ++ req->base.flags,
4851 ++ req->base.complete,
4852 ++ req->base.data);
4853 ++ skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
4854 ++ req->dst, req->cryptlen, req->iv);
4855 ++
4856 ++ return crypto_skcipher_decrypt(&caam_req->fallback_req);
4857 ++ }
4858 ++
4859 + /* allocate extended descriptor */
4860 + edesc = skcipher_edesc_alloc(req);
4861 + if (IS_ERR(edesc))
4862 +@@ -1537,9 +1595,34 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
4863 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
4864 + struct caam_skcipher_alg *caam_alg =
4865 + container_of(alg, typeof(*caam_alg), skcipher);
4866 ++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
4867 ++ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
4868 ++ int ret = 0;
4869 ++
4870 ++ if (alg_aai == OP_ALG_AAI_XTS) {
4871 ++ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
4872 ++ struct crypto_skcipher *fallback;
4873 ++
4874 ++ fallback = crypto_alloc_skcipher(tfm_name, 0,
4875 ++ CRYPTO_ALG_NEED_FALLBACK);
4876 ++ if (IS_ERR(fallback)) {
4877 ++ dev_err(ctx->dev, "Failed to allocate %s fallback: %ld\n",
4878 ++ tfm_name, PTR_ERR(fallback));
4879 ++ return PTR_ERR(fallback);
4880 ++ }
4881 +
4882 +- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
4883 +- return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
4884 ++ ctx->fallback = fallback;
4885 ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
4886 ++ crypto_skcipher_reqsize(fallback));
4887 ++ } else {
4888 ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
4889 ++ }
4890 ++
4891 ++ ret = caam_cra_init(ctx, &caam_alg->caam, false);
4892 ++ if (ret && ctx->fallback)
4893 ++ crypto_free_skcipher(ctx->fallback);
4894 ++
4895 ++ return ret;
4896 + }
4897 +
4898 + static int caam_cra_init_aead(struct crypto_aead *tfm)
4899 +@@ -1562,7 +1645,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
4900 +
4901 + static void caam_cra_exit(struct crypto_skcipher *tfm)
4902 + {
4903 +- caam_exit_common(crypto_skcipher_ctx(tfm));
4904 ++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
4905 ++
4906 ++ if (ctx->fallback)
4907 ++ crypto_free_skcipher(ctx->fallback);
4908 ++ caam_exit_common(ctx);
4909 + }
4910 +
4911 + static void caam_cra_exit_aead(struct crypto_aead *tfm)
4912 +@@ -1665,6 +1752,7 @@ static struct caam_skcipher_alg driver_algs[] = {
4913 + .base = {
4914 + .cra_name = "xts(aes)",
4915 + .cra_driver_name = "xts-aes-caam-qi2",
4916 ++ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
4917 + .cra_blocksize = AES_BLOCK_SIZE,
4918 + },
4919 + .setkey = xts_skcipher_setkey,
4920 +@@ -2912,8 +3000,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
4921 + alg->base.cra_module = THIS_MODULE;
4922 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
4923 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
4924 +- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
4925 +- CRYPTO_ALG_KERN_DRIVER_ONLY;
4926 ++ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
4927 ++ CRYPTO_ALG_KERN_DRIVER_ONLY);
4928 +
4929 + alg->init = caam_cra_init_skcipher;
4930 + alg->exit = caam_cra_exit;
4931 +diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
4932 +index f29cb7bd7dd36..d35253407ade4 100644
4933 +--- a/drivers/crypto/caam/caamalg_qi2.h
4934 ++++ b/drivers/crypto/caam/caamalg_qi2.h
4935 +@@ -13,6 +13,7 @@
4936 + #include <linux/netdevice.h>
4937 + #include "dpseci.h"
4938 + #include "desc_constr.h"
4939 ++#include <crypto/skcipher.h>
4940 +
4941 + #define DPAA2_CAAM_STORE_SIZE 16
4942 + /* NAPI weight *must* be a multiple of the store size. */
4943 +@@ -186,6 +187,7 @@ struct caam_request {
4944 + void (*cbk)(void *ctx, u32 err);
4945 + void *ctx;
4946 + void *edesc;
4947 ++ struct skcipher_request fallback_req;
4948 + };
4949 +
4950 + /**
4951 +diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
4952 +index bd270e66185e9..40869ea1ed20f 100644
4953 +--- a/drivers/crypto/ccp/ccp-ops.c
4954 ++++ b/drivers/crypto/ccp/ccp-ops.c
4955 +@@ -1744,7 +1744,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
4956 + break;
4957 + default:
4958 + ret = -EINVAL;
4959 +- goto e_ctx;
4960 ++ goto e_data;
4961 + }
4962 + } else {
4963 + /* Stash the context */
4964 +diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
4965 +index d39e1664fc7ed..3c65bf070c908 100644
4966 +--- a/drivers/crypto/ccree/cc_pm.c
4967 ++++ b/drivers/crypto/ccree/cc_pm.c
4968 +@@ -65,8 +65,12 @@ const struct dev_pm_ops ccree_pm = {
4969 + int cc_pm_get(struct device *dev)
4970 + {
4971 + int rc = pm_runtime_get_sync(dev);
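++	/* pm_runtime_get_sync() bumps the usage count even on failure; drop it */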
4972 ++ if (rc < 0) {
4973 ++ pm_runtime_put_noidle(dev);
4974 ++ return rc;
4975 ++ }
4976 +
4977 +- return (rc == 1 ? 0 : rc);
4978 ++ return 0;
4979 + }
4980 +
4981 + void cc_pm_put_suspend(struct device *dev)
4982 +diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
4983 +index 05520dccd9065..ec4f79049a061 100644
4984 +--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
4985 ++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
4986 +@@ -92,11 +92,13 @@ static void chtls_sock_release(struct kref *ref)
4987 + static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
4988 + struct sock *sk)
4989 + {
4990 ++ struct adapter *adap = pci_get_drvdata(cdev->pdev);
4991 + struct net_device *ndev = cdev->ports[0];
4992 + #if IS_ENABLED(CONFIG_IPV6)
4993 + struct net_device *temp;
4994 + int addr_type;
4995 + #endif
4996 ++ int i;
4997 +
4998 + switch (sk->sk_family) {
4999 + case PF_INET:
5000 +@@ -127,8 +129,12 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
5001 + return NULL;
5002 +
5003 + if (is_vlan_dev(ndev))
5004 +- return vlan_dev_real_dev(ndev);
5005 +- return ndev;
5006 ++ ndev = vlan_dev_real_dev(ndev);
5007 ++
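++	/* Only hand the device back if it is one of this adapter's ports. */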
5008 ++ for_each_port(adap, i)
5009 ++ if (cdev->ports[i] == ndev)
5010 ++ return ndev;
5011 ++ return NULL;
5012 + }
5013 +
5014 + static void assign_rxopt(struct sock *sk, unsigned int opt)
5015 +@@ -477,7 +483,6 @@ void chtls_destroy_sock(struct sock *sk)
5016 + chtls_purge_write_queue(sk);
5017 + free_tls_keyid(sk);
5018 + kref_put(&csk->kref, chtls_sock_release);
5019 +- csk->cdev = NULL;
5020 + if (sk->sk_family == AF_INET)
5021 + sk->sk_prot = &tcp_prot;
5022 + #if IS_ENABLED(CONFIG_IPV6)
5023 +@@ -736,14 +741,13 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
5024 +
5025 + #if IS_ENABLED(CONFIG_IPV6)
5026 + if (sk->sk_family == PF_INET6) {
5027 +- struct chtls_sock *csk;
5028 ++ struct net_device *ndev = chtls_find_netdev(cdev, sk);
5029 + int addr_type = 0;
5030 +
5031 +- csk = rcu_dereference_sk_user_data(sk);
5032 + addr_type = ipv6_addr_type((const struct in6_addr *)
5033 + &sk->sk_v6_rcv_saddr);
5034 + if (addr_type != IPV6_ADDR_ANY)
5035 +- cxgb4_clip_release(csk->egress_dev, (const u32 *)
5036 ++ cxgb4_clip_release(ndev, (const u32 *)
5037 + &sk->sk_v6_rcv_saddr, 1);
5038 + }
5039 + #endif
5040 +@@ -1157,6 +1161,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
5041 + ndev = n->dev;
5042 + if (!ndev)
5043 + goto free_dst;
5044 ++ if (is_vlan_dev(ndev))
5045 ++ ndev = vlan_dev_real_dev(ndev);
5046 ++
5047 + port_id = cxgb4_port_idx(ndev);
5048 +
5049 + csk = chtls_sock_create(cdev);
5050 +diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
5051 +index 2e9acae1cba3b..9fb5ca6682ea2 100644
5052 +--- a/drivers/crypto/chelsio/chtls/chtls_io.c
5053 ++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
5054 +@@ -902,9 +902,9 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk,
5055 + return 0;
5056 + }
5057 +
5058 +-static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
5059 ++static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
5060 + {
5061 +- return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
5062 ++ return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0);
5063 + }
5064 +
5065 + static int csk_wait_memory(struct chtls_dev *cdev,
5066 +@@ -1240,6 +1240,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
5067 + copied = 0;
5068 + csk = rcu_dereference_sk_user_data(sk);
5069 + cdev = csk->cdev;
5070 ++ lock_sock(sk);
5071 + timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
5072 +
5073 + err = sk_stream_wait_connect(sk, &timeo);
5074 +diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
5075 +index 497969ae8b230..b9973d152a24a 100644
5076 +--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
5077 ++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
5078 +@@ -342,11 +342,14 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
5079 + ret = sec_alloc_pbuf_resource(dev, res);
5080 + if (ret) {
5081 + dev_err(dev, "fail to alloc pbuf dma resource!\n");
5082 +- goto alloc_fail;
5083 ++ goto alloc_pbuf_fail;
5084 + }
5085 + }
5086 +
5087 + return 0;
5088 ++alloc_pbuf_fail:
5089 ++ if (ctx->alg_type == SEC_AEAD)
5090 ++ sec_free_mac_resource(dev, qp_ctx->res);
5091 + alloc_fail:
5092 + sec_free_civ_resource(dev, res);
5093 +
5094 +@@ -457,8 +460,10 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
5095 + ctx->fake_req_limit = QM_Q_DEPTH >> 1;
5096 + ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
5097 + GFP_KERNEL);
5098 +- if (!ctx->qp_ctx)
5099 +- return -ENOMEM;
5100 ++ if (!ctx->qp_ctx) {
5101 ++ ret = -ENOMEM;
5102 ++ goto err_destroy_qps;
5103 ++ }
5104 +
5105 + for (i = 0; i < sec->ctx_q_num; i++) {
5106 + ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
5107 +@@ -467,12 +472,15 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
5108 + }
5109 +
5110 + return 0;
5111 ++
5112 + err_sec_release_qp_ctx:
5113 + for (i = i - 1; i >= 0; i--)
5114 + sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
5115 +
5116 +- sec_destroy_qps(ctx->qps, sec->ctx_q_num);
5117 + kfree(ctx->qp_ctx);
5118 ++err_destroy_qps:
5119 ++ sec_destroy_qps(ctx->qps, sec->ctx_q_num);
5120 ++
5121 + return ret;
5122 + }
5123 +
5124 +diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
5125 +index f478bb0a566af..276012e7c482f 100644
5126 +--- a/drivers/crypto/ixp4xx_crypto.c
5127 ++++ b/drivers/crypto/ixp4xx_crypto.c
5128 +@@ -528,7 +528,7 @@ static void release_ixp_crypto(struct device *dev)
5129 +
5130 + if (crypt_virt) {
5131 + dma_free_coherent(dev,
5132 +- NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
5133 ++ NPE_QLEN * sizeof(struct crypt_ctl),
5134 + crypt_virt, crypt_phys);
5135 + }
5136 + }
5137 +diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
5138 +index 7e3ad085b5bdd..efce3a83b35a8 100644
5139 +--- a/drivers/crypto/mediatek/mtk-platform.c
5140 ++++ b/drivers/crypto/mediatek/mtk-platform.c
5141 +@@ -442,7 +442,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
5142 + static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
5143 + {
5144 + struct mtk_ring **ring = cryp->ring;
5145 +- int i, err = ENOMEM;
5146 ++ int i;
5147 +
5148 + for (i = 0; i < MTK_RING_MAX; i++) {
5149 + ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
5150 +@@ -469,14 +469,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
5151 + return 0;
5152 +
5153 + err_cleanup:
5154 +- for (; i--; ) {
5155 ++ do {
5156 + dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
5157 + ring[i]->res_base, ring[i]->res_dma);
5158 + dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
5159 + ring[i]->cmd_base, ring[i]->cmd_dma);
5160 + kfree(ring[i]);
5161 +- }
5162 +- return err;
5163 ++ } while (i--);
5164 ++ return -ENOMEM;
5165 + }
5166 +
5167 + static int mtk_crypto_probe(struct platform_device *pdev)
5168 +diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
5169 +index 954d703f29811..89ed055f21bf4 100644
5170 +--- a/drivers/crypto/omap-sham.c
5171 ++++ b/drivers/crypto/omap-sham.c
5172 +@@ -456,6 +456,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
5173 + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
5174 + u32 val, mask;
5175 +
5176 ++ if (likely(ctx->digcnt))
5177 ++ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
5178 ++
5179 + /*
5180 + * Setting ALGO_CONST only for the first iteration and
5181 + * CLOSE_HASH only for the last one. Note that flags mode bits
5182 +diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
5183 +index dac6eb37fff93..fb34bf92861d1 100644
5184 +--- a/drivers/crypto/picoxcell_crypto.c
5185 ++++ b/drivers/crypto/picoxcell_crypto.c
5186 +@@ -1685,11 +1685,6 @@ static int spacc_probe(struct platform_device *pdev)
5187 + goto err_clk_put;
5188 + }
5189 +
5190 +- ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
5191 +- if (ret)
5192 +- goto err_clk_disable;
5193 +-
5194 +-
5195 + /*
5196 + * Use an IRQ threshold of 50% as a default. This seems to be a
5197 + * reasonable trade off of latency against throughput but can be
5198 +@@ -1697,6 +1692,10 @@ static int spacc_probe(struct platform_device *pdev)
5199 + */
5200 + engine->stat_irq_thresh = (engine->fifo_sz / 2);
5201 +
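++	/* Create the sysfs attribute only once the threshold holds a valid value. */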
5202 ++ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
5203 ++ if (ret)
5204 ++ goto err_clk_disable;
5205 ++
5206 + /*
5207 + * Configure the interrupts. We only use the STAT_CNT interrupt as we
5208 + * only submit a new packet for processing when we complete another in
5209 +diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
5210 +index 5bc099052bd20..039579b7cc818 100644
5211 +--- a/drivers/crypto/sa2ul.c
5212 ++++ b/drivers/crypto/sa2ul.c
5213 +@@ -1148,12 +1148,10 @@ static int sa_run(struct sa_req *req)
5214 + ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
5215 + &split_size, &dst, &dst_nents,
5216 + gfp_flags);
5217 +- if (ret) {
5218 +- dst_nents = dst_nents;
5219 ++ if (ret)
5220 + dst = req->dst;
5221 +- } else {
5222 ++ else
5223 + rxd->split_dst_sg = dst;
5224 +- }
5225 + }
5226 + }
5227 +
5228 +@@ -2333,7 +2331,7 @@ static int sa_ul_probe(struct platform_device *pdev)
5229 +
5230 + pm_runtime_enable(dev);
5231 + ret = pm_runtime_get_sync(dev);
5232 +- if (ret) {
5233 ++ if (ret < 0) {
5234 + dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
5235 + ret);
5236 + return ret;
5237 +diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
5238 +index 4ef3eb11361c2..4a4c3284ae1f3 100644
5239 +--- a/drivers/crypto/stm32/Kconfig
5240 ++++ b/drivers/crypto/stm32/Kconfig
5241 +@@ -3,6 +3,7 @@ config CRYPTO_DEV_STM32_CRC
5242 + tristate "Support for STM32 crc accelerators"
5243 + depends on ARCH_STM32
5244 + select CRYPTO_HASH
5245 ++ select CRC32
5246 + help
5247 + This enables support for the CRC32 hw accelerator which can be found
5248 + on STMicroelectronics STM32 SOC.
5249 +diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
5250 +index 3ba41148c2a46..2c13f5214d2cf 100644
5251 +--- a/drivers/crypto/stm32/stm32-crc32.c
5252 ++++ b/drivers/crypto/stm32/stm32-crc32.c
5253 +@@ -6,6 +6,7 @@
5254 +
5255 + #include <linux/bitrev.h>
5256 + #include <linux/clk.h>
5257 ++#include <linux/crc32.h>
5258 + #include <linux/crc32poly.h>
5259 + #include <linux/module.h>
5260 + #include <linux/mod_devicetable.h>
5261 +@@ -147,7 +148,6 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
5262 + struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
5263 + struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
5264 + struct stm32_crc *crc;
5265 +- unsigned long flags;
5266 +
5267 + crc = stm32_crc_get_next_crc();
5268 + if (!crc)
5269 +@@ -155,7 +155,15 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
5270 +
5271 + pm_runtime_get_sync(crc->dev);
5272 +
5273 +- spin_lock_irqsave(&crc->lock, flags);
5274 ++ if (!spin_trylock(&crc->lock)) {
5275 ++ /* Hardware is busy, calculate crc32 in software */
5276 ++ if (mctx->poly == CRC32_POLY_LE)
5277 ++ ctx->partial = crc32_le(ctx->partial, d8, length);
5278 ++ else
5279 ++ ctx->partial = __crc32c_le(ctx->partial, d8, length);
5280 ++
5281 ++ goto pm_out;
5282 ++ }
5283 +
5284 + /*
5285 + * Restore previously calculated CRC for this context as init value
5286 +@@ -195,8 +203,9 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
5287 + /* Store partial result */
5288 + ctx->partial = readl_relaxed(crc->regs + CRC_DR);
5289 +
5290 +- spin_unlock_irqrestore(&crc->lock, flags);
5291 ++ spin_unlock(&crc->lock);
5292 +
5293 ++pm_out:
5294 + pm_runtime_mark_last_busy(crc->dev);
5295 + pm_runtime_put_autosuspend(crc->dev);
5296 +
5297 +diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
5298 +index a819611b8892c..146c3f39f576b 100644
5299 +--- a/drivers/dma/dmatest.c
5300 ++++ b/drivers/dma/dmatest.c
5301 +@@ -1249,15 +1249,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
5302 + add_threaded_test(info);
5303 +
5304 + /* Check if channel was added successfully */
5305 +- dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
5306 +-
5307 +- if (dtc->chan) {
5308 ++ if (!list_empty(&info->channels)) {
5309 + /*
5310 + * if new channel was not successfully added, revert the
5311 + * "test_channel" string to the name of the last successfully
5312 + * added channel. exception for when users issues empty string
5313 + * to channel parameter.
5314 + */
5315 ++ dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
5316 + if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
5317 + && (strcmp("", strim(test_channel)) != 0)) {
5318 + ret = -EINVAL;
5319 +diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
5320 +index 4700f2e87a627..d9333ee14527e 100644
5321 +--- a/drivers/dma/dw/core.c
5322 ++++ b/drivers/dma/dw/core.c
5323 +@@ -772,6 +772,10 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
5324 + if (dws->dma_dev != chan->device->dev)
5325 + return false;
5326 +
5327 ++ /* permit channels in accordance with the channels mask */
5328 ++ if (dws->channels && !(dws->channels & dwc->mask))
5329 ++ return false;
5330 ++
5331 + /* We have to copy data since dws can be temporary storage */
5332 + memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
5333 +
5334 +diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c
5335 +index 7a085b3c1854c..d9810980920a1 100644
5336 +--- a/drivers/dma/dw/dw.c
5337 ++++ b/drivers/dma/dw/dw.c
5338 +@@ -14,7 +14,7 @@
5339 + static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
5340 + {
5341 + struct dw_dma *dw = to_dw_dma(dwc->chan.device);
5342 +- u32 cfghi = DWC_CFGH_FIFO_MODE;
5343 ++ u32 cfghi = is_slave_direction(dwc->direction) ? 0 : DWC_CFGH_FIFO_MODE;
5344 + u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
5345 + bool hs_polarity = dwc->dws.hs_polarity;
5346 +
5347 +diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c
5348 +index 1474b3817ef4f..c1cf7675b9d10 100644
5349 +--- a/drivers/dma/dw/of.c
5350 ++++ b/drivers/dma/dw/of.c
5351 +@@ -22,18 +22,21 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
5352 + };
5353 + dma_cap_mask_t cap;
5354 +
5355 +- if (dma_spec->args_count != 3)
5356 ++ if (dma_spec->args_count < 3 || dma_spec->args_count > 4)
5357 + return NULL;
5358 +
5359 + slave.src_id = dma_spec->args[0];
5360 + slave.dst_id = dma_spec->args[0];
5361 + slave.m_master = dma_spec->args[1];
5362 + slave.p_master = dma_spec->args[2];
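++	/* An optional fourth cell carries a bit mask of usable channels. */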
5363 ++ if (dma_spec->args_count >= 4)
5364 ++ slave.channels = dma_spec->args[3];
5365 +
5366 + if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
5367 + slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
5368 + slave.m_master >= dw->pdata->nr_masters ||
5369 +- slave.p_master >= dw->pdata->nr_masters))
5370 ++ slave.p_master >= dw->pdata->nr_masters ||
5371 ++ slave.channels >= BIT(dw->pdata->nr_channels)))
5372 + return NULL;
5373 +
5374 + dma_cap_zero(cap);
5375 +diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
5376 +index a814b200299bf..07296171e2bbc 100644
5377 +--- a/drivers/dma/ioat/dma.c
5378 ++++ b/drivers/dma/ioat/dma.c
5379 +@@ -389,7 +389,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
5380 + struct ioat_descs *descs = &ioat_chan->descs[i];
5381 +
5382 + descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
5383 +- SZ_2M, &descs->hw, flags);
5384 ++ IOAT_CHUNK_SIZE, &descs->hw, flags);
5385 + if (!descs->virt) {
5386 + int idx;
5387 +
5388 +diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
5389 +index 3a5d33ea5ebe7..a367584f0d7b3 100644
5390 +--- a/drivers/dma/ti/k3-udma-glue.c
5391 ++++ b/drivers/dma/ti/k3-udma-glue.c
5392 +@@ -378,17 +378,11 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
5393 +
5394 + int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
5395 + {
5396 +- u32 txrt_ctl;
5397 +-
5398 +- txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
5399 + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
5400 +- txrt_ctl);
5401 ++ UDMA_PEER_RT_EN_ENABLE);
5402 +
5403 +- txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
5404 +- UDMA_CHAN_RT_CTL_REG);
5405 +- txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
5406 + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
5407 +- txrt_ctl);
5408 ++ UDMA_CHAN_RT_CTL_EN);
5409 +
5410 + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
5411 + return 0;
5412 +@@ -579,8 +573,8 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
5413 +
5414 + /* request and cfg rings */
5415 + ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
5416 +- flow_cfg->ring_rxq_id,
5417 + flow_cfg->ring_rxfdq0_id,
5418 ++ flow_cfg->ring_rxq_id,
5419 + &flow->ringrxfdq,
5420 + &flow->ringrx);
5421 + if (ret) {
5422 +@@ -1058,19 +1052,14 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
5423 +
5424 + int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
5425 + {
5426 +- u32 rxrt_ctl;
5427 +-
5428 + if (rx_chn->remote)
5429 + return -EINVAL;
5430 +
5431 + if (rx_chn->flows_ready < rx_chn->flow_num)
5432 + return -EINVAL;
5433 +
5434 +- rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
5435 +- UDMA_CHAN_RT_CTL_REG);
5436 +- rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
5437 + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
5438 +- rxrt_ctl);
5439 ++ UDMA_CHAN_RT_CTL_EN);
5440 +
5441 + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
5442 + UDMA_PEER_RT_EN_ENABLE);
5443 +diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
5444 +index b194658b8b5c9..fbec28dc661d7 100644
5445 +--- a/drivers/edac/aspeed_edac.c
5446 ++++ b/drivers/edac/aspeed_edac.c
5447 +@@ -209,8 +209,8 @@ static int config_irq(void *ctx, struct platform_device *pdev)
5448 + /* register interrupt handler */
5449 + irq = platform_get_irq(pdev, 0);
5450 + dev_dbg(&pdev->dev, "got irq %d\n", irq);
5451 +- if (!irq)
5452 +- return -ENODEV;
5453 ++ if (irq < 0)
5454 ++ return irq;
5455 +
5456 + rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
5457 + DRV_NAME, ctx);
5458 +diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
5459 +index 191aa7c19ded7..324a46b8479b0 100644
5460 +--- a/drivers/edac/i5100_edac.c
5461 ++++ b/drivers/edac/i5100_edac.c
5462 +@@ -1061,16 +1061,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
5463 + PCI_DEVICE_ID_INTEL_5100_19, 0);
5464 + if (!einj) {
5465 + ret = -ENODEV;
5466 +- goto bail_einj;
5467 ++ goto bail_mc_free;
5468 + }
5469 +
5470 + rc = pci_enable_device(einj);
5471 + if (rc < 0) {
5472 + ret = rc;
5473 +- goto bail_disable_einj;
5474 ++ goto bail_einj;
5475 + }
5476 +
5477 +-
5478 + mci->pdev = &pdev->dev;
5479 +
5480 + priv = mci->pvt_info;
5481 +@@ -1136,14 +1135,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
5482 + bail_scrub:
5483 + priv->scrub_enable = 0;
5484 + cancel_delayed_work_sync(&(priv->i5100_scrubbing));
5485 +- edac_mc_free(mci);
5486 +-
5487 +-bail_disable_einj:
5488 + pci_disable_device(einj);
5489 +
5490 + bail_einj:
5491 + pci_dev_put(einj);
5492 +
5493 ++bail_mc_free:
5494 ++ edac_mc_free(mci);
5495 ++
5496 + bail_disable_ch1:
5497 + pci_disable_device(ch1mm);
5498 +
5499 +diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
5500 +index 8be3e89a510e4..d7419a90a2f5b 100644
5501 +--- a/drivers/edac/ti_edac.c
5502 ++++ b/drivers/edac/ti_edac.c
5503 +@@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev)
5504 +
5505 + /* add EMIF ECC error handler */
5506 + error_irq = platform_get_irq(pdev, 0);
5507 +- if (!error_irq) {
5508 ++ if (error_irq < 0) {
5509 ++ ret = error_irq;
5510 + edac_printk(KERN_ERR, EDAC_MOD_NAME,
5511 + "EMIF irq number not defined.\n");
5512 + goto err;
5513 +diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
5514 +index 6998dc86b5ce8..b797a713c3313 100644
5515 +--- a/drivers/firmware/arm_scmi/mailbox.c
5516 ++++ b/drivers/firmware/arm_scmi/mailbox.c
5517 +@@ -110,7 +110,7 @@ static int mailbox_chan_free(int id, void *p, void *data)
5518 + struct scmi_chan_info *cinfo = p;
5519 + struct scmi_mailbox *smbox = cinfo->transport_info;
5520 +
5521 +- if (!IS_ERR(smbox->chan)) {
5522 ++ if (smbox && !IS_ERR(smbox->chan)) {
5523 + mbox_free_channel(smbox->chan);
5524 + cinfo->transport_info = NULL;
5525 + smbox->chan = NULL;
5526 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
5527 +index 913c8f0513bd3..5b7dc1d1b44c7 100644
5528 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
5529 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
5530 +@@ -806,8 +806,8 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
5531 + }
5532 + adev->atif = atif;
5533 +
5534 +- if (atif->notifications.brightness_change) {
5535 + #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5536 ++ if (atif->notifications.brightness_change) {
5537 + if (amdgpu_device_has_dc_support(adev)) {
5538 + #if defined(CONFIG_DRM_AMD_DC)
5539 + struct amdgpu_display_manager *dm = &adev->dm;
5540 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
5541 +index 71e005cf29522..479735c448478 100644
5542 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
5543 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
5544 +@@ -1691,13 +1691,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
5545 + uint64_t max_entries;
5546 + uint64_t addr, last;
5547 +
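++	/* Never map more entries than the mapping actually covers. */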
5548 ++ max_entries = mapping->last - start + 1;
5549 + if (nodes) {
5550 + addr = nodes->start << PAGE_SHIFT;
5551 +- max_entries = (nodes->size - pfn) *
5552 +- AMDGPU_GPU_PAGES_IN_CPU_PAGE;
5553 ++ max_entries = min((nodes->size - pfn) *
5554 ++ AMDGPU_GPU_PAGES_IN_CPU_PAGE, max_entries);
5555 + } else {
5556 + addr = 0;
5557 +- max_entries = S64_MAX;
5558 + }
5559 +
5560 + if (pages_addr) {
5561 +@@ -1727,7 +1727,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
5562 + addr += pfn << PAGE_SHIFT;
5563 + }
5564 +
5565 +- last = min((uint64_t)mapping->last, start + max_entries - 1);
5566 ++ last = start + max_entries - 1;
5567 + r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
5568 + start, last, flags, addr,
5569 + dma_addr, fence);
5570 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
5571 +index a717a4904268e..5474f7e4c75b1 100644
5572 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
5573 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
5574 +@@ -8217,8 +8217,7 @@ static int dm_update_plane_state(struct dc *dc,
5575 + dm_old_plane_state->dc_state,
5576 + dm_state->context)) {
5577 +
5578 +- ret = EINVAL;
5579 +- return ret;
5580 ++ return -EINVAL;
5581 + }
5582 +
5583 +
5584 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
5585 +index 92eb1ca1634fc..95ec8ae5a7739 100644
5586 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
5587 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
5588 +@@ -2295,6 +2295,7 @@ static void commit_planes_for_stream(struct dc *dc,
5589 + enum surface_update_type update_type,
5590 + struct dc_state *context)
5591 + {
5592 ++ bool mpcc_disconnected = false;
5593 + int i, j;
5594 + struct pipe_ctx *top_pipe_to_program = NULL;
5595 +
5596 +@@ -2325,6 +2326,15 @@ static void commit_planes_for_stream(struct dc *dc,
5597 + context_clock_trace(dc, context);
5598 + }
5599 +
5600 ++ if (update_type != UPDATE_TYPE_FAST && dc->hwss.interdependent_update_lock &&
5601 ++ dc->hwss.disconnect_pipes && dc->hwss.wait_for_pending_cleared) {
5602 ++ dc->hwss.interdependent_update_lock(dc, context, true);
5603 ++ mpcc_disconnected = dc->hwss.disconnect_pipes(dc, context);
5604 ++ dc->hwss.interdependent_update_lock(dc, context, false);
5605 ++ if (mpcc_disconnected)
5606 ++ dc->hwss.wait_for_pending_cleared(dc, context);
5607 ++ }
5608 ++
5609 + for (j = 0; j < dc->res_pool->pipe_count; j++) {
5610 + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
5611 +
5612 +@@ -2621,7 +2631,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
5613 +
5614 + copy_stream_update_to_stream(dc, context, stream, stream_update);
5615 +
5616 +- if (update_type > UPDATE_TYPE_FAST) {
5617 ++ if (update_type >= UPDATE_TYPE_FULL) {
5618 + if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
5619 + DC_ERROR("Mode validation failed for stream update!\n");
5620 + dc_release_state(context);
5621 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
5622 +index 43781e77be431..f9456ff6845b6 100644
5623 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
5624 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
5625 +@@ -75,7 +75,7 @@ static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *d
5626 + else
5627 + bl_pwm &= 0xFFFF;
5628 +
5629 +- current_backlight = bl_pwm << (1 + bl_int_count);
5630 ++ current_backlight = (uint64_t)bl_pwm << (1 + bl_int_count);
5631 +
5632 + if (bl_period == 0)
5633 + bl_period = 0xFFFF;
5634 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
5635 +index fa643ec5a8760..4bbfd8a26a606 100644
5636 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
5637 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
5638 +@@ -2769,6 +2769,152 @@ static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
5639 + return NULL;
5640 + }
5641 +
5642 ++bool dcn10_disconnect_pipes(
5643 ++ struct dc *dc,
5644 ++ struct dc_state *context)
5645 ++{
5646 ++ bool found_stream = false;
5647 ++ int i, j;
5648 ++ struct dce_hwseq *hws = dc->hwseq;
5649 ++ struct dc_state *old_ctx = dc->current_state;
5650 ++ bool mpcc_disconnected = false;
5651 ++ struct pipe_ctx *old_pipe;
5652 ++ struct pipe_ctx *new_pipe;
5653 ++ DC_LOGGER_INIT(dc->ctx->logger);
5654 ++
5655 ++ /* Set pipe update flags and lock pipes */
5656 ++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
5657 ++ old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5658 ++ new_pipe = &context->res_ctx.pipe_ctx[i];
5659 ++ new_pipe->update_flags.raw = 0;
5660 ++
5661 ++ if (!old_pipe->plane_state && !new_pipe->plane_state)
5662 ++ continue;
5663 ++
5664 ++ if (old_pipe->plane_state && !new_pipe->plane_state)
5665 ++ new_pipe->update_flags.bits.disable = 1;
5666 ++
5667 ++ /* Check for scl update */
5668 ++ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
5669 ++ new_pipe->update_flags.bits.scaler = 1;
5670 ++
5671 ++ /* Check for vp update */
5672 ++ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
5673 ++ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
5674 ++ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
5675 ++ new_pipe->update_flags.bits.viewport = 1;
5676 ++
5677 ++ }
5678 ++
5679 ++ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
5680 ++ /* Disconnect mpcc here only if losing pipe split*/
5681 ++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
5682 ++ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable &&
5683 ++ old_ctx->res_ctx.pipe_ctx[i].top_pipe) {
5684 ++
5685 ++ /* Find the top pipe in the new ctx for the bottom pipe that we
5686 ++ * want to remove by comparing the streams. If both pipes are being
5687 ++ * disabled then do it in the regular pipe programming sequence
5688 ++ */
5689 ++ for (j = 0; j < dc->res_pool->pipe_count; j++) {
5690 ++ if (old_ctx->res_ctx.pipe_ctx[i].top_pipe->stream == context->res_ctx.pipe_ctx[j].stream &&
5691 ++ !context->res_ctx.pipe_ctx[j].top_pipe &&
5692 ++ !context->res_ctx.pipe_ctx[j].update_flags.bits.disable) {
5693 ++ found_stream = true;
5694 ++ break;
5695 ++ }
5696 ++ }
5697 ++
4698 ++ // Disconnect if the top pipe lost its pipe split
5699 ++ if (found_stream && !context->res_ctx.pipe_ctx[j].bottom_pipe) {
5700 ++ hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
5701 ++ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
5702 ++ mpcc_disconnected = true;
5703 ++ }
5704 ++ }
5705 ++ found_stream = false;
5706 ++ }
5707 ++ }
5708 ++
5709 ++ if (mpcc_disconnected) {
5710 ++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
5711 ++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
5712 ++ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5713 ++ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
5714 ++ struct hubp *hubp = pipe_ctx->plane_res.hubp;
5715 ++
5716 ++ if (!pipe_ctx || !plane_state || !pipe_ctx->stream)
5717 ++ continue;
5718 ++
5719 ++ // Only update scaler and viewport here if we lose a pipe split.
5720 ++ // This is to prevent half the screen from being black when we
5721 ++ // unlock after disconnecting MPCC.
5722 ++ if (!(old_pipe && !pipe_ctx->top_pipe &&
5723 ++ !pipe_ctx->bottom_pipe && old_pipe->bottom_pipe))
5724 ++ continue;
5725 ++
5726 ++ if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) {
5727 ++ if (pipe_ctx->update_flags.bits.scaler ||
5728 ++ plane_state->update_flags.bits.scaling_change ||
5729 ++ plane_state->update_flags.bits.position_change ||
5730 ++ plane_state->update_flags.bits.per_pixel_alpha_change ||
5731 ++ pipe_ctx->stream->update_flags.bits.scaling) {
5732 ++
5733 ++ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
5734 ++ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
5735 ++ /* scaler configuration */
5736 ++ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
5737 ++ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
5738 ++ }
5739 ++
5740 ++ if (pipe_ctx->update_flags.bits.viewport ||
5741 ++ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
5742 ++ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
5743 ++ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
5744 ++
5745 ++ hubp->funcs->mem_program_viewport(
5746 ++ hubp,
5747 ++ &pipe_ctx->plane_res.scl_data.viewport,
5748 ++ &pipe_ctx->plane_res.scl_data.viewport_c);
5749 ++ }
5750 ++ }
5751 ++ }
5752 ++ }
5753 ++ return mpcc_disconnected;
5754 ++}
5755 ++
5756 ++void dcn10_wait_for_pending_cleared(struct dc *dc,
5757 ++ struct dc_state *context)
5758 ++{
5759 ++ struct pipe_ctx *pipe_ctx;
5760 ++ struct timing_generator *tg;
5761 ++ int i;
5762 ++
5763 ++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
5764 ++ pipe_ctx = &context->res_ctx.pipe_ctx[i];
5765 ++ tg = pipe_ctx->stream_res.tg;
5766 ++
5767 ++ /*
4768 ++ * Only wait for top pipe's tg pending bit
5769 ++ * Also skip if pipe is disabled.
5770 ++ */
5771 ++ if (pipe_ctx->top_pipe ||
5772 ++ !pipe_ctx->stream || !pipe_ctx->plane_state ||
5773 ++ !tg->funcs->is_tg_enabled(tg))
5774 ++ continue;
5775 ++
5776 ++ /*
5777 ++ * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
5778 ++ * For some reason waiting for OTG_UPDATE_PENDING cleared
5779 ++ * seems to not trigger the update right away, and if we
4780 ++ * lock again before VUPDATE then we don't get a separate
5781 ++ * operation.
5782 ++ */
5783 ++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
5784 ++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
5785 ++ }
5786 ++}
5787 ++
5788 + void dcn10_apply_ctx_for_surface(
5789 + struct dc *dc,
5790 + const struct dc_stream_state *stream,
5791 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
5792 +index 6d891166da8a4..e5691e4990231 100644
5793 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
5794 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
5795 +@@ -194,6 +194,12 @@ void dcn10_get_surface_visual_confirm_color(
5796 + void dcn10_get_hdr_visual_confirm_color(
5797 + struct pipe_ctx *pipe_ctx,
5798 + struct tg_color *color);
5799 ++bool dcn10_disconnect_pipes(
5800 ++ struct dc *dc,
5801 ++ struct dc_state *context);
5802 ++
5803 ++void dcn10_wait_for_pending_cleared(struct dc *dc,
5804 ++ struct dc_state *context);
5805 + void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
5806 + void dcn10_verify_allow_pstate_change_high(struct dc *dc);
5807 +
5808 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
5809 +index 5c98b71c1d47a..a1d1559bb5d73 100644
5810 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
5811 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
5812 +@@ -34,6 +34,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
5813 + .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
5814 + .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
5815 + .post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
5816 ++ .disconnect_pipes = dcn10_disconnect_pipes,
5817 ++ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
5818 + .update_plane_addr = dcn10_update_plane_addr,
5819 + .update_dchub = dcn10_update_dchub,
5820 + .update_pending_status = dcn10_update_pending_status,
5821 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
5822 +index 3dde6f26de474..966e1790b9bfd 100644
5823 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
5824 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
5825 +@@ -34,6 +34,8 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
5826 + .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
5827 + .apply_ctx_for_surface = NULL,
5828 + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
5829 ++ .disconnect_pipes = dcn10_disconnect_pipes,
5830 ++ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
5831 + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
5832 + .update_plane_addr = dcn20_update_plane_addr,
5833 + .update_dchub = dcn10_update_dchub,
5834 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
5835 +index f31f48dd0da29..aaf9a99f9f045 100644
5836 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
5837 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
5838 +@@ -3209,6 +3209,9 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
5839 + context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
5840 + dc->debug.enable_dram_clock_change_one_display_vactive;
5841 +
5842 ++ /* Unsafe due to current pipe merge and split logic */
5843 ++ ASSERT(context != dc->current_state);
5844 ++
5845 + if (fast_validate) {
5846 + return dcn20_validate_bandwidth_internal(dc, context, true);
5847 + }
5848 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
5849 +index b187f71afa652..2ba880c3943c3 100644
5850 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
5851 ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
5852 +@@ -35,6 +35,8 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
5853 + .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
5854 + .apply_ctx_for_surface = NULL,
5855 + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
5856 ++ .disconnect_pipes = dcn10_disconnect_pipes,
5857 ++ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
5858 + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
5859 + .update_plane_addr = dcn20_update_plane_addr,
5860 + .update_dchub = dcn10_update_dchub,
5861 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
5862 +index 88d41a385add8..a4f37d83d5cc9 100644
5863 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
5864 ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
5865 +@@ -1184,6 +1184,9 @@ bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
5866 +
5867 + BW_VAL_TRACE_COUNT();
5868 +
5869 ++ /* Unsafe due to current pipe merge and split logic */
5870 ++ ASSERT(context != dc->current_state);
5871 ++
5872 + out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel);
5873 +
5874 + if (pipe_cnt == 0)
5875 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
5876 +index 9afee71604902..19daa456e3bfe 100644
5877 +--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
5878 ++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
5879 +@@ -35,6 +35,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
5880 + .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
5881 + .apply_ctx_for_surface = NULL,
5882 + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
5883 ++ .disconnect_pipes = dcn10_disconnect_pipes,
5884 ++ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
5885 + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
5886 + .update_plane_addr = dcn20_update_plane_addr,
5887 + .update_dchub = dcn10_update_dchub,
5888 +diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
5889 +index 3c986717dcd56..64c1be818b0e8 100644
5890 +--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
5891 ++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
5892 +@@ -67,6 +67,10 @@ struct hw_sequencer_funcs {
5893 + int num_planes, struct dc_state *context);
5894 + void (*program_front_end_for_ctx)(struct dc *dc,
5895 + struct dc_state *context);
5896 ++ bool (*disconnect_pipes)(struct dc *dc,
5897 ++ struct dc_state *context);
5898 ++ void (*wait_for_pending_cleared)(struct dc *dc,
5899 ++ struct dc_state *context);
5900 + void (*post_unlock_program_front_end)(struct dc *dc,
5901 + struct dc_state *context);
5902 + void (*update_plane_addr)(const struct dc *dc,
5903 +diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
5904 +index ab45ac445045a..351a85088d0ec 100644
5905 +--- a/drivers/gpu/drm/arm/malidp_planes.c
5906 ++++ b/drivers/gpu/drm/arm/malidp_planes.c
5907 +@@ -346,7 +346,7 @@ static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
5908 + if (cma_obj->sgt)
5909 + sgt = cma_obj->sgt;
5910 + else
5911 +- sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
5912 ++ sgt = obj->funcs->get_sg_table(obj);
5913 +
5914 + if (!sgt)
5915 + return false;
5916 +diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
5917 +index 5d67a41f7c3a8..3dd70d813f694 100644
5918 +--- a/drivers/gpu/drm/drm_debugfs_crc.c
5919 ++++ b/drivers/gpu/drm/drm_debugfs_crc.c
5920 +@@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
5921 + source[len - 1] = '\0';
5922 +
5923 + ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
5924 +- if (ret)
5925 ++ if (ret) {
5926 ++ kfree(source);
5927 + return ret;
5928 ++ }
5929 +
5930 + spin_lock_irq(&crc->lock);
5931 +
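
As an aside, the drm_debugfs_crc.c hunk above is the standard fix for a leak on an early-return error path: a buffer duplicated from user space must be freed on every exit, not only on success. A minimal userspace sketch of the same pattern, with parse_source() and its names purely illustrative:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for crc_control_write(): duplicate the input,
     * validate it, and release the copy on the failure path too. */
    static int parse_source(const char *input)
    {
            char *source = strdup(input);   /* analogous to memdup_user_nul() */

            if (!source)
                    return -ENOMEM;

            if (source[0] == '\0') {        /* validation failed ...           */
                    free(source);           /* ... free the copy before return */
                    return -EINVAL;
            }

            /* ... consume source ... */
            free(source);
            return 0;
    }

    int main(void)
    {
            return parse_source("crtc") ? 1 : 0;
    }
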
5932 +diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
5933 +index 3296ed3df3580..8b65ca164bf4b 100644
5934 +--- a/drivers/gpu/drm/drm_gem_vram_helper.c
5935 ++++ b/drivers/gpu/drm/drm_gem_vram_helper.c
5936 +@@ -167,6 +167,10 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
5937 + }
5938 + }
5939 +
5940 ++/*
5941 ++ * Note that on error, drm_gem_vram_init will free the buffer object.
5942 ++ */
5943 ++
5944 + static int drm_gem_vram_init(struct drm_device *dev,
5945 + struct drm_gem_vram_object *gbo,
5946 + size_t size, unsigned long pg_align)
5947 +@@ -176,15 +180,19 @@ static int drm_gem_vram_init(struct drm_device *dev,
5948 + int ret;
5949 + size_t acc_size;
5950 +
5951 +- if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
5952 ++ if (WARN_ONCE(!vmm, "VRAM MM not initialized")) {
5953 ++ kfree(gbo);
5954 + return -EINVAL;
5955 ++ }
5956 + bdev = &vmm->bdev;
5957 +
5958 + gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
5959 +
5960 + ret = drm_gem_object_init(dev, &gbo->bo.base, size);
5961 +- if (ret)
5962 ++ if (ret) {
5963 ++ kfree(gbo);
5964 + return ret;
5965 ++ }
5966 +
5967 + acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
5968 +
5969 +@@ -195,13 +203,13 @@ static int drm_gem_vram_init(struct drm_device *dev,
5970 + &gbo->placement, pg_align, false, acc_size,
5971 + NULL, NULL, ttm_buffer_object_destroy);
5972 + if (ret)
5973 +- goto err_drm_gem_object_release;
5974 ++ /*
5975 ++ * A failing ttm_bo_init will call ttm_buffer_object_destroy
5976 ++ * to release gbo->bo.base and kfree gbo.
5977 ++ */
5978 ++ return ret;
5979 +
5980 + return 0;
5981 +-
5982 +-err_drm_gem_object_release:
5983 +- drm_gem_object_release(&gbo->bo.base);
5984 +- return ret;
5985 + }
5986 +
5987 + /**
5988 +@@ -235,13 +243,9 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
5989 +
5990 + ret = drm_gem_vram_init(dev, gbo, size, pg_align);
5991 + if (ret < 0)
5992 +- goto err_kfree;
5993 ++ return ERR_PTR(ret);
5994 +
5995 + return gbo;
5996 +-
5997 +-err_kfree:
5998 +- kfree(gbo);
5999 +- return ERR_PTR(ret);
6000 + }
6001 + EXPORT_SYMBOL(drm_gem_vram_create);
6002 +
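
The drm_gem_vram_helper.c rework above centralizes a subtle ownership rule: once initialization hands the object to a callback that frees it on failure (here ttm_buffer_object_destroy, per the patch's own comment), callers must stop freeing it themselves or they double-free. A small sketch of the convention, with all names hypothetical:

    #include <stdlib.h>

    struct object { int id; };

    /* Hypothetical init that owns cleanup on failure: on any error it
     * frees obj itself, so callers must not free it again. */
    static int object_init(struct object *obj, int id)
    {
            if (id < 0) {
                    free(obj);      /* init consumes the object on failure */
                    return -1;
            }
            obj->id = id;
            return 0;
    }

    static struct object *object_create(int id)
    {
            struct object *obj = malloc(sizeof(*obj));

            if (!obj)
                    return NULL;
            if (object_init(obj, id))
                    return NULL;    /* no free(obj) here: init already did it */
            return obj;
    }

    int main(void)
    {
            struct object *obj = object_create(42);

            free(obj);              /* normal teardown on success */
            return 0;
    }
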
6003 +diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
6004 +index f41cbb753bb46..720a767118c9c 100644
6005 +--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
6006 ++++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
6007 +@@ -2078,7 +2078,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
6008 + intel_dp->dpcd,
6009 + sizeof(intel_dp->dpcd));
6010 + cdv_intel_edp_panel_vdd_off(gma_encoder);
6011 +- if (ret == 0) {
6012 ++ if (ret <= 0) {
6013 + /* if this fails, presume the device is a ghost */
6014 + DRM_INFO("failed to retrieve link info, disabling eDP\n");
6015 + drm_encoder_cleanup(encoder);
6016 +diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
6017 +index cc70e836522f0..8758958e16893 100644
6018 +--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
6019 ++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
6020 +@@ -160,37 +160,6 @@ static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
6021 + .atomic_update = hibmc_plane_atomic_update,
6022 + };
6023 +
6024 +-static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
6025 +-{
6026 +- struct drm_device *dev = priv->dev;
6027 +- struct drm_plane *plane;
6028 +- int ret = 0;
6029 +-
6030 +- plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL);
6031 +- if (!plane) {
6032 +- DRM_ERROR("failed to alloc memory when init plane\n");
6033 +- return ERR_PTR(-ENOMEM);
6034 +- }
6035 +- /*
6036 +- * plane init
6037 +- * TODO: Now only support primary plane, overlay planes
6038 +- * need to do.
6039 +- */
6040 +- ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs,
6041 +- channel_formats1,
6042 +- ARRAY_SIZE(channel_formats1),
6043 +- NULL,
6044 +- DRM_PLANE_TYPE_PRIMARY,
6045 +- NULL);
6046 +- if (ret) {
6047 +- DRM_ERROR("failed to init plane: %d\n", ret);
6048 +- return ERR_PTR(ret);
6049 +- }
6050 +-
6051 +- drm_plane_helper_add(plane, &hibmc_plane_helper_funcs);
6052 +- return plane;
6053 +-}
6054 +-
6055 + static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
6056 + {
6057 + struct hibmc_drm_private *priv = crtc->dev->dev_private;
6058 +@@ -537,22 +506,24 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
6059 + int hibmc_de_init(struct hibmc_drm_private *priv)
6060 + {
6061 + struct drm_device *dev = priv->dev;
6062 +- struct drm_crtc *crtc;
6063 +- struct drm_plane *plane;
6064 ++ struct drm_crtc *crtc = &priv->crtc;
6065 ++ struct drm_plane *plane = &priv->primary_plane;
6066 + int ret;
6067 +
6068 +- plane = hibmc_plane_init(priv);
6069 +- if (IS_ERR(plane)) {
6070 +- DRM_ERROR("failed to create plane: %ld\n", PTR_ERR(plane));
6071 +- return PTR_ERR(plane);
6072 +- }
6073 ++ ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs,
6074 ++ channel_formats1,
6075 ++ ARRAY_SIZE(channel_formats1),
6076 ++ NULL,
6077 ++ DRM_PLANE_TYPE_PRIMARY,
6078 ++ NULL);
6079 +
6080 +- crtc = devm_kzalloc(dev->dev, sizeof(*crtc), GFP_KERNEL);
6081 +- if (!crtc) {
6082 +- DRM_ERROR("failed to alloc memory when init crtc\n");
6083 +- return -ENOMEM;
6084 ++ if (ret) {
6085 ++ DRM_ERROR("failed to init plane: %d\n", ret);
6086 ++ return ret;
6087 + }
6088 +
6089 ++ drm_plane_helper_add(plane, &hibmc_plane_helper_funcs);
6090 ++
6091 + ret = drm_crtc_init_with_planes(dev, crtc, plane,
6092 + NULL, &hibmc_crtc_funcs, NULL);
6093 + if (ret) {
6094 +diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
6095 +index 609768748de65..0a74ba220cac5 100644
6096 +--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
6097 ++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
6098 +@@ -29,6 +29,8 @@ struct hibmc_drm_private {
6099 +
6100 + /* drm */
6101 + struct drm_device *dev;
6102 ++ struct drm_plane primary_plane;
6103 ++ struct drm_crtc crtc;
6104 + struct drm_encoder encoder;
6105 + struct drm_connector connector;
6106 + bool mode_config_initialized;
6107 +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
6108 +index 4d29568be3f53..ac038572164d3 100644
6109 +--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
6110 ++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
6111 +@@ -481,7 +481,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
6112 + mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
6113 + cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
6114 + cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
6115 +- cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
6116 ++ cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
6117 + mtk_crtc_ddp_config(crtc, cmdq_handle);
6118 + cmdq_pkt_finalize(cmdq_handle);
6119 + cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
6120 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
6121 +index 66a95e22b7b3d..456d729c81c39 100644
6122 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
6123 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
6124 +@@ -1048,6 +1048,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
6125 + {
6126 + struct msm_drm_private *priv = dev->dev_private;
6127 + struct platform_device *pdev = priv->gpu_pdev;
6128 ++ struct adreno_platform_config *config = pdev->dev.platform_data;
6129 ++ const struct adreno_info *info;
6130 + struct device_node *node;
6131 + struct a6xx_gpu *a6xx_gpu;
6132 + struct adreno_gpu *adreno_gpu;
6133 +@@ -1064,7 +1066,14 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
6134 + adreno_gpu->registers = NULL;
6135 + adreno_gpu->reg_offsets = a6xx_register_offsets;
6136 +
6137 +- if (adreno_is_a650(adreno_gpu))
6138 ++ /*
6139 ++ * We need to know the platform type before calling into adreno_gpu_init
6140 ++ * so that the hw_apriv flag can be correctly set. Snoop into the info
6141 ++ * and grab the revision number.
6142 ++ */
6143 ++ info = adreno_info(config->rev);
6144 ++
6145 ++ if (info && info->revn == 650)
6146 + adreno_gpu->base.hw_apriv = true;
6147 +
6148 + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
6149 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
6150 +index b12f5b4a1bea9..e9ede19193b0e 100644
6151 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
6152 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
6153 +@@ -875,7 +875,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
6154 + int i;
6155 +
6156 + a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
6157 +- sizeof(a6xx_state->indexed_regs));
6158 ++ sizeof(*a6xx_state->indexed_regs));
6159 + if (!a6xx_state->indexed_regs)
6160 + return;
6161 +
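
The a6xx_gpu_state.c one-liner above fixes a classic C pitfall: sizeof(a6xx_state->indexed_regs) is the size of a pointer, while sizeof(*a6xx_state->indexed_regs) is the size of one array element, so the original kcalloc() allocated far too little. A sketch of the difference:

    #include <stdio.h>
    #include <stdlib.h>

    struct reg { unsigned int addr, val, flags, mask; };

    int main(void)
    {
            struct reg *regs;

            /* sizeof(regs) is the pointer size (8 bytes on 64-bit targets),
             * not the element size, so calloc(16, sizeof(regs)) undersizes
             * the array whenever the element is larger than a pointer. */
            printf("sizeof(regs)  = %zu\n", sizeof(regs));
            printf("sizeof(*regs) = %zu\n", sizeof(*regs));

            regs = calloc(16, sizeof(*regs));       /* correct element size */
            free(regs);
            return 0;
    }
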
6162 +diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
6163 +index 862dd35b27d3d..6e8bef1a9ea25 100644
6164 +--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
6165 ++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
6166 +@@ -189,10 +189,16 @@ struct msm_gem_address_space *
6167 + adreno_iommu_create_address_space(struct msm_gpu *gpu,
6168 + struct platform_device *pdev)
6169 + {
6170 +- struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
6171 +- struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
6172 ++ struct iommu_domain *iommu;
6173 ++ struct msm_mmu *mmu;
6174 + struct msm_gem_address_space *aspace;
6175 +
6176 ++ iommu = iommu_domain_alloc(&platform_bus_type);
6177 ++ if (!iommu)
6178 ++ return NULL;
6179 ++
6180 ++ mmu = msm_iommu_new(&pdev->dev, iommu);
6181 ++
6182 + aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
6183 + 0xffffffff - SZ_16M);
6184 +
6185 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
6186 +index c2729f71e2fa7..f9cb1e0da1a59 100644
6187 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
6188 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
6189 +@@ -881,7 +881,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
6190 + struct drm_plane *plane;
6191 + struct drm_display_mode *mode;
6192 +
6193 +- int cnt = 0, rc = 0, mixer_width, i, z_pos;
6194 ++ int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
6195 +
6196 + struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
6197 + int multirect_count = 0;
6198 +@@ -914,9 +914,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
6199 +
6200 + memset(pipe_staged, 0, sizeof(pipe_staged));
6201 +
6202 +- mixer_width = mode->hdisplay / cstate->num_mixers;
6203 ++ if (cstate->num_mixers) {
6204 ++ mixer_width = mode->hdisplay / cstate->num_mixers;
6205 +
6206 +- _dpu_crtc_setup_lm_bounds(crtc, state);
6207 ++ _dpu_crtc_setup_lm_bounds(crtc, state);
6208 ++ }
6209 +
6210 + crtc_rect.x2 = mode->hdisplay;
6211 + crtc_rect.y2 = mode->vdisplay;
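
The dpu_crtc.c hunk above guards an integer division against a zero divisor: with no mixers assigned yet, mode->hdisplay / cstate->num_mixers would fault. The shape of the fix in a standalone sketch (names hypothetical):

    #include <stdio.h>

    int main(void)
    {
            int hdisplay = 1920;
            int num_mixers = 0;     /* not configured yet */
            int mixer_width = 0;    /* keep a safe default */

            if (num_mixers)         /* divide only when the divisor is non-zero */
                    mixer_width = hdisplay / num_mixers;

            printf("mixer_width = %d\n", mixer_width);
            return 0;
    }
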
6212 +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
6213 +index 508764fccd27d..27ccfa531d31f 100644
6214 +--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
6215 ++++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
6216 +@@ -26,6 +26,7 @@
6217 + #include <drm/drm_drv.h>
6218 + #include <drm/drm_fb_cma_helper.h>
6219 + #include <drm/drm_fb_helper.h>
6220 ++#include <drm/drm_fourcc.h>
6221 + #include <drm/drm_gem_cma_helper.h>
6222 + #include <drm/drm_gem_framebuffer_helper.h>
6223 + #include <drm/drm_irq.h>
6224 +@@ -92,8 +93,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
6225 + clk_disable_unprepare(mxsfb->clk_axi);
6226 + }
6227 +
6228 ++static struct drm_framebuffer *
6229 ++mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
6230 ++ const struct drm_mode_fb_cmd2 *mode_cmd)
6231 ++{
6232 ++ const struct drm_format_info *info;
6233 ++
6234 ++ info = drm_get_format_info(dev, mode_cmd);
6235 ++ if (!info)
6236 ++ return ERR_PTR(-EINVAL);
6237 ++
6238 ++ if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
6239 ++ dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
6240 ++ return ERR_PTR(-EINVAL);
6241 ++ }
6242 ++
6243 ++ return drm_gem_fb_create(dev, file_priv, mode_cmd);
6244 ++}
6245 ++
6246 + static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
6247 +- .fb_create = drm_gem_fb_create,
6248 ++ .fb_create = mxsfb_fb_create,
6249 + .atomic_check = drm_atomic_helper_check,
6250 + .atomic_commit = drm_atomic_helper_commit,
6251 + };
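
The mxsfb check above enforces the driver's assumption that the buffer is packed: the pitch (bytes per scanline) must equal width times bytes per pixel (cpp). For example, a 1024-pixel-wide XRGB8888 buffer (cpp = 4) must have pitches[0] == 4096; anything else is rejected. A sketch of the same arithmetic, with check_pitch() purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical validator mirroring the pitch rule in mxsfb_fb_create(). */
    static int check_pitch(uint32_t width, uint32_t cpp, uint32_t pitch)
    {
            return (uint64_t)width * cpp == pitch ? 0 : -1;
    }

    int main(void)
    {
            printf("%d\n", check_pitch(1024, 4, 4096));     /*  0: packed   */
            printf("%d\n", check_pitch(1024, 4, 4352));     /* -1: padded   */
            return 0;
    }
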
6252 +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
6253 +index cb6550d37e858..eabc9e41d92b4 100644
6254 +--- a/drivers/gpu/drm/panel/panel-simple.c
6255 ++++ b/drivers/gpu/drm/panel/panel-simple.c
6256 +@@ -2941,12 +2941,12 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
6257 + static const struct panel_desc ortustech_com43h4m85ulc = {
6258 + .modes = &ortustech_com43h4m85ulc_mode,
6259 + .num_modes = 1,
6260 +- .bpc = 8,
6261 ++ .bpc = 6,
6262 + .size = {
6263 + .width = 56,
6264 + .height = 93,
6265 + },
6266 +- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
6267 ++ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
6268 + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
6269 + .connector_type = DRM_MODE_CONNECTOR_DPI,
6270 + };
6271 +diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
6272 +index c30c719a80594..3c4a85213c15f 100644
6273 +--- a/drivers/gpu/drm/panfrost/panfrost_device.h
6274 ++++ b/drivers/gpu/drm/panfrost/panfrost_device.h
6275 +@@ -69,6 +69,9 @@ struct panfrost_compatible {
6276 + int num_pm_domains;
6277 + /* Only required if num_pm_domains > 1. */
6278 + const char * const *pm_domain_names;
6279 ++
6280 ++ /* Vendor implementation quirks callback */
6281 ++ void (*vendor_quirk)(struct panfrost_device *pfdev);
6282 + };
6283 +
6284 + struct panfrost_device {
6285 +diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
6286 +index ada51df9a7a32..f6d5d03201fad 100644
6287 +--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
6288 ++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
6289 +@@ -667,7 +667,18 @@ static const struct panfrost_compatible default_data = {
6290 + .pm_domain_names = NULL,
6291 + };
6292 +
6293 ++static const struct panfrost_compatible amlogic_data = {
6294 ++ .num_supplies = ARRAY_SIZE(default_supplies),
6295 ++ .supply_names = default_supplies,
6296 ++ .vendor_quirk = panfrost_gpu_amlogic_quirk,
6297 ++};
6298 ++
6299 + static const struct of_device_id dt_match[] = {
6300 ++ /* Set first to probe before the generic compatibles */
6301 ++ { .compatible = "amlogic,meson-gxm-mali",
6302 ++ .data = &amlogic_data, },
6303 ++ { .compatible = "amlogic,meson-g12a-mali",
6304 ++ .data = &amlogic_data, },
6305 + { .compatible = "arm,mali-t604", .data = &default_data, },
6306 + { .compatible = "arm,mali-t624", .data = &default_data, },
6307 + { .compatible = "arm,mali-t628", .data = &default_data, },
6308 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
6309 +index f2c1ddc41a9bf..165403878ad9b 100644
6310 +--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
6311 ++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
6312 +@@ -75,6 +75,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
6313 + return 0;
6314 + }
6315 +
6316 ++void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
6317 ++{
6318 ++ /*
6319 ++ * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need
6320 ++ * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
6321 ++ * to operate correctly.
6322 ++ */
6323 ++ gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
6324 ++ gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
6325 ++}
6326 ++
6327 + static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
6328 + {
6329 + u32 quirks = 0;
6330 +@@ -135,6 +146,10 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
6331 +
6332 + if (quirks)
6333 + gpu_write(pfdev, GPU_JM_CONFIG, quirks);
6334 ++
6335 ++ /* Apply platform-specific quirks, if any */
6336 ++ if (pfdev->comp->vendor_quirk)
6337 ++ pfdev->comp->vendor_quirk(pfdev);
6338 + }
6339 +
6340 + #define MAX_HW_REVS 6
6341 +@@ -304,16 +319,18 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
6342 + int ret;
6343 + u32 val;
6344 +
6345 ++ panfrost_gpu_init_quirks(pfdev);
6346 ++
6347 + /* Just turn on everything for now */
6348 + gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
6349 + ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
6350 +- val, val == pfdev->features.l2_present, 100, 1000);
6351 ++ val, val == pfdev->features.l2_present, 100, 20000);
6352 + if (ret)
6353 + dev_err(pfdev->dev, "error powering up gpu L2");
6354 +
6355 + gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
6356 + ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
6357 +- val, val == pfdev->features.shader_present, 100, 1000);
6358 ++ val, val == pfdev->features.shader_present, 100, 20000);
6359 + if (ret)
6360 + dev_err(pfdev->dev, "error powering up gpu shader");
6361 +
6362 +@@ -355,7 +372,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
6363 + return err;
6364 + }
6365 +
6366 +- panfrost_gpu_init_quirks(pfdev);
6367 + panfrost_gpu_power_on(pfdev);
6368 +
6369 + return 0;
6370 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
6371 +index 4112412087b27..468c51e7e46db 100644
6372 +--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
6373 ++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
6374 +@@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
6375 + void panfrost_gpu_power_on(struct panfrost_device *pfdev);
6376 + void panfrost_gpu_power_off(struct panfrost_device *pfdev);
6377 +
6378 ++void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev);
6379 ++
6380 + #endif
6381 +diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
6382 +index ec4695cf3caf3..fdbc8d9491356 100644
6383 +--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
6384 ++++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
6385 +@@ -83,11 +83,13 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
6386 +
6387 + ret = pm_runtime_get_sync(pfdev->dev);
6388 + if (ret < 0)
6389 +- return ret;
6390 ++ goto err_put_pm;
6391 +
6392 + bo = drm_gem_shmem_create(pfdev->ddev, perfcnt->bosize);
6393 +- if (IS_ERR(bo))
6394 +- return PTR_ERR(bo);
6395 ++ if (IS_ERR(bo)) {
6396 ++ ret = PTR_ERR(bo);
6397 ++ goto err_put_pm;
6398 ++ }
6399 +
6400 + /* Map the perfcnt buf in the address space attached to file_priv. */
6401 + ret = panfrost_gem_open(&bo->base, file_priv);
6402 +@@ -168,6 +170,8 @@ err_close_bo:
6403 + panfrost_gem_close(&bo->base, file_priv);
6404 + err_put_bo:
6405 + drm_gem_object_put(&bo->base);
6406 ++err_put_pm:
6407 ++ pm_runtime_put(pfdev->dev);
6408 + return ret;
6409 + }
6410 +
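
The panfrost_perfcnt.c hunk above applies a well-known runtime-PM rule: pm_runtime_get_sync() raises the device usage count even when it returns an error, so every failure path after it must drop the reference with pm_runtime_put(). The same acquire-then-release-on-error discipline in a generic userspace sketch (refcount names hypothetical):

    #include <stdio.h>

    static int usage_count;

    /* get() bumps the count even when it "fails", like pm_runtime_get_sync(). */
    static int get(int fail)
    {
            usage_count++;
            return fail ? -1 : 0;
    }

    static void put(void)
    {
            usage_count--;
    }

    static int do_work(int fail)
    {
            int ret = get(fail);

            if (ret < 0)
                    goto err_put;   /* must still drop the reference */

            /* ... real work ... */
            put();
            return 0;

    err_put:
            put();
            return ret;
    }

    int main(void)
    {
            do_work(1);
            printf("usage_count = %d\n", usage_count);      /* 0: balanced */
            return 0;
    }
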
6411 +diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
6412 +index ea38ac60581c6..eddaa62ad8b0e 100644
6413 +--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
6414 ++++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
6415 +@@ -51,6 +51,10 @@
6416 + #define GPU_STATUS 0x34
6417 + #define GPU_STATUS_PRFCNT_ACTIVE BIT(2)
6418 + #define GPU_LATEST_FLUSH_ID 0x38
6419 ++#define GPU_PWR_KEY 0x50 /* (WO) Power manager key register */
6420 ++#define GPU_PWR_KEY_UNLOCK 0x2968A819
6421 ++#define GPU_PWR_OVERRIDE0 0x54 /* (RW) Power manager override settings */
6422 ++#define GPU_PWR_OVERRIDE1 0x58 /* (RW) Power manager override settings */
6423 + #define GPU_FAULT_STATUS 0x3C
6424 + #define GPU_FAULT_ADDRESS_LO 0x40
6425 + #define GPU_FAULT_ADDRESS_HI 0x44
6426 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
6427 +index f1a81c9b184d4..fa09b3ae8b9d4 100644
6428 +--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
6429 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
6430 +@@ -13,6 +13,7 @@
6431 + #include <drm/drm_fourcc.h>
6432 + #include <drm/drm_gem_cma_helper.h>
6433 + #include <drm/drm_gem_framebuffer_helper.h>
6434 ++#include <drm/drm_managed.h>
6435 + #include <drm/drm_plane_helper.h>
6436 + #include <drm/drm_vblank.h>
6437 +
6438 +@@ -341,6 +342,13 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
6439 + .atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
6440 + };
6441 +
6442 ++static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res)
6443 ++{
6444 ++ struct rcar_du_vsp *vsp = res;
6445 ++
6446 ++ put_device(vsp->vsp);
6447 ++}
6448 ++
6449 + int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
6450 + unsigned int crtcs)
6451 + {
6452 +@@ -357,6 +365,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
6453 +
6454 + vsp->vsp = &pdev->dev;
6455 +
6456 ++ ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp);
6457 ++ if (ret < 0)
6458 ++ return ret;
6459 ++
6460 + ret = vsp1_du_init(vsp->vsp);
6461 + if (ret < 0)
6462 + return ret;
6463 +diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
6464 +index 6d8fa6118fc1a..eaad187c41f07 100644
6465 +--- a/drivers/gpu/drm/vc4/vc4_crtc.c
6466 ++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
6467 +@@ -723,11 +723,18 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc,
6468 +
6469 + void vc4_crtc_reset(struct drm_crtc *crtc)
6470 + {
6471 ++ struct vc4_crtc_state *vc4_crtc_state;
6472 ++
6473 + if (crtc->state)
6474 + vc4_crtc_destroy_state(crtc, crtc->state);
6475 +- crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
6476 +- if (crtc->state)
6477 +- __drm_atomic_helper_crtc_reset(crtc, crtc->state);
6478 ++
6479 ++ vc4_crtc_state = kzalloc(sizeof(*vc4_crtc_state), GFP_KERNEL);
6480 ++ if (!vc4_crtc_state) {
6481 ++ crtc->state = NULL;
6482 ++ return;
6483 ++ }
6484 ++
6485 ++ __drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base);
6486 + }
6487 +
6488 + static const struct drm_crtc_funcs vc4_crtc_funcs = {
6489 +diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
6490 +index a775feda1cc73..313339bbff901 100644
6491 +--- a/drivers/gpu/drm/vgem/vgem_drv.c
6492 ++++ b/drivers/gpu/drm/vgem/vgem_drv.c
6493 +@@ -471,8 +471,8 @@ static int __init vgem_init(void)
6494 +
6495 + out_put:
6496 + drm_dev_put(&vgem_device->drm);
6497 ++ platform_device_unregister(vgem_device->platform);
6498 + return ret;
6499 +-
6500 + out_unregister:
6501 + platform_device_unregister(vgem_device->platform);
6502 + out_free:
6503 +diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
6504 +index 4d944a0dff3e9..fdd7671a7b126 100644
6505 +--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
6506 ++++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
6507 +@@ -80,8 +80,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
6508 + vgdev->capsets[i].id > 0, 5 * HZ);
6509 + if (ret == 0) {
6510 + DRM_ERROR("timed out waiting for cap set %d\n", i);
6511 ++ spin_lock(&vgdev->display_info_lock);
6512 + kfree(vgdev->capsets);
6513 + vgdev->capsets = NULL;
6514 ++ spin_unlock(&vgdev->display_info_lock);
6515 + return;
6516 + }
6517 + DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
6518 +diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
6519 +index 53af60d484a44..9d2abdbd865a7 100644
6520 +--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
6521 ++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
6522 +@@ -684,9 +684,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
6523 + int i = le32_to_cpu(cmd->capset_index);
6524 +
6525 + spin_lock(&vgdev->display_info_lock);
6526 +- vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
6527 +- vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
6528 +- vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
6529 ++ if (vgdev->capsets) {
6530 ++ vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
6531 ++ vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
6532 ++ vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
6533 ++ } else {
6534 ++ DRM_ERROR("invalid capset memory.");
6535 ++ }
6536 + spin_unlock(&vgdev->display_info_lock);
6537 + wake_up(&vgdev->resp_wq);
6538 + }
6539 +diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
6540 +index 4af2f19480f4f..b8b060354667e 100644
6541 +--- a/drivers/gpu/drm/vkms/vkms_composer.c
6542 ++++ b/drivers/gpu/drm/vkms/vkms_composer.c
6543 +@@ -33,7 +33,7 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
6544 + + (i * composer->pitch)
6545 + + (j * composer->cpp);
6546 + /* XRGB format ignores Alpha channel */
6547 +- memset(vaddr_out + src_offset + 24, 0, 8);
6548 ++ bitmap_clear(vaddr_out + src_offset, 24, 8);
6549 + crc = crc32_le(crc, vaddr_out + src_offset,
6550 + sizeof(u32));
6551 + }
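
The vkms_composer.c change above is a bits-versus-bytes fix: the intent is to ignore the alpha channel of one XRGB8888 pixel, i.e. clear 8 bits starting at bit 24, which is what bitmap_clear() expresses; memset(... + 24, 0, 8) instead zeroed 8 bytes starting 24 bytes past the pixel. A minimal sketch of the intended masking on one pixel:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t pixel = 0xAA112233;    /* XRGB8888; alpha is bits 24-31 */

            /* Clear 8 bits starting at bit 24 -- the effect bitmap_clear()
             * has on a little-endian word here. */
            pixel &= ~(UINT32_C(0xFF) << 24);

            printf("0x%08X\n", pixel);      /* prints 0x00112233 */
            return 0;
    }
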
6552 +diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
6553 +index 57a8a397d5e84..83dd5567de8b5 100644
6554 +--- a/drivers/gpu/drm/vkms/vkms_drv.c
6555 ++++ b/drivers/gpu/drm/vkms/vkms_drv.c
6556 +@@ -190,8 +190,8 @@ static int __init vkms_init(void)
6557 +
6558 + out_put:
6559 + drm_dev_put(&vkms_device->drm);
6560 ++ platform_device_unregister(vkms_device->platform);
6561 + return ret;
6562 +-
6563 + out_unregister:
6564 + platform_device_unregister(vkms_device->platform);
6565 + out_free:
6566 +diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
6567 +index 26328c76305be..8e69303aad3f7 100644
6568 +--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
6569 ++++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
6570 +@@ -111,7 +111,7 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
6571 + /* Initialize mode config, vblank and the KMS poll helper. */
6572 + ret = drmm_mode_config_init(drm);
6573 + if (ret < 0)
6574 +- goto err_dev_put;
6575 ++ return ret;
6576 +
6577 + drm->mode_config.funcs = &zynqmp_dpsub_mode_config_funcs;
6578 + drm->mode_config.min_width = 0;
6579 +@@ -121,7 +121,7 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
6580 +
6581 + ret = drm_vblank_init(drm, 1);
6582 + if (ret)
6583 +- goto err_dev_put;
6584 ++ return ret;
6585 +
6586 + drm->irq_enabled = 1;
6587 +
6588 +@@ -154,8 +154,6 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
6589 +
6590 + err_poll_fini:
6591 + drm_kms_helper_poll_fini(drm);
6592 +-err_dev_put:
6593 +- drm_dev_put(drm);
6594 + return ret;
6595 + }
6596 +
6597 +@@ -208,27 +206,16 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
6598 + int ret;
6599 +
6600 + /* Allocate private data. */
6601 +- dpsub = kzalloc(sizeof(*dpsub), GFP_KERNEL);
6602 +- if (!dpsub)
6603 +- return -ENOMEM;
6604 ++ dpsub = devm_drm_dev_alloc(&pdev->dev, &zynqmp_dpsub_drm_driver,
6605 ++ struct zynqmp_dpsub, drm);
6606 ++ if (IS_ERR(dpsub))
6607 ++ return PTR_ERR(dpsub);
6608 +
6609 + dpsub->dev = &pdev->dev;
6610 + platform_set_drvdata(pdev, dpsub);
6611 +
6612 + dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT));
6613 +
6614 +- /*
6615 +- * Initialize the DRM device early, as the DRM core mandates usage of
6616 +- * the managed memory helpers tied to the DRM device.
6617 +- */
6618 +- ret = drm_dev_init(&dpsub->drm, &zynqmp_dpsub_drm_driver, &pdev->dev);
6619 +- if (ret < 0) {
6620 +- kfree(dpsub);
6621 +- return ret;
6622 +- }
6623 +-
6624 +- drmm_add_final_kfree(&dpsub->drm, dpsub);
6625 +-
6626 + /* Try the reserved memory. Proceed if there's none. */
6627 + of_reserved_mem_device_init(&pdev->dev);
6628 +
6629 +@@ -286,8 +273,6 @@ static int zynqmp_dpsub_remove(struct platform_device *pdev)
6630 + clk_disable_unprepare(dpsub->apb_clk);
6631 + of_reserved_mem_device_release(&pdev->dev);
6632 +
6633 +- drm_dev_put(drm);
6634 +-
6635 + return 0;
6636 + }
6637 +
6638 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
6639 +index 74fc1df6e3c27..79495e218b7fc 100644
6640 +--- a/drivers/hid/hid-ids.h
6641 ++++ b/drivers/hid/hid-ids.h
6642 +@@ -727,6 +727,7 @@
6643 + #define USB_DEVICE_ID_LENOVO_TP10UBKBD 0x6062
6644 + #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
6645 + #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
6646 ++#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
6647 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
6648 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
6649 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e
6650 +@@ -1123,6 +1124,7 @@
6651 + #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
6652 + #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
6653 + #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
6654 ++#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
6655 + #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
6656 +
6657 + #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
6658 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
6659 +index 88e19996427e6..9770db624bfaf 100644
6660 +--- a/drivers/hid/hid-input.c
6661 ++++ b/drivers/hid/hid-input.c
6662 +@@ -797,7 +797,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
6663 + case 0x3b: /* Battery Strength */
6664 + hidinput_setup_battery(device, HID_INPUT_REPORT, field);
6665 + usage->type = EV_PWR;
6666 +- goto ignore;
6667 ++ return;
6668 +
6669 + case 0x3c: /* Invert */
6670 + map_key_clear(BTN_TOOL_RUBBER);
6671 +@@ -1059,7 +1059,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
6672 + case HID_DC_BATTERYSTRENGTH:
6673 + hidinput_setup_battery(device, HID_INPUT_REPORT, field);
6674 + usage->type = EV_PWR;
6675 +- goto ignore;
6676 ++ return;
6677 + }
6678 + goto unknown;
6679 +
6680 +diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
6681 +index 6c55682c59740..044a93f3c1178 100644
6682 +--- a/drivers/hid/hid-ite.c
6683 ++++ b/drivers/hid/hid-ite.c
6684 +@@ -44,6 +44,10 @@ static const struct hid_device_id ite_devices[] = {
6685 + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
6686 + USB_VENDOR_ID_SYNAPTICS,
6687 + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
6688 ++ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
6689 ++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
6690 ++ USB_VENDOR_ID_SYNAPTICS,
6691 ++ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
6692 + { }
6693 + };
6694 + MODULE_DEVICE_TABLE(hid, ite_devices);
6695 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
6696 +index e3152155c4b85..99f041afd5c0c 100644
6697 +--- a/drivers/hid/hid-multitouch.c
6698 ++++ b/drivers/hid/hid-multitouch.c
6699 +@@ -1973,6 +1973,12 @@ static const struct hid_device_id mt_devices[] = {
6700 + HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
6701 + USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },
6702 +
6703 ++ /* Lenovo X1 TAB Gen 3 */
6704 ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
6705 ++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
6706 ++ USB_VENDOR_ID_LENOVO,
6707 ++ USB_DEVICE_ID_LENOVO_X1_TAB3) },
6708 ++
6709 + /* MosArt panels */
6710 + { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
6711 + MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
6712 +diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
6713 +index 2ff4c8e366ff2..1ca64481145ee 100644
6714 +--- a/drivers/hid/hid-roccat-kone.c
6715 ++++ b/drivers/hid/hid-roccat-kone.c
6716 +@@ -294,31 +294,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
6717 + struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
6718 + struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
6719 + int retval = 0, difference, old_profile;
6720 ++ struct kone_settings *settings = (struct kone_settings *)buf;
6721 +
6722 + /* I need to get my data in one piece */
6723 + if (off != 0 || count != sizeof(struct kone_settings))
6724 + return -EINVAL;
6725 +
6726 + mutex_lock(&kone->kone_lock);
6727 +- difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
6728 ++ difference = memcmp(settings, &kone->settings,
6729 ++ sizeof(struct kone_settings));
6730 + if (difference) {
6731 +- retval = kone_set_settings(usb_dev,
6732 +- (struct kone_settings const *)buf);
6733 +- if (retval) {
6734 +- mutex_unlock(&kone->kone_lock);
6735 +- return retval;
6736 ++ if (settings->startup_profile < 1 ||
6737 ++ settings->startup_profile > 5) {
6738 ++ retval = -EINVAL;
6739 ++ goto unlock;
6740 + }
6741 +
6742 ++ retval = kone_set_settings(usb_dev, settings);
6743 ++ if (retval)
6744 ++ goto unlock;
6745 ++
6746 + old_profile = kone->settings.startup_profile;
6747 +- memcpy(&kone->settings, buf, sizeof(struct kone_settings));
6748 ++ memcpy(&kone->settings, settings, sizeof(struct kone_settings));
6749 +
6750 + kone_profile_activated(kone, kone->settings.startup_profile);
6751 +
6752 + if (kone->settings.startup_profile != old_profile)
6753 + kone_profile_report(kone, kone->settings.startup_profile);
6754 + }
6755 ++unlock:
6756 + mutex_unlock(&kone->kone_lock);
6757 +
6758 ++ if (retval)
6759 ++ return retval;
6760 ++
6761 + return sizeof(struct kone_settings);
6762 + }
6763 + static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
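
The hid-roccat-kone hunk above adds the missing validation step for a user-supplied binary blob: an out-of-range startup_profile is rejected before anything is written to the device or cached. The general check-then-commit shape, with a hypothetical settings struct:

    #include <stdio.h>

    struct settings {
            unsigned char startup_profile;  /* valid range: 1..5 */
    };

    static struct settings cached;

    static int write_settings(const struct settings *s)
    {
            if (s->startup_profile < 1 || s->startup_profile > 5)
                    return -1;      /* validate before committing anything */

            cached = *s;            /* commit only after validation passes */
            return 0;
    }

    int main(void)
    {
            struct settings bad = { .startup_profile = 9 };

            printf("%d\n", write_settings(&bad));   /* -1: rejected */
            return 0;
    }
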
6764 +diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c
6765 +index 94698cae04971..3e1d56585b91a 100644
6766 +--- a/drivers/hwmon/bt1-pvt.c
6767 ++++ b/drivers/hwmon/bt1-pvt.c
6768 +@@ -13,6 +13,7 @@
6769 + #include <linux/bitops.h>
6770 + #include <linux/clk.h>
6771 + #include <linux/completion.h>
6772 ++#include <linux/delay.h>
6773 + #include <linux/device.h>
6774 + #include <linux/hwmon-sysfs.h>
6775 + #include <linux/hwmon.h>
6776 +@@ -476,6 +477,7 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
6777 + long *val)
6778 + {
6779 + struct pvt_cache *cache = &pvt->cache[type];
6780 ++ unsigned long timeout;
6781 + u32 data;
6782 + int ret;
6783 +
6784 +@@ -499,7 +501,14 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
6785 + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, 0);
6786 + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
6787 +
6788 +- wait_for_completion(&cache->conversion);
6789 ++ /*
6790 ++ * Wait with a timeout: if the sensor is suddenly powered down, the
6791 ++ * request will never complete and the caller would otherwise hang
6792 ++ * here until the power is back up again. Multiply the timeout by a
6793 ++ * factor of two to prevent a false timeout.
6794 ++ */
6795 ++ timeout = 2 * usecs_to_jiffies(ktime_to_us(pvt->timeout));
6796 ++ ret = wait_for_completion_timeout(&cache->conversion, timeout);
6797 +
6798 + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
6799 + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
6800 +@@ -509,6 +518,9 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
6801 +
6802 + mutex_unlock(&pvt->iface_mtx);
6803 +
6804 ++ if (!ret)
6805 ++ return -ETIMEDOUT;
6806 ++
6807 + if (type == PVT_TEMP)
6808 + *val = pvt_calc_poly(&poly_N_to_temp, data);
6809 + else
6810 +@@ -654,44 +666,16 @@ static int pvt_write_trim(struct pvt_hwmon *pvt, long val)
6811 +
6812 + static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
6813 + {
6814 +- unsigned long rate;
6815 +- ktime_t kt;
6816 +- u32 data;
6817 +-
6818 +- rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
6819 +- if (!rate)
6820 +- return -ENODEV;
6821 +-
6822 +- /*
6823 +- * Don't bother with mutex here, since we just read data from MMIO.
6824 +- * We also have to scale the ticks timeout up to compensate the
6825 +- * ms-ns-data translations.
6826 +- */
6827 +- data = readl(pvt->regs + PVT_TTIMEOUT) + 1;
6828 ++ int ret;
6829 +
6830 +- /*
6831 +- * Calculate ref-clock based delay (Ttotal) between two consecutive
6832 +- * data samples of the same sensor. So we first must calculate the
6833 +- * delay introduced by the internal ref-clock timer (Tref * Fclk).
6834 +- * Then add the constant timeout cuased by each conversion latency
6835 +- * (Tmin). The basic formulae for each conversion is following:
6836 +- * Ttotal = Tref * Fclk + Tmin
6837 +- * Note if alarms are enabled the sensors are polled one after
6838 +- * another, so in order to have the delay being applicable for each
6839 +- * sensor the requested value must be equally redistirbuted.
6840 +- */
6841 +-#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
6842 +- kt = ktime_set(PVT_SENSORS_NUM * (u64)data, 0);
6843 +- kt = ktime_divns(kt, rate);
6844 +- kt = ktime_add_ns(kt, PVT_SENSORS_NUM * PVT_TOUT_MIN);
6845 +-#else
6846 +- kt = ktime_set(data, 0);
6847 +- kt = ktime_divns(kt, rate);
6848 +- kt = ktime_add_ns(kt, PVT_TOUT_MIN);
6849 +-#endif
6850 ++ ret = mutex_lock_interruptible(&pvt->iface_mtx);
6851 ++ if (ret)
6852 ++ return ret;
6853 +
6854 + /* Return the result in msec as hwmon sysfs interface requires. */
6855 +- *val = ktime_to_ms(kt);
6856 ++ *val = ktime_to_ms(pvt->timeout);
6857 ++
6858 ++ mutex_unlock(&pvt->iface_mtx);
6859 +
6860 + return 0;
6861 + }
6862 +@@ -699,7 +683,7 @@ static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
6863 + static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
6864 + {
6865 + unsigned long rate;
6866 +- ktime_t kt;
6867 ++ ktime_t kt, cache;
6868 + u32 data;
6869 + int ret;
6870 +
6871 +@@ -712,7 +696,7 @@ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
6872 + * between all available sensors to have the requested delay
6873 + * applicable to each individual sensor.
6874 + */
6875 +- kt = ms_to_ktime(val);
6876 ++ cache = kt = ms_to_ktime(val);
6877 + #if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
6878 + kt = ktime_divns(kt, PVT_SENSORS_NUM);
6879 + #endif
6880 +@@ -741,6 +725,7 @@ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
6881 + return ret;
6882 +
6883 + pvt_set_tout(pvt, data);
6884 ++ pvt->timeout = cache;
6885 +
6886 + mutex_unlock(&pvt->iface_mtx);
6887 +
6888 +@@ -982,10 +967,52 @@ static int pvt_request_clks(struct pvt_hwmon *pvt)
6889 + return 0;
6890 + }
6891 +
6892 +-static void pvt_init_iface(struct pvt_hwmon *pvt)
6893 ++static int pvt_check_pwr(struct pvt_hwmon *pvt)
6894 + {
6895 ++ unsigned long tout;
6896 ++ int ret = 0;
6897 ++ u32 data;
6898 ++
6899 ++ /*
6900 ++ * Test out the sensor conversion functionality. If it is not done on
6901 ++ * time then the domain must have been unpowered and we won't be able
6902 ++ * to use the device later in this driver.
6903 ++ * Note: if the power source is lost while the driver is running
6904 ++ * normally, the data read procedure will either return -ETIMEDOUT
6905 ++ * (for the alarm-less driver configuration) or just stop the
6906 ++ * repeated conversion. In the latter case, alas, we won't be able
6907 ++ * to detect the problem.
6908 ++ */
6909 ++ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_ALL, PVT_INTR_ALL);
6910 ++ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
6911 ++ pvt_set_tout(pvt, 0);
6912 ++ readl(pvt->regs + PVT_DATA);
6913 ++
6914 ++ tout = PVT_TOUT_MIN / NSEC_PER_USEC;
6915 ++ usleep_range(tout, 2 * tout);
6916 ++
6917 ++ data = readl(pvt->regs + PVT_DATA);
6918 ++ if (!(data & PVT_DATA_VALID)) {
6919 ++ ret = -ENODEV;
6920 ++ dev_err(pvt->dev, "Sensor is powered down\n");
6921 ++ }
6922 ++
6923 ++ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
6924 ++
6925 ++ return ret;
6926 ++}
6927 ++
6928 ++static int pvt_init_iface(struct pvt_hwmon *pvt)
6929 ++{
6930 ++ unsigned long rate;
6931 + u32 trim, temp;
6932 +
6933 ++ rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
6934 ++ if (!rate) {
6935 ++ dev_err(pvt->dev, "Invalid reference clock rate\n");
6936 ++ return -ENODEV;
6937 ++ }
6938 ++
6939 + /*
6940 + * Make sure all interrupts and controller are disabled so not to
6941 + * accidentally have ISR executed before the driver data is fully
6942 +@@ -1000,12 +1027,37 @@ static void pvt_init_iface(struct pvt_hwmon *pvt)
6943 + pvt_set_mode(pvt, pvt_info[pvt->sensor].mode);
6944 + pvt_set_tout(pvt, PVT_TOUT_DEF);
6945 +
6946 ++ /*
6947 ++ * Preserve the current ref-clock based delay (Ttotal) between the
6948 ++ * sensors' data samples in the driver data so as not to recalculate
6949 ++ * it on each data request and timeout read. It consists of the
6950 ++ * delay introduced by the internal ref-clock timer (N / Fclk) and the
6951 ++ * constant timeout caused by each conversion latency (Tmin):
6952 ++ * Ttotal = N / Fclk + Tmin
6953 ++ * If alarms are enabled the sensors are polled one after another, and
6954 ++ * in order to get the next measurement of a particular sensor the
6955 ++ * caller may have to wait until all the others have been
6956 ++ * polled. In that case the formula looks a bit different:
6957 ++ * Ttotal = 5 * (N / Fclk + Tmin)
6958 ++ */
6959 ++#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
6960 ++ pvt->timeout = ktime_set(PVT_SENSORS_NUM * PVT_TOUT_DEF, 0);
6961 ++ pvt->timeout = ktime_divns(pvt->timeout, rate);
6962 ++ pvt->timeout = ktime_add_ns(pvt->timeout, PVT_SENSORS_NUM * PVT_TOUT_MIN);
6963 ++#else
6964 ++ pvt->timeout = ktime_set(PVT_TOUT_DEF, 0);
6965 ++ pvt->timeout = ktime_divns(pvt->timeout, rate);
6966 ++ pvt->timeout = ktime_add_ns(pvt->timeout, PVT_TOUT_MIN);
6967 ++#endif
6968 ++
6969 + trim = PVT_TRIM_DEF;
6970 + if (!of_property_read_u32(pvt->dev->of_node,
6971 + "baikal,pvt-temp-offset-millicelsius", &temp))
6972 + trim = pvt_calc_trim(temp);
6973 +
6974 + pvt_set_trim(pvt, trim);
6975 ++
6976 ++ return 0;
6977 + }
6978 +
6979 + static int pvt_request_irq(struct pvt_hwmon *pvt)
6980 +@@ -1109,7 +1161,13 @@ static int pvt_probe(struct platform_device *pdev)
6981 + if (ret)
6982 + return ret;
6983 +
6984 +- pvt_init_iface(pvt);
6985 ++ ret = pvt_check_pwr(pvt);
6986 ++ if (ret)
6987 ++ return ret;
6988 ++
6989 ++ ret = pvt_init_iface(pvt);
6990 ++ if (ret)
6991 ++ return ret;
6992 +
6993 + ret = pvt_request_irq(pvt);
6994 + if (ret)
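
The bt1-pvt.c conversion above relies on the wait_for_completion_timeout() convention: it returns the remaining jiffies on success and 0 on timeout, hence the later "if (!ret) return -ETIMEDOUT". The wait budget is the cached conversion delay multiplied by two to avoid false timeouts, so a 5 ms Ttotal yields a 10 ms budget. A toy model of the return-value handling, with all names hypothetical:

    #include <errno.h>
    #include <stdio.h>

    /* Toy stand-in for wait_for_completion_timeout(): returns the time
     * left on success and 0 on timeout, matching the kernel convention. */
    static unsigned long wait_done(int completed, unsigned long budget)
    {
            return completed ? budget / 2 : 0;
    }

    int main(void)
    {
            unsigned long budget = 2 * 5;   /* double a 5-tick conversion delay */
            unsigned long ret = wait_done(0, budget);

            if (!ret) {
                    printf("timed out: -ETIMEDOUT (%d)\n", ETIMEDOUT);
                    return 1;
            }
            printf("data ready, %lu ticks left\n", ret);
            return 0;
    }
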
6995 +diff --git a/drivers/hwmon/bt1-pvt.h b/drivers/hwmon/bt1-pvt.h
6996 +index 5eac73e948854..93b8dd5e7c944 100644
6997 +--- a/drivers/hwmon/bt1-pvt.h
6998 ++++ b/drivers/hwmon/bt1-pvt.h
6999 +@@ -10,6 +10,7 @@
7000 + #include <linux/completion.h>
7001 + #include <linux/hwmon.h>
7002 + #include <linux/kernel.h>
7003 ++#include <linux/ktime.h>
7004 + #include <linux/mutex.h>
7005 + #include <linux/seqlock.h>
7006 +
7007 +@@ -201,6 +202,7 @@ struct pvt_cache {
7008 + * if alarms are disabled).
7009 + * @sensor: current PVT sensor the data conversion is being performed for.
7010 + * @cache: data cache descriptor.
7011 ++ * @timeout: conversion timeout cache.
7012 + */
7013 + struct pvt_hwmon {
7014 + struct device *dev;
7015 +@@ -214,6 +216,7 @@ struct pvt_hwmon {
7016 + struct mutex iface_mtx;
7017 + enum pvt_sensor_type sensor;
7018 + struct pvt_cache cache[PVT_SENSORS_NUM];
7019 ++ ktime_t timeout;
7020 + };
7021 +
7022 + /*
7023 +diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
7024 +index 18b4e071067f7..de04dff28945b 100644
7025 +--- a/drivers/hwmon/pmbus/max34440.c
7026 ++++ b/drivers/hwmon/pmbus/max34440.c
7027 +@@ -388,7 +388,6 @@ static struct pmbus_driver_info max34440_info[] = {
7028 + .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
7029 + .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
7030 + .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
7031 +- .read_byte_data = max34440_read_byte_data,
7032 + .read_word_data = max34440_read_word_data,
7033 + .write_word_data = max34440_write_word_data,
7034 + },
7035 +@@ -419,7 +418,6 @@ static struct pmbus_driver_info max34440_info[] = {
7036 + .func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
7037 + .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
7038 + .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
7039 +- .read_byte_data = max34440_read_byte_data,
7040 + .read_word_data = max34440_read_word_data,
7041 + .write_word_data = max34440_write_word_data,
7042 + },
7043 +@@ -455,7 +453,6 @@ static struct pmbus_driver_info max34440_info[] = {
7044 + .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
7045 + .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
7046 + .func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
7047 +- .read_byte_data = max34440_read_byte_data,
7048 + .read_word_data = max34440_read_word_data,
7049 + .write_word_data = max34440_write_word_data,
7050 + },
7051 +diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
7052 +index 5a5120121e507..3964ceab2817c 100644
7053 +--- a/drivers/hwmon/w83627ehf.c
7054 ++++ b/drivers/hwmon/w83627ehf.c
7055 +@@ -1951,8 +1951,12 @@ static int w83627ehf_probe(struct platform_device *pdev)
7056 + data,
7057 + &w83627ehf_chip_info,
7058 + w83627ehf_groups);
7059 ++ if (IS_ERR(hwmon_dev)) {
7060 ++ err = PTR_ERR(hwmon_dev);
7061 ++ goto exit_release;
7062 ++ }
7063 +
7064 +- return PTR_ERR_OR_ZERO(hwmon_dev);
7065 ++ return 0;
7066 +
7067 + exit_release:
7068 + release_region(res->start, IOREGION_LENGTH);
7069 +diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
7070 +index 3ccc703dc9409..167fbc2e7033f 100644
7071 +--- a/drivers/hwtracing/coresight/coresight-cti.c
7072 ++++ b/drivers/hwtracing/coresight/coresight-cti.c
7073 +@@ -86,22 +86,16 @@ void cti_write_all_hw_regs(struct cti_drvdata *drvdata)
7074 + CS_LOCK(drvdata->base);
7075 + }
7076 +
7077 +-static void cti_enable_hw_smp_call(void *info)
7078 +-{
7079 +- struct cti_drvdata *drvdata = info;
7080 +-
7081 +- cti_write_all_hw_regs(drvdata);
7082 +-}
7083 +-
7084 + /* write regs to hardware and enable */
7085 + static int cti_enable_hw(struct cti_drvdata *drvdata)
7086 + {
7087 + struct cti_config *config = &drvdata->config;
7088 + struct device *dev = &drvdata->csdev->dev;
7089 ++ unsigned long flags;
7090 + int rc = 0;
7091 +
7092 + pm_runtime_get_sync(dev->parent);
7093 +- spin_lock(&drvdata->spinlock);
7094 ++ spin_lock_irqsave(&drvdata->spinlock, flags);
7095 +
7096 + /* no need to do anything if enabled or unpowered*/
7097 + if (config->hw_enabled || !config->hw_powered)
7098 +@@ -112,19 +106,11 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
7099 + if (rc)
7100 + goto cti_err_not_enabled;
7101 +
7102 +- if (drvdata->ctidev.cpu >= 0) {
7103 +- rc = smp_call_function_single(drvdata->ctidev.cpu,
7104 +- cti_enable_hw_smp_call,
7105 +- drvdata, 1);
7106 +- if (rc)
7107 +- goto cti_err_not_enabled;
7108 +- } else {
7109 +- cti_write_all_hw_regs(drvdata);
7110 +- }
7111 ++ cti_write_all_hw_regs(drvdata);
7112 +
7113 + config->hw_enabled = true;
7114 + atomic_inc(&drvdata->config.enable_req_count);
7115 +- spin_unlock(&drvdata->spinlock);
7116 ++ spin_unlock_irqrestore(&drvdata->spinlock, flags);
7117 + return rc;
7118 +
7119 + cti_state_unchanged:
7120 +@@ -132,7 +118,7 @@ cti_state_unchanged:
7121 +
7122 + /* cannot enable due to error */
7123 + cti_err_not_enabled:
7124 +- spin_unlock(&drvdata->spinlock);
7125 ++ spin_unlock_irqrestore(&drvdata->spinlock, flags);
7126 + pm_runtime_put(dev->parent);
7127 + return rc;
7128 + }
7129 +@@ -141,9 +127,7 @@ cti_err_not_enabled:
7130 + static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
7131 + {
7132 + struct cti_config *config = &drvdata->config;
7133 +- struct device *dev = &drvdata->csdev->dev;
7134 +
7135 +- pm_runtime_get_sync(dev->parent);
7136 + spin_lock(&drvdata->spinlock);
7137 + config->hw_powered = true;
7138 +
7139 +@@ -163,7 +147,6 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
7140 + /* did not re-enable due to no claim / no request */
7141 + cti_hp_not_enabled:
7142 + spin_unlock(&drvdata->spinlock);
7143 +- pm_runtime_put(dev->parent);
7144 + }
7145 +
7146 + /* disable hardware */
7147 +@@ -511,12 +494,15 @@ static bool cti_add_sysfs_link(struct cti_drvdata *drvdata,
7148 + return !link_err;
7149 + }
7150 +
7151 +-static void cti_remove_sysfs_link(struct cti_trig_con *tc)
7152 ++static void cti_remove_sysfs_link(struct cti_drvdata *drvdata,
7153 ++ struct cti_trig_con *tc)
7154 + {
7155 + struct coresight_sysfs_link link_info;
7156 +
7157 ++ link_info.orig = drvdata->csdev;
7158 + link_info.orig_name = tc->con_dev_name;
7159 + link_info.target = tc->con_dev;
7160 ++ link_info.target_name = dev_name(&drvdata->csdev->dev);
7161 + coresight_remove_sysfs_link(&link_info);
7162 + }
7163 +
7164 +@@ -606,8 +592,8 @@ void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
7165 + ctidrv = csdev_to_cti_drvdata(csdev->ect_dev);
7166 + ctidev = &ctidrv->ctidev;
7167 + list_for_each_entry(tc, &ctidev->trig_cons, node) {
7168 +- if (tc->con_dev == csdev->ect_dev) {
7169 +- cti_remove_sysfs_link(tc);
7170 ++ if (tc->con_dev == csdev) {
7171 ++ cti_remove_sysfs_link(ctidrv, tc);
7172 + tc->con_dev = NULL;
7173 + break;
7174 + }
7175 +@@ -651,7 +637,7 @@ static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata)
7176 + if (tc->con_dev) {
7177 + coresight_set_assoc_ectdev_mutex(tc->con_dev,
7178 + NULL);
7179 +- cti_remove_sysfs_link(tc);
7180 ++ cti_remove_sysfs_link(drvdata, tc);
7181 + tc->con_dev = NULL;
7182 + }
7183 + }
7184 +@@ -742,7 +728,8 @@ static int cti_dying_cpu(unsigned int cpu)
7185 +
7186 + spin_lock(&drvdata->spinlock);
7187 + drvdata->config.hw_powered = false;
7188 +- coresight_disclaim_device(drvdata->base);
7189 ++ if (drvdata->config.hw_enabled)
7190 ++ coresight_disclaim_device(drvdata->base);
7191 + spin_unlock(&drvdata->spinlock);
7192 + return 0;
7193 + }
7194 +diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
7195 +index 1a3169e69bb19..be591b557df94 100644
7196 +--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
7197 ++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
7198 +@@ -126,10 +126,10 @@ static void free_sink_buffer(struct etm_event_data *event_data)
7199 + cpumask_t *mask = &event_data->mask;
7200 + struct coresight_device *sink;
7201 +
7202 +- if (WARN_ON(cpumask_empty(mask)))
7203 ++ if (!event_data->snk_config)
7204 + return;
7205 +
7206 +- if (!event_data->snk_config)
7207 ++ if (WARN_ON(cpumask_empty(mask)))
7208 + return;
7209 +
7210 + cpu = cpumask_first(mask);
7211 +@@ -321,6 +321,16 @@ static void etm_event_start(struct perf_event *event, int flags)
7212 + if (!event_data)
7213 + goto fail;
7214 +
7215 ++ /*
7216 ++ * Check if this ETM is allowed to trace, as decided
7217 ++ * at etm_setup_aux(). This could be due to an unreachable
7218 ++ * sink from this ETM. We can't do much in this case if
7219 ++ * the sink was specified or hinted to the driver. For
7220 ++ * now, simply don't record anything on this ETM.
7221 ++ */
7222 ++ if (!cpumask_test_cpu(cpu, &event_data->mask))
7223 ++ goto fail_end_stop;
7224 ++
7225 + path = etm_event_cpu_path(event_data, cpu);
7226 + /* We need a sink, no need to continue without one */
7227 + sink = coresight_get_sink(path);
7228 +diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
7229 +index b673e738bc9a8..a588cd6de01c7 100644
7230 +--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
7231 ++++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
7232 +@@ -206,7 +206,7 @@ static ssize_t reset_store(struct device *dev,
7233 + * each trace run.
7234 + */
7235 + config->vinst_ctrl = BIT(0);
7236 +- if (drvdata->nr_addr_cmp == true) {
7237 ++ if (drvdata->nr_addr_cmp > 0) {
7238 + config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
7239 + /* SSSTATUS, bit[9] */
7240 + config->vinst_ctrl |= BIT(9);
7241 +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
7242 +index 96425e818fc20..fd678792b755d 100644
7243 +--- a/drivers/hwtracing/coresight/coresight-etm4x.c
7244 ++++ b/drivers/hwtracing/coresight/coresight-etm4x.c
7245 +@@ -48,12 +48,11 @@ module_param(pm_save_enable, int, 0444);
7246 + MODULE_PARM_DESC(pm_save_enable,
7247 + "Save/restore state on power down: 1 = never, 2 = self-hosted");
7248 +
7249 +-/* The number of ETMv4 currently registered */
7250 +-static int etm4_count;
7251 + static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
7252 + static void etm4_set_default_config(struct etmv4_config *config);
7253 + static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
7254 + struct perf_event *event);
7255 ++static u64 etm4_get_access_type(struct etmv4_config *config);
7256 +
7257 + static enum cpuhp_state hp_online;
7258 +
7259 +@@ -785,6 +784,22 @@ static void etm4_init_arch_data(void *info)
7260 + CS_LOCK(drvdata->base);
7261 + }
7262 +
7263 ++/* Set ELx trace filter access in the TRCVICTLR register */
7264 ++static void etm4_set_victlr_access(struct etmv4_config *config)
7265 ++{
7266 ++ u64 access_type;
7267 ++
7268 ++ config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK | ETM_EXLEVEL_NS_VICTLR_MASK);
7269 ++
7270 ++ /*
7271 ++ * TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering
7272 ++ * bits in vinst_ctrl, same bit pattern as TRCACATRn values returned by
7273 ++ * etm4_get_access_type() but with a relative shift in this register.
7274 ++ */
7275 ++ access_type = etm4_get_access_type(config) << ETM_EXLEVEL_LSHIFT_TRCVICTLR;
7276 ++ config->vinst_ctrl |= (u32)access_type;
7277 ++}
7278 ++
7279 + static void etm4_set_default_config(struct etmv4_config *config)
7280 + {
7281 + /* disable all events tracing */
7282 +@@ -802,6 +817,9 @@ static void etm4_set_default_config(struct etmv4_config *config)
7283 +
7284 + /* TRCVICTLR::EVENT = 0x01, select the always on logic */
7285 + config->vinst_ctrl = BIT(0);
7286 ++
7287 ++ /* TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering */
7288 ++ etm4_set_victlr_access(config);
7289 + }
7290 +
7291 + static u64 etm4_get_ns_access_type(struct etmv4_config *config)
7292 +@@ -1066,7 +1084,7 @@ out:
7293 +
7294 + void etm4_config_trace_mode(struct etmv4_config *config)
7295 + {
7296 +- u32 addr_acc, mode;
7297 ++ u32 mode;
7298 +
7299 + mode = config->mode;
7300 + mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
7301 +@@ -1078,15 +1096,7 @@ void etm4_config_trace_mode(struct etmv4_config *config)
7302 + if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
7303 + return;
7304 +
7305 +- addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
7306 +- /* clear default config */
7307 +- addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS |
7308 +- ETM_EXLEVEL_NS_HYP);
7309 +-
7310 +- addr_acc |= etm4_get_ns_access_type(config);
7311 +-
7312 +- config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
7313 +- config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
7314 ++ etm4_set_victlr_access(config);
7315 + }
7316 +
7317 + static int etm4_online_cpu(unsigned int cpu)
7318 +@@ -1183,7 +1193,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
7319 + state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
7320 + state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
7321 +
7322 +- for (i = 0; i < drvdata->nrseqstate; i++)
7323 ++ for (i = 0; i < drvdata->nrseqstate - 1; i++)
7324 + state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
7325 +
7326 + state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
7327 +@@ -1227,7 +1237,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
7328 + state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
7329 +
7330 + state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
7331 +- state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR1);
7332 ++ state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
7333 +
7334 + state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
7335 +
7336 +@@ -1288,7 +1298,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
7337 + writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
7338 + writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
7339 +
7340 +- for (i = 0; i < drvdata->nrseqstate; i++)
7341 ++ for (i = 0; i < drvdata->nrseqstate - 1; i++)
7342 + writel_relaxed(state->trcseqevr[i],
7343 + drvdata->base + TRCSEQEVRn(i));
7344 +
7345 +@@ -1337,7 +1347,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
7346 + writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
7347 +
7348 + writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
7349 +- writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR1);
7350 ++ writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
7351 +
7352 + writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
7353 +
7354 +@@ -1397,28 +1407,25 @@ static struct notifier_block etm4_cpu_pm_nb = {
7355 + .notifier_call = etm4_cpu_pm_notify,
7356 + };
7357 +
7358 +-/* Setup PM. Called with cpus locked. Deals with error conditions and counts */
7359 +-static int etm4_pm_setup_cpuslocked(void)
7360 ++/* Setup PM. Deals with error conditions */
7361 ++static int __init etm4_pm_setup(void)
7362 + {
7363 + int ret;
7364 +
7365 +- if (etm4_count++)
7366 +- return 0;
7367 +-
7368 + ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb);
7369 + if (ret)
7370 +- goto reduce_count;
7371 ++ return ret;
7372 +
7373 +- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
7374 +- "arm/coresight4:starting",
7375 +- etm4_starting_cpu, etm4_dying_cpu);
7376 ++ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
7377 ++ "arm/coresight4:starting",
7378 ++ etm4_starting_cpu, etm4_dying_cpu);
7379 +
7380 + if (ret)
7381 + goto unregister_notifier;
7382 +
7383 +- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
7384 +- "arm/coresight4:online",
7385 +- etm4_online_cpu, NULL);
7386 ++ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
7387 ++ "arm/coresight4:online",
7388 ++ etm4_online_cpu, NULL);
7389 +
7390 + /* HP dyn state ID returned in ret on success */
7391 + if (ret > 0) {
7392 +@@ -1427,21 +1434,15 @@ static int etm4_pm_setup_cpuslocked(void)
7393 + }
7394 +
7395 + /* failed dyn state - remove others */
7396 +- cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING);
7397 ++ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
7398 +
7399 + unregister_notifier:
7400 + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
7401 +-
7402 +-reduce_count:
7403 +- --etm4_count;
7404 + return ret;
7405 + }
7406 +
7407 +-static void etm4_pm_clear(void)
7408 ++static void __init etm4_pm_clear(void)
7409 + {
7410 +- if (--etm4_count != 0)
7411 +- return;
7412 +-
7413 + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
7414 + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
7415 + if (hp_online) {
7416 +@@ -1497,22 +1498,12 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
7417 + if (!desc.name)
7418 + return -ENOMEM;
7419 +
7420 +- cpus_read_lock();
7421 + etmdrvdata[drvdata->cpu] = drvdata;
7422 +
7423 + if (smp_call_function_single(drvdata->cpu,
7424 + etm4_init_arch_data, drvdata, 1))
7425 + dev_err(dev, "ETM arch init failed\n");
7426 +
7427 +- ret = etm4_pm_setup_cpuslocked();
7428 +- cpus_read_unlock();
7429 +-
7430 +- /* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */
7431 +- if (ret) {
7432 +- etmdrvdata[drvdata->cpu] = NULL;
7433 +- return ret;
7434 +- }
7435 +-
7436 + if (etm4_arch_supported(drvdata->arch) == false) {
7437 + ret = -EINVAL;
7438 + goto err_arch_supported;
7439 +@@ -1559,7 +1550,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
7440 +
7441 + err_arch_supported:
7442 + etmdrvdata[drvdata->cpu] = NULL;
7443 +- etm4_pm_clear();
7444 + return ret;
7445 + }
7446 +
7447 +@@ -1597,4 +1587,23 @@ static struct amba_driver etm4x_driver = {
7448 + .probe = etm4_probe,
7449 + .id_table = etm4_ids,
7450 + };
7451 +-builtin_amba_driver(etm4x_driver);
7452 ++
7453 ++static int __init etm4x_init(void)
7454 ++{
7455 ++ int ret;
7456 ++
7457 ++ ret = etm4_pm_setup();
7458 ++
7459 ++ /* etm4_pm_setup() does its own cleanup - exit on error */
7460 ++ if (ret)
7461 ++ return ret;
7462 ++
7463 ++ ret = amba_driver_register(&etm4x_driver);
7464 ++ if (ret) {
7465 ++ pr_err("Error registering etm4x driver\n");
7466 ++ etm4_pm_clear();
7467 ++ }
7468 ++
7469 ++ return ret;
7470 ++}
7471 ++device_initcall(etm4x_init);
7472 +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
7473 +index b8283e1d6d88c..5259f96fd28a0 100644
7474 +--- a/drivers/hwtracing/coresight/coresight-etm4x.h
7475 ++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
7476 +@@ -192,6 +192,9 @@
7477 + #define ETM_EXLEVEL_NS_HYP BIT(14)
7478 + #define ETM_EXLEVEL_NS_NA BIT(15)
7479 +
7480 ++/* access level control in TRCVICTLR - same bits as TRCACATRn but shifted */
7481 ++#define ETM_EXLEVEL_LSHIFT_TRCVICTLR 8
7482 ++
7483 + /* secure / non secure masks - TRCVICTLR, IDR3 */
7484 + #define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16)
7485 + /* NS MON (EL3) mode never implemented */
7486 +diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
7487 +index bfd44231d7ad5..227e234a24701 100644
7488 +--- a/drivers/hwtracing/coresight/coresight-platform.c
7489 ++++ b/drivers/hwtracing/coresight/coresight-platform.c
7490 +@@ -711,11 +711,11 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
7491 + return dir;
7492 +
7493 + if (dir == ACPI_CORESIGHT_LINK_MASTER) {
7494 +- if (ptr->outport > pdata->nr_outport)
7495 +- pdata->nr_outport = ptr->outport;
7496 ++ if (ptr->outport >= pdata->nr_outport)
7497 ++ pdata->nr_outport = ptr->outport + 1;
7498 + ptr++;
7499 + } else {
7500 +- WARN_ON(pdata->nr_inport == ptr->child_port);
7501 ++ WARN_ON(pdata->nr_inport == ptr->child_port + 1);
7502 + /*
7503 + * We do not track input port connections for a device.
7504 + * However we need the highest port number described,
7505 +@@ -723,8 +723,8 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
7506 + * record for an output connection. Hence, do not move
7507 + * the ptr for input connections
7508 + */
7509 +- if (ptr->child_port > pdata->nr_inport)
7510 +- pdata->nr_inport = ptr->child_port;
7511 ++ if (ptr->child_port >= pdata->nr_inport)
7512 ++ pdata->nr_inport = ptr->child_port + 1;
7513 + }
7514 + }
7515 +
7516 +diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
7517 +index e9c90f2de34ac..cdcb1917216fd 100644
7518 +--- a/drivers/hwtracing/coresight/coresight.c
7519 ++++ b/drivers/hwtracing/coresight/coresight.c
7520 +@@ -1188,7 +1188,6 @@ static void coresight_device_release(struct device *dev)
7521 + {
7522 + struct coresight_device *csdev = to_coresight_device(dev);
7523 +
7524 +- cti_remove_assoc_from_csdev(csdev);
7525 + fwnode_handle_put(csdev->dev.fwnode);
7526 + kfree(csdev->refcnt);
7527 + kfree(csdev);
7528 +@@ -1522,6 +1521,7 @@ void coresight_unregister(struct coresight_device *csdev)
7529 + {
7530 + etm_perf_del_symlink_sink(csdev);
7531 + /* Remove references of that device in the topology */
7532 ++ cti_remove_assoc_from_csdev(csdev);
7533 + coresight_remove_conns(csdev);
7534 + coresight_clear_default_sink(csdev);
7535 + coresight_release_platform_data(csdev, csdev->pdata);
7536 +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
7537 +index 293e7a0760e77..7ccbfbcb02e9a 100644
7538 +--- a/drivers/i2c/busses/Kconfig
7539 ++++ b/drivers/i2c/busses/Kconfig
7540 +@@ -1181,6 +1181,7 @@ config I2C_RCAR
7541 + tristate "Renesas R-Car I2C Controller"
7542 + depends on ARCH_RENESAS || COMPILE_TEST
7543 + select I2C_SLAVE
7544 ++ select RESET_CONTROLLER if ARCH_RCAR_GEN3
7545 + help
7546 + If you say yes to this option, support will be included for the
7547 + R-Car I2C controller.
7548 +diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
7549 +index e627d7b2790f7..37c510d9347a7 100644
7550 +--- a/drivers/i2c/i2c-core-acpi.c
7551 ++++ b/drivers/i2c/i2c-core-acpi.c
7552 +@@ -264,6 +264,7 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
7553 + void i2c_acpi_register_devices(struct i2c_adapter *adap)
7554 + {
7555 + acpi_status status;
7556 ++ acpi_handle handle;
7557 +
7558 + if (!has_acpi_companion(&adap->dev))
7559 + return;
7560 +@@ -274,6 +275,15 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
7561 + adap, NULL);
7562 + if (ACPI_FAILURE(status))
7563 + dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
7564 ++
7565 ++ if (!adap->dev.parent)
7566 ++ return;
7567 ++
7568 ++ handle = ACPI_HANDLE(adap->dev.parent);
7569 ++ if (!handle)
7570 ++ return;
7571 ++
7572 ++ acpi_walk_dep_device_list(handle);
7573 + }
7574 +
7575 + static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
7576 +@@ -719,7 +729,6 @@ int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
7577 + return -ENOMEM;
7578 + }
7579 +
7580 +- acpi_walk_dep_device_list(handle);
7581 + return 0;
7582 + }
7583 +
7584 +diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
7585 +index 97f2e29265da7..cc7564446ccd2 100644
7586 +--- a/drivers/i3c/master.c
7587 ++++ b/drivers/i3c/master.c
7588 +@@ -1782,6 +1782,21 @@ static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
7589 + i3c_master_detach_free_devs(master);
7590 + }
7591 +
7592 ++static void i3c_master_attach_boardinfo(struct i3c_dev_desc *i3cdev)
7593 ++{
7594 ++ struct i3c_master_controller *master = i3cdev->common.master;
7595 ++ struct i3c_dev_boardinfo *i3cboardinfo;
7596 ++
7597 ++ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
7598 ++ if (i3cdev->info.pid != i3cboardinfo->pid)
7599 ++ continue;
7600 ++
7601 ++ i3cdev->boardinfo = i3cboardinfo;
7602 ++ i3cdev->info.static_addr = i3cboardinfo->static_addr;
7603 ++ return;
7604 ++ }
7605 ++}
7606 ++
7607 + static struct i3c_dev_desc *
7608 + i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
7609 + {
7610 +@@ -1837,10 +1852,10 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
7611 + if (ret)
7612 + goto err_detach_dev;
7613 +
7614 ++ i3c_master_attach_boardinfo(newdev);
7615 ++
7616 + olddev = i3c_master_search_i3c_dev_duplicate(newdev);
7617 + if (olddev) {
7618 +- newdev->boardinfo = olddev->boardinfo;
7619 +- newdev->info.static_addr = olddev->info.static_addr;
7620 + newdev->dev = olddev->dev;
7621 + if (newdev->dev)
7622 + newdev->dev->desc = newdev;
7623 +diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
7624 +index 3fee8bd7fe20b..3f2226928fe05 100644
7625 +--- a/drivers/i3c/master/i3c-master-cdns.c
7626 ++++ b/drivers/i3c/master/i3c-master-cdns.c
7627 +@@ -1635,8 +1635,10 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
7628 + master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
7629 + sizeof(*master->ibi.slots),
7630 + GFP_KERNEL);
7631 +- if (!master->ibi.slots)
7632 ++ if (!master->ibi.slots) {
7633 ++ ret = -ENOMEM;
7634 + goto err_disable_sysclk;
7635 ++ }
7636 +
7637 + writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
7638 + writel(MST_INT_IBIR_THR, master->regs + MST_IER);
7639 +diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
7640 +index 0e2068ec068b8..358636954619d 100644
7641 +--- a/drivers/iio/adc/stm32-adc-core.c
7642 ++++ b/drivers/iio/adc/stm32-adc-core.c
7643 +@@ -794,6 +794,13 @@ static int stm32_adc_core_runtime_resume(struct device *dev)
7644 + {
7645 + return stm32_adc_core_hw_start(dev);
7646 + }
7647 ++
7648 ++static int stm32_adc_core_runtime_idle(struct device *dev)
7649 ++{
7650 ++ pm_runtime_mark_last_busy(dev);
7651 ++
7652 ++ return 0;
7653 ++}
7654 + #endif
7655 +
7656 + static const struct dev_pm_ops stm32_adc_core_pm_ops = {
7657 +@@ -801,7 +808,7 @@ static const struct dev_pm_ops stm32_adc_core_pm_ops = {
7658 + pm_runtime_force_resume)
7659 + SET_RUNTIME_PM_OPS(stm32_adc_core_runtime_suspend,
7660 + stm32_adc_core_runtime_resume,
7661 +- NULL)
7662 ++ stm32_adc_core_runtime_idle)
7663 + };
7664 +
7665 + static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
7666 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
7667 +index 5888311b21198..baf0b6ae7a8bb 100644
7668 +--- a/drivers/infiniband/core/cma.c
7669 ++++ b/drivers/infiniband/core/cma.c
7670 +@@ -68,6 +68,9 @@ static const char * const cma_events[] = {
7671 + [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
7672 + };
7673 +
7674 ++static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
7675 ++ union ib_gid *mgid);
7676 ++
7677 + const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
7678 + {
7679 + size_t index = event;
7680 +@@ -345,13 +348,10 @@ struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
7681 +
7682 + struct cma_multicast {
7683 + struct rdma_id_private *id_priv;
7684 +- union {
7685 +- struct ib_sa_multicast *ib;
7686 +- } multicast;
7687 ++ struct ib_sa_multicast *sa_mc;
7688 + struct list_head list;
7689 + void *context;
7690 + struct sockaddr_storage addr;
7691 +- struct kref mcref;
7692 + u8 join_state;
7693 + };
7694 +
7695 +@@ -363,18 +363,6 @@ struct cma_work {
7696 + struct rdma_cm_event event;
7697 + };
7698 +
7699 +-struct cma_ndev_work {
7700 +- struct work_struct work;
7701 +- struct rdma_id_private *id;
7702 +- struct rdma_cm_event event;
7703 +-};
7704 +-
7705 +-struct iboe_mcast_work {
7706 +- struct work_struct work;
7707 +- struct rdma_id_private *id;
7708 +- struct cma_multicast *mc;
7709 +-};
7710 +-
7711 + union cma_ip_addr {
7712 + struct in6_addr ip6;
7713 + struct {
7714 +@@ -483,14 +471,6 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
7715 + rdma_start_port(cma_dev->device)];
7716 + }
7717 +
7718 +-static inline void release_mc(struct kref *kref)
7719 +-{
7720 +- struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
7721 +-
7722 +- kfree(mc->multicast.ib);
7723 +- kfree(mc);
7724 +-}
7725 +-
7726 + static void cma_release_dev(struct rdma_id_private *id_priv)
7727 + {
7728 + mutex_lock(&lock);
7729 +@@ -1783,19 +1763,30 @@ static void cma_release_port(struct rdma_id_private *id_priv)
7730 + mutex_unlock(&lock);
7731 + }
7732 +
7733 +-static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
7734 +- struct cma_multicast *mc)
7735 ++static void destroy_mc(struct rdma_id_private *id_priv,
7736 ++ struct cma_multicast *mc)
7737 + {
7738 +- struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
7739 +- struct net_device *ndev = NULL;
7740 ++ if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
7741 ++ ib_sa_free_multicast(mc->sa_mc);
7742 +
7743 +- if (dev_addr->bound_dev_if)
7744 +- ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
7745 +- if (ndev) {
7746 +- cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
7747 +- dev_put(ndev);
7748 ++ if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
7749 ++ struct rdma_dev_addr *dev_addr =
7750 ++ &id_priv->id.route.addr.dev_addr;
7751 ++ struct net_device *ndev = NULL;
7752 ++
7753 ++ if (dev_addr->bound_dev_if)
7754 ++ ndev = dev_get_by_index(dev_addr->net,
7755 ++ dev_addr->bound_dev_if);
7756 ++ if (ndev) {
7757 ++ union ib_gid mgid;
7758 ++
7759 ++ cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
7760 ++ &mgid);
7761 ++ cma_igmp_send(ndev, &mgid, false);
7762 ++ dev_put(ndev);
7763 ++ }
7764 + }
7765 +- kref_put(&mc->mcref, release_mc);
7766 ++ kfree(mc);
7767 + }
7768 +
7769 + static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
7770 +@@ -1803,16 +1794,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
7771 + struct cma_multicast *mc;
7772 +
7773 + while (!list_empty(&id_priv->mc_list)) {
7774 +- mc = container_of(id_priv->mc_list.next,
7775 +- struct cma_multicast, list);
7776 ++ mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
7777 ++ list);
7778 + list_del(&mc->list);
7779 +- if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
7780 +- id_priv->id.port_num)) {
7781 +- ib_sa_free_multicast(mc->multicast.ib);
7782 +- kfree(mc);
7783 +- } else {
7784 +- cma_leave_roce_mc_group(id_priv, mc);
7785 +- }
7786 ++ destroy_mc(id_priv, mc);
7787 + }
7788 + }
7789 +
7790 +@@ -2647,32 +2632,14 @@ static void cma_work_handler(struct work_struct *_work)
7791 + struct rdma_id_private *id_priv = work->id;
7792 +
7793 + mutex_lock(&id_priv->handler_mutex);
7794 +- if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
7795 ++ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
7796 ++ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
7797 + goto out_unlock;
7798 +-
7799 +- if (cma_cm_event_handler(id_priv, &work->event)) {
7800 +- cma_id_put(id_priv);
7801 +- destroy_id_handler_unlock(id_priv);
7802 +- goto out_free;
7803 ++ if (work->old_state != 0 || work->new_state != 0) {
7804 ++ if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
7805 ++ goto out_unlock;
7806 + }
7807 +
7808 +-out_unlock:
7809 +- mutex_unlock(&id_priv->handler_mutex);
7810 +- cma_id_put(id_priv);
7811 +-out_free:
7812 +- kfree(work);
7813 +-}
7814 +-
7815 +-static void cma_ndev_work_handler(struct work_struct *_work)
7816 +-{
7817 +- struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
7818 +- struct rdma_id_private *id_priv = work->id;
7819 +-
7820 +- mutex_lock(&id_priv->handler_mutex);
7821 +- if (id_priv->state == RDMA_CM_DESTROYING ||
7822 +- id_priv->state == RDMA_CM_DEVICE_REMOVAL)
7823 +- goto out_unlock;
7824 +-
7825 + if (cma_cm_event_handler(id_priv, &work->event)) {
7826 + cma_id_put(id_priv);
7827 + destroy_id_handler_unlock(id_priv);
7828 +@@ -2683,6 +2650,8 @@ out_unlock:
7829 + mutex_unlock(&id_priv->handler_mutex);
7830 + cma_id_put(id_priv);
7831 + out_free:
7832 ++ if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
7833 ++ rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
7834 + kfree(work);
7835 + }
7836 +
7837 +@@ -4299,63 +4268,66 @@ out:
7838 + }
7839 + EXPORT_SYMBOL(rdma_disconnect);
7840 +
7841 +-static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
7842 ++static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
7843 ++ struct ib_sa_multicast *multicast,
7844 ++ struct rdma_cm_event *event,
7845 ++ struct cma_multicast *mc)
7846 + {
7847 +- struct rdma_id_private *id_priv;
7848 +- struct cma_multicast *mc = multicast->context;
7849 +- struct rdma_cm_event event = {};
7850 +- int ret = 0;
7851 +-
7852 +- id_priv = mc->id_priv;
7853 +- mutex_lock(&id_priv->handler_mutex);
7854 +- if (id_priv->state != RDMA_CM_ADDR_BOUND &&
7855 +- id_priv->state != RDMA_CM_ADDR_RESOLVED)
7856 +- goto out;
7857 ++ struct rdma_dev_addr *dev_addr;
7858 ++ enum ib_gid_type gid_type;
7859 ++ struct net_device *ndev;
7860 +
7861 + if (!status)
7862 + status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
7863 + else
7864 + pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
7865 + status);
7866 +- mutex_lock(&id_priv->qp_mutex);
7867 +- if (!status && id_priv->id.qp) {
7868 +- status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
7869 +- be16_to_cpu(multicast->rec.mlid));
7870 +- if (status)
7871 +- pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
7872 +- status);
7873 ++
7874 ++ event->status = status;
7875 ++ event->param.ud.private_data = mc->context;
7876 ++ if (status) {
7877 ++ event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
7878 ++ return;
7879 + }
7880 +- mutex_unlock(&id_priv->qp_mutex);
7881 +
7882 +- event.status = status;
7883 +- event.param.ud.private_data = mc->context;
7884 +- if (!status) {
7885 +- struct rdma_dev_addr *dev_addr =
7886 +- &id_priv->id.route.addr.dev_addr;
7887 +- struct net_device *ndev =
7888 +- dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
7889 +- enum ib_gid_type gid_type =
7890 +- id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
7891 +- rdma_start_port(id_priv->cma_dev->device)];
7892 +-
7893 +- event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
7894 +- ret = ib_init_ah_from_mcmember(id_priv->id.device,
7895 +- id_priv->id.port_num,
7896 +- &multicast->rec,
7897 +- ndev, gid_type,
7898 +- &event.param.ud.ah_attr);
7899 +- if (ret)
7900 +- event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
7901 ++ dev_addr = &id_priv->id.route.addr.dev_addr;
7902 ++ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
7903 ++ gid_type =
7904 ++ id_priv->cma_dev
7905 ++ ->default_gid_type[id_priv->id.port_num -
7906 ++ rdma_start_port(
7907 ++ id_priv->cma_dev->device)];
7908 ++
7909 ++ event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
7910 ++ if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
7911 ++ &multicast->rec, ndev, gid_type,
7912 ++ &event->param.ud.ah_attr)) {
7913 ++ event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
7914 ++ goto out;
7915 ++ }
7916 +
7917 +- event.param.ud.qp_num = 0xFFFFFF;
7918 +- event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
7919 +- if (ndev)
7920 +- dev_put(ndev);
7921 +- } else
7922 +- event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
7923 ++ event->param.ud.qp_num = 0xFFFFFF;
7924 ++ event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
7925 +
7926 +- ret = cma_cm_event_handler(id_priv, &event);
7927 ++out:
7928 ++ if (ndev)
7929 ++ dev_put(ndev);
7930 ++}
7931 +
7932 ++static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
7933 ++{
7934 ++ struct cma_multicast *mc = multicast->context;
7935 ++ struct rdma_id_private *id_priv = mc->id_priv;
7936 ++ struct rdma_cm_event event = {};
7937 ++ int ret = 0;
7938 ++
7939 ++ mutex_lock(&id_priv->handler_mutex);
7940 ++ if (id_priv->state != RDMA_CM_ADDR_BOUND &&
7941 ++ id_priv->state != RDMA_CM_ADDR_RESOLVED)
7942 ++ goto out;
7943 ++
7944 ++ cma_make_mc_event(status, id_priv, multicast, &event, mc);
7945 ++ ret = cma_cm_event_handler(id_priv, &event);
7946 + rdma_destroy_ah_attr(&event.param.ud.ah_attr);
7947 + if (ret) {
7948 + destroy_id_handler_unlock(id_priv);
7949 +@@ -4445,23 +4417,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
7950 + IB_SA_MCMEMBER_REC_MTU |
7951 + IB_SA_MCMEMBER_REC_HOP_LIMIT;
7952 +
7953 +- mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
7954 +- id_priv->id.port_num, &rec,
7955 +- comp_mask, GFP_KERNEL,
7956 +- cma_ib_mc_handler, mc);
7957 +- return PTR_ERR_OR_ZERO(mc->multicast.ib);
7958 +-}
7959 +-
7960 +-static void iboe_mcast_work_handler(struct work_struct *work)
7961 +-{
7962 +- struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
7963 +- struct cma_multicast *mc = mw->mc;
7964 +- struct ib_sa_multicast *m = mc->multicast.ib;
7965 +-
7966 +- mc->multicast.ib->context = mc;
7967 +- cma_ib_mc_handler(0, m);
7968 +- kref_put(&mc->mcref, release_mc);
7969 +- kfree(mw);
7970 ++ mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
7971 ++ id_priv->id.port_num, &rec, comp_mask,
7972 ++ GFP_KERNEL, cma_ib_mc_handler, mc);
7973 ++ return PTR_ERR_OR_ZERO(mc->sa_mc);
7974 + }
7975 +
7976 + static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
7977 +@@ -4496,52 +4455,47 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
7978 + static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
7979 + struct cma_multicast *mc)
7980 + {
7981 +- struct iboe_mcast_work *work;
7982 ++ struct cma_work *work;
7983 + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
7984 + int err = 0;
7985 + struct sockaddr *addr = (struct sockaddr *)&mc->addr;
7986 + struct net_device *ndev = NULL;
7987 ++ struct ib_sa_multicast ib;
7988 + enum ib_gid_type gid_type;
7989 + bool send_only;
7990 +
7991 + send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
7992 +
7993 +- if (cma_zero_addr((struct sockaddr *)&mc->addr))
7994 ++ if (cma_zero_addr(addr))
7995 + return -EINVAL;
7996 +
7997 + work = kzalloc(sizeof *work, GFP_KERNEL);
7998 + if (!work)
7999 + return -ENOMEM;
8000 +
8001 +- mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
8002 +- if (!mc->multicast.ib) {
8003 +- err = -ENOMEM;
8004 +- goto out1;
8005 +- }
8006 +-
8007 + gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
8008 + rdma_start_port(id_priv->cma_dev->device)];
8009 +- cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type);
8010 ++ cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
8011 +
8012 +- mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
8013 ++ ib.rec.pkey = cpu_to_be16(0xffff);
8014 + if (id_priv->id.ps == RDMA_PS_UDP)
8015 +- mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
8016 ++ ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
8017 +
8018 + if (dev_addr->bound_dev_if)
8019 + ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
8020 + if (!ndev) {
8021 + err = -ENODEV;
8022 +- goto out2;
8023 ++ goto err_free;
8024 + }
8025 +- mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
8026 +- mc->multicast.ib->rec.hop_limit = 1;
8027 +- mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
8028 ++ ib.rec.rate = iboe_get_rate(ndev);
8029 ++ ib.rec.hop_limit = 1;
8030 ++ ib.rec.mtu = iboe_get_mtu(ndev->mtu);
8031 +
8032 + if (addr->sa_family == AF_INET) {
8033 + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
8034 +- mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
8035 ++ ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
8036 + if (!send_only) {
8037 +- err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
8038 ++ err = cma_igmp_send(ndev, &ib.rec.mgid,
8039 + true);
8040 + }
8041 + }
8042 +@@ -4550,24 +4504,22 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
8043 + err = -ENOTSUPP;
8044 + }
8045 + dev_put(ndev);
8046 +- if (err || !mc->multicast.ib->rec.mtu) {
8047 ++ if (err || !ib.rec.mtu) {
8048 + if (!err)
8049 + err = -EINVAL;
8050 +- goto out2;
8051 ++ goto err_free;
8052 + }
8053 + rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
8054 +- &mc->multicast.ib->rec.port_gid);
8055 ++ &ib.rec.port_gid);
8056 + work->id = id_priv;
8057 +- work->mc = mc;
8058 +- INIT_WORK(&work->work, iboe_mcast_work_handler);
8059 +- kref_get(&mc->mcref);
8060 ++ INIT_WORK(&work->work, cma_work_handler);
8061 ++ cma_make_mc_event(0, id_priv, &ib, &work->event, mc);
8062 ++ /* Balances with cma_id_put() in cma_work_handler */
8063 ++ cma_id_get(id_priv);
8064 + queue_work(cma_wq, &work->work);
8065 +-
8066 + return 0;
8067 +
8068 +-out2:
8069 +- kfree(mc->multicast.ib);
8070 +-out1:
8071 ++err_free:
8072 + kfree(work);
8073 + return err;
8074 + }
8075 +@@ -4579,6 +4531,10 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
8076 + struct cma_multicast *mc;
8077 + int ret;
8078 +
8079 ++ /* Not supported for kernel QPs */
8080 ++ if (WARN_ON(id->qp))
8081 ++ return -EINVAL;
8082 ++
8083 + if (!id->device)
8084 + return -EINVAL;
8085 +
8086 +@@ -4587,7 +4543,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
8087 + !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
8088 + return -EINVAL;
8089 +
8090 +- mc = kmalloc(sizeof *mc, GFP_KERNEL);
8091 ++ mc = kzalloc(sizeof(*mc), GFP_KERNEL);
8092 + if (!mc)
8093 + return -ENOMEM;
8094 +
8095 +@@ -4597,7 +4553,6 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
8096 + mc->join_state = join_state;
8097 +
8098 + if (rdma_protocol_roce(id->device, id->port_num)) {
8099 +- kref_init(&mc->mcref);
8100 + ret = cma_iboe_join_multicast(id_priv, mc);
8101 + if (ret)
8102 + goto out_err;
8103 +@@ -4629,25 +4584,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
8104 + id_priv = container_of(id, struct rdma_id_private, id);
8105 + spin_lock_irq(&id_priv->lock);
8106 + list_for_each_entry(mc, &id_priv->mc_list, list) {
8107 +- if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
8108 +- list_del(&mc->list);
8109 +- spin_unlock_irq(&id_priv->lock);
8110 +-
8111 +- if (id->qp)
8112 +- ib_detach_mcast(id->qp,
8113 +- &mc->multicast.ib->rec.mgid,
8114 +- be16_to_cpu(mc->multicast.ib->rec.mlid));
8115 +-
8116 +- BUG_ON(id_priv->cma_dev->device != id->device);
8117 +-
8118 +- if (rdma_cap_ib_mcast(id->device, id->port_num)) {
8119 +- ib_sa_free_multicast(mc->multicast.ib);
8120 +- kfree(mc);
8121 +- } else if (rdma_protocol_roce(id->device, id->port_num)) {
8122 +- cma_leave_roce_mc_group(id_priv, mc);
8123 +- }
8124 +- return;
8125 +- }
8126 ++ if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
8127 ++ continue;
8128 ++ list_del(&mc->list);
8129 ++ spin_unlock_irq(&id_priv->lock);
8130 ++
8131 ++ WARN_ON(id_priv->cma_dev->device != id->device);
8132 ++ destroy_mc(id_priv, mc);
8133 ++ return;
8134 + }
8135 + spin_unlock_irq(&id_priv->lock);
8136 + }
8137 +@@ -4656,7 +4600,7 @@ EXPORT_SYMBOL(rdma_leave_multicast);
8138 + static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
8139 + {
8140 + struct rdma_dev_addr *dev_addr;
8141 +- struct cma_ndev_work *work;
8142 ++ struct cma_work *work;
8143 +
8144 + dev_addr = &id_priv->id.route.addr.dev_addr;
8145 +
8146 +@@ -4669,7 +4613,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
8147 + if (!work)
8148 + return -ENOMEM;
8149 +
8150 +- INIT_WORK(&work->work, cma_ndev_work_handler);
8151 ++ INIT_WORK(&work->work, cma_work_handler);
8152 + work->id = id_priv;
8153 + work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
8154 + cma_id_get(id_priv);
8155 +diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
8156 +index a92fc3f90bb5b..19e36e52181be 100644
8157 +--- a/drivers/infiniband/core/cq.c
8158 ++++ b/drivers/infiniband/core/cq.c
8159 +@@ -197,24 +197,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
8160 + }
8161 +
8162 + /**
8163 +- * __ib_alloc_cq_user - allocate a completion queue
7164 ++ * __ib_alloc_cq - allocate a completion queue
8165 + * @dev: device to allocate the CQ for
8166 + * @private: driver private data, accessible from cq->cq_context
8167 + * @nr_cqe: number of CQEs to allocate
8168 + * @comp_vector: HCA completion vectors for this CQ
8169 + * @poll_ctx: context to poll the CQ from.
8170 + * @caller: module owner name.
8171 +- * @udata: Valid user data or NULL for kernel object
8172 + *
8173 + * This is the proper interface to allocate a CQ for in-kernel users. A
8174 + * CQ allocated with this interface will automatically be polled from the
8175 + * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
8176 + * to use this CQ abstraction.
8177 + */
8178 +-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
8179 +- int nr_cqe, int comp_vector,
8180 +- enum ib_poll_context poll_ctx,
8181 +- const char *caller, struct ib_udata *udata)
8182 ++struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
8183 ++ int comp_vector, enum ib_poll_context poll_ctx,
8184 ++ const char *caller)
8185 + {
8186 + struct ib_cq_init_attr cq_attr = {
8187 + .cqe = nr_cqe,
8188 +@@ -277,7 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
8189 + out_destroy_cq:
8190 + rdma_dim_destroy(cq);
8191 + rdma_restrack_del(&cq->res);
8192 +- cq->device->ops.destroy_cq(cq, udata);
8193 ++ cq->device->ops.destroy_cq(cq, NULL);
8194 + out_free_wc:
8195 + kfree(cq->wc);
8196 + out_free_cq:
8197 +@@ -285,7 +283,7 @@ out_free_cq:
8198 + trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
8199 + return ERR_PTR(ret);
8200 + }
8201 +-EXPORT_SYMBOL(__ib_alloc_cq_user);
8202 ++EXPORT_SYMBOL(__ib_alloc_cq);
8203 +
8204 + /**
8205 + * __ib_alloc_cq_any - allocate a completion queue
8206 +@@ -310,18 +308,19 @@ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
8207 + atomic_inc_return(&counter) %
8208 + min_t(int, dev->num_comp_vectors, num_online_cpus());
8209 +
8210 +- return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
8211 +- caller, NULL);
8212 ++ return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
8213 ++ caller);
8214 + }
8215 + EXPORT_SYMBOL(__ib_alloc_cq_any);
8216 +
8217 + /**
8218 +- * ib_free_cq_user - free a completion queue
8219 ++ * ib_free_cq - free a completion queue
8220 + * @cq: completion queue to free.
8221 +- * @udata: User data or NULL for kernel object
8222 + */
8223 +-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
8224 ++void ib_free_cq(struct ib_cq *cq)
8225 + {
8226 ++ int ret;
8227 ++
8228 + if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
8229 + return;
8230 + if (WARN_ON_ONCE(cq->cqe_used))
8231 +@@ -343,12 +342,13 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
8232 +
8233 + rdma_dim_destroy(cq);
8234 + trace_cq_free(cq);
8235 ++ ret = cq->device->ops.destroy_cq(cq, NULL);
8236 ++ WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
8237 + rdma_restrack_del(&cq->res);
8238 +- cq->device->ops.destroy_cq(cq, udata);
8239 + kfree(cq->wc);
8240 + kfree(cq);
8241 + }
8242 +-EXPORT_SYMBOL(ib_free_cq_user);
8243 ++EXPORT_SYMBOL(ib_free_cq);
8244 +
8245 + void ib_cq_pool_init(struct ib_device *dev)
8246 + {
8247 +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
8248 +index 1d184ea05eba1..6f42ff8f2ec57 100644
8249 +--- a/drivers/infiniband/core/ucma.c
8250 ++++ b/drivers/infiniband/core/ucma.c
8251 +@@ -586,6 +586,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
8252 + list_move_tail(&uevent->list, &list);
8253 + }
8254 + list_del(&ctx->list);
8255 ++ events_reported = ctx->events_reported;
8256 + mutex_unlock(&ctx->file->mut);
8257 +
8258 + list_for_each_entry_safe(uevent, tmp, &list, list) {
8259 +@@ -595,7 +596,6 @@ static int ucma_free_ctx(struct ucma_context *ctx)
8260 + kfree(uevent);
8261 + }
8262 +
8263 +- events_reported = ctx->events_reported;
8264 + mutex_destroy(&ctx->mutex);
8265 + kfree(ctx);
8266 + return events_reported;
8267 +@@ -1512,7 +1512,9 @@ static ssize_t ucma_process_join(struct ucma_file *file,
8268 + return 0;
8269 +
8270 + err3:
8271 ++ mutex_lock(&ctx->mutex);
8272 + rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
8273 ++ mutex_unlock(&ctx->mutex);
8274 + ucma_cleanup_mc_events(mc);
8275 + err2:
8276 + xa_erase(&multicast_table, mc->id);
8277 +@@ -1678,7 +1680,9 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
8278 +
8279 + cur_file = ctx->file;
8280 + if (cur_file == new_file) {
8281 ++ mutex_lock(&cur_file->mut);
8282 + resp.events_reported = ctx->events_reported;
8283 ++ mutex_unlock(&cur_file->mut);
8284 + goto response;
8285 + }
8286 +
8287 +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
8288 +index 831bff8d52e54..1d0599997d0fb 100644
8289 +--- a/drivers/infiniband/core/umem.c
8290 ++++ b/drivers/infiniband/core/umem.c
8291 +@@ -151,13 +151,24 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
8292 + dma_addr_t mask;
8293 + int i;
8294 +
8295 ++ /* rdma_for_each_block() has a bug if the page size is smaller than the
8296 ++ * page size used to build the umem. For now prevent smaller page sizes
8297 ++ * from being returned.
8298 ++ */
8299 ++ pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
8300 ++
8301 + /* At minimum, drivers must support PAGE_SIZE or smaller */
8302 + if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
8303 + return 0;
8304 +
8305 + va = virt;
8306 +- /* max page size not to exceed MR length */
8307 +- mask = roundup_pow_of_two(umem->length);
8308 ++ /* The best result is the smallest page size that results in the minimum
8309 ++ * number of required pages. Compute the largest page size that could
8310 ++ * work based on VA address bits that don't change.
8311 ++ */
8312 ++ mask = pgsz_bitmap &
8313 ++ GENMASK(BITS_PER_LONG - 1,
8314 ++ bits_per((umem->length - 1 + virt) ^ virt));
8315 + /* offset into first SGL */
8316 + pgoff = umem->address & ~PAGE_MASK;
8317 +
8318 +diff --git a/drivers/infiniband/core/uverbs_std_types_wq.c b/drivers/infiniband/core/uverbs_std_types_wq.c
8319 +index cad842ede077d..f2e6a625724a4 100644
8320 +--- a/drivers/infiniband/core/uverbs_std_types_wq.c
8321 ++++ b/drivers/infiniband/core/uverbs_std_types_wq.c
8322 +@@ -16,7 +16,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
8323 + container_of(uobject, struct ib_uwq_object, uevent.uobject);
8324 + int ret;
8325 +
8326 +- ret = ib_destroy_wq(wq, &attrs->driver_udata);
8327 ++ ret = ib_destroy_wq_user(wq, &attrs->driver_udata);
8328 + if (ib_is_destroy_retryable(ret, why, uobject))
8329 + return ret;
8330 +
8331 +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
8332 +index 307886737646e..6653f92f2df99 100644
8333 +--- a/drivers/infiniband/core/verbs.c
8334 ++++ b/drivers/infiniband/core/verbs.c
8335 +@@ -2011,16 +2011,21 @@ EXPORT_SYMBOL(rdma_set_cq_moderation);
8336 +
8337 + int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
8338 + {
8339 ++ int ret;
8340 ++
8341 + if (WARN_ON_ONCE(cq->shared))
8342 + return -EOPNOTSUPP;
8343 +
8344 + if (atomic_read(&cq->usecnt))
8345 + return -EBUSY;
8346 +
8347 ++ ret = cq->device->ops.destroy_cq(cq, udata);
8348 ++ if (ret)
8349 ++ return ret;
8350 ++
8351 + rdma_restrack_del(&cq->res);
8352 +- cq->device->ops.destroy_cq(cq, udata);
8353 + kfree(cq);
8354 +- return 0;
8355 ++ return ret;
8356 + }
8357 + EXPORT_SYMBOL(ib_destroy_cq_user);
8358 +
8359 +@@ -2328,13 +2333,17 @@ EXPORT_SYMBOL(ib_alloc_xrcd_user);
8360 + */
8361 + int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
8362 + {
8363 ++ int ret;
8364 ++
8365 + if (atomic_read(&xrcd->usecnt))
8366 + return -EBUSY;
8367 +
8368 + WARN_ON(!xa_empty(&xrcd->tgt_qps));
8369 +- xrcd->device->ops.dealloc_xrcd(xrcd, udata);
8370 ++ ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata);
8371 ++ if (ret)
8372 ++ return ret;
8373 + kfree(xrcd);
8374 +- return 0;
8375 ++ return ret;
8376 + }
8377 + EXPORT_SYMBOL(ib_dealloc_xrcd_user);
8378 +
8379 +@@ -2378,25 +2387,28 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
8380 + EXPORT_SYMBOL(ib_create_wq);
8381 +
8382 + /**
8383 +- * ib_destroy_wq - Destroys the specified user WQ.
8384 ++ * ib_destroy_wq_user - Destroys the specified user WQ.
8385 + * @wq: The WQ to destroy.
8386 + * @udata: Valid user data
8387 + */
8388 +-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
8389 ++int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
8390 + {
8391 + struct ib_cq *cq = wq->cq;
8392 + struct ib_pd *pd = wq->pd;
8393 ++ int ret;
8394 +
8395 + if (atomic_read(&wq->usecnt))
8396 + return -EBUSY;
8397 +
8398 +- wq->device->ops.destroy_wq(wq, udata);
8399 ++ ret = wq->device->ops.destroy_wq(wq, udata);
8400 ++ if (ret)
8401 ++ return ret;
8402 ++
8403 + atomic_dec(&pd->usecnt);
8404 + atomic_dec(&cq->usecnt);
8405 +-
8406 +- return 0;
8407 ++ return ret;
8408 + }
8409 +-EXPORT_SYMBOL(ib_destroy_wq);
8410 ++EXPORT_SYMBOL(ib_destroy_wq_user);
8411 +
8412 + /**
8413 + * ib_modify_wq - Modifies the specified WQ.
8414 +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
8415 +index 1d7a9ca5240c5..e0d06899ad4f4 100644
8416 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
8417 ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
8418 +@@ -2800,7 +2800,7 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
8419 + }
8420 +
8421 + /* Completion Queues */
8422 +-void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
8423 ++int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
8424 + {
8425 + struct bnxt_re_cq *cq;
8426 + struct bnxt_qplib_nq *nq;
8427 +@@ -2816,6 +2816,7 @@ void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
8428 + atomic_dec(&rdev->cq_count);
8429 + nq->budget--;
8430 + kfree(cq->cql);
8431 ++ return 0;
8432 + }
8433 +
8434 + int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
8435 +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
8436 +index 1daeb30e06fda..f1d98540fede5 100644
8437 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
8438 ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
8439 +@@ -193,7 +193,7 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
8440 + const struct ib_recv_wr **bad_recv_wr);
8441 + int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
8442 + struct ib_udata *udata);
8443 +-void bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
8444 ++int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
8445 + int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
8446 + int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
8447 + struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
8448 +diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
8449 +index 352b8af1998a5..28349ed508854 100644
8450 +--- a/drivers/infiniband/hw/cxgb4/cq.c
8451 ++++ b/drivers/infiniband/hw/cxgb4/cq.c
8452 +@@ -967,7 +967,7 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
8453 + return !err || err == -ENODATA ? npolled : err;
8454 + }
8455 +
8456 +-void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
8457 ++int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
8458 + {
8459 + struct c4iw_cq *chp;
8460 + struct c4iw_ucontext *ucontext;
8461 +@@ -985,6 +985,7 @@ void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
8462 + ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
8463 + chp->destroy_skb, chp->wr_waitp);
8464 + c4iw_put_wr_wait(chp->wr_waitp);
8465 ++ return 0;
8466 + }
8467 +
8468 + int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
8469 +diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
8470 +index 2b2b009b371af..a5975119b0d4c 100644
8471 +--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
8472 ++++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
8473 +@@ -992,7 +992,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
8474 + struct ib_udata *udata);
8475 + struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
8476 + int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
8477 +-void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
8478 ++int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
8479 + int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
8480 + struct ib_udata *udata);
8481 + int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
8482 +diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
8483 +index 1889dd172a252..05f593940e7b0 100644
8484 +--- a/drivers/infiniband/hw/efa/efa.h
8485 ++++ b/drivers/infiniband/hw/efa/efa.h
8486 +@@ -139,7 +139,7 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
8487 + struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
8488 + struct ib_qp_init_attr *init_attr,
8489 + struct ib_udata *udata);
8490 +-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
8491 ++int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
8492 + int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
8493 + struct ib_udata *udata);
8494 + struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
8495 +diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
8496 +index 9e201f1692892..61520521baccd 100644
8497 +--- a/drivers/infiniband/hw/efa/efa_verbs.c
8498 ++++ b/drivers/infiniband/hw/efa/efa_verbs.c
8499 +@@ -843,7 +843,7 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
8500 + return efa_com_destroy_cq(&dev->edev, &params);
8501 + }
8502 +
8503 +-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
8504 ++int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
8505 + {
8506 + struct efa_dev *dev = to_edev(ibcq->device);
8507 + struct efa_cq *cq = to_ecq(ibcq);
8508 +@@ -856,6 +856,7 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
8509 + efa_destroy_cq_idx(dev, cq->cq_idx);
8510 + efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
8511 + DMA_FROM_DEVICE);
8512 ++ return 0;
8513 + }
8514 +
8515 + static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
8516 +diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
8517 +index e87d616f79882..c5acf3332519b 100644
8518 +--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
8519 ++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
8520 +@@ -311,7 +311,7 @@ err_cq_buf:
8521 + return ret;
8522 + }
8523 +
8524 +-void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
8525 ++int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
8526 + {
8527 + struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
8528 + struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
8529 +@@ -322,6 +322,7 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
8530 + free_cq_buf(hr_dev, hr_cq);
8531 + free_cq_db(hr_dev, hr_cq, udata);
8532 + free_cqc(hr_dev, hr_cq);
8533 ++ return 0;
8534 + }
8535 +
8536 + void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
8537 +diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
8538 +index 6edcbdcd8f432..6dc07bfb4daad 100644
8539 +--- a/drivers/infiniband/hw/hns/hns_roce_device.h
8540 ++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
8541 +@@ -930,7 +930,7 @@ struct hns_roce_hw {
8542 + int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
8543 + int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
8544 + struct ib_udata *udata);
8545 +- void (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
8546 ++ int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
8547 + int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
8548 + int (*init_eq)(struct hns_roce_dev *hr_dev);
8549 + void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
8550 +@@ -1247,7 +1247,7 @@ int to_hr_qp_type(int qp_type);
8551 + int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
8552 + struct ib_udata *udata);
8553 +
8554 +-void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
8555 ++int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
8556 + int hns_roce_db_map_user(struct hns_roce_ucontext *context,
8557 + struct ib_udata *udata, unsigned long virt,
8558 + struct hns_roce_db *db);
8559 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
8560 +index aeb3a6fa7d472..eac971c663791 100644
8561 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
8562 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
8563 +@@ -271,7 +271,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
8564 + ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
8565 + break;
8566 + case IB_WR_LOCAL_INV:
8567 +- break;
8568 + case IB_WR_ATOMIC_CMP_AND_SWP:
8569 + case IB_WR_ATOMIC_FETCH_AND_ADD:
8570 + case IB_WR_LSO:
8571 +@@ -3572,7 +3571,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
8572 + return 0;
8573 + }
8574 +
8575 +-static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
8576 ++static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
8577 + {
8578 + struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
8579 + struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
8580 +@@ -3603,6 +3602,7 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
8581 + }
8582 + wait_time++;
8583 + }
8584 ++ return 0;
8585 + }
8586 +
8587 + static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
8588 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
8589 +index 4cda95ed1fbe2..cee140920c579 100644
8590 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
8591 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
8592 +@@ -1770,9 +1770,9 @@ static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
8593 + int *buf_page_size, int *bt_page_size, u32 hem_type)
8594 + {
8595 + u64 obj_per_chunk;
8596 +- int bt_chunk_size = 1 << PAGE_SHIFT;
8597 +- int buf_chunk_size = 1 << PAGE_SHIFT;
8598 +- int obj_per_chunk_default = buf_chunk_size / obj_size;
8599 ++ u64 bt_chunk_size = PAGE_SIZE;
8600 ++ u64 buf_chunk_size = PAGE_SIZE;
8601 ++ u64 obj_per_chunk_default = buf_chunk_size / obj_size;
8602 +
8603 + *buf_page_size = 0;
8604 + *bt_page_size = 0;
8605 +@@ -3641,9 +3641,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
8606 + V2_QPC_BYTE_76_SRQ_EN_S, 1);
8607 + }
8608 +
8609 +- roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
8610 +- V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
8611 +-
8612 + roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
8613 +
8614 + hr_qp->access_flags = attr->qp_access_flags;
8615 +@@ -3954,6 +3951,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
8616 + dma_addr_t trrl_ba;
8617 + dma_addr_t irrl_ba;
8618 + enum ib_mtu mtu;
8619 ++ u8 lp_pktn_ini;
8620 + u8 port_num;
8621 + u64 *mtts;
8622 + u8 *dmac;
8623 +@@ -4061,13 +4059,21 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
8624 + }
8625 +
8626 + #define MAX_LP_MSG_LEN 65536
8627 +- /* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
8628 ++ /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
8629 ++ lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
8630 ++
8631 + roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
8632 +- V2_QPC_BYTE_56_LP_PKTN_INI_S,
8633 +- ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
8634 ++ V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini);
8635 + roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
8636 + V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
8637 +
8638 ++ /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
8639 ++ roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
8640 ++ V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini);
8641 ++ roce_set_field(qpc_mask->byte_172_sq_psn,
8642 ++ V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
8643 ++ V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
8644 ++
8645 + roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
8646 + V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
8647 + roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
8648 +@@ -4259,11 +4265,19 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
8649 + V2_QPC_BYTE_28_FL_S, 0);
8650 + memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
8651 + memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
8652 ++
8653 ++ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
8654 ++ if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
8655 ++ ibdev_err(ibdev,
8656 ++ "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
8657 ++ hr_qp->sl, MAX_SERVICE_LEVEL);
8658 ++ return -EINVAL;
8659 ++ }
8660 ++
8661 + roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
8662 +- V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
8663 ++ V2_QPC_BYTE_28_SL_S, hr_qp->sl);
8664 + roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
8665 + V2_QPC_BYTE_28_SL_S, 0);
8666 +- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
8667 +
8668 + return 0;
8669 + }
8670 +@@ -4759,7 +4773,9 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
8671 + qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
8672 + V2_QPC_BYTE_212_RETRY_CNT_M,
8673 + V2_QPC_BYTE_212_RETRY_CNT_S);
8674 +- qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
8675 ++ qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
8676 ++ V2_QPC_BYTE_244_RNR_CNT_M,
8677 ++ V2_QPC_BYTE_244_RNR_CNT_S);
8678 +
8679 + done:
8680 + qp_attr->cur_qp_state = qp_attr->qp_state;
8681 +@@ -4775,6 +4791,7 @@ done:
8682 + }
8683 +
8684 + qp_init_attr->cap = qp_attr->cap;
8685 ++ qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
8686 +
8687 + out:
8688 + mutex_unlock(&hr_qp->mutex);
8689 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
8690 +index ac29be43b6bd5..17f35f91f4ad2 100644
8691 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
8692 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
8693 +@@ -1941,6 +1941,8 @@ struct hns_roce_eq_context {
8694 + #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
8695 + #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
8696 +
8697 ++#define MAX_SERVICE_LEVEL 0x7
8698 ++
8699 + struct hns_roce_wqe_atomic_seg {
8700 + __le64 fetchadd_swap_data;
8701 + __le64 cmp_data;
8702 +diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
8703 +index c063c450c715f..975281f034685 100644
8704 +--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
8705 ++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
8706 +@@ -1161,8 +1161,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
8707 +
8708 + mutex_lock(&hr_qp->mutex);
8709 +
8710 +- cur_state = attr_mask & IB_QP_CUR_STATE ?
8711 +- attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
8712 ++ if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
8713 ++ goto out;
8714 ++
8715 ++ cur_state = hr_qp->state;
8716 + new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
8717 +
8718 + if (ibqp->uobject &&
8719 +diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
8720 +index 25747b85a79c7..832b80de004fb 100644
8721 +--- a/drivers/infiniband/hw/i40iw/i40iw.h
8722 ++++ b/drivers/infiniband/hw/i40iw/i40iw.h
8723 +@@ -409,8 +409,8 @@ static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
8724 + }
8725 +
8726 + /* i40iw.c */
8727 +-void i40iw_add_ref(struct ib_qp *);
8728 +-void i40iw_rem_ref(struct ib_qp *);
8729 ++void i40iw_qp_add_ref(struct ib_qp *ibqp);
8730 ++void i40iw_qp_rem_ref(struct ib_qp *ibqp);
8731 + struct ib_qp *i40iw_get_qp(struct ib_device *, int);
8732 +
8733 + void i40iw_flush_wqes(struct i40iw_device *iwdev,
8734 +@@ -554,9 +554,8 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
8735 + bool wait);
8736 + void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
8737 + void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
8738 +-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
8739 +- struct i40iw_qp *iwqp,
8740 +- u32 qp_num);
8741 ++void i40iw_free_qp_resources(struct i40iw_qp *iwqp);
8742 ++
8743 + enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
8744 + struct i40iw_dma_mem *memptr,
8745 + u32 size, u32 mask);
8746 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
8747 +index a3b95805c154e..3053c345a5a34 100644
8748 +--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
8749 ++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
8750 +@@ -2322,7 +2322,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
8751 + iwqp = cm_node->iwqp;
8752 + if (iwqp) {
8753 + iwqp->cm_node = NULL;
8754 +- i40iw_rem_ref(&iwqp->ibqp);
8755 ++ i40iw_qp_rem_ref(&iwqp->ibqp);
8756 + cm_node->iwqp = NULL;
8757 + } else if (cm_node->qhash_set) {
8758 + i40iw_get_addr_info(cm_node, &nfo);
8759 +@@ -3452,7 +3452,7 @@ void i40iw_cm_disconn(struct i40iw_qp *iwqp)
8760 + kfree(work);
8761 + return;
8762 + }
8763 +- i40iw_add_ref(&iwqp->ibqp);
8764 ++ i40iw_qp_add_ref(&iwqp->ibqp);
8765 + spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
8766 +
8767 + work->iwqp = iwqp;
8768 +@@ -3623,7 +3623,7 @@ static void i40iw_disconnect_worker(struct work_struct *work)
8769 +
8770 + kfree(dwork);
8771 + i40iw_cm_disconn_true(iwqp);
8772 +- i40iw_rem_ref(&iwqp->ibqp);
8773 ++ i40iw_qp_rem_ref(&iwqp->ibqp);
8774 + }
8775 +
8776 + /**
8777 +@@ -3745,7 +3745,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
8778 + cm_node->lsmm_size = accept.size + conn_param->private_data_len;
8779 + i40iw_cm_init_tsa_conn(iwqp, cm_node);
8780 + cm_id->add_ref(cm_id);
8781 +- i40iw_add_ref(&iwqp->ibqp);
8782 ++ i40iw_qp_add_ref(&iwqp->ibqp);
8783 +
8784 + attr.qp_state = IB_QPS_RTS;
8785 + cm_node->qhash_set = false;
8786 +@@ -3908,7 +3908,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
8787 + iwqp->cm_node = cm_node;
8788 + cm_node->iwqp = iwqp;
8789 + iwqp->cm_id = cm_id;
8790 +- i40iw_add_ref(&iwqp->ibqp);
8791 ++ i40iw_qp_add_ref(&iwqp->ibqp);
8792 +
8793 + if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
8794 + cm_node->state = I40IW_CM_STATE_SYN_SENT;
8795 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
8796 +index e1085634b8d9d..56fdc161f6f8e 100644
8797 +--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
8798 ++++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
8799 +@@ -313,7 +313,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
8800 + __func__, info->qp_cq_id);
8801 + continue;
8802 + }
8803 +- i40iw_add_ref(&iwqp->ibqp);
8804 ++ i40iw_qp_add_ref(&iwqp->ibqp);
8805 + spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
8806 + qp = &iwqp->sc_qp;
8807 + spin_lock_irqsave(&iwqp->lock, flags);
8808 +@@ -426,7 +426,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
8809 + break;
8810 + }
8811 + if (info->qp)
8812 +- i40iw_rem_ref(&iwqp->ibqp);
8813 ++ i40iw_qp_rem_ref(&iwqp->ibqp);
8814 + } while (1);
8815 +
8816 + if (aeqcnt)
8817 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
8818 +index e07fb37af0865..5e196bd49a583 100644
8819 +--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
8820 ++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
8821 +@@ -477,25 +477,6 @@ void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
8822 + }
8823 + }
8824 +
8825 +-/**
8826 +- * i40iw_free_qp - callback after destroy cqp completes
8827 +- * @cqp_request: cqp request for destroy qp
8828 +- * @num: not used
8829 +- */
8830 +-static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
8831 +-{
8832 +- struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
8833 +- struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
8834 +- struct i40iw_device *iwdev;
8835 +- u32 qp_num = iwqp->ibqp.qp_num;
8836 +-
8837 +- iwdev = iwqp->iwdev;
8838 +-
8839 +- i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
8840 +- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
8841 +- i40iw_rem_devusecount(iwdev);
8842 +-}
8843 +-
8844 + /**
8845 + * i40iw_wait_event - wait for completion
8846 + * @iwdev: iwarp device
8847 +@@ -616,26 +597,23 @@ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
8848 + }
8849 +
8850 + /**
8851 +- * i40iw_add_ref - add refcount for qp
8852 ++ * i40iw_qp_add_ref - add refcount for qp
8853 + * @ibqp: iqarp qp
8854 + */
8855 +-void i40iw_add_ref(struct ib_qp *ibqp)
8856 ++void i40iw_qp_add_ref(struct ib_qp *ibqp)
8857 + {
8858 + struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;
8859 +
8860 +- atomic_inc(&iwqp->refcount);
8861 ++ refcount_inc(&iwqp->refcount);
8862 + }
8863 +
8864 + /**
8865 +- * i40iw_rem_ref - rem refcount for qp and free if 0
8866 ++ * i40iw_qp_rem_ref - rem refcount for qp and free if 0
8867 + * @ibqp: iqarp qp
8868 + */
8869 +-void i40iw_rem_ref(struct ib_qp *ibqp)
8870 ++void i40iw_qp_rem_ref(struct ib_qp *ibqp)
8871 + {
8872 + struct i40iw_qp *iwqp;
8873 +- enum i40iw_status_code status;
8874 +- struct i40iw_cqp_request *cqp_request;
8875 +- struct cqp_commands_info *cqp_info;
8876 + struct i40iw_device *iwdev;
8877 + u32 qp_num;
8878 + unsigned long flags;
8879 +@@ -643,7 +621,7 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
8880 + iwqp = to_iwqp(ibqp);
8881 + iwdev = iwqp->iwdev;
8882 + spin_lock_irqsave(&iwdev->qptable_lock, flags);
8883 +- if (!atomic_dec_and_test(&iwqp->refcount)) {
8884 ++ if (!refcount_dec_and_test(&iwqp->refcount)) {
8885 + spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
8886 + return;
8887 + }
8888 +@@ -651,25 +629,8 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
8889 + qp_num = iwqp->ibqp.qp_num;
8890 + iwdev->qp_table[qp_num] = NULL;
8891 + spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
8892 +- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
8893 +- if (!cqp_request)
8894 +- return;
8895 +-
8896 +- cqp_request->callback_fcn = i40iw_free_qp;
8897 +- cqp_request->param = (void *)&iwqp->sc_qp;
8898 +- cqp_info = &cqp_request->info;
8899 +- cqp_info->cqp_cmd = OP_QP_DESTROY;
8900 +- cqp_info->post_sq = 1;
8901 +- cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
8902 +- cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
8903 +- cqp_info->in.u.qp_destroy.remove_hash_idx = true;
8904 +- status = i40iw_handle_cqp_op(iwdev, cqp_request);
8905 +- if (!status)
8906 +- return;
8907 ++ complete(&iwqp->free_qp);
8908 +
8909 +- i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
8910 +- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
8911 +- i40iw_rem_devusecount(iwdev);
8912 + }
8913 +
8914 + /**
8915 +@@ -936,7 +897,7 @@ static void i40iw_terminate_timeout(struct timer_list *t)
8916 + struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
8917 +
8918 + i40iw_terminate_done(qp, 1);
8919 +- i40iw_rem_ref(&iwqp->ibqp);
8920 ++ i40iw_qp_rem_ref(&iwqp->ibqp);
8921 + }
8922 +
8923 + /**
8924 +@@ -948,7 +909,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
8925 + struct i40iw_qp *iwqp;
8926 +
8927 + iwqp = (struct i40iw_qp *)qp->back_qp;
8928 +- i40iw_add_ref(&iwqp->ibqp);
8929 ++ i40iw_qp_add_ref(&iwqp->ibqp);
8930 + timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
8931 + iwqp->terminate_timer.expires = jiffies + HZ;
8932 + add_timer(&iwqp->terminate_timer);
8933 +@@ -964,7 +925,7 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
8934 +
8935 + iwqp = (struct i40iw_qp *)qp->back_qp;
8936 + if (del_timer(&iwqp->terminate_timer))
8937 +- i40iw_rem_ref(&iwqp->ibqp);
8938 ++ i40iw_qp_rem_ref(&iwqp->ibqp);
8939 + }
8940 +
8941 + /**
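
A theme running through these i40iw hunks is the move of the QP reference count from atomic_t to refcount_t. Unlike a bare atomic, refcount_t saturates rather than wrapping on overflow and WARNs on increment-from-zero and on underflow, so refcounting bugs become loud diagnostics instead of silent use-after-free. The API in isolation, on an illustrative object:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct obj {
    	refcount_t refcount;
    };

    static struct obj *obj_alloc(void)
    {
    	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

    	if (o)
    		refcount_set(&o->refcount, 1);	/* creator's reference */
    	return o;
    }

    static void obj_get(struct obj *o)
    {
    	refcount_inc(&o->refcount);	/* WARNs if count was zero */
    }

    static void obj_put(struct obj *o)
    {
    	if (refcount_dec_and_test(&o->refcount))
    		kfree(o);		/* last reference gone */
    }
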
8942 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
8943 +index b51339328a51e..09caad228aa4f 100644
8944 +--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
8945 ++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
8946 +@@ -363,11 +363,11 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
8947 + * @iwqp: qp ptr (user or kernel)
8948 + * @qp_num: qp number assigned
8949 + */
8950 +-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
8951 +- struct i40iw_qp *iwqp,
8952 +- u32 qp_num)
8953 ++void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
8954 + {
8955 + struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
8956 ++ struct i40iw_device *iwdev = iwqp->iwdev;
8957 ++ u32 qp_num = iwqp->ibqp.qp_num;
8958 +
8959 + i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
8960 + i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
8961 +@@ -401,6 +401,10 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
8962 + static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
8963 + {
8964 + struct i40iw_qp *iwqp = to_iwqp(ibqp);
8965 ++ struct ib_qp_attr attr;
8966 ++ struct i40iw_device *iwdev = iwqp->iwdev;
8967 ++
8968 ++ memset(&attr, 0, sizeof(attr));
8969 +
8970 + iwqp->destroyed = 1;
8971 +
8972 +@@ -415,7 +419,15 @@ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
8973 + }
8974 + }
8975 +
8976 +- i40iw_rem_ref(&iwqp->ibqp);
8977 ++ attr.qp_state = IB_QPS_ERR;
8978 ++ i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
8979 ++ i40iw_qp_rem_ref(&iwqp->ibqp);
8980 ++ wait_for_completion(&iwqp->free_qp);
8981 ++ i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
8982 ++ i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
8983 ++ i40iw_free_qp_resources(iwqp);
8984 ++ i40iw_rem_devusecount(iwdev);
8985 ++
8986 + return 0;
8987 + }
8988 +
8989 +@@ -576,6 +588,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
8990 + qp->back_qp = (void *)iwqp;
8991 + qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
8992 +
8993 ++ iwqp->iwdev = iwdev;
8994 + iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
8995 +
8996 + if (i40iw_allocate_dma_mem(dev->hw,
8997 +@@ -600,7 +613,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
8998 + goto error;
8999 + }
9000 +
9001 +- iwqp->iwdev = iwdev;
9002 + iwqp->iwpd = iwpd;
9003 + iwqp->ibqp.qp_num = qp_num;
9004 + qp = &iwqp->sc_qp;
9005 +@@ -714,7 +726,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
9006 + goto error;
9007 + }
9008 +
9009 +- i40iw_add_ref(&iwqp->ibqp);
9010 ++ refcount_set(&iwqp->refcount, 1);
9011 + spin_lock_init(&iwqp->lock);
9012 + iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
9013 + iwdev->qp_table[qp_num] = iwqp;
9014 +@@ -736,10 +748,11 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
9015 + }
9016 + init_completion(&iwqp->sq_drained);
9017 + init_completion(&iwqp->rq_drained);
9018 ++ init_completion(&iwqp->free_qp);
9019 +
9020 + return &iwqp->ibqp;
9021 + error:
9022 +- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
9023 ++ i40iw_free_qp_resources(iwqp);
9024 + return ERR_PTR(err_code);
9025 + }
9026 +
9027 +@@ -1052,7 +1065,7 @@ void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
9028 + * @ib_cq: cq pointer
9029 + * @udata: user data or NULL for kernel object
9030 + */
9031 +-static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
9032 ++static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
9033 + {
9034 + struct i40iw_cq *iwcq;
9035 + struct i40iw_device *iwdev;
9036 +@@ -1064,6 +1077,7 @@ static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
9037 + i40iw_cq_wq_destroy(iwdev, cq);
9038 + cq_free_resources(iwdev, iwcq);
9039 + i40iw_rem_devusecount(iwdev);
9040 ++ return 0;
9041 + }
9042 +
9043 + /**
9044 +@@ -2636,13 +2650,13 @@ static const struct ib_device_ops i40iw_dev_ops = {
9045 + .get_hw_stats = i40iw_get_hw_stats,
9046 + .get_port_immutable = i40iw_port_immutable,
9047 + .iw_accept = i40iw_accept,
9048 +- .iw_add_ref = i40iw_add_ref,
9049 ++ .iw_add_ref = i40iw_qp_add_ref,
9050 + .iw_connect = i40iw_connect,
9051 + .iw_create_listen = i40iw_create_listen,
9052 + .iw_destroy_listen = i40iw_destroy_listen,
9053 + .iw_get_qp = i40iw_get_qp,
9054 + .iw_reject = i40iw_reject,
9055 +- .iw_rem_ref = i40iw_rem_ref,
9056 ++ .iw_rem_ref = i40iw_qp_rem_ref,
9057 + .map_mr_sg = i40iw_map_mr_sg,
9058 + .mmap = i40iw_mmap,
9059 + .modify_qp = i40iw_modify_qp,
9060 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
9061 +index 331bc21cbcc73..bab71f3e56374 100644
9062 +--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
9063 ++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
9064 +@@ -139,7 +139,7 @@ struct i40iw_qp {
9065 + struct i40iw_qp_host_ctx_info ctx_info;
9066 + struct i40iwarp_offload_info iwarp_info;
9067 + void *allocated_buffer;
9068 +- atomic_t refcount;
9069 ++ refcount_t refcount;
9070 + struct iw_cm_id *cm_id;
9071 + void *cm_node;
9072 + struct ib_mr *lsmm_mr;
9073 +@@ -174,5 +174,6 @@ struct i40iw_qp {
9074 + struct i40iw_dma_mem ietf_mem;
9075 + struct completion sq_drained;
9076 + struct completion rq_drained;
9077 ++ struct completion free_qp;
9078 + };
9079 + #endif
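
The verbs.c and verbs.h hunks complete the i40iw rework: the old scheme freed the QP from a CQP callback when the count hit zero, while the new one has the last i40iw_qp_rem_ref() merely complete(&iwqp->free_qp), and i40iw_destroy_qp() waits on that completion before destroying the CQP resources and memory synchronously, in a known context. A sketch of the handoff pattern with placeholder names:

    #include <linux/completion.h>
    #include <linux/refcount.h>

    struct obj {
    	refcount_t refcount;
    	struct completion free_done;
    };

    static void obj_init(struct obj *o)
    {
    	refcount_set(&o->refcount, 1);	/* creation reference */
    	init_completion(&o->free_done);
    }

    static void obj_put(struct obj *o)
    {
    	/* The last reference only signals; it never frees. */
    	if (refcount_dec_and_test(&o->refcount))
    		complete(&o->free_done);
    }

    static void obj_destroy(struct obj *o)
    {
    	obj_put(o);				/* drop the creation ref */
    	wait_for_completion(&o->free_done);	/* outlast other holders */
    	/* exclusive now: free HW and memory in process context */
    }
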
9080 +diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
9081 +index b591861934b3c..81d6a3460b55d 100644
9082 +--- a/drivers/infiniband/hw/mlx4/cm.c
9083 ++++ b/drivers/infiniband/hw/mlx4/cm.c
9084 +@@ -280,6 +280,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
9085 + if (!sriov->is_going_down && !id->scheduled_delete) {
9086 + id->scheduled_delete = 1;
9087 + schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
9088 ++ } else if (id->scheduled_delete) {
9089 ++ /* Adjust timeout if already scheduled */
9090 ++ mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
9091 + }
9092 + spin_unlock_irqrestore(&sriov->going_down_lock, flags);
9093 + spin_unlock(&sriov->id_map_lock);
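
In the mlx4 cm.c hunk, schedule_delayed_work() alone could not extend the lifetime of an id whose delete was already queued; the added branch uses mod_delayed_work() to restart the countdown on the pending item. The refresh idiom, under an assumed timeout constant:

    #include <linux/workqueue.h>

    #define CLEANUP_DELAY	(30 * HZ)	/* illustrative, not mlx4's value */

    static void touch_entry(struct delayed_work *dwork, bool already_scheduled)
    {
    	if (!already_scheduled)
    		schedule_delayed_work(dwork, CLEANUP_DELAY);
    	else	/* pending: restart the countdown from now */
    		mod_delayed_work(system_wq, dwork, CLEANUP_DELAY);
    }
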
9094 +diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
9095 +index 8a3436994f809..ee50dd823a8e8 100644
9096 +--- a/drivers/infiniband/hw/mlx4/cq.c
9097 ++++ b/drivers/infiniband/hw/mlx4/cq.c
9098 +@@ -475,7 +475,7 @@ out:
9099 + return err;
9100 + }
9101 +
9102 +-void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9103 ++int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9104 + {
9105 + struct mlx4_ib_dev *dev = to_mdev(cq->device);
9106 + struct mlx4_ib_cq *mcq = to_mcq(cq);
9107 +@@ -495,6 +495,7 @@ void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9108 + mlx4_db_free(dev->dev, &mcq->db);
9109 + }
9110 + ib_umem_release(mcq->umem);
9111 ++ return 0;
9112 + }
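
mlx4_ib_destroy_cq() changing from void to int is one instance of a conversion this patch applies across mthca, ocrdma, qedr, usnic, pvrdma, rdmavt, rxe and siw, plus the WQ and XRCD destroy paths: a destroy callback that can fail now reports the failure instead of discarding it. The shape of the converted callback, with hypothetical drv_* helpers standing in for a driver's firmware commands:

    #include <rdma/ib_verbs.h>

    int drv_cmd_destroy_cq(struct ib_cq *cq);	/* hypothetical FW command */
    void drv_free_cq_state(struct ib_cq *cq);	/* hypothetical host cleanup */

    static int drv_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
    {
    	int ret;

    	ret = drv_cmd_destroy_cq(cq);
    	if (ret)
    		return ret;	/* keep host state; failure reaches caller */

    	drv_free_cq_state(cq);	/* release host side only on success */
    	return 0;
    }
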
9113 +
9114 + static void dump_cqe(void *cqe)
9115 +diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
9116 +index abe68708d6d6e..2cbdba4da9dfe 100644
9117 +--- a/drivers/infiniband/hw/mlx4/mad.c
9118 ++++ b/drivers/infiniband/hw/mlx4/mad.c
9119 +@@ -1299,6 +1299,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
9120 + spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
9121 + }
9122 +
9123 ++static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
9124 ++{
9125 ++ unsigned long flags;
9126 ++ struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
9127 ++ struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
9128 ++
9129 ++ spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
9130 ++ if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
9131 ++ queue_work(ctx->wi_wq, &ctx->work);
9132 ++ spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
9133 ++}
9134 ++
9135 + static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
9136 + struct mlx4_ib_demux_pv_qp *tun_qp,
9137 + int index)
9138 +@@ -2001,7 +2013,8 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
9139 + cq_size *= 2;
9140 +
9141 + cq_attr.cqe = cq_size;
9142 +- ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
9143 ++ ctx->cq = ib_create_cq(ctx->ib_dev,
9144 ++ create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
9145 + NULL, ctx, &cq_attr);
9146 + if (IS_ERR(ctx->cq)) {
9147 + ret = PTR_ERR(ctx->cq);
9148 +@@ -2038,6 +2051,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
9149 + INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
9150 +
9151 + ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
9152 ++ ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
9153 +
9154 + ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
9155 + if (ret) {
9156 +@@ -2181,7 +2195,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
9157 + goto err_mcg;
9158 + }
9159 +
9160 +- snprintf(name, sizeof name, "mlx4_ibt%d", port);
9161 ++ snprintf(name, sizeof(name), "mlx4_ibt%d", port);
9162 + ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
9163 + if (!ctx->wq) {
9164 + pr_err("Failed to create tunnelling WQ for port %d\n", port);
9165 +@@ -2189,7 +2203,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
9166 + goto err_wq;
9167 + }
9168 +
9169 +- snprintf(name, sizeof name, "mlx4_ibud%d", port);
9170 ++ snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
9171 ++ ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
9172 ++ if (!ctx->wi_wq) {
9173 ++ pr_err("Failed to create wire WQ for port %d\n", port);
9174 ++ ret = -ENOMEM;
9175 ++ goto err_wiwq;
9176 ++ }
9177 ++
9178 ++ snprintf(name, sizeof(name), "mlx4_ibud%d", port);
9179 + ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
9180 + if (!ctx->ud_wq) {
9181 + pr_err("Failed to create up/down WQ for port %d\n", port);
9182 +@@ -2200,6 +2222,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
9183 + return 0;
9184 +
9185 + err_udwq:
9186 ++ destroy_workqueue(ctx->wi_wq);
9187 ++ ctx->wi_wq = NULL;
9188 ++
9189 ++err_wiwq:
9190 + destroy_workqueue(ctx->wq);
9191 + ctx->wq = NULL;
9192 +
9193 +@@ -2247,12 +2273,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
9194 + ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
9195 + }
9196 + flush_workqueue(ctx->wq);
9197 ++ flush_workqueue(ctx->wi_wq);
9198 + for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
9199 + destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
9200 + free_pv_object(dev, i, ctx->port);
9201 + }
9202 + kfree(ctx->tun);
9203 + destroy_workqueue(ctx->ud_wq);
9204 ++ destroy_workqueue(ctx->wi_wq);
9205 + destroy_workqueue(ctx->wq);
9206 + }
9207 + }
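
The mad.c hunks split the paravirt completion handling onto two ordered workqueues: tunnel QP completions keep ctx->wq, while the new mlx4_ib_wire_comp_handler queues wire QP work on a dedicated wi_wq, so one traffic class cannot starve the other, and teardown flushes and destroys both. Allocating paired ordered queues, names illustrative:

    #include <linux/workqueue.h>

    struct port_ctx {
    	struct workqueue_struct *tun_wq;	/* tunnel completions */
    	struct workqueue_struct *wire_wq;	/* wire completions */
    };

    static int port_ctx_init(struct port_ctx *pc, int port)
    {
    	pc->tun_wq = alloc_ordered_workqueue("demo_tun%d",
    					     WQ_MEM_RECLAIM, port);
    	if (!pc->tun_wq)
    		return -ENOMEM;

    	pc->wire_wq = alloc_ordered_workqueue("demo_wire%d",
    					      WQ_MEM_RECLAIM, port);
    	if (!pc->wire_wq) {
    		destroy_workqueue(pc->tun_wq);
    		return -ENOMEM;
    	}
    	return 0;
    }

    static void port_ctx_fini(struct port_ctx *pc)
    {
    	flush_workqueue(pc->wire_wq);	/* drain before destroy */
    	destroy_workqueue(pc->wire_wq);
    	destroy_workqueue(pc->tun_wq);
    }
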
9208 +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
9209 +index bd4f975e7f9ac..d22bf9a4b53e2 100644
9210 +--- a/drivers/infiniband/hw/mlx4/main.c
9211 ++++ b/drivers/infiniband/hw/mlx4/main.c
9212 +@@ -1256,11 +1256,12 @@ err2:
9213 + return err;
9214 + }
9215 +
9216 +-static void mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
9217 ++static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
9218 + {
9219 + ib_destroy_cq(to_mxrcd(xrcd)->cq);
9220 + ib_dealloc_pd(to_mxrcd(xrcd)->pd);
9221 + mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
9222 ++ return 0;
9223 + }
9224 +
9225 + static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
9226 +diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
9227 +index 38e87a700a2a2..bb64f6d9421c2 100644
9228 +--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
9229 ++++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
9230 +@@ -454,6 +454,7 @@ struct mlx4_ib_demux_pv_ctx {
9231 + struct ib_pd *pd;
9232 + struct work_struct work;
9233 + struct workqueue_struct *wq;
9234 ++ struct workqueue_struct *wi_wq;
9235 + struct mlx4_ib_demux_pv_qp qp[2];
9236 + };
9237 +
9238 +@@ -461,6 +462,7 @@ struct mlx4_ib_demux_ctx {
9239 + struct ib_device *ib_dev;
9240 + int port;
9241 + struct workqueue_struct *wq;
9242 ++ struct workqueue_struct *wi_wq;
9243 + struct workqueue_struct *ud_wq;
9244 + spinlock_t ud_lock;
9245 + atomic64_t subnet_prefix;
9246 +@@ -736,7 +738,7 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
9247 + int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
9248 + int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
9249 + struct ib_udata *udata);
9250 +-void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
9251 ++int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
9252 + int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
9253 + int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
9254 + void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
9255 +@@ -890,7 +892,7 @@ void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
9256 + struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
9257 + struct ib_wq_init_attr *init_attr,
9258 + struct ib_udata *udata);
9259 +-void mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
9260 ++int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
9261 + int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
9262 + u32 wq_attr_mask, struct ib_udata *udata);
9263 +
9264 +diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
9265 +index 2975f350b9fd1..b7a0c3f977131 100644
9266 +--- a/drivers/infiniband/hw/mlx4/qp.c
9267 ++++ b/drivers/infiniband/hw/mlx4/qp.c
9268 +@@ -4327,7 +4327,7 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
9269 + return err;
9270 + }
9271 +
9272 +-void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
9273 ++int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
9274 + {
9275 + struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
9276 + struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
9277 +@@ -4338,6 +4338,7 @@ void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
9278 + destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
9279 +
9280 + kfree(qp);
9281 ++ return 0;
9282 + }
9283 +
9284 + struct ib_rwq_ind_table
9285 +diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
9286 +index 145f3cb40ccba..aeeb14ecb3ee7 100644
9287 +--- a/drivers/infiniband/hw/mlx5/counters.c
9288 ++++ b/drivers/infiniband/hw/mlx5/counters.c
9289 +@@ -456,12 +456,12 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
9290 + cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
9291 + num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
9292 + }
9293 +- cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);
9294 ++ cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
9295 + if (!cnts->names)
9296 + return -ENOMEM;
9297 +
9298 + cnts->offsets = kcalloc(num_counters,
9299 +- sizeof(cnts->offsets), GFP_KERNEL);
9300 ++ sizeof(*cnts->offsets), GFP_KERNEL);
9301 + if (!cnts->offsets)
9302 + goto err_names;
9303 +
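
The counters.c fix (and the rdmavt vt.c hunk further down, which does the same for rdi->ports) replaces sizeof(ptr) with sizeof(*ptr) in kcalloc() calls. Even where pointer and element happen to be the same width, sizeof(*ptr) is the form that stays correct if the element type ever changes. In isolation:

    #include <linux/slab.h>

    static u64 *alloc_elems(int n)
    {
    	/* sizeof(*buf) follows the element type; sizeof(buf) would be
    	 * the size of the pointer variable itself.
    	 */
    	u64 *buf = kcalloc(n, sizeof(*buf), GFP_KERNEL);

    	return buf;
    }
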
9304 +diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
9305 +index dceb0eb2bed16..35e5bbb44d3d8 100644
9306 +--- a/drivers/infiniband/hw/mlx5/cq.c
9307 ++++ b/drivers/infiniband/hw/mlx5/cq.c
9308 +@@ -168,7 +168,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
9309 + {
9310 + enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
9311 + struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
9312 +- struct mlx5_ib_srq *srq;
9313 ++ struct mlx5_ib_srq *srq = NULL;
9314 + struct mlx5_ib_wq *wq;
9315 + u16 wqe_ctr;
9316 + u8 roce_packet_type;
9317 +@@ -180,7 +180,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
9318 +
9319 + if (qp->ibqp.xrcd) {
9320 + msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
9321 +- srq = to_mibsrq(msrq);
9322 ++ if (msrq)
9323 ++ srq = to_mibsrq(msrq);
9324 + } else {
9325 + srq = to_msrq(qp->ibqp.srq);
9326 + }
9327 +@@ -1023,16 +1024,21 @@ err_cqb:
9328 + return err;
9329 + }
9330 +
9331 +-void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9332 ++int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9333 + {
9334 + struct mlx5_ib_dev *dev = to_mdev(cq->device);
9335 + struct mlx5_ib_cq *mcq = to_mcq(cq);
9336 ++ int ret;
9337 ++
9338 ++ ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
9339 ++ if (ret)
9340 ++ return ret;
9341 +
9342 +- mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
9343 + if (udata)
9344 + destroy_cq_user(mcq, udata);
9345 + else
9346 + destroy_cq_kernel(dev, mcq);
9347 ++ return 0;
9348 + }
9349 +
9350 + static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
9351 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
9352 +index d60d63221b14d..b805cc8124657 100644
9353 +--- a/drivers/infiniband/hw/mlx5/main.c
9354 ++++ b/drivers/infiniband/hw/mlx5/main.c
9355 +@@ -840,7 +840,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
9356 + /* We support 'Gappy' memory registration too */
9357 + props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
9358 + }
9359 +- props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
9360 ++ /* IB_WR_REG_MR always requires changing the entity size with UMR */
9361 ++ if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
9362 ++ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
9363 + if (MLX5_CAP_GEN(mdev, sho)) {
9364 + props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
9365 + /* At this stage no support for signature handover */
9366 +diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
9367 +index 5287fc8686627..884cc7c731253 100644
9368 +--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
9369 ++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
9370 +@@ -1148,7 +1148,7 @@ int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
9371 + size_t buflen, size_t *bc);
9372 + int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
9373 + struct ib_udata *udata);
9374 +-void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
9375 ++int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
9376 + int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
9377 + int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
9378 + int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
9379 +@@ -1193,7 +1193,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
9380 + const struct ib_mad *in, struct ib_mad *out,
9381 + size_t *out_mad_size, u16 *out_mad_pkey_index);
9382 + int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
9383 +-void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
9384 ++int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
9385 + int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
9386 + int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
9387 + int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
9388 +@@ -1238,7 +1238,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
9389 + struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
9390 + struct ib_wq_init_attr *init_attr,
9391 + struct ib_udata *udata);
9392 +-void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
9393 ++int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
9394 + int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
9395 + u32 wq_attr_mask, struct ib_udata *udata);
9396 + struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
9397 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
9398 +index 3e6f2f9c66555..6eb40b33e1ea8 100644
9399 +--- a/drivers/infiniband/hw/mlx5/mr.c
9400 ++++ b/drivers/infiniband/hw/mlx5/mr.c
9401 +@@ -50,6 +50,29 @@ enum {
9402 + static void
9403 + create_mkey_callback(int status, struct mlx5_async_work *context);
9404 +
9405 ++static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
9406 ++ struct ib_pd *pd)
9407 ++{
9408 ++ struct mlx5_ib_dev *dev = to_mdev(pd->device);
9409 ++
9410 ++ MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
9411 ++ MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
9412 ++ MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
9413 ++ MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
9414 ++ MLX5_SET(mkc, mkc, lr, 1);
9415 ++
9416 ++ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
9417 ++ MLX5_SET(mkc, mkc, relaxed_ordering_write,
9418 ++ !!(acc & IB_ACCESS_RELAXED_ORDERING));
9419 ++ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
9420 ++ MLX5_SET(mkc, mkc, relaxed_ordering_read,
9421 ++ !!(acc & IB_ACCESS_RELAXED_ORDERING));
9422 ++
9423 ++ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
9424 ++ MLX5_SET(mkc, mkc, qpn, 0xffffff);
9425 ++ MLX5_SET64(mkc, mkc, start_addr, start_addr);
9426 ++}
9427 ++
9428 + static void
9429 + assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
9430 + u32 *in)
9431 +@@ -152,12 +175,12 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
9432 + mr->cache_ent = ent;
9433 + mr->dev = ent->dev;
9434 +
9435 ++ set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
9436 + MLX5_SET(mkc, mkc, free, 1);
9437 + MLX5_SET(mkc, mkc, umr_en, 1);
9438 + MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
9439 + MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
9440 +
9441 +- MLX5_SET(mkc, mkc, qpn, 0xffffff);
9442 + MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
9443 + MLX5_SET(mkc, mkc, log_page_size, ent->page);
9444 + return mr;
9445 +@@ -774,29 +797,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
9446 + return 0;
9447 + }
9448 +
9449 +-static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
9450 +- struct ib_pd *pd)
9451 +-{
9452 +- struct mlx5_ib_dev *dev = to_mdev(pd->device);
9453 +-
9454 +- MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
9455 +- MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
9456 +- MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
9457 +- MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
9458 +- MLX5_SET(mkc, mkc, lr, 1);
9459 +-
9460 +- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
9461 +- MLX5_SET(mkc, mkc, relaxed_ordering_write,
9462 +- !!(acc & IB_ACCESS_RELAXED_ORDERING));
9463 +- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
9464 +- MLX5_SET(mkc, mkc, relaxed_ordering_read,
9465 +- !!(acc & IB_ACCESS_RELAXED_ORDERING));
9466 +-
9467 +- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
9468 +- MLX5_SET(mkc, mkc, qpn, 0xffffff);
9469 +- MLX5_SET64(mkc, mkc, start_addr, start_addr);
9470 +-}
9471 +-
9472 + struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
9473 + {
9474 + struct mlx5_ib_dev *dev = to_mdev(pd->device);
9475 +@@ -1190,29 +1190,17 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
9476 + MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
9477 +
9478 + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
9479 ++ set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr,
9480 ++ populate ? pd : dev->umrc.pd);
9481 + MLX5_SET(mkc, mkc, free, !populate);
9482 + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
9483 +- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
9484 +- MLX5_SET(mkc, mkc, relaxed_ordering_write,
9485 +- !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
9486 +- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
9487 +- MLX5_SET(mkc, mkc, relaxed_ordering_read,
9488 +- !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
9489 +- MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
9490 +- MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
9491 +- MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
9492 +- MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
9493 +- MLX5_SET(mkc, mkc, lr, 1);
9494 + MLX5_SET(mkc, mkc, umr_en, 1);
9495 +
9496 +- MLX5_SET64(mkc, mkc, start_addr, virt_addr);
9497 + MLX5_SET64(mkc, mkc, len, length);
9498 +- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
9499 + MLX5_SET(mkc, mkc, bsf_octword_size, 0);
9500 + MLX5_SET(mkc, mkc, translations_octword_size,
9501 + get_octo_len(virt_addr, length, page_shift));
9502 + MLX5_SET(mkc, mkc, log_page_size, page_shift);
9503 +- MLX5_SET(mkc, mkc, qpn, 0xffffff);
9504 + if (populate) {
9505 + MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
9506 + get_octo_len(virt_addr, length, page_shift));
9507 +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
9508 +index 5758dbe640451..7a3e8e6598d34 100644
9509 +--- a/drivers/infiniband/hw/mlx5/qp.c
9510 ++++ b/drivers/infiniband/hw/mlx5/qp.c
9511 +@@ -4716,12 +4716,12 @@ int mlx5_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
9512 + return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
9513 + }
9514 +
9515 +-void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
9516 ++int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
9517 + {
9518 + struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
9519 + u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
9520 +
9521 +- mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
9522 ++ return mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
9523 + }
9524 +
9525 + static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
9526 +@@ -5056,14 +5056,18 @@ err:
9527 + return ERR_PTR(err);
9528 + }
9529 +
9530 +-void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
9531 ++int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
9532 + {
9533 + struct mlx5_ib_dev *dev = to_mdev(wq->device);
9534 + struct mlx5_ib_rwq *rwq = to_mrwq(wq);
9535 ++ int ret;
9536 +
9537 +- mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
9538 ++ ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
9539 ++ if (ret)
9540 ++ return ret;
9541 + destroy_user_rq(dev, wq->pd, rwq, udata);
9542 + kfree(rwq);
9543 ++ return 0;
9544 + }
9545 +
9546 + struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
9547 +diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
9548 +index ba899df44c5b4..5d4e140db99ce 100644
9549 +--- a/drivers/infiniband/hw/mlx5/qp.h
9550 ++++ b/drivers/infiniband/hw/mlx5/qp.h
9551 +@@ -26,8 +26,8 @@ int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
9552 +
9553 + int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec);
9554 +
9555 +-void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
9556 +- struct mlx5_core_qp *rq);
9557 ++int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
9558 ++ struct mlx5_core_qp *rq);
9559 + int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
9560 + struct mlx5_core_qp *sq);
9561 + void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
9562 +diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
9563 +index 7c3968ef9cd10..c683d7000168d 100644
9564 +--- a/drivers/infiniband/hw/mlx5/qpc.c
9565 ++++ b/drivers/infiniband/hw/mlx5/qpc.c
9566 +@@ -576,11 +576,12 @@ err_destroy_rq:
9567 + return err;
9568 + }
9569 +
9570 +-void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
9571 +- struct mlx5_core_qp *rq)
9572 ++int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
9573 ++ struct mlx5_core_qp *rq)
9574 + {
9575 + destroy_resource_common(dev, rq);
9576 + destroy_rq_tracked(dev, rq->qpn, rq->uid);
9577 ++ return 0;
9578 + }
9579 +
9580 + static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
9581 +diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
9582 +index 9fa2f9164a47b..2ad15adf304e5 100644
9583 +--- a/drivers/infiniband/hw/mthca/mthca_provider.c
9584 ++++ b/drivers/infiniband/hw/mthca/mthca_provider.c
9585 +@@ -789,7 +789,7 @@ out:
9586 + return ret;
9587 + }
9588 +
9589 +-static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9590 ++static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9591 + {
9592 + if (udata) {
9593 + struct mthca_ucontext *context =
9594 +@@ -808,6 +808,7 @@ static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9595 + to_mcq(cq)->set_ci_db_index);
9596 + }
9597 + mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
9598 ++ return 0;
9599 + }
9600 +
9601 + static inline u32 convert_access(int acc)
9602 +diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
9603 +index c1751c9a0f625..4ef5298247fcf 100644
9604 +--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
9605 ++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
9606 +@@ -1056,7 +1056,7 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
9607 + spin_unlock_irqrestore(&cq->cq_lock, flags);
9608 + }
9609 +
9610 +-void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
9611 ++int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
9612 + {
9613 + struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
9614 + struct ocrdma_eq *eq = NULL;
9615 +@@ -1081,6 +1081,7 @@ void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
9616 + ocrdma_get_db_addr(dev, pdid),
9617 + dev->nic_info.db_page_size);
9618 + }
9619 ++ return 0;
9620 + }
9621 +
9622 + static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
9623 +diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
9624 +index df8e3b923a440..4322b5d792608 100644
9625 +--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
9626 ++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
9627 +@@ -72,7 +72,7 @@ void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
9628 + int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
9629 + struct ib_udata *udata);
9630 + int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
9631 +-void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
9632 ++int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
9633 +
9634 + struct ib_qp *ocrdma_create_qp(struct ib_pd *,
9635 + struct ib_qp_init_attr *attrs,
9636 +diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
9637 +index d85f992bac299..8e1365951fb6a 100644
9638 +--- a/drivers/infiniband/hw/qedr/main.c
9639 ++++ b/drivers/infiniband/hw/qedr/main.c
9640 +@@ -602,7 +602,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
9641 + qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
9642 +
9643 + /* Part 2 - check capabilities */
9644 +- page_size = ~dev->attr.page_size_caps + 1;
9645 ++ page_size = ~qed_attr->page_size_caps + 1;
9646 + if (page_size > PAGE_SIZE) {
9647 + DP_ERR(dev,
9648 + "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
9649 +diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
9650 +index 97fc7dd353b04..c7169d2c69e5b 100644
9651 +--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
9652 ++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
9653 +@@ -736,7 +736,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
9654 + struct qedr_dev *dev = ep->dev;
9655 + struct qedr_qp *qp;
9656 + struct qed_iwarp_accept_in params;
9657 +- int rc = 0;
9658 ++ int rc;
9659 +
9660 + DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
9661 +
9662 +@@ -759,8 +759,10 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
9663 + params.ord = conn_param->ord;
9664 +
9665 + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
9666 +- &qp->iwarp_cm_flags))
9667 ++ &qp->iwarp_cm_flags)) {
9668 ++ rc = -EINVAL;
9669 + goto err; /* QP already destroyed */
9670 ++ }
9671 +
9672 + rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
9673 + if (rc) {
9674 +diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
9675 +index b49bef94637e5..10536cce120e8 100644
9676 +--- a/drivers/infiniband/hw/qedr/verbs.c
9677 ++++ b/drivers/infiniband/hw/qedr/verbs.c
9678 +@@ -999,7 +999,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
9679 + /* Generate doorbell address. */
9680 + cq->db.data.icid = cq->icid;
9681 + cq->db_addr = dev->db_addr + db_offset;
9682 +- cq->db.data.params = DB_AGG_CMD_SET <<
9683 ++ cq->db.data.params = DB_AGG_CMD_MAX <<
9684 + RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
9685 +
9686 + /* point to the very last element, passing it we will toggle */
9687 +@@ -1051,7 +1051,7 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
9688 + #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
9689 + #define QEDR_DESTROY_CQ_ITER_DURATION (10)
9690 +
9691 +-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
9692 ++int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
9693 + {
9694 + struct qedr_dev *dev = get_qedr_dev(ibcq->device);
9695 + struct qed_rdma_destroy_cq_out_params oparams;
9696 +@@ -1066,7 +1066,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
9697 + /* GSIs CQs are handled by driver, so they don't exist in the FW */
9698 + if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
9699 + qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
9700 +- return;
9701 ++ return 0;
9702 + }
9703 +
9704 + iparams.icid = cq->icid;
9705 +@@ -1114,6 +1114,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
9706 + * Since the destroy CQ ramrod has also been received on the EQ we can
9707 + * be certain that there's no event handler in process.
9708 + */
9709 ++ return 0;
9710 + }
9711 +
9712 + static inline int get_gid_info_from_table(struct ib_qp *ibqp,
9713 +@@ -2112,6 +2113,28 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
9714 + return rc;
9715 + }
9716 +
9717 ++static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
9718 ++ struct ib_udata *udata)
9719 ++{
9720 ++ struct qedr_ucontext *ctx =
9721 ++ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
9722 ++ ibucontext);
9723 ++ int rc;
9724 ++
9725 ++ if (qp->qp_type != IB_QPT_GSI) {
9726 ++ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
9727 ++ if (rc)
9728 ++ return rc;
9729 ++ }
9730 ++
9731 ++ if (qp->create_type == QEDR_QP_CREATE_USER)
9732 ++ qedr_cleanup_user(dev, ctx, qp);
9733 ++ else
9734 ++ qedr_cleanup_kernel(dev, qp);
9735 ++
9736 ++ return 0;
9737 ++}
9738 ++
9739 + struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
9740 + struct ib_qp_init_attr *attrs,
9741 + struct ib_udata *udata)
9742 +@@ -2158,19 +2181,21 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
9743 + rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
9744 +
9745 + if (rc)
9746 +- goto err;
9747 ++ goto out_free_qp;
9748 +
9749 + qp->ibqp.qp_num = qp->qp_id;
9750 +
9751 + if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
9752 + rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
9753 + if (rc)
9754 +- goto err;
9755 ++ goto out_free_qp_resources;
9756 + }
9757 +
9758 + return &qp->ibqp;
9759 +
9760 +-err:
9761 ++out_free_qp_resources:
9762 ++ qedr_free_qp_resources(dev, qp, udata);
9763 ++out_free_qp:
9764 + kfree(qp);
9765 +
9766 + return ERR_PTR(-EFAULT);
9767 +@@ -2636,7 +2661,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
9768 + qp_attr->cap.max_recv_wr = qp->rq.max_wr;
9769 + qp_attr->cap.max_send_sge = qp->sq.max_sges;
9770 + qp_attr->cap.max_recv_sge = qp->rq.max_sges;
9771 +- qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
9772 ++ qp_attr->cap.max_inline_data = dev->attr.max_inline;
9773 + qp_init_attr->cap = qp_attr->cap;
9774 +
9775 + qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
9776 +@@ -2671,28 +2696,6 @@ err:
9777 + return rc;
9778 + }
9779 +
9780 +-static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
9781 +- struct ib_udata *udata)
9782 +-{
9783 +- struct qedr_ucontext *ctx =
9784 +- rdma_udata_to_drv_context(udata, struct qedr_ucontext,
9785 +- ibucontext);
9786 +- int rc;
9787 +-
9788 +- if (qp->qp_type != IB_QPT_GSI) {
9789 +- rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
9790 +- if (rc)
9791 +- return rc;
9792 +- }
9793 +-
9794 +- if (qp->create_type == QEDR_QP_CREATE_USER)
9795 +- qedr_cleanup_user(dev, ctx, qp);
9796 +- else
9797 +- qedr_cleanup_kernel(dev, qp);
9798 +-
9799 +- return 0;
9800 +-}
9801 +-
9802 + int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
9803 + {
9804 + struct qedr_qp *qp = get_qedr_qp(ibqp);
9805 +@@ -2752,6 +2755,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
9806 +
9807 + if (rdma_protocol_iwarp(&dev->ibdev, 1))
9808 + qedr_iw_qp_rem_ref(&qp->ibqp);
9809 ++ else
9810 ++ kfree(qp);
9811 +
9812 + return 0;
9813 + }
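
Moving qedr_free_qp_resources() above qedr_create_qp() is what lets the new out_free_qp_resources label work: a failed xa_insert() now unwinds the qed QP and the user or kernel queues that were just created, rather than only kfree()ing the wrapper, and the RoCE branch of qedr_destroy_qp() gains the matching kfree(). The two-label unwind in miniature, every demo_* name a placeholder:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/xarray.h>

    struct demo_dev { struct xarray qps; };
    struct demo_qp { u32 id; };

    int demo_alloc_hw_qp(struct demo_dev *dev, struct demo_qp *qp);  /* hypothetical */
    void demo_free_hw_qp(struct demo_dev *dev, struct demo_qp *qp);  /* hypothetical */

    static struct demo_qp *demo_create_qp(struct demo_dev *dev)
    {
    	struct demo_qp *qp;
    	int rc;

    	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
    	if (!qp)
    		return ERR_PTR(-ENOMEM);

    	rc = demo_alloc_hw_qp(dev, qp);
    	if (rc)
    		goto out_free_qp;	/* nothing else to undo yet */

    	rc = xa_insert(&dev->qps, qp->id, qp, GFP_KERNEL);
    	if (rc)
    		goto out_free_hw;	/* undo the HW step as well */

    	return qp;

    out_free_hw:
    	demo_free_hw_qp(dev, qp);
    out_free_qp:
    	kfree(qp);
    	return ERR_PTR(rc);
    }
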
9814 +diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
9815 +index 39dd6286ba395..b6d09f5376d81 100644
9816 +--- a/drivers/infiniband/hw/qedr/verbs.h
9817 ++++ b/drivers/infiniband/hw/qedr/verbs.h
9818 +@@ -52,7 +52,7 @@ void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
9819 + int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
9820 + struct ib_udata *udata);
9821 + int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
9822 +-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
9823 ++int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
9824 + int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
9825 + struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
9826 + struct ib_udata *);
9827 +diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
9828 +index b8a77ce115908..586ff16be1bb3 100644
9829 +--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
9830 ++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
9831 +@@ -596,9 +596,9 @@ int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
9832 + return 0;
9833 + }
9834 +
9835 +-void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9836 ++int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9837 + {
9838 +- return;
9839 ++ return 0;
9840 + }
9841 +
9842 + struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
9843 +diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
9844 +index 2aedf78c13cf2..f13b08c59b9a3 100644
9845 +--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
9846 ++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
9847 +@@ -60,7 +60,7 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
9848 + int attr_mask, struct ib_udata *udata);
9849 + int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
9850 + struct ib_udata *udata);
9851 +-void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
9852 ++int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
9853 + struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
9854 + u64 virt_addr, int access_flags,
9855 + struct ib_udata *udata);
9856 +diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
9857 +index 4f6cc0de7ef95..6d3e6389e47da 100644
9858 +--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
9859 ++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
9860 +@@ -235,7 +235,7 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
9861 + * @cq: the completion queue to destroy.
9862 + * @udata: user data or null for kernel object
9863 + */
9864 +-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9865 ++int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9866 + {
9867 + struct pvrdma_cq *vcq = to_vcq(cq);
9868 + union pvrdma_cmd_req req;
9869 +@@ -261,6 +261,7 @@ void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
9870 +
9871 + pvrdma_free_cq(dev, vcq);
9872 + atomic_dec(&dev->num_cqs);
9873 ++ return 0;
9874 + }
9875 +
9876 + static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
9877 +diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
9878 +index 699b20849a7ef..61b8425d92c5e 100644
9879 +--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
9880 ++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
9881 +@@ -411,7 +411,7 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
9882 + int sg_nents, unsigned int *sg_offset);
9883 + int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
9884 + struct ib_udata *udata);
9885 +-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
9886 ++int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
9887 + int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
9888 + int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
9889 + int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
9890 +diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
9891 +index 04d2e72017fed..19248be140933 100644
9892 +--- a/drivers/infiniband/sw/rdmavt/cq.c
9893 ++++ b/drivers/infiniband/sw/rdmavt/cq.c
9894 +@@ -315,7 +315,7 @@ bail_wc:
9895 + *
9896 + * Called by ib_destroy_cq() in the generic verbs code.
9897 + */
9898 +-void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
9899 ++int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
9900 + {
9901 + struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
9902 + struct rvt_dev_info *rdi = cq->rdi;
9903 +@@ -328,6 +328,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
9904 + kref_put(&cq->ip->ref, rvt_release_mmap_info);
9905 + else
9906 + vfree(cq->kqueue);
9907 ++ return 0;
9908 + }
9909 +
9910 + /**
9911 +diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
9912 +index 5e26a2eb19a4c..feb01e7ee0044 100644
9913 +--- a/drivers/infiniband/sw/rdmavt/cq.h
9914 ++++ b/drivers/infiniband/sw/rdmavt/cq.h
9915 +@@ -53,7 +53,7 @@
9916 +
9917 + int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
9918 + struct ib_udata *udata);
9919 +-void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
9920 ++int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
9921 + int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
9922 + int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
9923 + int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
9924 +diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
9925 +index f904bb34477ae..2d534c450f3c8 100644
9926 +--- a/drivers/infiniband/sw/rdmavt/vt.c
9927 ++++ b/drivers/infiniband/sw/rdmavt/vt.c
9928 +@@ -95,9 +95,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
9929 + if (!rdi)
9930 + return rdi;
9931 +
9932 +- rdi->ports = kcalloc(nports,
9933 +- sizeof(struct rvt_ibport **),
9934 +- GFP_KERNEL);
9935 ++ rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
9936 + if (!rdi->ports)
9937 + ib_dealloc_device(&rdi->ibdev);
9938 +
9939 +diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
9940 +index 7e123d3c4d09b..2da4187db80c9 100644
9941 +--- a/drivers/infiniband/sw/rxe/rxe_recv.c
9942 ++++ b/drivers/infiniband/sw/rxe/rxe_recv.c
9943 +@@ -260,6 +260,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
9944 + struct rxe_mc_elem *mce;
9945 + struct rxe_qp *qp;
9946 + union ib_gid dgid;
9947 ++ struct sk_buff *per_qp_skb;
9948 ++ struct rxe_pkt_info *per_qp_pkt;
9949 + int err;
9950 +
9951 + if (skb->protocol == htons(ETH_P_IP))
9952 +@@ -288,21 +290,29 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
9953 + if (err)
9954 + continue;
9955 +
9956 +- /* if *not* the last qp in the list
9957 +- * increase the users of the skb then post to the next qp
9958 ++ /* for all but the last qp create a new clone of the
9959 ++ * skb and pass to the qp.
9960 + */
9961 + if (mce->qp_list.next != &mcg->qp_list)
9962 +- skb_get(skb);
9963 ++ per_qp_skb = skb_clone(skb, GFP_ATOMIC);
9964 ++ else
9965 ++ per_qp_skb = skb;
9966 ++
9967 ++ if (unlikely(!per_qp_skb))
9968 ++ continue;
9969 +
9970 +- pkt->qp = qp;
9971 ++ per_qp_pkt = SKB_TO_PKT(per_qp_skb);
9972 ++ per_qp_pkt->qp = qp;
9973 + rxe_add_ref(qp);
9974 +- rxe_rcv_pkt(pkt, skb);
9975 ++ rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
9976 + }
9977 +
9978 + spin_unlock_bh(&mcg->mcg_lock);
9979 +
9980 + rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
9981 +
9982 ++ return;
9983 ++
9984 + err1:
9985 + kfree_skb(skb);
9986 + }
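
The rxe_recv.c change fixes multicast delivery: skb_get() only bumped the refcount, so every QP on the list shared one skb and one embedded rxe_pkt_info, and concurrent receivers clobbered each other's pkt->qp. skb_clone() gives each QP a private skb head (including skb->cb, where the pkt info lives) over shared payload, with the final QP consuming the original. The fan-out idiom:

    #include <linux/skbuff.h>

    /* Deliver one skb to n consumers (n >= 1) that each may scribble
     * on metadata: clone for all but the last, which takes the
     * original. deliver() is a placeholder handler that takes
     * ownership of the buffer passed to it.
     */
    static void fan_out(struct sk_buff *skb, int n,
    		    void (*deliver)(struct sk_buff *))
    {
    	int i;

    	for (i = 0; i < n; i++) {
    		struct sk_buff *s = (i < n - 1) ?
    				    skb_clone(skb, GFP_ATOMIC) : skb;

    		if (!s)
    			continue;	/* clone failed; skip this one */
    		deliver(s);
    	}
    }
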
9987 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
9988 +index 8522e9a3e9140..cfe115d64cb88 100644
9989 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
9990 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
9991 +@@ -803,13 +803,14 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
9992 + return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
9993 + }
9994 +
9995 +-static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
9996 ++static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
9997 + {
9998 + struct rxe_cq *cq = to_rcq(ibcq);
9999 +
10000 + rxe_cq_disable(cq);
10001 +
10002 + rxe_drop_ref(cq);
10003 ++ return 0;
10004 + }
10005 +
10006 + static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
10007 +diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
10008 +index adafa1b8bebe3..60271c30e7de5 100644
10009 +--- a/drivers/infiniband/sw/siw/siw_verbs.c
10010 ++++ b/drivers/infiniband/sw/siw/siw_verbs.c
10011 +@@ -1055,7 +1055,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
10012 + return rv > 0 ? 0 : rv;
10013 + }
10014 +
10015 +-void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
10016 ++int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
10017 + {
10018 + struct siw_cq *cq = to_siw_cq(base_cq);
10019 + struct siw_device *sdev = to_siw_dev(base_cq->device);
10020 +@@ -1073,6 +1073,7 @@ void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
10021 + atomic_dec(&sdev->num_cq);
10022 +
10023 + vfree(cq->queue);
10024 ++ return 0;
10025 + }
10026 +
10027 + /*
10028 +diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
10029 +index d9572275a6b69..476e9283fce25 100644
10030 +--- a/drivers/infiniband/sw/siw/siw_verbs.h
10031 ++++ b/drivers/infiniband/sw/siw/siw_verbs.h
10032 +@@ -62,7 +62,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
10033 + const struct ib_send_wr **bad_wr);
10034 + int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
10035 + const struct ib_recv_wr **bad_wr);
10036 +-void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
10037 ++int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
10038 + int siw_poll_cq(struct ib_cq *base_cq, int num_entries, struct ib_wc *wc);
10039 + int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags);
10040 + struct ib_mr *siw_reg_user_mr(struct ib_pd *base_pd, u64 start, u64 len,
10041 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
10042 +index f772fe8c5b663..abfab89423f41 100644
10043 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
10044 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
10045 +@@ -2480,6 +2480,8 @@ static struct net_device *ipoib_add_port(const char *format,
10046 + /* call event handler to ensure pkey in sync */
10047 + queue_work(ipoib_workqueue, &priv->flush_heavy);
10048 +
10049 ++ ndev->rtnl_link_ops = ipoib_get_link_ops();
10050 ++
10051 + result = register_netdev(ndev);
10052 + if (result) {
10053 + pr_warn("%s: couldn't register ipoib port %d; error %d\n",
10054 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
10055 +index 38c984d16996d..d5a90a66b45cf 100644
10056 +--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
10057 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
10058 +@@ -144,6 +144,16 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
10059 + return 0;
10060 + }
10061 +
10062 ++static void ipoib_del_child_link(struct net_device *dev, struct list_head *head)
10063 ++{
10064 ++ struct ipoib_dev_priv *priv = ipoib_priv(dev);
10065 ++
10066 ++ if (!priv->parent)
10067 ++ return;
10068 ++
10069 ++ unregister_netdevice_queue(dev, head);
10070 ++}
10071 ++
10072 + static size_t ipoib_get_size(const struct net_device *dev)
10073 + {
10074 + return nla_total_size(2) + /* IFLA_IPOIB_PKEY */
10075 +@@ -158,6 +168,7 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
10076 + .priv_size = sizeof(struct ipoib_dev_priv),
10077 + .setup = ipoib_setup_common,
10078 + .newlink = ipoib_new_child_link,
10079 ++ .dellink = ipoib_del_child_link,
10080 + .changelink = ipoib_changelink,
10081 + .get_size = ipoib_get_size,
10082 + .fill_info = ipoib_fill_info,
10083 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
10084 +index 30865605e0980..4c50a87ed7cc2 100644
10085 +--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
10086 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
10087 +@@ -195,6 +195,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
10088 + }
10089 + priv = ipoib_priv(ndev);
10090 +
10091 ++ ndev->rtnl_link_ops = ipoib_get_link_ops();
10092 ++
10093 + result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
10094 +
10095 + if (result && ndev->reg_state == NETREG_UNINITIALIZED)
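
The three ipoib hunks are one fix: the parent port and legacy VLAN children now publish ipoib_get_link_ops(), and the new .dellink handler lets `ip link del` remove a child over netlink while refusing to touch a device with no parent. A .dellink skeleton with an assumed priv layout:

    #include <linux/netdevice.h>
    #include <net/rtnetlink.h>

    struct demo_priv {
    	struct net_device *parent;	/* NULL for the port device */
    };

    static void demo_del_child_link(struct net_device *dev,
    				struct list_head *head)
    {
    	struct demo_priv *priv = netdev_priv(dev);

    	if (!priv->parent)
    		return;		/* netlink may not delete the port dev */

    	unregister_netdevice_queue(dev, head);
    }
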
10096 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
10097 +index 28f6414dfa3dc..d6f93601712e4 100644
10098 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
10099 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
10100 +@@ -16,6 +16,7 @@
10101 + #include "rtrs-srv.h"
10102 + #include "rtrs-log.h"
10103 + #include <rdma/ib_cm.h>
10104 ++#include <rdma/ib_verbs.h>
10105 +
10106 + MODULE_DESCRIPTION("RDMA Transport Server");
10107 + MODULE_LICENSE("GPL");
10108 +@@ -31,6 +32,7 @@ MODULE_LICENSE("GPL");
10109 + static struct rtrs_rdma_dev_pd dev_pd;
10110 + static mempool_t *chunk_pool;
10111 + struct class *rtrs_dev_class;
10112 ++static struct rtrs_srv_ib_ctx ib_ctx;
10113 +
10114 + static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
10115 + static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
10116 +@@ -2042,6 +2044,70 @@ static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
10117 + kfree(ctx);
10118 + }
10119 +
10120 ++static int rtrs_srv_add_one(struct ib_device *device)
10121 ++{
10122 ++ struct rtrs_srv_ctx *ctx;
10123 ++ int ret = 0;
10124 ++
10125 ++ mutex_lock(&ib_ctx.ib_dev_mutex);
10126 ++ if (ib_ctx.ib_dev_count)
10127 ++ goto out;
10128 ++
10129 ++ /*
10130 ++ * Since our CM IDs are NOT bound to any ib device we will create them
10131 ++ * only once
10132 ++ */
10133 ++ ctx = ib_ctx.srv_ctx;
10134 ++ ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
10135 ++ if (ret) {
10136 ++ /*
10137 ++ * We errored out here.
10138 ++		 * According to the IB core code, the error code we return here is
10139 ++		 * ignored, and no further calls to our ops are made.
10140 ++ */
10141 ++ pr_err("Failed to initialize RDMA connection");
10142 ++ goto err_out;
10143 ++ }
10144 ++
10145 ++out:
10146 ++ /*
10147 ++	 * Keep track of the number of ib devices added
10148 ++ */
10149 ++ ib_ctx.ib_dev_count++;
10150 ++
10151 ++err_out:
10152 ++ mutex_unlock(&ib_ctx.ib_dev_mutex);
10153 ++ return ret;
10154 ++}
10155 ++
10156 ++static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
10157 ++{
10158 ++ struct rtrs_srv_ctx *ctx;
10159 ++
10160 ++ mutex_lock(&ib_ctx.ib_dev_mutex);
10161 ++ ib_ctx.ib_dev_count--;
10162 ++
10163 ++ if (ib_ctx.ib_dev_count)
10164 ++ goto out;
10165 ++
10166 ++ /*
10167 ++ * Since our CM IDs are NOT bound to any ib device we will remove them
10168 ++ * only once, when the last device is removed
10169 ++ */
10170 ++ ctx = ib_ctx.srv_ctx;
10171 ++ rdma_destroy_id(ctx->cm_id_ip);
10172 ++ rdma_destroy_id(ctx->cm_id_ib);
10173 ++
10174 ++out:
10175 ++ mutex_unlock(&ib_ctx.ib_dev_mutex);
10176 ++}
10177 ++
10178 ++static struct ib_client rtrs_srv_client = {
10179 ++ .name = "rtrs_server",
10180 ++ .add = rtrs_srv_add_one,
10181 ++ .remove = rtrs_srv_remove_one
10182 ++};
10183 ++
10184 + /**
10185 + * rtrs_srv_open() - open RTRS server context
10186 + * @ops: callback functions
10187 +@@ -2060,7 +2126,11 @@ struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
10188 + if (!ctx)
10189 + return ERR_PTR(-ENOMEM);
10190 +
10191 +- err = rtrs_srv_rdma_init(ctx, port);
10192 ++ mutex_init(&ib_ctx.ib_dev_mutex);
10193 ++ ib_ctx.srv_ctx = ctx;
10194 ++ ib_ctx.port = port;
10195 ++
10196 ++ err = ib_register_client(&rtrs_srv_client);
10197 + if (err) {
10198 + free_srv_ctx(ctx);
10199 + return ERR_PTR(err);
10200 +@@ -2099,8 +2169,8 @@ static void close_ctx(struct rtrs_srv_ctx *ctx)
10201 + */
10202 + void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
10203 + {
10204 +- rdma_destroy_id(ctx->cm_id_ip);
10205 +- rdma_destroy_id(ctx->cm_id_ib);
10206 ++ ib_unregister_client(&rtrs_srv_client);
10207 ++ mutex_destroy(&ib_ctx.ib_dev_mutex);
10208 + close_ctx(ctx);
10209 + free_srv_ctx(ctx);
10210 + }
10211 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
10212 +index dc95b0932f0df..08b0b8a6eebe6 100644
10213 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
10214 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
10215 +@@ -118,6 +118,13 @@ struct rtrs_srv_ctx {
10216 + struct list_head srv_list;
10217 + };
10218 +
10219 ++struct rtrs_srv_ib_ctx {
10220 ++ struct rtrs_srv_ctx *srv_ctx;
10221 ++ u16 port;
10222 ++ struct mutex ib_dev_mutex;
10223 ++ int ib_dev_count;
10224 ++};
10225 ++
10226 + extern struct class *rtrs_dev_class;
10227 +
10228 + void close_sess(struct rtrs_srv_sess *sess);
10229 +diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
10230 +index 7c70492d9d6b5..f831f01501d58 100644
10231 +--- a/drivers/input/keyboard/ep93xx_keypad.c
10232 ++++ b/drivers/input/keyboard/ep93xx_keypad.c
10233 +@@ -250,8 +250,8 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
10234 + }
10235 +
10236 + keypad->irq = platform_get_irq(pdev, 0);
10237 +- if (!keypad->irq) {
10238 +- err = -ENXIO;
10239 ++ if (keypad->irq < 0) {
10240 ++ err = keypad->irq;
10241 + goto failed_free;
10242 + }
10243 +
10244 +diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
10245 +index 94c94d7f5155f..d6c924032aaa8 100644
10246 +--- a/drivers/input/keyboard/omap4-keypad.c
10247 ++++ b/drivers/input/keyboard/omap4-keypad.c
10248 +@@ -240,10 +240,8 @@ static int omap4_keypad_probe(struct platform_device *pdev)
10249 + }
10250 +
10251 + irq = platform_get_irq(pdev, 0);
10252 +- if (!irq) {
10253 +- dev_err(&pdev->dev, "no keyboard irq assigned\n");
10254 +- return -EINVAL;
10255 +- }
10256 ++ if (irq < 0)
10257 ++ return irq;
10258 +
10259 + keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL);
10260 + if (!keypad_data) {
10261 +diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
10262 +index af3a6824f1a4d..77e0743a3cf85 100644
10263 +--- a/drivers/input/keyboard/twl4030_keypad.c
10264 ++++ b/drivers/input/keyboard/twl4030_keypad.c
10265 +@@ -50,7 +50,7 @@ struct twl4030_keypad {
10266 + bool autorepeat;
10267 + unsigned int n_rows;
10268 + unsigned int n_cols;
10269 +- unsigned int irq;
10270 ++ int irq;
10271 +
10272 + struct device *dbg_dev;
10273 + struct input_dev *input;
10274 +@@ -376,10 +376,8 @@ static int twl4030_kp_probe(struct platform_device *pdev)
10275 + }
10276 +
10277 + kp->irq = platform_get_irq(pdev, 0);
10278 +- if (!kp->irq) {
10279 +- dev_err(&pdev->dev, "no keyboard irq assigned\n");
10280 +- return -EINVAL;
10281 +- }
10282 ++ if (kp->irq < 0)
10283 ++ return kp->irq;
10284 +
10285 + error = matrix_keypad_build_keymap(keymap_data, NULL,
10286 + TWL4030_MAX_ROWS,
10287 +diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
10288 +index a681a2c04e399..f15ed3dcdb9b2 100644
10289 +--- a/drivers/input/serio/sun4i-ps2.c
10290 ++++ b/drivers/input/serio/sun4i-ps2.c
10291 +@@ -211,7 +211,6 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
10292 + struct sun4i_ps2data *drvdata;
10293 + struct serio *serio;
10294 + struct device *dev = &pdev->dev;
10295 +- unsigned int irq;
10296 + int error;
10297 +
10298 + drvdata = kzalloc(sizeof(struct sun4i_ps2data), GFP_KERNEL);
10299 +@@ -264,14 +263,12 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
10300 + writel(0, drvdata->reg_base + PS2_REG_GCTL);
10301 +
10302 + /* Get IRQ for the device */
10303 +- irq = platform_get_irq(pdev, 0);
10304 +- if (!irq) {
10305 +- dev_err(dev, "no IRQ found\n");
10306 +- error = -ENXIO;
10307 ++ drvdata->irq = platform_get_irq(pdev, 0);
10308 ++ if (drvdata->irq < 0) {
10309 ++ error = drvdata->irq;
10310 + goto err_disable_clk;
10311 + }
10312 +
10313 +- drvdata->irq = irq;
10314 + drvdata->serio = serio;
10315 + drvdata->dev = dev;
10316 +
10317 +diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
10318 +index b0bd5bb079bec..75b39ef39b743 100644
10319 +--- a/drivers/input/touchscreen/elants_i2c.c
10320 ++++ b/drivers/input/touchscreen/elants_i2c.c
10321 +@@ -90,7 +90,7 @@
10322 + /* FW read command, 0x53 0x?? 0x0, 0x01 */
10323 + #define E_ELAN_INFO_FW_VER 0x00
10324 + #define E_ELAN_INFO_BC_VER 0x10
10325 +-#define E_ELAN_INFO_REK 0xE0
10326 ++#define E_ELAN_INFO_REK 0xD0
10327 + #define E_ELAN_INFO_TEST_VER 0xE0
10328 + #define E_ELAN_INFO_FW_ID 0xF0
10329 + #define E_INFO_OSR 0xD6
10330 +diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
10331 +index 9ed258854349b..5e6ba5c4eca2a 100644
10332 +--- a/drivers/input/touchscreen/imx6ul_tsc.c
10333 ++++ b/drivers/input/touchscreen/imx6ul_tsc.c
10334 +@@ -530,20 +530,25 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
10335 +
10336 + mutex_lock(&input_dev->mutex);
10337 +
10338 +- if (input_dev->users) {
10339 +- retval = clk_prepare_enable(tsc->adc_clk);
10340 +- if (retval)
10341 +- goto out;
10342 +-
10343 +- retval = clk_prepare_enable(tsc->tsc_clk);
10344 +- if (retval) {
10345 +- clk_disable_unprepare(tsc->adc_clk);
10346 +- goto out;
10347 +- }
10348 ++ if (!input_dev->users)
10349 ++ goto out;
10350 +
10351 +- retval = imx6ul_tsc_init(tsc);
10352 ++ retval = clk_prepare_enable(tsc->adc_clk);
10353 ++ if (retval)
10354 ++ goto out;
10355 ++
10356 ++ retval = clk_prepare_enable(tsc->tsc_clk);
10357 ++ if (retval) {
10358 ++ clk_disable_unprepare(tsc->adc_clk);
10359 ++ goto out;
10360 + }
10361 +
10362 ++ retval = imx6ul_tsc_init(tsc);
10363 ++ if (retval) {
10364 ++ clk_disable_unprepare(tsc->tsc_clk);
10365 ++ clk_disable_unprepare(tsc->adc_clk);
10366 ++ goto out;
10367 ++ }
10368 + out:
10369 + mutex_unlock(&input_dev->mutex);
10370 + return retval;
10371 +diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
10372 +index df946869d4cd1..9a64e1dbc04ad 100644
10373 +--- a/drivers/input/touchscreen/stmfts.c
10374 ++++ b/drivers/input/touchscreen/stmfts.c
10375 +@@ -479,7 +479,7 @@ static ssize_t stmfts_sysfs_hover_enable_write(struct device *dev,
10376 +
10377 + mutex_lock(&sdata->mutex);
10378 +
10379 +- if (value & sdata->hover_enabled)
10380 ++ if (value && sdata->hover_enabled)
10381 + goto out;
10382 +
10383 + if (sdata->running)
10384 +diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
10385 +index af6bec3ace007..ef3dd32aa6d97 100644
10386 +--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
10387 ++++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
10388 +@@ -584,8 +584,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
10389 + * index into qcom_iommu->ctxs:
10390 + */
10391 + if (WARN_ON(asid < 1) ||
10392 +- WARN_ON(asid > qcom_iommu->num_ctxs))
10393 ++ WARN_ON(asid > qcom_iommu->num_ctxs)) {
10394 ++ put_device(&iommu_pdev->dev);
10395 + return -EINVAL;
10396 ++ }
10397 +
10398 + if (!dev_iommu_priv_get(dev)) {
10399 + dev_iommu_priv_set(dev, qcom_iommu);
10400 +@@ -594,8 +596,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
10401 + * multiple different iommu devices. Multiple context
10402 + * banks are ok, but multiple devices are not:
10403 + */
10404 +- if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
10405 ++ if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
10406 ++ put_device(&iommu_pdev->dev);
10407 + return -EINVAL;
10408 ++ }
10409 + }
10410 +
10411 + return iommu_fwspec_add_ids(dev, &asid, 1);
10412 +diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
10413 +index d4e97605456bb..05bf94b87b938 100644
10414 +--- a/drivers/irqchip/irq-ti-sci-inta.c
10415 ++++ b/drivers/irqchip/irq-ti-sci-inta.c
10416 +@@ -175,8 +175,8 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
10417 + struct irq_fwspec parent_fwspec;
10418 + struct device_node *parent_node;
10419 + unsigned int parent_virq;
10420 +- u16 vint_id, p_hwirq;
10421 +- int ret;
10422 ++ int p_hwirq, ret;
10423 ++ u16 vint_id;
10424 +
10425 + vint_id = ti_sci_get_free_resource(inta->vint);
10426 + if (vint_id == TI_SCI_RESOURCE_NULL)
10427 +diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
10428 +index cbc1758228d9e..85a72b56177cf 100644
10429 +--- a/drivers/irqchip/irq-ti-sci-intr.c
10430 ++++ b/drivers/irqchip/irq-ti-sci-intr.c
10431 +@@ -137,8 +137,8 @@ static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain,
10432 + struct ti_sci_intr_irq_domain *intr = domain->host_data;
10433 + struct device_node *parent_node;
10434 + struct irq_fwspec fwspec;
10435 +- u16 out_irq, p_hwirq;
10436 +- int err = 0;
10437 ++ int p_hwirq, err = 0;
10438 ++ u16 out_irq;
10439 +
10440 + out_irq = ti_sci_get_free_resource(intr->out_irqs);
10441 + if (out_irq == TI_SCI_RESOURCE_NULL)
10442 +diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
10443 +index fe78bf0fdce54..c1bcac71008c6 100644
10444 +--- a/drivers/lightnvm/core.c
10445 ++++ b/drivers/lightnvm/core.c
10446 +@@ -1311,8 +1311,9 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
10447 + strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
10448 + i++;
10449 +
10450 +- if (i > 31) {
10451 +- pr_err("max 31 devices can be reported.\n");
10452 ++ if (i >= ARRAY_SIZE(devices->info)) {
10453 ++ pr_err("max %zd devices can be reported.\n",
10454 ++ ARRAY_SIZE(devices->info));
10455 + break;
10456 + }
10457 + }
10458 +diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
10459 +index 0b821a5b2db84..3e7d4b20ab34f 100644
10460 +--- a/drivers/mailbox/mailbox.c
10461 ++++ b/drivers/mailbox/mailbox.c
10462 +@@ -82,9 +82,12 @@ static void msg_submit(struct mbox_chan *chan)
10463 + exit:
10464 + spin_unlock_irqrestore(&chan->lock, flags);
10465 +
10466 +- if (!err && (chan->txdone_method & TXDONE_BY_POLL))
10467 +- /* kick start the timer immediately to avoid delays */
10468 +- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
10469 ++ /* kick start the timer immediately to avoid delays */
10470 ++ if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
10471 ++ /* but only if not already active */
10472 ++ if (!hrtimer_active(&chan->mbox->poll_hrt))
10473 ++ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
10474 ++ }
10475 + }
10476 +
10477 + static void tx_tick(struct mbox_chan *chan, int r)
10478 +@@ -122,11 +125,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
10479 + struct mbox_chan *chan = &mbox->chans[i];
10480 +
10481 + if (chan->active_req && chan->cl) {
10482 ++ resched = true;
10483 + txdone = chan->mbox->ops->last_tx_done(chan);
10484 + if (txdone)
10485 + tx_tick(chan, 0);
10486 +- else
10487 +- resched = true;
10488 + }
10489 + }
10490 +
10491 +diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
10492 +index 484d4438cd835..5665b6ea8119f 100644
10493 +--- a/drivers/mailbox/mtk-cmdq-mailbox.c
10494 ++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
10495 +@@ -69,7 +69,7 @@ struct cmdq_task {
10496 + struct cmdq {
10497 + struct mbox_controller mbox;
10498 + void __iomem *base;
10499 +- u32 irq;
10500 ++ int irq;
10501 + u32 thread_nr;
10502 + u32 irq_mask;
10503 + struct cmdq_thread *thread;
10504 +@@ -525,10 +525,8 @@ static int cmdq_probe(struct platform_device *pdev)
10505 + }
10506 +
10507 + cmdq->irq = platform_get_irq(pdev, 0);
10508 +- if (!cmdq->irq) {
10509 +- dev_err(dev, "failed to get irq\n");
10510 +- return -EINVAL;
10511 +- }
10512 ++ if (cmdq->irq < 0)
10513 ++ return cmdq->irq;
10514 +
10515 + plat_data = (struct gce_plat *)of_device_get_match_data(dev);
10516 + if (!plat_data) {
10517 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
10518 +index 6ed05ca65a0f8..9b005e144014f 100644
10519 +--- a/drivers/md/dm.c
10520 ++++ b/drivers/md/dm.c
10521 +@@ -1744,17 +1744,11 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
10522 + }
10523 +
10524 + /*
10525 +- * If in ->submit_bio we need to use blk_queue_split(), otherwise
10526 +- * queue_limits for abnormal requests (e.g. discard, writesame, etc)
10527 +- * won't be imposed.
10528 +- * If called from dm_wq_work() for deferred bio processing, bio
10529 +- * was already handled by following code with previous ->submit_bio.
10530 ++ * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
10531 ++ * otherwise associated queue_limits won't be imposed.
10532 + */
10533 +- if (current->bio_list) {
10534 +- if (is_abnormal_io(bio))
10535 +- blk_queue_split(&bio);
10536 +- /* regular IO is split by __split_and_process_bio */
10537 +- }
10538 ++ if (is_abnormal_io(bio))
10539 ++ blk_queue_split(&bio);
10540 +
10541 + if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
10542 + return __process_bio(md, map, bio, ti);
10543 +@@ -1768,18 +1762,6 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
10544 + int srcu_idx;
10545 + struct dm_table *map;
10546 +
10547 +- if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
10548 +- /*
10549 +- * We are called with a live reference on q_usage_counter, but
10550 +- * that one will be released as soon as we return. Grab an
10551 +- * extra one as blk_mq_submit_bio expects to be able to consume
10552 +- * a reference (which lives until the request is freed in case a
10553 +- * request is allocated).
10554 +- */
10555 +- percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
10556 +- return blk_mq_submit_bio(bio);
10557 +- }
10558 +-
10559 + map = dm_get_live_table(md, &srcu_idx);
10560 +
10561 + /* if we're suspended, we have to queue this io for later */
10562 +@@ -1849,6 +1831,7 @@ static int next_free_minor(int *minor)
10563 + }
10564 +
10565 + static const struct block_device_operations dm_blk_dops;
10566 ++static const struct block_device_operations dm_rq_blk_dops;
10567 + static const struct dax_operations dm_dax_ops;
10568 +
10569 + static void dm_wq_work(struct work_struct *work);
10570 +@@ -2248,9 +2231,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
10571 +
10572 + switch (type) {
10573 + case DM_TYPE_REQUEST_BASED:
10574 ++ md->disk->fops = &dm_rq_blk_dops;
10575 + r = dm_mq_init_request_queue(md, t);
10576 + if (r) {
10577 +- DMERR("Cannot initialize queue for request-based dm-mq mapped device");
10578 ++ DMERR("Cannot initialize queue for request-based dm mapped device");
10579 + return r;
10580 + }
10581 + break;
10582 +@@ -2461,29 +2445,19 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
10583 + */
10584 + static void dm_wq_work(struct work_struct *work)
10585 + {
10586 +- struct mapped_device *md = container_of(work, struct mapped_device,
10587 +- work);
10588 +- struct bio *c;
10589 +- int srcu_idx;
10590 +- struct dm_table *map;
10591 +-
10592 +- map = dm_get_live_table(md, &srcu_idx);
10593 ++ struct mapped_device *md = container_of(work, struct mapped_device, work);
10594 ++ struct bio *bio;
10595 +
10596 + while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
10597 + spin_lock_irq(&md->deferred_lock);
10598 +- c = bio_list_pop(&md->deferred);
10599 ++ bio = bio_list_pop(&md->deferred);
10600 + spin_unlock_irq(&md->deferred_lock);
10601 +
10602 +- if (!c)
10603 ++ if (!bio)
10604 + break;
10605 +
10606 +- if (dm_request_based(md))
10607 +- (void) submit_bio_noacct(c);
10608 +- else
10609 +- (void) dm_process_bio(md, map, c);
10610 ++ submit_bio_noacct(bio);
10611 + }
10612 +-
10613 +- dm_put_live_table(md, srcu_idx);
10614 + }
10615 +
10616 + static void dm_queue_flush(struct mapped_device *md)
10617 +@@ -3243,6 +3217,15 @@ static const struct block_device_operations dm_blk_dops = {
10618 + .owner = THIS_MODULE
10619 + };
10620 +
10621 ++static const struct block_device_operations dm_rq_blk_dops = {
10622 ++ .open = dm_blk_open,
10623 ++ .release = dm_blk_close,
10624 ++ .ioctl = dm_blk_ioctl,
10625 ++ .getgeo = dm_blk_getgeo,
10626 ++ .pr_ops = &dm_pr_ops,
10627 ++ .owner = THIS_MODULE
10628 ++};
10629 ++
10630 + static const struct dax_operations dm_dax_ops = {
10631 + .direct_access = dm_dax_direct_access,
10632 + .dax_supported = dm_dax_supported,
10633 +diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
10634 +index b10c51988c8ee..c61ab86a28b52 100644
10635 +--- a/drivers/md/md-bitmap.c
10636 ++++ b/drivers/md/md-bitmap.c
10637 +@@ -1949,6 +1949,7 @@ out:
10638 + }
10639 + EXPORT_SYMBOL_GPL(md_bitmap_load);
10640 +
10641 ++/* caller needs to free the returned bitmap with md_bitmap_free() */
10642 + struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
10643 + {
10644 + int rv = 0;
10645 +@@ -2012,6 +2013,7 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
10646 + md_bitmap_unplug(mddev->bitmap);
10647 + *low = lo;
10648 + *high = hi;
10649 ++ md_bitmap_free(bitmap);
10650 +
10651 + return rv;
10652 + }
10653 +@@ -2615,4 +2617,3 @@ struct attribute_group md_bitmap_group = {
10654 + .name = "bitmap",
10655 + .attrs = md_bitmap_attrs,
10656 + };
10657 +-
10658 +diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
10659 +index d50737ec40394..afbbc552c3275 100644
10660 +--- a/drivers/md/md-cluster.c
10661 ++++ b/drivers/md/md-cluster.c
10662 +@@ -1166,6 +1166,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
10663 + * can't resize bitmap
10664 + */
10665 + goto out;
10666 ++ md_bitmap_free(bitmap);
10667 + }
10668 +
10669 + return 0;
10670 +diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c
10671 +index 3f1ca40b9b987..8a8585261bb80 100644
10672 +--- a/drivers/media/firewire/firedtv-fw.c
10673 ++++ b/drivers/media/firewire/firedtv-fw.c
10674 +@@ -272,8 +272,10 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
10675 +
10676 + name_len = fw_csr_string(unit->directory, CSR_MODEL,
10677 + name, sizeof(name));
10678 +- if (name_len < 0)
10679 +- return name_len;
10680 ++ if (name_len < 0) {
10681 ++ err = name_len;
10682 ++ goto fail_free;
10683 ++ }
10684 + for (i = ARRAY_SIZE(model_names); --i; )
10685 + if (strlen(model_names[i]) <= name_len &&
10686 + strncmp(name, model_names[i], name_len) == 0)
10687 +diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
10688 +index de295114ca482..21666d705e372 100644
10689 +--- a/drivers/media/i2c/m5mols/m5mols_core.c
10690 ++++ b/drivers/media/i2c/m5mols/m5mols_core.c
10691 +@@ -764,7 +764,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
10692 +
10693 + ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
10694 + if (ret) {
10695 +- info->set_power(&client->dev, 0);
10696 ++ if (info->set_power)
10697 ++ info->set_power(&client->dev, 0);
10698 + return ret;
10699 + }
10700 +
10701 +diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
10702 +index 47f280518fdb6..c82c1493e099d 100644
10703 +--- a/drivers/media/i2c/max9286.c
10704 ++++ b/drivers/media/i2c/max9286.c
10705 +@@ -135,13 +135,19 @@
10706 + #define MAX9286_SRC_PAD 4
10707 +
10708 + struct max9286_source {
10709 +- struct v4l2_async_subdev asd;
10710 + struct v4l2_subdev *sd;
10711 + struct fwnode_handle *fwnode;
10712 + };
10713 +
10714 +-#define asd_to_max9286_source(_asd) \
10715 +- container_of(_asd, struct max9286_source, asd)
10716 ++struct max9286_asd {
10717 ++ struct v4l2_async_subdev base;
10718 ++ struct max9286_source *source;
10719 ++};
10720 ++
10721 ++static inline struct max9286_asd *to_max9286_asd(struct v4l2_async_subdev *asd)
10722 ++{
10723 ++ return container_of(asd, struct max9286_asd, base);
10724 ++}
10725 +
10726 + struct max9286_priv {
10727 + struct i2c_client *client;
10728 +@@ -405,10 +411,11 @@ static int max9286_check_config_link(struct max9286_priv *priv,
10729 + * to 5 milliseconds.
10730 + */
10731 + for (i = 0; i < 10; i++) {
10732 +- ret = max9286_read(priv, 0x49) & 0xf0;
10733 ++ ret = max9286_read(priv, 0x49);
10734 + if (ret < 0)
10735 + return -EIO;
10736 +
10737 ++ ret &= 0xf0;
10738 + if (ret == conflink_mask)
10739 + break;
10740 +
10741 +@@ -480,7 +487,7 @@ static int max9286_notify_bound(struct v4l2_async_notifier *notifier,
10742 + struct v4l2_async_subdev *asd)
10743 + {
10744 + struct max9286_priv *priv = sd_to_max9286(notifier->sd);
10745 +- struct max9286_source *source = asd_to_max9286_source(asd);
10746 ++ struct max9286_source *source = to_max9286_asd(asd)->source;
10747 + unsigned int index = to_index(priv, source);
10748 + unsigned int src_pad;
10749 + int ret;
10750 +@@ -544,7 +551,7 @@ static void max9286_notify_unbind(struct v4l2_async_notifier *notifier,
10751 + struct v4l2_async_subdev *asd)
10752 + {
10753 + struct max9286_priv *priv = sd_to_max9286(notifier->sd);
10754 +- struct max9286_source *source = asd_to_max9286_source(asd);
10755 ++ struct max9286_source *source = to_max9286_asd(asd)->source;
10756 + unsigned int index = to_index(priv, source);
10757 +
10758 + source->sd = NULL;
10759 +@@ -569,23 +576,19 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
10760 +
10761 + for_each_source(priv, source) {
10762 + unsigned int i = to_index(priv, source);
10763 +-
10764 +- source->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
10765 +- source->asd.match.fwnode = source->fwnode;
10766 +-
10767 +- ret = v4l2_async_notifier_add_subdev(&priv->notifier,
10768 +- &source->asd);
10769 +- if (ret) {
10770 +- dev_err(dev, "Failed to add subdev for source %d", i);
10771 ++ struct v4l2_async_subdev *asd;
10772 ++
10773 ++ asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
10774 ++ source->fwnode,
10775 ++ sizeof(*asd));
10776 ++ if (IS_ERR(asd)) {
10777 ++ dev_err(dev, "Failed to add subdev for source %u: %ld",
10778 ++ i, PTR_ERR(asd));
10779 + v4l2_async_notifier_cleanup(&priv->notifier);
10780 +- return ret;
10781 ++ return PTR_ERR(asd);
10782 + }
10783 +
10784 +- /*
10785 +- * Balance the reference counting handled through
10786 +- * v4l2_async_notifier_cleanup()
10787 +- */
10788 +- fwnode_handle_get(source->fwnode);
10789 ++ to_max9286_asd(asd)->source = source;
10790 + }
10791 +
10792 + priv->notifier.ops = &max9286_notify_ops;
10793 +diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
10794 +index 2fe4a7ac05929..3a4268aa5f023 100644
10795 +--- a/drivers/media/i2c/ov5640.c
10796 ++++ b/drivers/media/i2c/ov5640.c
10797 +@@ -34,6 +34,8 @@
10798 + #define OV5640_REG_SYS_RESET02 0x3002
10799 + #define OV5640_REG_SYS_CLOCK_ENABLE02 0x3006
10800 + #define OV5640_REG_SYS_CTRL0 0x3008
10801 ++#define OV5640_REG_SYS_CTRL0_SW_PWDN 0x42
10802 ++#define OV5640_REG_SYS_CTRL0_SW_PWUP 0x02
10803 + #define OV5640_REG_CHIP_ID 0x300a
10804 + #define OV5640_REG_IO_MIPI_CTRL00 0x300e
10805 + #define OV5640_REG_PAD_OUTPUT_ENABLE01 0x3017
10806 +@@ -274,8 +276,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
10807 + /* YUV422 UYVY VGA@30fps */
10808 + static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
10809 + {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
10810 +- {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0},
10811 +- {0x3630, 0x36, 0, 0},
10812 ++ {0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0},
10813 + {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
10814 + {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
10815 + {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
10816 +@@ -751,7 +752,7 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
10817 + * +->| PLL Root Div | - reg 0x3037, bit 4
10818 + * +-+------------+
10819 + * | +---------+
10820 +- * +->| Bit Div | - reg 0x3035, bits 0-3
10821 ++ * +->| Bit Div | - reg 0x3034, bits 0-3
10822 + * +-+-------+
10823 + * | +-------------+
10824 + * +->| SCLK Div | - reg 0x3108, bits 0-1
10825 +@@ -1120,6 +1121,12 @@ static int ov5640_load_regs(struct ov5640_dev *sensor,
10826 + val = regs->val;
10827 + mask = regs->mask;
10828 +
10829 ++ /* remain in power down mode for DVP */
10830 ++ if (regs->reg_addr == OV5640_REG_SYS_CTRL0 &&
10831 ++ val == OV5640_REG_SYS_CTRL0_SW_PWUP &&
10832 ++ sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
10833 ++ continue;
10834 ++
10835 + if (mask)
10836 + ret = ov5640_mod_reg(sensor, reg_addr, mask, val);
10837 + else
10838 +@@ -1275,31 +1282,9 @@ static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on)
10839 + if (ret)
10840 + return ret;
10841 +
10842 +- /*
10843 +- * enable VSYNC/HREF/PCLK DVP control lines
10844 +- * & D[9:6] DVP data lines
10845 +- *
10846 +- * PAD OUTPUT ENABLE 01
10847 +- * - 6: VSYNC output enable
10848 +- * - 5: HREF output enable
10849 +- * - 4: PCLK output enable
10850 +- * - [3:0]: D[9:6] output enable
10851 +- */
10852 +- ret = ov5640_write_reg(sensor,
10853 +- OV5640_REG_PAD_OUTPUT_ENABLE01,
10854 +- on ? 0x7f : 0);
10855 +- if (ret)
10856 +- return ret;
10857 +-
10858 +- /*
10859 +- * enable D[5:0] DVP data lines
10860 +- *
10861 +- * PAD OUTPUT ENABLE 02
10862 +- * - [7:2]: D[5:0] output enable
10863 +- */
10864 +- return ov5640_write_reg(sensor,
10865 +- OV5640_REG_PAD_OUTPUT_ENABLE02,
10866 +- on ? 0xfc : 0);
10867 ++ return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ?
10868 ++ OV5640_REG_SYS_CTRL0_SW_PWUP :
10869 ++ OV5640_REG_SYS_CTRL0_SW_PWDN);
10870 + }
10871 +
10872 + static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on)
10873 +@@ -2001,6 +1986,95 @@ static void ov5640_set_power_off(struct ov5640_dev *sensor)
10874 + clk_disable_unprepare(sensor->xclk);
10875 + }
10876 +
10877 ++static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
10878 ++{
10879 ++ int ret;
10880 ++
10881 ++ if (!on) {
10882 ++ /* Reset MIPI bus settings to their default values. */
10883 ++ ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58);
10884 ++ ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x04);
10885 ++ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x00);
10886 ++ return 0;
10887 ++ }
10888 ++
10889 ++ /*
10890 ++ * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
10891 ++ *
10892 ++ * 0x300e = 0x40
10893 ++ * [7:5] = 010 : 2 data lanes mode (see FIXME note in
10894 ++ * "ov5640_set_stream_mipi()")
10895 ++ * [4] = 0 : Power up MIPI HS Tx
10896 ++ * [3] = 0 : Power up MIPI LS Rx
10897 ++ * [2] = 0 : MIPI interface disabled
10898 ++ */
10899 ++ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40);
10900 ++ if (ret)
10901 ++ return ret;
10902 ++
10903 ++ /*
10904 ++ * Gate clock and set LP11 in 'no packets mode' (idle)
10905 ++ *
10906 ++ * 0x4800 = 0x24
10907 ++ * [5] = 1 : Gate clock when 'no packets'
10908 ++ * [2] = 1 : MIPI bus in LP11 when 'no packets'
10909 ++ */
10910 ++ ret = ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x24);
10911 ++ if (ret)
10912 ++ return ret;
10913 ++
10914 ++ /*
10915 ++ * Set data lanes and clock in LP11 when 'sleeping'
10916 ++ *
10917 ++ * 0x3019 = 0x70
10918 ++ * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
10919 ++ * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
10920 ++ * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
10921 ++ */
10922 ++ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x70);
10923 ++ if (ret)
10924 ++ return ret;
10925 ++
10926 ++ /* Give lanes some time to coax into LP11 state. */
10927 ++ usleep_range(500, 1000);
10928 ++
10929 ++ return 0;
10930 ++}
10931 ++
10932 ++static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on)
10933 ++{
10934 ++ int ret;
10935 ++
10936 ++ if (!on) {
10937 ++ /* Reset settings to their default values. */
10938 ++ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x00);
10939 ++ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0x00);
10940 ++ return 0;
10941 ++ }
10942 ++
10943 ++ /*
10944 ++ * enable VSYNC/HREF/PCLK DVP control lines
10945 ++ * & D[9:6] DVP data lines
10946 ++ *
10947 ++ * PAD OUTPUT ENABLE 01
10948 ++ * - 6: VSYNC output enable
10949 ++ * - 5: HREF output enable
10950 ++ * - 4: PCLK output enable
10951 ++ * - [3:0]: D[9:6] output enable
10952 ++ */
10953 ++ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x7f);
10954 ++ if (ret)
10955 ++ return ret;
10956 ++
10957 ++ /*
10958 ++ * enable D[5:0] DVP data lines
10959 ++ *
10960 ++ * PAD OUTPUT ENABLE 02
10961 ++ * - [7:2]: D[5:0] output enable
10962 ++ */
10963 ++ return ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0xfc);
10964 ++}
10965 ++
10966 + static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
10967 + {
10968 + int ret = 0;
10969 +@@ -2013,67 +2087,17 @@ static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
10970 + ret = ov5640_restore_mode(sensor);
10971 + if (ret)
10972 + goto power_off;
10973 ++ }
10974 +
10975 +- /* We're done here for DVP bus, while CSI-2 needs setup. */
10976 +- if (sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
10977 +- return 0;
10978 +-
10979 +- /*
10980 +- * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
10981 +- *
10982 +- * 0x300e = 0x40
10983 +- * [7:5] = 010 : 2 data lanes mode (see FIXME note in
10984 +- * "ov5640_set_stream_mipi()")
10985 +- * [4] = 0 : Power up MIPI HS Tx
10986 +- * [3] = 0 : Power up MIPI LS Rx
10987 +- * [2] = 0 : MIPI interface disabled
10988 +- */
10989 +- ret = ov5640_write_reg(sensor,
10990 +- OV5640_REG_IO_MIPI_CTRL00, 0x40);
10991 +- if (ret)
10992 +- goto power_off;
10993 +-
10994 +- /*
10995 +- * Gate clock and set LP11 in 'no packets mode' (idle)
10996 +- *
10997 +- * 0x4800 = 0x24
10998 +- * [5] = 1 : Gate clock when 'no packets'
10999 +- * [2] = 1 : MIPI bus in LP11 when 'no packets'
11000 +- */
11001 +- ret = ov5640_write_reg(sensor,
11002 +- OV5640_REG_MIPI_CTRL00, 0x24);
11003 +- if (ret)
11004 +- goto power_off;
11005 +-
11006 +- /*
11007 +- * Set data lanes and clock in LP11 when 'sleeping'
11008 +- *
11009 +- * 0x3019 = 0x70
11010 +- * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
11011 +- * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
11012 +- * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
11013 +- */
11014 +- ret = ov5640_write_reg(sensor,
11015 +- OV5640_REG_PAD_OUTPUT00, 0x70);
11016 +- if (ret)
11017 +- goto power_off;
11018 +-
11019 +- /* Give lanes some time to coax into LP11 state. */
11020 +- usleep_range(500, 1000);
11021 +-
11022 +- } else {
11023 +- if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
11024 +- /* Reset MIPI bus settings to their default values. */
11025 +- ov5640_write_reg(sensor,
11026 +- OV5640_REG_IO_MIPI_CTRL00, 0x58);
11027 +- ov5640_write_reg(sensor,
11028 +- OV5640_REG_MIPI_CTRL00, 0x04);
11029 +- ov5640_write_reg(sensor,
11030 +- OV5640_REG_PAD_OUTPUT00, 0x00);
11031 +- }
11032 ++ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
11033 ++ ret = ov5640_set_power_mipi(sensor, on);
11034 ++ else
11035 ++ ret = ov5640_set_power_dvp(sensor, on);
11036 ++ if (ret)
11037 ++ goto power_off;
11038 +
11039 ++ if (!on)
11040 + ov5640_set_power_off(sensor);
11041 +- }
11042 +
11043 + return 0;
11044 +
11045 +diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
11046 +index dbbab75f135ec..cff99cf61ed4d 100644
11047 +--- a/drivers/media/i2c/tc358743.c
11048 ++++ b/drivers/media/i2c/tc358743.c
11049 +@@ -919,8 +919,8 @@ static const struct cec_adap_ops tc358743_cec_adap_ops = {
11050 + .adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable,
11051 + };
11052 +
11053 +-static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
11054 +- bool *handled)
11055 ++static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus,
11056 ++ bool *handled)
11057 + {
11058 + struct tc358743_state *state = to_state(sd);
11059 + unsigned int cec_rxint, cec_txint;
11060 +@@ -953,7 +953,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
11061 + cec_transmit_attempt_done(state->cec_adap,
11062 + CEC_TX_STATUS_ERROR);
11063 + }
11064 +- *handled = true;
11065 ++ if (handled)
11066 ++ *handled = true;
11067 + }
11068 + if ((intstatus & MASK_CEC_RINT) &&
11069 + (cec_rxint & MASK_CECRIEND)) {
11070 +@@ -968,7 +969,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
11071 + msg.msg[i] = v & 0xff;
11072 + }
11073 + cec_received_msg(state->cec_adap, &msg);
11074 +- *handled = true;
11075 ++ if (handled)
11076 ++ *handled = true;
11077 + }
11078 + i2c_wr16(sd, INTSTATUS,
11079 + intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
11080 +@@ -1432,7 +1434,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
11081 +
11082 + #ifdef CONFIG_VIDEO_TC358743_CEC
11083 + if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) {
11084 +- tc358743_cec_isr(sd, intstatus, handled);
11085 ++ tc358743_cec_handler(sd, intstatus, handled);
11086 + i2c_wr16(sd, INTSTATUS,
11087 + intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
11088 + intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT);
11089 +@@ -1461,7 +1463,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
11090 + static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
11091 + {
11092 + struct tc358743_state *state = dev_id;
11093 +- bool handled;
11094 ++ bool handled = false;
11095 +
11096 + tc358743_isr(&state->sd, 0, &handled);
11097 +
11098 +diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
11099 +index 9144f795fb933..b721720f9845a 100644
11100 +--- a/drivers/media/pci/bt8xx/bttv-driver.c
11101 ++++ b/drivers/media/pci/bt8xx/bttv-driver.c
11102 +@@ -4013,11 +4013,13 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
11103 + btv->id = dev->device;
11104 + if (pci_enable_device(dev)) {
11105 + pr_warn("%d: Can't enable device\n", btv->c.nr);
11106 +- return -EIO;
11107 ++ result = -EIO;
11108 ++ goto free_mem;
11109 + }
11110 + if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
11111 + pr_warn("%d: No suitable DMA available\n", btv->c.nr);
11112 +- return -EIO;
11113 ++ result = -EIO;
11114 ++ goto free_mem;
11115 + }
11116 + if (!request_mem_region(pci_resource_start(dev,0),
11117 + pci_resource_len(dev,0),
11118 +@@ -4025,7 +4027,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
11119 + pr_warn("%d: can't request iomem (0x%llx)\n",
11120 + btv->c.nr,
11121 + (unsigned long long)pci_resource_start(dev, 0));
11122 +- return -EBUSY;
11123 ++ result = -EBUSY;
11124 ++ goto free_mem;
11125 + }
11126 + pci_set_master(dev);
11127 + pci_set_command(dev);
11128 +@@ -4211,6 +4214,10 @@ fail0:
11129 + release_mem_region(pci_resource_start(btv->c.pci,0),
11130 + pci_resource_len(btv->c.pci,0));
11131 + pci_disable_device(btv->c.pci);
11132 ++
11133 ++free_mem:
11134 ++ bttvs[btv->c.nr] = NULL;
11135 ++ kfree(btv);
11136 + return result;
11137 + }
11138 +
11139 +diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
11140 +index 79e1afb710758..5cc4ef21f9d37 100644
11141 +--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
11142 ++++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
11143 +@@ -683,7 +683,8 @@ int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value)
11144 + {
11145 + int err;
11146 +
11147 +- audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", reg << 2, value);
11148 ++ audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n",
11149 ++ (reg << 2) & 0xffffffff, value);
11150 + err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR);
11151 + if (err < 0)
11152 + return err;
11153 +diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
11154 +index cde0d254ec1c4..a77c49b185115 100644
11155 +--- a/drivers/media/platform/exynos4-is/fimc-isp.c
11156 ++++ b/drivers/media/platform/exynos4-is/fimc-isp.c
11157 +@@ -305,8 +305,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
11158 +
11159 + if (on) {
11160 + ret = pm_runtime_get_sync(&is->pdev->dev);
11161 +- if (ret < 0)
11162 ++ if (ret < 0) {
11163 ++ pm_runtime_put(&is->pdev->dev);
11164 + return ret;
11165 ++ }
11166 + set_bit(IS_ST_PWR_ON, &is->state);
11167 +
11168 + ret = fimc_is_start_firmware(is);
11169 +diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
11170 +index 9c666f663ab43..fdd0d369b1925 100644
11171 +--- a/drivers/media/platform/exynos4-is/fimc-lite.c
11172 ++++ b/drivers/media/platform/exynos4-is/fimc-lite.c
11173 +@@ -471,7 +471,7 @@ static int fimc_lite_open(struct file *file)
11174 + set_bit(ST_FLITE_IN_USE, &fimc->state);
11175 + ret = pm_runtime_get_sync(&fimc->pdev->dev);
11176 + if (ret < 0)
11177 +- goto unlock;
11178 ++ goto err_pm;
11179 +
11180 + ret = v4l2_fh_open(file);
11181 + if (ret < 0)
11182 +diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
11183 +index 16dd660137a8d..9034f9cf88481 100644
11184 +--- a/drivers/media/platform/exynos4-is/media-dev.c
11185 ++++ b/drivers/media/platform/exynos4-is/media-dev.c
11186 +@@ -484,8 +484,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
11187 + return -ENXIO;
11188 +
11189 + ret = pm_runtime_get_sync(fmd->pmf);
11190 +- if (ret < 0)
11191 ++ if (ret < 0) {
11192 ++ pm_runtime_put(fmd->pmf);
11193 + return ret;
11194 ++ }
11195 +
11196 + fmd->num_sensors = 0;
11197 +
11198 +@@ -1268,11 +1270,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd)
11199 + if (IS_ERR(pctl->state_default))
11200 + return PTR_ERR(pctl->state_default);
11201 +
11202 ++ /* PINCTRL_STATE_IDLE is optional */
11203 + pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
11204 + PINCTRL_STATE_IDLE);
11205 +- if (IS_ERR(pctl->state_idle))
11206 +- return PTR_ERR(pctl->state_idle);
11207 +-
11208 + return 0;
11209 + }
11210 +
11211 +diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
11212 +index 540151bbf58f2..1aac167abb175 100644
11213 +--- a/drivers/media/platform/exynos4-is/mipi-csis.c
11214 ++++ b/drivers/media/platform/exynos4-is/mipi-csis.c
11215 +@@ -510,8 +510,10 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
11216 + if (enable) {
11217 + s5pcsis_clear_counters(state);
11218 + ret = pm_runtime_get_sync(&state->pdev->dev);
11219 +- if (ret && ret != 1)
11220 ++ if (ret && ret != 1) {
11221 ++ pm_runtime_put_noidle(&state->pdev->dev);
11222 + return ret;
11223 ++ }
11224 + }
11225 +
11226 + mutex_lock(&state->lock);
11227 +diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
11228 +index f96c8b3bf8618..976aa1f4829b8 100644
11229 +--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
11230 ++++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
11231 +@@ -94,7 +94,7 @@ static void mtk_mdp_reset_handler(void *priv)
11232 + void mtk_mdp_register_component(struct mtk_mdp_dev *mdp,
11233 + struct mtk_mdp_comp *comp)
11234 + {
11235 +- list_add(&mdp->comp_list, &comp->node);
11236 ++ list_add(&comp->node, &mdp->comp_list);
11237 + }
11238 +
11239 + void mtk_mdp_unregister_component(struct mtk_mdp_dev *mdp,
11240 +diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
11241 +index df78df59da456..08a5473b56104 100644
11242 +--- a/drivers/media/platform/mx2_emmaprp.c
11243 ++++ b/drivers/media/platform/mx2_emmaprp.c
11244 +@@ -852,8 +852,11 @@ static int emmaprp_probe(struct platform_device *pdev)
11245 + platform_set_drvdata(pdev, pcdev);
11246 +
11247 + irq = platform_get_irq(pdev, 0);
11248 +- if (irq < 0)
11249 +- return irq;
11250 ++ if (irq < 0) {
11251 ++ ret = irq;
11252 ++ goto rel_vdev;
11253 ++ }
11254 ++
11255 + ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0,
11256 + dev_name(&pdev->dev), pcdev);
11257 + if (ret)
11258 +diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
11259 +index b91e472ee764e..de066757726de 100644
11260 +--- a/drivers/media/platform/omap3isp/isp.c
11261 ++++ b/drivers/media/platform/omap3isp/isp.c
11262 +@@ -2328,8 +2328,10 @@ static int isp_probe(struct platform_device *pdev)
11263 + mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
11264 + isp->mmio_base[map_idx] =
11265 + devm_ioremap_resource(isp->dev, mem);
11266 +- if (IS_ERR(isp->mmio_base[map_idx]))
11267 +- return PTR_ERR(isp->mmio_base[map_idx]);
11268 ++ if (IS_ERR(isp->mmio_base[map_idx])) {
11269 ++ ret = PTR_ERR(isp->mmio_base[map_idx]);
11270 ++ goto error;
11271 ++ }
11272 + }
11273 +
11274 + ret = isp_get_clocks(isp);
11275 +diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
11276 +index 03ef9c5f4774d..85b24054f35e6 100644
11277 +--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
11278 ++++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
11279 +@@ -176,8 +176,10 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on)
11280 + int ret;
11281 +
11282 + ret = pm_runtime_get_sync(dev);
11283 +- if (ret < 0)
11284 ++ if (ret < 0) {
11285 ++ pm_runtime_put_sync(dev);
11286 + return ret;
11287 ++ }
11288 +
11289 + ret = csiphy_set_clock_rates(csiphy);
11290 + if (ret < 0) {
11291 +diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
11292 +index 203c6538044fb..321ad77cb6cf4 100644
11293 +--- a/drivers/media/platform/qcom/venus/core.c
11294 ++++ b/drivers/media/platform/qcom/venus/core.c
11295 +@@ -224,13 +224,15 @@ static int venus_probe(struct platform_device *pdev)
11296 +
11297 + ret = dma_set_mask_and_coherent(dev, core->res->dma_mask);
11298 + if (ret)
11299 +- return ret;
11300 ++ goto err_core_put;
11301 +
11302 + if (!dev->dma_parms) {
11303 + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
11304 + GFP_KERNEL);
11305 +- if (!dev->dma_parms)
11306 +- return -ENOMEM;
11307 ++ if (!dev->dma_parms) {
11308 ++ ret = -ENOMEM;
11309 ++ goto err_core_put;
11310 ++ }
11311 + }
11312 + dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
11313 +
11314 +@@ -242,11 +244,11 @@ static int venus_probe(struct platform_device *pdev)
11315 + IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
11316 + "venus", core);
11317 + if (ret)
11318 +- return ret;
11319 ++ goto err_core_put;
11320 +
11321 + ret = hfi_create(core, &venus_core_ops);
11322 + if (ret)
11323 +- return ret;
11324 ++ goto err_core_put;
11325 +
11326 + pm_runtime_enable(dev);
11327 +
11328 +@@ -287,8 +289,10 @@ static int venus_probe(struct platform_device *pdev)
11329 + goto err_core_deinit;
11330 +
11331 + ret = pm_runtime_put_sync(dev);
11332 +- if (ret)
11333 ++ if (ret) {
11334 ++ pm_runtime_get_noresume(dev);
11335 + goto err_dev_unregister;
11336 ++ }
11337 +
11338 + return 0;
11339 +
11340 +@@ -299,9 +303,13 @@ err_core_deinit:
11341 + err_venus_shutdown:
11342 + venus_shutdown(core);
11343 + err_runtime_disable:
11344 ++ pm_runtime_put_noidle(dev);
11345 + pm_runtime_set_suspended(dev);
11346 + pm_runtime_disable(dev);
11347 + hfi_destroy(core);
11348 ++err_core_put:
11349 ++ if (core->pm_ops->core_put)
11350 ++ core->pm_ops->core_put(dev);
11351 + return ret;
11352 + }
11353 +
11354 +diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
11355 +index 7c4c483d54389..76be14efbfb09 100644
11356 +--- a/drivers/media/platform/qcom/venus/vdec.c
11357 ++++ b/drivers/media/platform/qcom/venus/vdec.c
11358 +@@ -1088,8 +1088,6 @@ static int vdec_stop_capture(struct venus_inst *inst)
11359 + break;
11360 + }
11361 +
11362 +- INIT_LIST_HEAD(&inst->registeredbufs);
11363 +-
11364 + return ret;
11365 + }
11366 +
11367 +@@ -1189,6 +1187,14 @@ static int vdec_buf_init(struct vb2_buffer *vb)
11368 + static void vdec_buf_cleanup(struct vb2_buffer *vb)
11369 + {
11370 + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
11371 ++ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
11372 ++ struct venus_buffer *buf = to_venus_buffer(vbuf);
11373 ++
11374 ++ mutex_lock(&inst->lock);
11375 ++ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
11376 ++ if (!list_empty(&inst->registeredbufs))
11377 ++ list_del_init(&buf->reg_list);
11378 ++ mutex_unlock(&inst->lock);
11379 +
11380 + inst->buf_count--;
11381 + if (!inst->buf_count)
11382 +diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
11383 +index 5c6b00737fe75..05c712e00a2a7 100644
11384 +--- a/drivers/media/platform/rcar-fcp.c
11385 ++++ b/drivers/media/platform/rcar-fcp.c
11386 +@@ -103,8 +103,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp)
11387 + return 0;
11388 +
11389 + ret = pm_runtime_get_sync(fcp->dev);
11390 +- if (ret < 0)
11391 ++ if (ret < 0) {
11392 ++ pm_runtime_put_noidle(fcp->dev);
11393 + return ret;
11394 ++ }
11395 +
11396 + return 0;
11397 + }
11398 +diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
11399 +index c6cc4f473a077..a16c492b31434 100644
11400 +--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
11401 ++++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
11402 +@@ -362,7 +362,6 @@ struct rcar_csi2 {
11403 + struct media_pad pads[NR_OF_RCAR_CSI2_PAD];
11404 +
11405 + struct v4l2_async_notifier notifier;
11406 +- struct v4l2_async_subdev asd;
11407 + struct v4l2_subdev *remote;
11408 +
11409 + struct v4l2_mbus_framefmt mf;
11410 +@@ -811,6 +810,8 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
11411 +
11412 + static int rcsi2_parse_dt(struct rcar_csi2 *priv)
11413 + {
11414 ++ struct v4l2_async_subdev *asd;
11415 ++ struct fwnode_handle *fwnode;
11416 + struct device_node *ep;
11417 + struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
11418 + int ret;
11419 +@@ -834,24 +835,19 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
11420 + return ret;
11421 + }
11422 +
11423 +- priv->asd.match.fwnode =
11424 +- fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
11425 +- priv->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
11426 +-
11427 ++ fwnode = fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
11428 + of_node_put(ep);
11429 +
11430 +- v4l2_async_notifier_init(&priv->notifier);
11431 +-
11432 +- ret = v4l2_async_notifier_add_subdev(&priv->notifier, &priv->asd);
11433 +- if (ret) {
11434 +- fwnode_handle_put(priv->asd.match.fwnode);
11435 +- return ret;
11436 +- }
11437 ++ dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode));
11438 +
11439 ++ v4l2_async_notifier_init(&priv->notifier);
11440 + priv->notifier.ops = &rcar_csi2_notify_ops;
11441 +
11442 +- dev_dbg(priv->dev, "Found '%pOF'\n",
11443 +- to_of_node(priv->asd.match.fwnode));
11444 ++ asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode,
11445 ++ sizeof(*asd));
11446 ++ fwnode_handle_put(fwnode);
11447 ++ if (IS_ERR(asd))
11448 ++ return PTR_ERR(asd);
11449 +
11450 + ret = v4l2_async_subdev_notifier_register(&priv->subdev,
11451 + &priv->notifier);
11452 +diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
11453 +index a5dbb90c5210b..260604dc5791b 100644
11454 +--- a/drivers/media/platform/rcar-vin/rcar-dma.c
11455 ++++ b/drivers/media/platform/rcar-vin/rcar-dma.c
11456 +@@ -1409,8 +1409,10 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
11457 + int ret;
11458 +
11459 + ret = pm_runtime_get_sync(vin->dev);
11460 +- if (ret < 0)
11461 ++ if (ret < 0) {
11462 ++ pm_runtime_put_noidle(vin->dev);
11463 + return ret;
11464 ++ }
11465 +
11466 + /* Make register writes take effect immediately. */
11467 + vnmc = rvin_read(vin, VNMC_REG);
11468 +diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
11469 +index 3d2451ac347d7..f318cd4b8086f 100644
11470 +--- a/drivers/media/platform/rcar_drif.c
11471 ++++ b/drivers/media/platform/rcar_drif.c
11472 +@@ -185,7 +185,6 @@ struct rcar_drif_frame_buf {
11473 + /* OF graph endpoint's V4L2 async data */
11474 + struct rcar_drif_graph_ep {
11475 + struct v4l2_subdev *subdev; /* Async matched subdev */
11476 +- struct v4l2_async_subdev asd; /* Async sub-device descriptor */
11477 + };
11478 +
11479 + /* DMA buffer */
11480 +@@ -1109,12 +1108,6 @@ static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
11481 + struct rcar_drif_sdr *sdr =
11482 + container_of(notifier, struct rcar_drif_sdr, notifier);
11483 +
11484 +- if (sdr->ep.asd.match.fwnode !=
11485 +- of_fwnode_handle(subdev->dev->of_node)) {
11486 +- rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name);
11487 +- return -EINVAL;
11488 +- }
11489 +-
11490 + v4l2_set_subdev_hostdata(subdev, sdr);
11491 + sdr->ep.subdev = subdev;
11492 + rdrif_dbg(sdr, "bound asd %s\n", subdev->name);
11493 +@@ -1218,7 +1211,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
11494 + {
11495 + struct v4l2_async_notifier *notifier = &sdr->notifier;
11496 + struct fwnode_handle *fwnode, *ep;
11497 +- int ret;
11498 ++ struct v4l2_async_subdev *asd;
11499 +
11500 + v4l2_async_notifier_init(notifier);
11501 +
11502 +@@ -1227,26 +1220,21 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
11503 + if (!ep)
11504 + return 0;
11505 +
11506 ++ /* Get the endpoint properties */
11507 ++ rcar_drif_get_ep_properties(sdr, ep);
11508 ++
11509 + fwnode = fwnode_graph_get_remote_port_parent(ep);
11510 ++ fwnode_handle_put(ep);
11511 + if (!fwnode) {
11512 + dev_warn(sdr->dev, "bad remote port parent\n");
11513 +- fwnode_handle_put(ep);
11514 + return -EINVAL;
11515 + }
11516 +
11517 +- sdr->ep.asd.match.fwnode = fwnode;
11518 +- sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
11519 +- ret = v4l2_async_notifier_add_subdev(notifier, &sdr->ep.asd);
11520 +- if (ret) {
11521 +- fwnode_handle_put(fwnode);
11522 +- return ret;
11523 +- }
11524 +-
11525 +- /* Get the endpoint properties */
11526 +- rcar_drif_get_ep_properties(sdr, ep);
11527 +-
11528 ++ asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
11529 ++ sizeof(*asd));
11530 + fwnode_handle_put(fwnode);
11531 +- fwnode_handle_put(ep);
11532 ++ if (IS_ERR(asd))
11533 ++ return PTR_ERR(asd);
11534 +
11535 + return 0;
11536 + }
11537 +diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
11538 +index 36b821ccc1dba..bf9a75b75083b 100644
11539 +--- a/drivers/media/platform/rockchip/rga/rga-buf.c
11540 ++++ b/drivers/media/platform/rockchip/rga/rga-buf.c
11541 +@@ -81,6 +81,7 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
11542 +
11543 + ret = pm_runtime_get_sync(rga->dev);
11544 + if (ret < 0) {
11545 ++ pm_runtime_put_noidle(rga->dev);
11546 + rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
11547 + return ret;
11548 + }
11549 +diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
11550 +index 92f43c0cbc0c0..422fd549e9c87 100644
11551 +--- a/drivers/media/platform/s3c-camif/camif-core.c
11552 ++++ b/drivers/media/platform/s3c-camif/camif-core.c
11553 +@@ -464,7 +464,7 @@ static int s3c_camif_probe(struct platform_device *pdev)
11554 +
11555 + ret = camif_media_dev_init(camif);
11556 + if (ret < 0)
11557 +- goto err_alloc;
11558 ++ goto err_pm;
11559 +
11560 + ret = camif_register_sensor(camif);
11561 + if (ret < 0)
11562 +@@ -498,10 +498,9 @@ err_sens:
11563 + media_device_unregister(&camif->media_dev);
11564 + media_device_cleanup(&camif->media_dev);
11565 + camif_unregister_media_entities(camif);
11566 +-err_alloc:
11567 ++err_pm:
11568 + pm_runtime_put(dev);
11569 + pm_runtime_disable(dev);
11570 +-err_pm:
11571 + camif_clk_put(camif);
11572 + err_clk:
11573 + s3c_camif_unregister_subdev(camif);
11574 +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
11575 +index 7d52431c2c837..62d2320a72186 100644
11576 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
11577 ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
11578 +@@ -79,8 +79,10 @@ int s5p_mfc_power_on(void)
11579 + int i, ret = 0;
11580 +
11581 + ret = pm_runtime_get_sync(pm->device);
11582 +- if (ret < 0)
11583 ++ if (ret < 0) {
11584 ++ pm_runtime_put_noidle(pm->device);
11585 + return ret;
11586 ++ }
11587 +
11588 + /* clock control */
11589 + for (i = 0; i < pm->num_clocks; i++) {
11590 +diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
11591 +index af2d5eb782cee..e1d150584bdc2 100644
11592 +--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
11593 ++++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
11594 +@@ -1371,7 +1371,7 @@ static int bdisp_probe(struct platform_device *pdev)
11595 + ret = pm_runtime_get_sync(dev);
11596 + if (ret < 0) {
11597 + dev_err(dev, "failed to set PM\n");
11598 +- goto err_dbg;
11599 ++ goto err_pm;
11600 + }
11601 +
11602 + /* Filters */
11603 +@@ -1399,7 +1399,6 @@ err_filter:
11604 + bdisp_hw_free_filters(bdisp->dev);
11605 + err_pm:
11606 + pm_runtime_put(dev);
11607 +-err_dbg:
11608 + bdisp_debugfs_remove(bdisp);
11609 + err_v4l2:
11610 + v4l2_device_unregister(&bdisp->v4l2_dev);
11611 +diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
11612 +index 2503224eeee51..c691b3d81549d 100644
11613 +--- a/drivers/media/platform/sti/delta/delta-v4l2.c
11614 ++++ b/drivers/media/platform/sti/delta/delta-v4l2.c
11615 +@@ -954,8 +954,10 @@ static void delta_run_work(struct work_struct *work)
11616 + /* enable the hardware */
11617 + if (!dec->pm) {
11618 + ret = delta_get_sync(ctx);
11619 +- if (ret)
11620 ++ if (ret) {
11621 ++ delta_put_autosuspend(ctx);
11622 + goto err;
11623 ++ }
11624 + }
11625 +
11626 + /* decode this access unit */
11627 +diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
11628 +index 401aaafa17109..43f279e2a6a38 100644
11629 +--- a/drivers/media/platform/sti/hva/hva-hw.c
11630 ++++ b/drivers/media/platform/sti/hva/hva-hw.c
11631 +@@ -272,6 +272,7 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
11632 +
11633 + if (pm_runtime_get_sync(dev) < 0) {
11634 + dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
11635 ++ pm_runtime_put_noidle(dev);
11636 + mutex_unlock(&hva->protect_mutex);
11637 + return -EFAULT;
11638 + }
11639 +@@ -388,7 +389,7 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
11640 + ret = pm_runtime_get_sync(dev);
11641 + if (ret < 0) {
11642 + dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
11643 +- goto err_clk;
11644 ++ goto err_pm;
11645 + }
11646 +
11647 + /* check IP hardware version */
11648 +@@ -553,6 +554,7 @@ void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
11649 +
11650 + if (pm_runtime_get_sync(dev) < 0) {
11651 + seq_puts(s, "Cannot wake up IP\n");
11652 ++ pm_runtime_put_noidle(dev);
11653 + mutex_unlock(&hva->protect_mutex);
11654 + return;
11655 + }
11656 +diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
11657 +index b8931490b83b7..fd1c41cba52fc 100644
11658 +--- a/drivers/media/platform/stm32/stm32-dcmi.c
11659 ++++ b/drivers/media/platform/stm32/stm32-dcmi.c
11660 +@@ -733,7 +733,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
11661 + if (ret < 0) {
11662 + dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
11663 + __func__, ret);
11664 +- goto err_release_buffers;
11665 ++ goto err_pm_put;
11666 + }
11667 +
11668 + ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline);
11669 +@@ -837,8 +837,6 @@ err_media_pipeline_stop:
11670 +
11671 + err_pm_put:
11672 + pm_runtime_put(dcmi->dev);
11673 +-
11674 +-err_release_buffers:
11675 + spin_lock_irq(&dcmi->irqlock);
11676 + /*
11677 + * Return all buffers to vb2 in QUEUED state.
11678 +diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
11679 +index 346f8212791cf..779dd74b82d01 100644
11680 +--- a/drivers/media/platform/ti-vpe/vpe.c
11681 ++++ b/drivers/media/platform/ti-vpe/vpe.c
11682 +@@ -2475,6 +2475,8 @@ static int vpe_runtime_get(struct platform_device *pdev)
11683 +
11684 + r = pm_runtime_get_sync(&pdev->dev);
11685 + WARN_ON(r < 0);
11686 ++ if (r)
11687 ++ pm_runtime_put_noidle(&pdev->dev);
11688 + return r < 0 ? r : 0;
11689 + }
11690 +
11691 +diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
11692 +index c650e45bb0ad1..dc62533cf32ce 100644
11693 +--- a/drivers/media/platform/vsp1/vsp1_drv.c
11694 ++++ b/drivers/media/platform/vsp1/vsp1_drv.c
11695 +@@ -562,7 +562,12 @@ int vsp1_device_get(struct vsp1_device *vsp1)
11696 + int ret;
11697 +
11698 + ret = pm_runtime_get_sync(vsp1->dev);
11699 +- return ret < 0 ? ret : 0;
11700 ++ if (ret < 0) {
11701 ++ pm_runtime_put_noidle(vsp1->dev);
11702 ++ return ret;
11703 ++ }
11704 ++
11705 ++ return 0;
11706 + }
11707 +
11708 + /*
11709 +@@ -845,12 +850,12 @@ static int vsp1_probe(struct platform_device *pdev)
11710 + /* Configure device parameters based on the version register. */
11711 + pm_runtime_enable(&pdev->dev);
11712 +
11713 +- ret = pm_runtime_get_sync(&pdev->dev);
11714 ++ ret = vsp1_device_get(vsp1);
11715 + if (ret < 0)
11716 + goto done;
11717 +
11718 + vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
11719 +- pm_runtime_put_sync(&pdev->dev);
11720 ++ vsp1_device_put(vsp1);
11721 +
11722 + for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
11723 + if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
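The s5p-mfc, sti delta/hva, ti-vpe and vsp1 hunks above all enforce the same rule: pm_runtime_get_sync() takes a usage-count reference even when it fails, so every error path must drop that reference — either with pm_runtime_put_noidle(), or by branching to a label that already calls pm_runtime_put() (the bdisp, hva and stm32-dcmi goto relabels). A minimal sketch of the wrapper shape these drivers converge on, condensed from the vsp1_device_get() hunk rather than a drop-in for any one driver:

    #include <linux/pm_runtime.h>

    /* Resume the device, keeping the usage count balanced on failure. */
    static int example_runtime_get(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    /* the failed get still took a reference; drop it */
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            return 0;
    }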
11724 +diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
11725 +index 9cdef17b4793f..c12dda73cdd53 100644
11726 +--- a/drivers/media/rc/ati_remote.c
11727 ++++ b/drivers/media/rc/ati_remote.c
11728 +@@ -835,6 +835,10 @@ static int ati_remote_probe(struct usb_interface *interface,
11729 + err("%s: endpoint_in message size==0? \n", __func__);
11730 + return -ENODEV;
11731 + }
11732 ++ if (!usb_endpoint_is_int_out(endpoint_out)) {
11733 ++ err("%s: Unexpected endpoint_out\n", __func__);
11734 ++ return -ENODEV;
11735 ++ }
11736 +
11737 + ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL);
11738 + rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
11739 +diff --git a/drivers/media/test-drivers/vivid/vivid-meta-out.c b/drivers/media/test-drivers/vivid/vivid-meta-out.c
11740 +index ff8a039aba72e..95835b52b58fc 100644
11741 +--- a/drivers/media/test-drivers/vivid/vivid-meta-out.c
11742 ++++ b/drivers/media/test-drivers/vivid/vivid-meta-out.c
11743 +@@ -164,10 +164,11 @@ void vivid_meta_out_process(struct vivid_dev *dev,
11744 + {
11745 + struct vivid_meta_out_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
11746 +
11747 +- tpg_s_brightness(&dev->tpg, meta->brightness);
11748 +- tpg_s_contrast(&dev->tpg, meta->contrast);
11749 +- tpg_s_saturation(&dev->tpg, meta->saturation);
11750 +- tpg_s_hue(&dev->tpg, meta->hue);
11751 ++ v4l2_ctrl_s_ctrl(dev->brightness, meta->brightness);
11752 ++ v4l2_ctrl_s_ctrl(dev->contrast, meta->contrast);
11753 ++ v4l2_ctrl_s_ctrl(dev->saturation, meta->saturation);
11754 ++ v4l2_ctrl_s_ctrl(dev->hue, meta->hue);
11755 ++
11756 + dprintk(dev, 2, " %s brightness %u contrast %u saturation %u hue %d\n",
11757 + __func__, meta->brightness, meta->contrast,
11758 + meta->saturation, meta->hue);
11759 +diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
11760 +index b6e70fada3fb2..8fb186b25d6af 100644
11761 +--- a/drivers/media/tuners/tuner-simple.c
11762 ++++ b/drivers/media/tuners/tuner-simple.c
11763 +@@ -500,7 +500,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
11764 + case TUNER_TENA_9533_DI:
11765 + case TUNER_YMEC_TVF_5533MF:
11766 + tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n");
11767 +- return 0;
11768 ++ return -EINVAL;
11769 + case TUNER_PHILIPS_FM1216ME_MK3:
11770 + case TUNER_PHILIPS_FM1236_MK3:
11771 + case TUNER_PHILIPS_FMD1216ME_MK3:
11772 +@@ -702,7 +702,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
11773 + TUNER_RATIO_SELECT_50; /* 50 kHz step */
11774 +
11775 + /* Bandswitch byte */
11776 +- simple_radio_bandswitch(fe, &buffer[0]);
11777 ++ if (simple_radio_bandswitch(fe, &buffer[0]))
11778 ++ return 0;
11779 +
11780 + /* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps
11781 + freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) =
11782 +diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
11783 +index e399b9fad7574..a30a8a731eda8 100644
11784 +--- a/drivers/media/usb/uvc/uvc_ctrl.c
11785 ++++ b/drivers/media/usb/uvc/uvc_ctrl.c
11786 +@@ -773,12 +773,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
11787 + offset &= 7;
11788 + mask = ((1LL << bits) - 1) << offset;
11789 +
11790 +- for (; bits > 0; data++) {
11791 ++ while (1) {
11792 + u8 byte = *data & mask;
11793 + value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
11794 + bits -= 8 - (offset > 0 ? offset : 0);
11795 ++ if (bits <= 0)
11796 ++ break;
11797 ++
11798 + offset -= 8;
11799 + mask = (1 << bits) - 1;
11800 ++ data++;
11801 + }
11802 +
11803 + /* Sign-extend the value if needed. */
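The uvc_ctrl.c rewrite turns a for-loop that advanced `data` on every iteration into one that advances the pointer only after confirming more bits remain, so a mapping that ends in the final byte of the control buffer no longer reads one byte past it. A simplified userspace model of the corrected loop shape — not the driver's exact arithmetic, which also handles negative offsets and sign extension:

    #include <stdint.h>
    #include <stdio.h>

    /* Extract 'bits' bits starting at bit 'offset' from a little-endian
     * byte buffer. The pointer advances only after the exit test, so the
     * byte following the field is never touched. */
    static uint32_t get_le_bits(const uint8_t *buf, unsigned int offset,
                                unsigned int bits)
    {
            uint32_t value = 0;
            unsigned int shift = 0;

            buf += offset / 8;
            offset &= 7;

            while (1) {
                    value |= (uint32_t)(buf[0] >> offset) << shift;
                    shift += 8 - offset;
                    if (shift >= bits)
                            break;          /* done: don't read the next byte */
                    offset = 0;
                    buf++;                  /* advance only when bits remain */
            }

            return bits < 32 ? value & ((1U << bits) - 1) : value;
    }

    int main(void)
    {
            const uint8_t buf[2] = { 0xa8, 0x01 };

            printf("0x%x\n", get_le_bits(buf, 3, 7));       /* 0x35 */
            return 0;
    }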
11804 +diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
11805 +index b4499cddeffe5..ca3a9c2eec271 100644
11806 +--- a/drivers/media/usb/uvc/uvc_entity.c
11807 ++++ b/drivers/media/usb/uvc/uvc_entity.c
11808 +@@ -73,10 +73,45 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain,
11809 + int ret;
11810 +
11811 + if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
11812 ++ u32 function;
11813 ++
11814 + v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
11815 + strscpy(entity->subdev.name, entity->name,
11816 + sizeof(entity->subdev.name));
11817 +
11818 ++ switch (UVC_ENTITY_TYPE(entity)) {
11819 ++ case UVC_VC_SELECTOR_UNIT:
11820 ++ function = MEDIA_ENT_F_VID_MUX;
11821 ++ break;
11822 ++ case UVC_VC_PROCESSING_UNIT:
11823 ++ case UVC_VC_EXTENSION_UNIT:
11824 ++ /* For lack of a better option. */
11825 ++ function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
11826 ++ break;
11827 ++ case UVC_COMPOSITE_CONNECTOR:
11828 ++ case UVC_COMPONENT_CONNECTOR:
11829 ++ function = MEDIA_ENT_F_CONN_COMPOSITE;
11830 ++ break;
11831 ++ case UVC_SVIDEO_CONNECTOR:
11832 ++ function = MEDIA_ENT_F_CONN_SVIDEO;
11833 ++ break;
11834 ++ case UVC_ITT_CAMERA:
11835 ++ function = MEDIA_ENT_F_CAM_SENSOR;
11836 ++ break;
11837 ++ case UVC_TT_VENDOR_SPECIFIC:
11838 ++ case UVC_ITT_VENDOR_SPECIFIC:
11839 ++ case UVC_ITT_MEDIA_TRANSPORT_INPUT:
11840 ++ case UVC_OTT_VENDOR_SPECIFIC:
11841 ++ case UVC_OTT_DISPLAY:
11842 ++ case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
11843 ++ case UVC_EXTERNAL_VENDOR_SPECIFIC:
11844 ++ default:
11845 ++ function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
11846 ++ break;
11847 ++ }
11848 ++
11849 ++ entity->subdev.entity.function = function;
11850 ++
11851 + ret = media_entity_pads_init(&entity->subdev.entity,
11852 + entity->num_pads, entity->pads);
11853 +
11854 +diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
11855 +index 0335e69b70abe..5e6f3153b5ff8 100644
11856 +--- a/drivers/media/usb/uvc/uvc_v4l2.c
11857 ++++ b/drivers/media/usb/uvc/uvc_v4l2.c
11858 +@@ -247,11 +247,41 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
11859 + if (ret < 0)
11860 + goto done;
11861 +
11862 ++ /* After the probe, update fmt with the values returned from
11863 ++ * negotiation with the device.
11864 ++ */
11865 ++ for (i = 0; i < stream->nformats; ++i) {
11866 ++ if (probe->bFormatIndex == stream->format[i].index) {
11867 ++ format = &stream->format[i];
11868 ++ break;
11869 ++ }
11870 ++ }
11871 ++
11872 ++ if (i == stream->nformats) {
11873 ++ uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n",
11874 ++ probe->bFormatIndex);
11875 ++ return -EINVAL;
11876 ++ }
11877 ++
11878 ++ for (i = 0; i < format->nframes; ++i) {
11879 ++ if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
11880 ++ frame = &format->frame[i];
11881 ++ break;
11882 ++ }
11883 ++ }
11884 ++
11885 ++ if (i == format->nframes) {
11886 ++ uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n",
11887 ++ probe->bFrameIndex);
11888 ++ return -EINVAL;
11889 ++ }
11890 ++
11891 + fmt->fmt.pix.width = frame->wWidth;
11892 + fmt->fmt.pix.height = frame->wHeight;
11893 + fmt->fmt.pix.field = V4L2_FIELD_NONE;
11894 + fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
11895 + fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize;
11896 ++ fmt->fmt.pix.pixelformat = format->fcc;
11897 + fmt->fmt.pix.colorspace = format->colorspace;
11898 +
11899 + if (uvc_format != NULL)
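The uvc_v4l2.c hunk stops trusting the bFormatIndex/bFrameIndex values the device echoes back during probe negotiation: both are looked up in the driver's own tables and rejected with -EINVAL when unknown, and fmt also regains the negotiated pixelformat. The validate-before-dereference lookup, reduced to a generic sketch (struct fmt and its fields are illustrative, not the driver's types):

    #include <stddef.h>

    struct fmt {
            unsigned int index;     /* index the device reports */
            /* ... per-format data ... */
    };

    /* Return the table entry matching a device-reported index, or NULL so
     * the caller can fail with -EINVAL instead of using a pointer that was
     * never initialised. */
    static const struct fmt *find_fmt(const struct fmt *tbl, size_t n,
                                      unsigned int index)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (tbl[i].index == index)
                            return &tbl[i];

            return NULL;
    }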
11900 +diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
11901 +index 60e8633b11758..ddff687c79eaa 100644
11902 +--- a/drivers/memory/brcmstb_dpfe.c
11903 ++++ b/drivers/memory/brcmstb_dpfe.c
11904 +@@ -188,11 +188,6 @@ struct brcmstb_dpfe_priv {
11905 + struct mutex lock;
11906 + };
11907 +
11908 +-static const char * const error_text[] = {
11909 +- "Success", "Header code incorrect", "Unknown command or argument",
11910 +- "Incorrect checksum", "Malformed command", "Timed out",
11911 +-};
11912 +-
11913 + /*
11914 + * Forward declaration of our sysfs attribute functions, so we can declare the
11915 + * attribute data structures early.
11916 +@@ -307,6 +302,20 @@ static const struct dpfe_api dpfe_api_v3 = {
11917 + },
11918 + };
11919 +
11920 ++static const char *get_error_text(unsigned int i)
11921 ++{
11922 ++ static const char * const error_text[] = {
11923 ++ "Success", "Header code incorrect",
11924 ++ "Unknown command or argument", "Incorrect checksum",
11925 ++ "Malformed command", "Timed out", "Unknown error",
11926 ++ };
11927 ++
11928 ++ if (unlikely(i >= ARRAY_SIZE(error_text)))
11929 ++ i = ARRAY_SIZE(error_text) - 1;
11930 ++
11931 ++ return error_text[i];
11932 ++}
11933 ++
11934 + static bool is_dcpu_enabled(struct brcmstb_dpfe_priv *priv)
11935 + {
11936 + u32 val;
11937 +@@ -445,7 +454,7 @@ static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd,
11938 + }
11939 + if (resp != 0) {
11940 + mutex_unlock(&priv->lock);
11941 +- return -ETIMEDOUT;
11942 ++ return -ffs(DCPU_RET_ERR_TIMEDOUT);
11943 + }
11944 +
11945 + /* Compute checksum over the message */
11946 +@@ -691,7 +700,7 @@ static ssize_t generic_show(unsigned int command, u32 response[],
11947 +
11948 + ret = __send_command(priv, command, response);
11949 + if (ret < 0)
11950 +- return sprintf(buf, "ERROR: %s\n", error_text[-ret]);
11951 ++ return sprintf(buf, "ERROR: %s\n", get_error_text(-ret));
11952 +
11953 + return 0;
11954 + }
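Two things change in brcmstb_dpfe.c: __send_command() now encodes the DCPU status as -ffs(DCPU_RET_ERR_TIMEDOUT) rather than a bare -ETIMEDOUT, and the string table gains an "Unknown error" sentinel behind a helper that clamps out-of-range codes instead of indexing past the array. The clamping idiom as a standalone example:

    #include <stdio.h>

    #define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

    /* Map a status code to text; anything out of range falls back to the
     * final "Unknown error" entry instead of reading past the table,
     * mirroring get_error_text() above. */
    static const char *err_text(unsigned int i)
    {
            static const char *const text[] = {
                    "Success", "Header code incorrect",
                    "Unknown command or argument", "Incorrect checksum",
                    "Malformed command", "Timed out", "Unknown error",
            };

            if (i >= ARRAY_SIZE(text))
                    i = ARRAY_SIZE(text) - 1;

            return text[i];
    }

    int main(void)
    {
            printf("%s\n", err_text(99));   /* "Unknown error" */
            return 0;
    }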
11955 +diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
11956 +index 0b0ed72016da8..0309bd5a18008 100644
11957 +--- a/drivers/memory/fsl-corenet-cf.c
11958 ++++ b/drivers/memory/fsl-corenet-cf.c
11959 +@@ -211,10 +211,8 @@ static int ccf_probe(struct platform_device *pdev)
11960 + dev_set_drvdata(&pdev->dev, ccf);
11961 +
11962 + irq = platform_get_irq(pdev, 0);
11963 +- if (!irq) {
11964 +- dev_err(&pdev->dev, "%s: no irq\n", __func__);
11965 +- return -ENXIO;
11966 +- }
11967 ++ if (irq < 0)
11968 ++ return irq;
11969 +
11970 + ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf);
11971 + if (ret) {
11972 +diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
11973 +index ca0097664b125..057666e1b6cda 100644
11974 +--- a/drivers/memory/omap-gpmc.c
11975 ++++ b/drivers/memory/omap-gpmc.c
11976 +@@ -943,7 +943,7 @@ static int gpmc_cs_remap(int cs, u32 base)
11977 + int ret;
11978 + u32 old_base, size;
11979 +
11980 +- if (cs > gpmc_cs_num) {
11981 ++ if (cs >= gpmc_cs_num) {
11982 + pr_err("%s: requested chip-select is disabled\n", __func__);
11983 + return -ENODEV;
11984 + }
11985 +@@ -978,7 +978,7 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
11986 + struct resource *res = &gpmc->mem;
11987 + int r = -1;
11988 +
11989 +- if (cs > gpmc_cs_num) {
11990 ++ if (cs >= gpmc_cs_num) {
11991 + pr_err("%s: requested chip-select is disabled\n", __func__);
11992 + return -ENODEV;
11993 + }
11994 +@@ -2265,6 +2265,10 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
11995 + }
11996 + }
11997 + #else
11998 ++void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
11999 ++{
12000 ++ memset(p, 0, sizeof(*p));
12001 ++}
12002 + static int gpmc_probe_dt(struct platform_device *pdev)
12003 + {
12004 + return 0;
12005 +diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
12006 +index ccd62b9639528..6d2f4a0a901dc 100644
12007 +--- a/drivers/mfd/sm501.c
12008 ++++ b/drivers/mfd/sm501.c
12009 +@@ -1415,8 +1415,14 @@ static int sm501_plat_probe(struct platform_device *dev)
12010 + goto err_claim;
12011 + }
12012 +
12013 +- return sm501_init_dev(sm);
12014 ++ ret = sm501_init_dev(sm);
12015 ++ if (ret)
12016 ++ goto err_unmap;
12017 ++
12018 ++ return 0;
12019 +
12020 ++ err_unmap:
12021 ++ iounmap(sm->regs);
12022 + err_claim:
12023 + release_mem_region(sm->io_res->start, 0x100);
12024 + err_res:
12025 +diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
12026 +index df5cebb372a59..ca465794ea9c8 100644
12027 +--- a/drivers/mfd/syscon.c
12028 ++++ b/drivers/mfd/syscon.c
12029 +@@ -108,7 +108,6 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
12030 + syscon_config.max_register = resource_size(&res) - reg_io_width;
12031 +
12032 + regmap = regmap_init_mmio(NULL, base, &syscon_config);
12033 +- kfree(syscon_config.name);
12034 + if (IS_ERR(regmap)) {
12035 + pr_err("regmap init failed\n");
12036 + ret = PTR_ERR(regmap);
12037 +@@ -145,6 +144,7 @@ err_clk:
12038 + regmap_exit(regmap);
12039 + err_regmap:
12040 + iounmap(base);
12041 ++ kfree(syscon_config.name);
12042 + err_map:
12043 + kfree(syscon);
12044 + return ERR_PTR(ret);
12045 +diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
12046 +index 37ccc67f4914b..f2b2805942f50 100644
12047 +--- a/drivers/misc/cardreader/rtsx_pcr.c
12048 ++++ b/drivers/misc/cardreader/rtsx_pcr.c
12049 +@@ -1562,12 +1562,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
12050 + ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
12051 + ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
12052 + if (ret < 0)
12053 +- goto disable_irq;
12054 ++ goto free_slots;
12055 +
12056 + schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
12057 +
12058 + return 0;
12059 +
12060 ++free_slots:
12061 ++ kfree(pcr->slots);
12062 + disable_irq:
12063 + free_irq(pcr->irq, (void *)pcr);
12064 + disable_msi:
12065 +diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
12066 +index ed8d38b099251..e26398fd977ec 100644
12067 +--- a/drivers/misc/eeprom/at25.c
12068 ++++ b/drivers/misc/eeprom/at25.c
12069 +@@ -358,7 +358,7 @@ static int at25_probe(struct spi_device *spi)
12070 + at25->nvmem_config.reg_read = at25_ee_read;
12071 + at25->nvmem_config.reg_write = at25_ee_write;
12072 + at25->nvmem_config.priv = at25;
12073 +- at25->nvmem_config.stride = 4;
12074 ++ at25->nvmem_config.stride = 1;
12075 + at25->nvmem_config.word_size = 1;
12076 + at25->nvmem_config.size = chip.byte_len;
12077 +
12078 +diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
12079 +index 4009b7df4cafe..2e55890ad6a61 100644
12080 +--- a/drivers/misc/habanalabs/gaudi/gaudi.c
12081 ++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
12082 +@@ -6099,7 +6099,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
12083 + is_idle &= is_eng_idle;
12084 +
12085 + if (mask)
12086 +- *mask |= !is_eng_idle <<
12087 ++ *mask |= ((u64) !is_eng_idle) <<
12088 + (GAUDI_ENGINE_ID_DMA_0 + dma_id);
12089 + if (s)
12090 + seq_printf(s, fmt, dma_id,
12091 +@@ -6122,7 +6122,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
12092 + is_idle &= is_eng_idle;
12093 +
12094 + if (mask)
12095 +- *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_TPC_0 + i);
12096 ++ *mask |= ((u64) !is_eng_idle) <<
12097 ++ (GAUDI_ENGINE_ID_TPC_0 + i);
12098 + if (s)
12099 + seq_printf(s, fmt, i,
12100 + is_eng_idle ? "Y" : "N",
12101 +@@ -6150,7 +6151,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
12102 + is_idle &= is_eng_idle;
12103 +
12104 + if (mask)
12105 +- *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_MME_0 + i);
12106 ++ *mask |= ((u64) !is_eng_idle) <<
12107 ++ (GAUDI_ENGINE_ID_MME_0 + i);
12108 + if (s) {
12109 + if (!is_slave)
12110 + seq_printf(s, fmt, i,
12111 +diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
12112 +index 33cd2ae653d23..c09742f440f96 100644
12113 +--- a/drivers/misc/habanalabs/goya/goya.c
12114 ++++ b/drivers/misc/habanalabs/goya/goya.c
12115 +@@ -5166,7 +5166,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
12116 + is_idle &= is_eng_idle;
12117 +
12118 + if (mask)
12119 +- *mask |= !is_eng_idle << (GOYA_ENGINE_ID_DMA_0 + i);
12120 ++ *mask |= ((u64) !is_eng_idle) <<
12121 ++ (GOYA_ENGINE_ID_DMA_0 + i);
12122 + if (s)
12123 + seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
12124 + qm_glbl_sts0, dma_core_sts0);
12125 +@@ -5189,7 +5190,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
12126 + is_idle &= is_eng_idle;
12127 +
12128 + if (mask)
12129 +- *mask |= !is_eng_idle << (GOYA_ENGINE_ID_TPC_0 + i);
12130 ++ *mask |= ((u64) !is_eng_idle) <<
12131 ++ (GOYA_ENGINE_ID_TPC_0 + i);
12132 + if (s)
12133 + seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
12134 + qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
12135 +@@ -5209,7 +5211,7 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
12136 + is_idle &= is_eng_idle;
12137 +
12138 + if (mask)
12139 +- *mask |= !is_eng_idle << GOYA_ENGINE_ID_MME_0;
12140 ++ *mask |= ((u64) !is_eng_idle) << GOYA_ENGINE_ID_MME_0;
12141 + if (s) {
12142 + seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
12143 + cmdq_glbl_sts0, mme_arch_sts);
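Both habanalabs hunks cast !is_eng_idle to u64 before shifting. Without the cast the operand is a plain int, so any engine ID above 31 shifts an int by 32 or more bits — undefined behaviour in C, and incapable of setting the upper half of the 64-bit idle mask in any case. A compilable illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int engine_id = 40;    /* above 31, as on these ASICs */
            int busy = 1;
            uint64_t mask = 0;

            /* Broken: 'busy << engine_id' shifts an int by 40 bits,
             * which is undefined behaviour and loses the bit anyway:
             *
             *      mask |= busy << engine_id;
             *
             * Fixed, as in the hunks above: widen first, then shift. */
            mask |= (uint64_t)busy << engine_id;

            printf("bit 40: %d\n", (int)((mask >> 40) & 1));
            return 0;
    }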
12144 +diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
12145 +index 2da3b474f4863..18fb9d8b8a4b5 100644
12146 +--- a/drivers/misc/mic/scif/scif_rma.c
12147 ++++ b/drivers/misc/mic/scif/scif_rma.c
12148 +@@ -1392,6 +1392,8 @@ retry:
12149 + (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
12150 + pinned_pages->pages);
12151 + if (nr_pages != pinned_pages->nr_pages) {
12152 ++ if (pinned_pages->nr_pages < 0)
12153 ++ pinned_pages->nr_pages = 0;
12154 + if (try_upgrade) {
12155 + if (ulimit)
12156 + __scif_dec_pinned_vm_lock(mm, nr_pages);
12157 +@@ -1408,7 +1410,6 @@ retry:
12158 +
12159 + if (pinned_pages->nr_pages < nr_pages) {
12160 + err = -EFAULT;
12161 +- pinned_pages->nr_pages = nr_pages;
12162 + goto dec_pinned;
12163 + }
12164 +
12165 +@@ -1421,7 +1422,6 @@ dec_pinned:
12166 + __scif_dec_pinned_vm_lock(mm, nr_pages);
12167 + /* Something went wrong! Rollback */
12168 + error_unmap:
12169 +- pinned_pages->nr_pages = nr_pages;
12170 + scif_destroy_pinned_pages(pinned_pages);
12171 + *pages = NULL;
12172 + dev_dbg(scif_info.mdev.this_device,
12173 +diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
12174 +index 55e7f21e51f44..6722c726b2590 100644
12175 +--- a/drivers/misc/mic/vop/vop_main.c
12176 ++++ b/drivers/misc/mic/vop/vop_main.c
12177 +@@ -320,7 +320,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
12178 + /* First assign the vring's allocated in host memory */
12179 + vqconfig = _vop_vq_config(vdev->desc) + index;
12180 + memcpy_fromio(&config, vqconfig, sizeof(config));
12181 +- _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
12182 ++ _vr_size = round_up(vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN), 4);
12183 + vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
12184 + va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
12185 + if (!va)
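Both vop files pad the ring length with round_up(..., 4) before appending struct _mic_vring_info, so the info structure placed directly after the vring stays 4-byte aligned even when vring_size() returns an unaligned length. round_up() is the kernel's usual power-of-two rounding macro; a minimal standalone equivalent:

    #include <stdio.h>

    /* Power-of-two round-up, equivalent to the kernel's round_up(). */
    #define ROUND_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long vr = 4098;        /* hypothetical vring_size() */

            printf("%lu\n", ROUND_UP(vr, 4));       /* 4100 */
            return 0;
    }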
12186 +diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
12187 +index 30eac172f0170..7014ffe88632e 100644
12188 +--- a/drivers/misc/mic/vop/vop_vringh.c
12189 ++++ b/drivers/misc/mic/vop/vop_vringh.c
12190 +@@ -296,7 +296,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
12191 +
12192 + num = le16_to_cpu(vqconfig[i].num);
12193 + mutex_init(&vvr->vr_mutex);
12194 +- vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
12195 ++ vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) +
12196 + sizeof(struct _mic_vring_info));
12197 + vr->va = (void *)
12198 + __get_free_pages(GFP_KERNEL | __GFP_ZERO,
12199 +@@ -308,7 +308,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
12200 + goto err;
12201 + }
12202 + vr->len = vr_size;
12203 +- vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
12204 ++ vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4);
12205 + vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
12206 + vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
12207 + DMA_BIDIRECTIONAL);
12208 +@@ -602,6 +602,7 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
12209 + size_t partlen;
12210 + bool dma = VOP_USE_DMA && vi->dma_ch;
12211 + int err = 0;
12212 ++ size_t offset = 0;
12213 +
12214 + if (dma) {
12215 + dma_alignment = 1 << vi->dma_ch->device->copy_align;
12216 +@@ -655,13 +656,20 @@ memcpy:
12217 + * We are copying to IO below and should ideally use something
12218 + * like copy_from_user_toio(..) if it existed.
12219 + */
12220 +- if (copy_from_user((void __force *)dbuf, ubuf, len)) {
12221 +- err = -EFAULT;
12222 +- dev_err(vop_dev(vdev), "%s %d err %d\n",
12223 +- __func__, __LINE__, err);
12224 +- goto err;
12225 ++ while (len) {
12226 ++ partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
12227 ++
12228 ++ if (copy_from_user(vvr->buf, ubuf + offset, partlen)) {
12229 ++ err = -EFAULT;
12230 ++ dev_err(vop_dev(vdev), "%s %d err %d\n",
12231 ++ __func__, __LINE__, err);
12232 ++ goto err;
12233 ++ }
12234 ++ memcpy_toio(dbuf + offset, vvr->buf, partlen);
12235 ++ offset += partlen;
12236 ++ vdev->out_bytes += partlen;
12237 ++ len -= partlen;
12238 + }
12239 +- vdev->out_bytes += len;
12240 + err = 0;
12241 + err:
12242 + vpdev->hw_ops->unmap(vpdev, dbuf);
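The vop_virtio_copy_from_user() fix replaces a single copy_from_user() of the full length — which wrote straight through the device mapping — with a loop that stages at most VOP_INT_DMA_BUF_SIZE bytes at a time in the pre-allocated vvr->buf bounce buffer and memcpy_toio()s each chunk. A userspace model of the chunking shape, with plain memcpy() standing in for both copy_from_user() and memcpy_toio():

    #include <stddef.h>
    #include <string.h>

    #define CHUNK 256       /* stand-in for VOP_INT_DMA_BUF_SIZE */

    /* Copy 'len' bytes from src to dst through a fixed-size bounce
     * buffer, never staging more than CHUNK bytes at once. */
    static void bounce_copy(char *dst, const char *src, size_t len)
    {
            static char bounce[CHUNK];      /* stand-in for vvr->buf */
            size_t offset = 0;

            while (len) {
                    size_t part = len < CHUNK ? len : CHUNK;

                    memcpy(bounce, src + offset, part);     /* "from user" */
                    memcpy(dst + offset, bounce, part);     /* "to io" */
                    offset += part;
                    len -= part;
            }
    }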
12243 +diff --git a/drivers/misc/ocxl/Kconfig b/drivers/misc/ocxl/Kconfig
12244 +index 6551007a066ce..947294f6d7f44 100644
12245 +--- a/drivers/misc/ocxl/Kconfig
12246 ++++ b/drivers/misc/ocxl/Kconfig
12247 +@@ -9,9 +9,8 @@ config OCXL_BASE
12248 +
12249 + config OCXL
12250 + tristate "OpenCAPI coherent accelerator support"
12251 +- depends on PPC_POWERNV && PCI && EEH
12252 ++ depends on PPC_POWERNV && PCI && EEH && HOTPLUG_PCI_POWERNV
12253 + select OCXL_BASE
12254 +- select HOTPLUG_PCI_POWERNV
12255 + default m
12256 + help
12257 + Select this option to enable the ocxl driver for Open
12258 +diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
12259 +index 8531ae7811956..c49065887e8f5 100644
12260 +--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
12261 ++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
12262 +@@ -657,8 +657,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
12263 + if (retval < (int)produce_q->kernel_if->num_pages) {
12264 + pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
12265 + retval);
12266 +- qp_release_pages(produce_q->kernel_if->u.h.header_page,
12267 +- retval, false);
12268 ++ if (retval > 0)
12269 ++ qp_release_pages(produce_q->kernel_if->u.h.header_page,
12270 ++ retval, false);
12271 + err = VMCI_ERROR_NO_MEM;
12272 + goto out;
12273 + }
12274 +@@ -670,8 +671,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
12275 + if (retval < (int)consume_q->kernel_if->num_pages) {
12276 + pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
12277 + retval);
12278 +- qp_release_pages(consume_q->kernel_if->u.h.header_page,
12279 +- retval, false);
12280 ++ if (retval > 0)
12281 ++ qp_release_pages(consume_q->kernel_if->u.h.header_page,
12282 ++ retval, false);
12283 + qp_release_pages(produce_q->kernel_if->u.h.header_page,
12284 + produce_q->kernel_if->num_pages, false);
12285 + err = VMCI_ERROR_NO_MEM;
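Both vmci hunks guard qp_release_pages() with retval > 0 because get_user_pages_fast() can return a negative errno as well as a short positive count; on the error path, releasing "retval" pages would otherwise pass a negative count down. A hedged sketch of the pattern — pin_pages() and put_pages() are hypothetical helpers modelling the kernel calls, not real API:

    #include <errno.h>

    struct page;

    /* Hypothetical stand-ins for get_user_pages_fast() and
     * qp_release_pages(): pinning may return a negative errno or a
     * short positive count. */
    int pin_pages(unsigned long uva, int npages, struct page **pages);
    void put_pages(struct page **pages, int npages);

    static int pin_user_buffer(unsigned long uva, int npages,
                               struct page **pages)
    {
            int pinned = pin_pages(uva, npages, pages);

            if (pinned == npages)
                    return 0;

            if (pinned > 0)                 /* release only what was pinned */
                    put_pages(pages, pinned);

            return pinned < 0 ? pinned : -ENOMEM;
    }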
12286 +diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
12287 +index e0655278c5c32..3efaa9534a777 100644
12288 +--- a/drivers/mmc/core/sdio_cis.c
12289 ++++ b/drivers/mmc/core/sdio_cis.c
12290 +@@ -26,6 +26,9 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
12291 + unsigned i, nr_strings;
12292 + char **buffer, *string;
12293 +
12294 ++ if (size < 2)
12295 ++ return 0;
12296 ++
12297 + /* Find all null-terminated (including zero length) strings in
12298 + the TPLLV1_INFO field. Trailing garbage is ignored. */
12299 + buf += 2;
12300 +diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
12301 +index e0e33f6bf513b..1e70ecfffa39f 100644
12302 +--- a/drivers/mtd/hyperbus/hbmc-am654.c
12303 ++++ b/drivers/mtd/hyperbus/hbmc-am654.c
12304 +@@ -70,7 +70,8 @@ static int am654_hbmc_probe(struct platform_device *pdev)
12305 +
12306 + platform_set_drvdata(pdev, priv);
12307 +
12308 +- ret = of_address_to_resource(np, 0, &res);
12309 ++ priv->hbdev.np = of_get_next_child(np, NULL);
12310 ++ ret = of_address_to_resource(priv->hbdev.np, 0, &res);
12311 + if (ret)
12312 + return ret;
12313 +
12314 +@@ -103,7 +104,6 @@ static int am654_hbmc_probe(struct platform_device *pdev)
12315 + priv->ctlr.dev = dev;
12316 + priv->ctlr.ops = &am654_hbmc_ops;
12317 + priv->hbdev.ctlr = &priv->ctlr;
12318 +- priv->hbdev.np = of_get_next_child(dev->of_node, NULL);
12319 + ret = hyperbus_register_device(&priv->hbdev);
12320 + if (ret) {
12321 + dev_err(dev, "failed to register controller\n");
12322 +diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
12323 +index 0f1547f09d08b..72f5c7b300790 100644
12324 +--- a/drivers/mtd/lpddr/lpddr2_nvm.c
12325 ++++ b/drivers/mtd/lpddr/lpddr2_nvm.c
12326 +@@ -393,6 +393,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
12327 + return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
12328 + }
12329 +
12330 ++static const struct mtd_info lpddr2_nvm_mtd_info = {
12331 ++ .type = MTD_RAM,
12332 ++ .writesize = 1,
12333 ++ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
12334 ++ ._read = lpddr2_nvm_read,
12335 ++ ._write = lpddr2_nvm_write,
12336 ++ ._erase = lpddr2_nvm_erase,
12337 ++ ._unlock = lpddr2_nvm_unlock,
12338 ++ ._lock = lpddr2_nvm_lock,
12339 ++};
12340 ++
12341 + /*
12342 + * lpddr2_nvm driver probe method
12343 + */
12344 +@@ -433,6 +444,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
12345 + .pfow_base = OW_BASE_ADDRESS,
12346 + .fldrv_priv = pcm_data,
12347 + };
12348 ++
12349 + if (IS_ERR(map->virt))
12350 + return PTR_ERR(map->virt);
12351 +
12352 +@@ -444,22 +456,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
12353 + return PTR_ERR(pcm_data->ctl_regs);
12354 +
12355 + /* Populate mtd_info data structure */
12356 +- *mtd = (struct mtd_info) {
12357 +- .dev = { .parent = &pdev->dev },
12358 +- .name = pdev->dev.init_name,
12359 +- .type = MTD_RAM,
12360 +- .priv = map,
12361 +- .size = resource_size(add_range),
12362 +- .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width,
12363 +- .writesize = 1,
12364 +- .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width,
12365 +- .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
12366 +- ._read = lpddr2_nvm_read,
12367 +- ._write = lpddr2_nvm_write,
12368 +- ._erase = lpddr2_nvm_erase,
12369 +- ._unlock = lpddr2_nvm_unlock,
12370 +- ._lock = lpddr2_nvm_lock,
12371 +- };
12372 ++ *mtd = lpddr2_nvm_mtd_info;
12373 ++ mtd->dev.parent = &pdev->dev;
12374 ++ mtd->name = pdev->dev.init_name;
12375 ++ mtd->priv = map;
12376 ++ mtd->size = resource_size(add_range);
12377 ++ mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width;
12378 ++ mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width;
12379 +
12380 + /* Verify the presence of the device looking for PFOW string */
12381 + if (!lpddr2_nvm_pfow_present(map)) {
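The lpddr2_nvm change swaps a large compound-literal assignment — which rebuilt every mtd_info field, constant ones included, on each probe — for a static const template copied once and then patched with the per-device fields. The general shape of the idiom, with an illustrative struct:

    struct cfg {
            int type;               /* invariant across devices */
            int writesize;          /* invariant across devices */
            const char *name;       /* per-device */
            void *priv;             /* per-device */
    };

    /* Invariant fields live in a single const template. */
    static const struct cfg cfg_template = {
            .type = 1,
            .writesize = 1,
    };

    static void cfg_init(struct cfg *c, const char *name, void *priv)
    {
            *c = cfg_template;      /* copy the invariant part */
            c->name = name;         /* then fill the per-device part */
            c->priv = priv;
    }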
12382 +diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
12383 +index 4ced68be7ed7e..774970bfcf859 100644
12384 +--- a/drivers/mtd/mtdoops.c
12385 ++++ b/drivers/mtd/mtdoops.c
12386 +@@ -279,12 +279,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
12387 + kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
12388 + record_size - MTDOOPS_HEADER_SIZE, NULL);
12389 +
12390 +- /* Panics must be written immediately */
12391 +- if (reason != KMSG_DUMP_OOPS)
12392 ++ if (reason != KMSG_DUMP_OOPS) {
12393 ++ /* Panics must be written immediately */
12394 + mtdoops_write(cxt, 1);
12395 +-
12396 +- /* For other cases, schedule work to write it "nicely" */
12397 +- schedule_work(&cxt->work_write);
12398 ++ } else {
12399 ++ /* For other cases, schedule work to write it "nicely" */
12400 ++ schedule_work(&cxt->work_write);
12401 ++ }
12402 + }
12403 +
12404 + static void mtdoops_notify_add(struct mtd_info *mtd)
12405 +diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
12406 +index fdba155416d25..0bf4cfc251472 100644
12407 +--- a/drivers/mtd/nand/raw/ams-delta.c
12408 ++++ b/drivers/mtd/nand/raw/ams-delta.c
12409 +@@ -400,12 +400,14 @@ static int gpio_nand_remove(struct platform_device *pdev)
12410 + return 0;
12411 + }
12412 +
12413 ++#ifdef CONFIG_OF
12414 + static const struct of_device_id gpio_nand_of_id_table[] = {
12415 + {
12416 + /* sentinel */
12417 + },
12418 + };
12419 + MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
12420 ++#endif
12421 +
12422 + static const struct platform_device_id gpio_nand_plat_id_table[] = {
12423 + {
12424 +diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
12425 +index 7f4546ae91303..5792fb240cb2b 100644
12426 +--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
12427 ++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
12428 +@@ -1762,7 +1762,7 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
12429 + return ret;
12430 + }
12431 +
12432 +- if (cs > FMC2_MAX_CE) {
12433 ++ if (cs >= FMC2_MAX_CE) {
12434 + dev_err(nfc->dev, "invalid reg value: %d\n", cs);
12435 + return -EINVAL;
12436 + }
12437 +diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
12438 +index 7248c59011836..fcca45e2abe20 100644
12439 +--- a/drivers/mtd/nand/raw/vf610_nfc.c
12440 ++++ b/drivers/mtd/nand/raw/vf610_nfc.c
12441 +@@ -852,8 +852,10 @@ static int vf610_nfc_probe(struct platform_device *pdev)
12442 + }
12443 +
12444 + of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
12445 +- if (!of_id)
12446 +- return -ENODEV;
12447 ++ if (!of_id) {
12448 ++ err = -ENODEV;
12449 ++ goto err_disable_clk;
12450 ++ }
12451 +
12452 + nfc->variant = (enum vf610_nfc_variant)of_id->data;
12453 +
12454 +diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
12455 +index d219c970042a2..0b7667e60780f 100644
12456 +--- a/drivers/mtd/nand/spi/gigadevice.c
12457 ++++ b/drivers/mtd/nand/spi/gigadevice.c
12458 +@@ -21,7 +21,7 @@
12459 + #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
12460 +
12461 + static SPINAND_OP_VARIANTS(read_cache_variants,
12462 +- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
12463 ++ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
12464 + SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
12465 + SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
12466 + SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
12467 +@@ -29,7 +29,7 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
12468 + SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
12469 +
12470 + static SPINAND_OP_VARIANTS(read_cache_variants_f,
12471 +- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
12472 ++ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
12473 + SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
12474 + SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
12475 + SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
12476 +@@ -202,7 +202,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
12477 + SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
12478 + &write_cache_variants,
12479 + &update_cache_variants),
12480 +- 0,
12481 ++ SPINAND_HAS_QE_BIT,
12482 + SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
12483 + gd5fxgq4xa_ecc_get_status)),
12484 + SPINAND_INFO("GD5F2GQ4xA",
12485 +@@ -212,7 +212,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
12486 + SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
12487 + &write_cache_variants,
12488 + &update_cache_variants),
12489 +- 0,
12490 ++ SPINAND_HAS_QE_BIT,
12491 + SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
12492 + gd5fxgq4xa_ecc_get_status)),
12493 + SPINAND_INFO("GD5F4GQ4xA",
12494 +@@ -222,7 +222,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
12495 + SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
12496 + &write_cache_variants,
12497 + &update_cache_variants),
12498 +- 0,
12499 ++ SPINAND_HAS_QE_BIT,
12500 + SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
12501 + gd5fxgq4xa_ecc_get_status)),
12502 + SPINAND_INFO("GD5F1GQ4UExxG",
12503 +@@ -232,7 +232,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
12504 + SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
12505 + &write_cache_variants,
12506 + &update_cache_variants),
12507 +- 0,
12508 ++ SPINAND_HAS_QE_BIT,
12509 + SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
12510 + gd5fxgq4uexxg_ecc_get_status)),
12511 + SPINAND_INFO("GD5F1GQ4UFxxG",
12512 +@@ -242,7 +242,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
12513 + SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
12514 + &write_cache_variants,
12515 + &update_cache_variants),
12516 +- 0,
12517 ++ SPINAND_HAS_QE_BIT,
12518 + SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
12519 + gd5fxgq4ufxxg_ecc_get_status)),
12520 + };
12521 +diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
12522 +index f98363c9b3630..e72354322f628 100644
12523 +--- a/drivers/mtd/parsers/Kconfig
12524 ++++ b/drivers/mtd/parsers/Kconfig
12525 +@@ -12,7 +12,7 @@ config MTD_BCM47XX_PARTS
12526 + boards.
12527 +
12528 + config MTD_BCM63XX_PARTS
12529 +- tristate "BCM63XX CFE partitioning parser"
12530 ++ bool "BCM63XX CFE partitioning parser"
12531 + depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
12532 + select CRC32
12533 + select MTD_PARSER_IMAGETAG
12534 +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
12535 +index 94d10ec954a05..2ac7a667bde35 100644
12536 +--- a/drivers/net/can/flexcan.c
12537 ++++ b/drivers/net/can/flexcan.c
12538 +@@ -1260,18 +1260,23 @@ static int flexcan_chip_start(struct net_device *dev)
12539 + return err;
12540 + }
12541 +
12542 +-/* flexcan_chip_stop
12543 ++/* __flexcan_chip_stop
12544 + *
12545 +- * this functions is entered with clocks enabled
12546 ++ * this function is entered with clocks enabled
12547 + */
12548 +-static void flexcan_chip_stop(struct net_device *dev)
12549 ++static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
12550 + {
12551 + struct flexcan_priv *priv = netdev_priv(dev);
12552 + struct flexcan_regs __iomem *regs = priv->regs;
12553 ++ int err;
12554 +
12555 + /* freeze + disable module */
12556 +- flexcan_chip_freeze(priv);
12557 +- flexcan_chip_disable(priv);
12558 ++ err = flexcan_chip_freeze(priv);
12559 ++ if (err && !disable_on_error)
12560 ++ return err;
12561 ++ err = flexcan_chip_disable(priv);
12562 ++ if (err && !disable_on_error)
12563 ++ goto out_chip_unfreeze;
12564 +
12565 + /* Disable all interrupts */
12566 + priv->write(0, &regs->imask2);
12567 +@@ -1281,6 +1286,23 @@ static void flexcan_chip_stop(struct net_device *dev)
12568 +
12569 + flexcan_transceiver_disable(priv);
12570 + priv->can.state = CAN_STATE_STOPPED;
12571 ++
12572 ++ return 0;
12573 ++
12574 ++ out_chip_unfreeze:
12575 ++ flexcan_chip_unfreeze(priv);
12576 ++
12577 ++ return err;
12578 ++}
12579 ++
12580 ++static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev)
12581 ++{
12582 ++ return __flexcan_chip_stop(dev, true);
12583 ++}
12584 ++
12585 ++static inline int flexcan_chip_stop(struct net_device *dev)
12586 ++{
12587 ++ return __flexcan_chip_stop(dev, false);
12588 + }
12589 +
12590 + static int flexcan_open(struct net_device *dev)
12591 +@@ -1362,7 +1384,7 @@ static int flexcan_close(struct net_device *dev)
12592 +
12593 + netif_stop_queue(dev);
12594 + can_rx_offload_disable(&priv->offload);
12595 +- flexcan_chip_stop(dev);
12596 ++ flexcan_chip_stop_disable_on_error(dev);
12597 +
12598 + can_rx_offload_del(&priv->offload);
12599 + free_irq(dev->irq, dev);
12600 +diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
12601 +index 38ea5e600fb84..e6d0cb9ee02f0 100644
12602 +--- a/drivers/net/can/m_can/m_can_platform.c
12603 ++++ b/drivers/net/can/m_can/m_can_platform.c
12604 +@@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev)
12605 + struct net_device *ndev = dev_get_drvdata(dev);
12606 + struct m_can_classdev *mcan_class = netdev_priv(ndev);
12607 +
12608 +- m_can_class_suspend(dev);
12609 +-
12610 + clk_disable_unprepare(mcan_class->cclk);
12611 + clk_disable_unprepare(mcan_class->hclk);
12612 +
12613 +diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
12614 +index c796d42730bae..e5f047129b150 100644
12615 +--- a/drivers/net/dsa/microchip/ksz_common.c
12616 ++++ b/drivers/net/dsa/microchip/ksz_common.c
12617 +@@ -103,14 +103,8 @@ void ksz_init_mib_timer(struct ksz_device *dev)
12618 +
12619 + INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
12620 +
12621 +- /* Read MIB counters every 30 seconds to avoid overflow. */
12622 +- dev->mib_read_interval = msecs_to_jiffies(30000);
12623 +-
12624 + for (i = 0; i < dev->mib_port_cnt; i++)
12625 + dev->dev_ops->port_init_cnt(dev, i);
12626 +-
12627 +- /* Start the timer 2 seconds later. */
12628 +- schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
12629 + }
12630 + EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
12631 +
12632 +@@ -143,7 +137,9 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
12633 +
12634 + /* Read all MIB counters when the link is going down. */
12635 + p->read = true;
12636 +- schedule_delayed_work(&dev->mib_read, 0);
12637 ++ /* timer started */
12638 ++ if (dev->mib_read_interval)
12639 ++ schedule_delayed_work(&dev->mib_read, 0);
12640 + }
12641 + EXPORT_SYMBOL_GPL(ksz_mac_link_down);
12642 +
12643 +@@ -450,6 +446,12 @@ int ksz_switch_register(struct ksz_device *dev,
12644 + return ret;
12645 + }
12646 +
12647 ++ /* Read MIB counters every 30 seconds to avoid overflow. */
12648 ++ dev->mib_read_interval = msecs_to_jiffies(30000);
12649 ++
12650 ++ /* Start the MIB timer. */
12651 ++ schedule_delayed_work(&dev->mib_read, 0);
12652 ++
12653 + return 0;
12654 + }
12655 + EXPORT_SYMBOL(ksz_switch_register);
12656 +diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
12657 +index 9e9fd19e1d00c..e2cd49eec0370 100644
12658 +--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
12659 ++++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
12660 +@@ -1010,7 +1010,7 @@ static const struct felix_info seville_info_vsc9953 = {
12661 + .vcap_is2_keys = vsc9953_vcap_is2_keys,
12662 + .vcap_is2_actions = vsc9953_vcap_is2_actions,
12663 + .vcap = vsc9953_vcap_props,
12664 +- .shared_queue_sz = 2048 * 1024,
12665 ++ .shared_queue_sz = 256 * 1024,
12666 + .num_mact_rows = 2048,
12667 + .num_ports = 10,
12668 + .mdio_bus_alloc = vsc9953_mdio_bus_alloc,
12669 +diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
12670 +index 9a63b51e1d82f..6f2dab7e33d65 100644
12671 +--- a/drivers/net/dsa/realtek-smi-core.h
12672 ++++ b/drivers/net/dsa/realtek-smi-core.h
12673 +@@ -25,6 +25,9 @@ struct rtl8366_mib_counter {
12674 + const char *name;
12675 + };
12676 +
12677 ++/**
12678 ++ * struct rtl8366_vlan_mc - Virtual LAN member configuration
12679 ++ */
12680 + struct rtl8366_vlan_mc {
12681 + u16 vid;
12682 + u16 untag;
12683 +@@ -119,7 +122,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi);
12684 + int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
12685 + int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
12686 + u32 untag, u32 fid);
12687 +-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
12688 + int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
12689 + unsigned int vid);
12690 + int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
12691 +diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
12692 +index a8c5a934c3d30..c58ca324a4b24 100644
12693 +--- a/drivers/net/dsa/rtl8366.c
12694 ++++ b/drivers/net/dsa/rtl8366.c
12695 +@@ -36,12 +36,113 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
12696 + }
12697 + EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
12698 +
12699 ++/**
12700 ++ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
12701 ++ * @smi: the Realtek SMI device instance
12702 ++ * @vid: the VLAN ID to look up or allocate
12703 ++ * @vlanmc: the pointer will be assigned to a pointer to a valid member config
12704 ++ * if successful
12705 ++ * @return: index of a new member config or negative error number
12706 ++ */
12707 ++static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
12708 ++ struct rtl8366_vlan_mc *vlanmc)
12709 ++{
12710 ++ struct rtl8366_vlan_4k vlan4k;
12711 ++ int ret;
12712 ++ int i;
12713 ++
12714 ++ /* Try to find an existing member config entry for this VID */
12715 ++ for (i = 0; i < smi->num_vlan_mc; i++) {
12716 ++ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
12717 ++ if (ret) {
12718 ++ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
12719 ++ i, vid);
12720 ++ return ret;
12721 ++ }
12722 ++
12723 ++ if (vid == vlanmc->vid)
12724 ++ return i;
12725 ++ }
12726 ++
12727 ++ /* We have no MC entry for this VID, try to find an empty one */
12728 ++ for (i = 0; i < smi->num_vlan_mc; i++) {
12729 ++ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
12730 ++ if (ret) {
12731 ++ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
12732 ++ i, vid);
12733 ++ return ret;
12734 ++ }
12735 ++
12736 ++ if (vlanmc->vid == 0 && vlanmc->member == 0) {
12737 ++ /* Update the entry from the 4K table */
12738 ++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
12739 ++ if (ret) {
12740 ++ dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
12741 ++ i, vid);
12742 ++ return ret;
12743 ++ }
12744 ++
12745 ++ vlanmc->vid = vid;
12746 ++ vlanmc->member = vlan4k.member;
12747 ++ vlanmc->untag = vlan4k.untag;
12748 ++ vlanmc->fid = vlan4k.fid;
12749 ++ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
12750 ++ if (ret) {
12751 ++ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
12752 ++ i, vid);
12753 ++ return ret;
12754 ++ }
12755 ++
12756 ++ dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
12757 ++ i, vid);
12758 ++ return i;
12759 ++ }
12760 ++ }
12761 ++
12762 ++ /* MC table is full, try to find an unused entry and replace it */
12763 ++ for (i = 0; i < smi->num_vlan_mc; i++) {
12764 ++ int used;
12765 ++
12766 ++ ret = rtl8366_mc_is_used(smi, i, &used);
12767 ++ if (ret)
12768 ++ return ret;
12769 ++
12770 ++ if (!used) {
12771 ++ /* Update the entry from the 4K table */
12772 ++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
12773 ++ if (ret)
12774 ++ return ret;
12775 ++
12776 ++ vlanmc->vid = vid;
12777 ++ vlanmc->member = vlan4k.member;
12778 ++ vlanmc->untag = vlan4k.untag;
12779 ++ vlanmc->fid = vlan4k.fid;
12780 ++ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
12781 ++ if (ret) {
12782 ++ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
12783 ++ i, vid);
12784 ++ return ret;
12785 ++ }
12786 ++ dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
12787 ++ i, vid);
12788 ++ return i;
12789 ++ }
12790 ++ }
12791 ++
12792 ++ dev_err(smi->dev, "all VLAN member configurations are in use\n");
12793 ++ return -ENOSPC;
12794 ++}
12795 ++
12796 + int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
12797 + u32 untag, u32 fid)
12798 + {
12799 ++ struct rtl8366_vlan_mc vlanmc;
12800 + struct rtl8366_vlan_4k vlan4k;
12801 ++ int mc;
12802 + int ret;
12803 +- int i;
12804 ++
12805 ++ if (!smi->ops->is_vlan_valid(smi, vid))
12806 ++ return -EINVAL;
12807 +
12808 + dev_dbg(smi->dev,
12809 + "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
12810 +@@ -63,133 +164,58 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
12811 + "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
12812 + vid, vlan4k.member, vlan4k.untag);
12813 +
12814 +- /* Try to find an existing MC entry for this VID */
12815 +- for (i = 0; i < smi->num_vlan_mc; i++) {
12816 +- struct rtl8366_vlan_mc vlanmc;
12817 +-
12818 +- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
12819 +- if (ret)
12820 +- return ret;
12821 +-
12822 +- if (vid == vlanmc.vid) {
12823 +- /* update the MC entry */
12824 +- vlanmc.member |= member;
12825 +- vlanmc.untag |= untag;
12826 +- vlanmc.fid = fid;
12827 +-
12828 +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
12829 ++ /* Find or allocate a member config for this VID */
12830 ++ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
12831 ++ if (ret < 0)
12832 ++ return ret;
12833 ++ mc = ret;
12834 +
12835 +- dev_dbg(smi->dev,
12836 +- "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
12837 +- vid, vlanmc.member, vlanmc.untag);
12838 ++ /* Update the MC entry */
12839 ++ vlanmc.member |= member;
12840 ++ vlanmc.untag |= untag;
12841 ++ vlanmc.fid = fid;
12842 +
12843 +- break;
12844 +- }
12845 +- }
12846 ++ /* Commit updates to the MC entry */
12847 ++ ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
12848 ++ if (ret)
12849 ++ dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
12850 ++ mc, vid);
12851 ++ else
12852 ++ dev_dbg(smi->dev,
12853 ++ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
12854 ++ vid, vlanmc.member, vlanmc.untag);
12855 +
12856 + return ret;
12857 + }
12858 + EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
12859 +
12860 +-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
12861 +-{
12862 +- struct rtl8366_vlan_mc vlanmc;
12863 +- int ret;
12864 +- int index;
12865 +-
12866 +- ret = smi->ops->get_mc_index(smi, port, &index);
12867 +- if (ret)
12868 +- return ret;
12869 +-
12870 +- ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
12871 +- if (ret)
12872 +- return ret;
12873 +-
12874 +- *val = vlanmc.vid;
12875 +- return 0;
12876 +-}
12877 +-EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
12878 +-
12879 + int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
12880 + unsigned int vid)
12881 + {
12882 + struct rtl8366_vlan_mc vlanmc;
12883 +- struct rtl8366_vlan_4k vlan4k;
12884 ++ int mc;
12885 + int ret;
12886 +- int i;
12887 +-
12888 +- /* Try to find an existing MC entry for this VID */
12889 +- for (i = 0; i < smi->num_vlan_mc; i++) {
12890 +- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
12891 +- if (ret)
12892 +- return ret;
12893 +-
12894 +- if (vid == vlanmc.vid) {
12895 +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
12896 +- if (ret)
12897 +- return ret;
12898 +-
12899 +- ret = smi->ops->set_mc_index(smi, port, i);
12900 +- return ret;
12901 +- }
12902 +- }
12903 +-
12904 +- /* We have no MC entry for this VID, try to find an empty one */
12905 +- for (i = 0; i < smi->num_vlan_mc; i++) {
12906 +- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
12907 +- if (ret)
12908 +- return ret;
12909 +-
12910 +- if (vlanmc.vid == 0 && vlanmc.member == 0) {
12911 +- /* Update the entry from the 4K table */
12912 +- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
12913 +- if (ret)
12914 +- return ret;
12915 +
12916 +- vlanmc.vid = vid;
12917 +- vlanmc.member = vlan4k.member;
12918 +- vlanmc.untag = vlan4k.untag;
12919 +- vlanmc.fid = vlan4k.fid;
12920 +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
12921 +- if (ret)
12922 +- return ret;
12923 +-
12924 +- ret = smi->ops->set_mc_index(smi, port, i);
12925 +- return ret;
12926 +- }
12927 +- }
12928 +-
12929 +- /* MC table is full, try to find an unused entry and replace it */
12930 +- for (i = 0; i < smi->num_vlan_mc; i++) {
12931 +- int used;
12932 +-
12933 +- ret = rtl8366_mc_is_used(smi, i, &used);
12934 +- if (ret)
12935 +- return ret;
12936 +-
12937 +- if (!used) {
12938 +- /* Update the entry from the 4K table */
12939 +- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
12940 +- if (ret)
12941 +- return ret;
12942 ++ if (!smi->ops->is_vlan_valid(smi, vid))
12943 ++ return -EINVAL;
12944 +
12945 +- vlanmc.vid = vid;
12946 +- vlanmc.member = vlan4k.member;
12947 +- vlanmc.untag = vlan4k.untag;
12948 +- vlanmc.fid = vlan4k.fid;
12949 +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
12950 +- if (ret)
12951 +- return ret;
12952 ++ /* Find or allocate a member config for this VID */
12953 ++ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
12954 ++ if (ret < 0)
12955 ++ return ret;
12956 ++ mc = ret;
12957 +
12958 +- ret = smi->ops->set_mc_index(smi, port, i);
12959 +- return ret;
12960 +- }
12961 ++ ret = smi->ops->set_mc_index(smi, port, mc);
12962 ++ if (ret) {
12963 ++ dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
12964 ++ mc, port);
12965 ++ return ret;
12966 + }
12967 +
12968 +- dev_err(smi->dev,
12969 +- "all VLAN member configurations are in use\n");
12970 ++ dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
12971 ++ port, vid, mc);
12972 +
12973 +- return -ENOSPC;
12974 ++ return 0;
12975 + }
12976 + EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
12977 +
12978 +@@ -389,7 +415,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
12979 + if (!smi->ops->is_vlan_valid(smi, vid))
12980 + return;
12981 +
12982 +- dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
12983 ++ dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
12984 ++ vlan->vid_begin,
12985 + port,
12986 + untagged ? "untagged" : "tagged",
12987 + pvid ? " PVID" : "no PVID");
12988 +@@ -398,34 +425,29 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
12989 + dev_err(smi->dev, "port is DSA or CPU port\n");
12990 +
12991 + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
12992 +- int pvid_val = 0;
12993 +-
12994 +- dev_info(smi->dev, "add VLAN %04x\n", vid);
12995 + member |= BIT(port);
12996 +
12997 + if (untagged)
12998 + untag |= BIT(port);
12999 +
13000 +- /* To ensure that we have a valid MC entry for this VLAN,
13001 +- * initialize the port VLAN ID here.
13002 +- */
13003 +- ret = rtl8366_get_pvid(smi, port, &pvid_val);
13004 +- if (ret < 0) {
13005 +- dev_err(smi->dev, "could not lookup PVID for port %d\n",
13006 +- port);
13007 +- return;
13008 +- }
13009 +- if (pvid_val == 0) {
13010 +- ret = rtl8366_set_pvid(smi, port, vid);
13011 +- if (ret < 0)
13012 +- return;
13013 +- }
13014 +-
13015 + ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
13016 + if (ret)
13017 + dev_err(smi->dev,
13018 + "failed to set up VLAN %04x",
13019 + vid);
13020 ++
13021 ++ if (!pvid)
13022 ++ continue;
13023 ++
13024 ++ ret = rtl8366_set_pvid(smi, port, vid);
13025 ++ if (ret)
13026 ++ dev_err(smi->dev,
13027 ++ "failed to set PVID on port %d to VLAN %04x",
13028 ++ port, vid);
13029 ++
13030 ++ if (!ret)
13031 ++ dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
13032 ++ vid, port);
13033 + }
13034 + }
13035 + EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
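The rtl8366.c rewrite pulls the member-config search that rtl8366_set_vlan() and rtl8366_set_pvid() previously open-coded into rtl8366_obtain_mc(): reuse the entry already holding the VID, else claim an empty entry (seeded from the 4K table), else recycle an entry no port references, and only then fail with -ENOSPC; both callers now also reject invalid VIDs up front, and the unused rtl8366_get_pvid() goes away. Stripped of the hardware accessors, the find-or-allocate skeleton looks like this (struct slot and the two passes shown are illustrative):

    /* Find-or-allocate over a small fixed table, the shape of
     * rtl8366_obtain_mc() above (the driver adds a third pass that
     * recycles entries no port references any more). */
    struct slot {
            int key;                /* 0 means empty, like vid/member == 0 */
            int in_use;
    };

    static int obtain_slot(struct slot *tbl, int n, int key)
    {
            int i;

            for (i = 0; i < n; i++)         /* pass 1: existing entry */
                    if (tbl[i].in_use && tbl[i].key == key)
                            return i;

            for (i = 0; i < n; i++)         /* pass 2: empty entry */
                    if (!tbl[i].in_use) {
                            tbl[i].key = key;
                            tbl[i].in_use = 1;
                            return i;
                    }

            return -1;                      /* table full: -ENOSPC */
    }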
13036 +diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
13037 +index 48f1ff7467999..5cfffa7559c7c 100644
13038 +--- a/drivers/net/dsa/rtl8366rb.c
13039 ++++ b/drivers/net/dsa/rtl8366rb.c
13040 +@@ -1255,7 +1255,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
13041 + if (smi->vlan4k_enabled)
13042 + max = RTL8366RB_NUM_VIDS - 1;
13043 +
13044 +- if (vlan == 0 || vlan >= max)
13045 ++ if (vlan == 0 || vlan > max)
13046 + return false;
13047 +
13048 + return true;
13049 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
13050 +index f642c1b475c42..1b88bd1c2dbe4 100644
13051 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
13052 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
13053 +@@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[] = {
13054 + PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
13055 + };
13056 +
13057 ++static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
13058 ++ /* Default supported NAT modes */
13059 ++ {
13060 ++ .chip = CHELSIO_T5,
13061 ++ .flags = CXGB4_ACTION_NATMODE_NONE,
13062 ++ .natmode = NAT_MODE_NONE,
13063 ++ },
13064 ++ {
13065 ++ .chip = CHELSIO_T5,
13066 ++ .flags = CXGB4_ACTION_NATMODE_DIP,
13067 ++ .natmode = NAT_MODE_DIP,
13068 ++ },
13069 ++ {
13070 ++ .chip = CHELSIO_T5,
13071 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
13072 ++ .natmode = NAT_MODE_DIP_DP,
13073 ++ },
13074 ++ {
13075 ++ .chip = CHELSIO_T5,
13076 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
13077 ++ CXGB4_ACTION_NATMODE_SIP,
13078 ++ .natmode = NAT_MODE_DIP_DP_SIP,
13079 ++ },
13080 ++ {
13081 ++ .chip = CHELSIO_T5,
13082 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
13083 ++ CXGB4_ACTION_NATMODE_SPORT,
13084 ++ .natmode = NAT_MODE_DIP_DP_SP,
13085 ++ },
13086 ++ {
13087 ++ .chip = CHELSIO_T5,
13088 ++ .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
13089 ++ .natmode = NAT_MODE_SIP_SP,
13090 ++ },
13091 ++ {
13092 ++ .chip = CHELSIO_T5,
13093 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
13094 ++ CXGB4_ACTION_NATMODE_SPORT,
13095 ++ .natmode = NAT_MODE_DIP_SIP_SP,
13096 ++ },
13097 ++ {
13098 ++ .chip = CHELSIO_T5,
13099 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
13100 ++ CXGB4_ACTION_NATMODE_DPORT |
13101 ++ CXGB4_ACTION_NATMODE_SPORT,
13102 ++ .natmode = NAT_MODE_ALL,
13103 ++ },
13104 ++ /* T6+ can ignore L4 ports when they're disabled. */
13105 ++ {
13106 ++ .chip = CHELSIO_T6,
13107 ++ .flags = CXGB4_ACTION_NATMODE_SIP,
13108 ++ .natmode = NAT_MODE_SIP_SP,
13109 ++ },
13110 ++ {
13111 ++ .chip = CHELSIO_T6,
13112 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
13113 ++ .natmode = NAT_MODE_DIP_DP_SP,
13114 ++ },
13115 ++ {
13116 ++ .chip = CHELSIO_T6,
13117 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
13118 ++ .natmode = NAT_MODE_ALL,
13119 ++ },
13120 ++};
13121 ++
13122 ++static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
13123 ++ u8 natmode_flags)
13124 ++{
13125 ++ u8 i = 0;
13126 ++
13127 ++ /* Translate the enabled NAT 4-tuple fields to one of the
13128 ++ * hardware supported NAT mode configurations. This ensures
13129 ++ * that we pick a valid combination, where the disabled fields
13130 ++ * do not get overwritten to 0.
13131 ++ */
13132 ++ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
13133 ++ if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
13134 ++ fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
13135 ++ return;
13136 ++ }
13137 ++ }
13138 ++}
13139 ++
13140 + static struct ch_tc_flower_entry *allocate_flower_entry(void)
13141 + {
13142 + struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
13143 +@@ -289,7 +372,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
13144 + }
13145 +
13146 + static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
13147 +- u32 mask, u32 offset, u8 htype)
13148 ++ u32 mask, u32 offset, u8 htype,
13149 ++ u8 *natmode_flags)
13150 + {
13151 + switch (htype) {
13152 + case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
13153 +@@ -314,60 +398,94 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
13154 + switch (offset) {
13155 + case PEDIT_IP4_SRC:
13156 + offload_pedit(fs, val, mask, IP4_SRC);
13157 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
13158 + break;
13159 + case PEDIT_IP4_DST:
13160 + offload_pedit(fs, val, mask, IP4_DST);
13161 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
13162 + }
13163 +- fs->nat_mode = NAT_MODE_ALL;
13164 + break;
13165 + case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
13166 + switch (offset) {
13167 + case PEDIT_IP6_SRC_31_0:
13168 + offload_pedit(fs, val, mask, IP6_SRC_31_0);
13169 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
13170 + break;
13171 + case PEDIT_IP6_SRC_63_32:
13172 + offload_pedit(fs, val, mask, IP6_SRC_63_32);
13173 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
13174 + break;
13175 + case PEDIT_IP6_SRC_95_64:
13176 + offload_pedit(fs, val, mask, IP6_SRC_95_64);
13177 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
13178 + break;
13179 + case PEDIT_IP6_SRC_127_96:
13180 + offload_pedit(fs, val, mask, IP6_SRC_127_96);
13181 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
13182 + break;
13183 + case PEDIT_IP6_DST_31_0:
13184 + offload_pedit(fs, val, mask, IP6_DST_31_0);
13185 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
13186 + break;
13187 + case PEDIT_IP6_DST_63_32:
13188 + offload_pedit(fs, val, mask, IP6_DST_63_32);
13189 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
13190 + break;
13191 + case PEDIT_IP6_DST_95_64:
13192 + offload_pedit(fs, val, mask, IP6_DST_95_64);
13193 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
13194 + break;
13195 + case PEDIT_IP6_DST_127_96:
13196 + offload_pedit(fs, val, mask, IP6_DST_127_96);
13197 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
13198 + }
13199 +- fs->nat_mode = NAT_MODE_ALL;
13200 + break;
13201 + case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
13202 + switch (offset) {
13203 + case PEDIT_TCP_SPORT_DPORT:
13204 +- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
13205 ++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
13206 + fs->nat_fport = val;
13207 +- else
13208 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
13209 ++ } else {
13210 + fs->nat_lport = val >> 16;
13211 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
13212 ++ }
13213 + }
13214 +- fs->nat_mode = NAT_MODE_ALL;
13215 + break;
13216 + case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
13217 + switch (offset) {
13218 + case PEDIT_UDP_SPORT_DPORT:
13219 +- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
13220 ++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
13221 + fs->nat_fport = val;
13222 +- else
13223 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
13224 ++ } else {
13225 + fs->nat_lport = val >> 16;
13226 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
13227 ++ }
13228 + }
13229 +- fs->nat_mode = NAT_MODE_ALL;
13230 ++ break;
13231 ++ }
13232 ++}
13233 ++
13234 ++static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
13235 ++ struct netlink_ext_ack *extack)
13236 ++{
13237 ++ u8 i = 0;
13238 ++
13239 ++ /* Extract the NAT mode to enable based on what 4-tuple fields
13240 ++ * are enabled to be overwritten. This ensures that the
13241 ++ * disabled fields don't get overwritten to 0.
13242 ++ */
13243 ++ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
13244 ++ const struct cxgb4_natmode_config *c;
13245 ++
13246 ++ c = &cxgb4_natmode_config_array[i];
13247 ++ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
13248 ++ natmode_flags == c->flags)
13249 ++ return 0;
13250 + }
13251 ++ NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
13252 ++ return -EOPNOTSUPP;
13253 + }
13254 +
13255 + void cxgb4_process_flow_actions(struct net_device *in,
13256 +@@ -375,6 +493,7 @@ void cxgb4_process_flow_actions(struct net_device *in,
13257 + struct ch_filter_specification *fs)
13258 + {
13259 + struct flow_action_entry *act;
13260 ++ u8 natmode_flags = 0;
13261 + int i;
13262 +
13263 + flow_action_for_each(i, act, actions) {
13264 +@@ -426,7 +545,8 @@ void cxgb4_process_flow_actions(struct net_device *in,
13265 + val = act->mangle.val;
13266 + offset = act->mangle.offset;
13267 +
13268 +- process_pedit_field(fs, val, mask, offset, htype);
13269 ++ process_pedit_field(fs, val, mask, offset, htype,
13270 ++ &natmode_flags);
13271 + }
13272 + break;
13273 + case FLOW_ACTION_QUEUE:
13274 +@@ -438,6 +558,9 @@ void cxgb4_process_flow_actions(struct net_device *in,
13275 + break;
13276 + }
13277 + }
13278 ++ if (natmode_flags)
13279 ++ cxgb4_action_natmode_tweak(fs, natmode_flags);
13280 ++
13281 + }
13282 +
13283 + static bool valid_l4_mask(u32 mask)
13284 +@@ -454,7 +577,8 @@ static bool valid_l4_mask(u32 mask)
13285 + }
13286 +
13287 + static bool valid_pedit_action(struct net_device *dev,
13288 +- const struct flow_action_entry *act)
13289 ++ const struct flow_action_entry *act,
13290 ++ u8 *natmode_flags)
13291 + {
13292 + u32 mask, offset;
13293 + u8 htype;
13294 +@@ -479,7 +603,10 @@ static bool valid_pedit_action(struct net_device *dev,
13295 + case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
13296 + switch (offset) {
13297 + case PEDIT_IP4_SRC:
13298 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
13299 ++ break;
13300 + case PEDIT_IP4_DST:
13301 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
13302 + break;
13303 + default:
13304 + netdev_err(dev, "%s: Unsupported pedit field\n",
13305 +@@ -493,10 +620,13 @@ static bool valid_pedit_action(struct net_device *dev,
13306 + case PEDIT_IP6_SRC_63_32:
13307 + case PEDIT_IP6_SRC_95_64:
13308 + case PEDIT_IP6_SRC_127_96:
13309 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
13310 ++ break;
13311 + case PEDIT_IP6_DST_31_0:
13312 + case PEDIT_IP6_DST_63_32:
13313 + case PEDIT_IP6_DST_95_64:
13314 + case PEDIT_IP6_DST_127_96:
13315 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
13316 + break;
13317 + default:
13318 + netdev_err(dev, "%s: Unsupported pedit field\n",
13319 +@@ -512,6 +642,10 @@ static bool valid_pedit_action(struct net_device *dev,
13320 + __func__);
13321 + return false;
13322 + }
13323 ++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
13324 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
13325 ++ else
13326 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
13327 + break;
13328 + default:
13329 + netdev_err(dev, "%s: Unsupported pedit field\n",
13330 +@@ -527,6 +661,10 @@ static bool valid_pedit_action(struct net_device *dev,
13331 + __func__);
13332 + return false;
13333 + }
13334 ++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
13335 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
13336 ++ else
13337 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
13338 + break;
13339 + default:
13340 + netdev_err(dev, "%s: Unsupported pedit field\n",
13341 +@@ -546,10 +684,12 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
13342 + struct netlink_ext_ack *extack,
13343 + u8 matchall_filter)
13344 + {
13345 ++ struct adapter *adap = netdev2adap(dev);
13346 + struct flow_action_entry *act;
13347 + bool act_redir = false;
13348 + bool act_pedit = false;
13349 + bool act_vlan = false;
13350 ++ u8 natmode_flags = 0;
13351 + int i;
13352 +
13353 + if (!flow_action_basic_hw_stats_check(actions, extack))
13354 +@@ -563,7 +703,6 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
13355 + break;
13356 + case FLOW_ACTION_MIRRED:
13357 + case FLOW_ACTION_REDIRECT: {
13358 +- struct adapter *adap = netdev2adap(dev);
13359 + struct net_device *n_dev, *target_dev;
13360 + bool found = false;
13361 + unsigned int i;
13362 +@@ -620,7 +759,8 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
13363 + }
13364 + break;
13365 + case FLOW_ACTION_MANGLE: {
13366 +- bool pedit_valid = valid_pedit_action(dev, act);
13367 ++ bool pedit_valid = valid_pedit_action(dev, act,
13368 ++ &natmode_flags);
13369 +
13370 + if (!pedit_valid)
13371 + return -EOPNOTSUPP;
13372 +@@ -642,6 +782,15 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
13373 + return -EINVAL;
13374 + }
13375 +
13376 ++ if (act_pedit) {
13377 ++ int ret;
13378 ++
13379 ++ ret = cxgb4_action_natmode_validate(adap, natmode_flags,
13380 ++ extack);
13381 ++ if (ret)
13382 ++ return ret;
13383 ++ }
13384 ++
13385 + return 0;
13386 + }
13387 +
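The core of the cxgb4 change above is a table-driven translation: the pedit parser now accumulates a bitmask of which 4-tuple fields will be rewritten, and a lookup maps that mask onto a NAT mode the chip can actually express, rejecting everything else at validation time. A compact userspace sketch of the same pattern, with made-up mode values and hypothetical names:

#include <stddef.h>
#include <stdint.h>

enum { F_DIP = 1 << 0, F_SIP = 1 << 1, F_DPORT = 1 << 2, F_SPORT = 1 << 3 };

struct natmode_entry {
	int     min_chip;   /* lowest chip generation supporting it */
	uint8_t flags;      /* exact set of fields to rewrite */
	uint8_t mode;       /* hardware NAT mode (values made up here) */
};

static const struct natmode_entry natmode_table[] = {
	{ 5, F_DIP,                              1 },
	{ 5, F_DIP | F_DPORT,                    2 },
	{ 5, F_SIP | F_SPORT,                    3 },
	{ 5, F_DIP | F_SIP | F_DPORT | F_SPORT,  4 },
	{ 6, F_DIP | F_SIP,                      4 },  /* T6 ignores idle ports */
};

/* Return the NAT mode for this flag combination, or -1 if the chip
 * cannot express it (the driver then fails with -EOPNOTSUPP). */
static int natmode_lookup(int chip, uint8_t flags)
{
	size_t i;

	for (i = 0; i < sizeof(natmode_table) / sizeof(natmode_table[0]); i++)
		if (chip >= natmode_table[i].min_chip &&
		    flags == natmode_table[i].flags)
			return natmode_table[i].mode;
	return -1;
}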
13388 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
13389 +index 6296e1d5a12bb..3a2fa00c8cdee 100644
13390 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
13391 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
13392 +@@ -108,6 +108,21 @@ struct ch_tc_pedit_fields {
13393 + #define PEDIT_TCP_SPORT_DPORT 0x0
13394 + #define PEDIT_UDP_SPORT_DPORT 0x0
13395 +
13396 ++enum cxgb4_action_natmode_flags {
13397 ++ CXGB4_ACTION_NATMODE_NONE = 0,
13398 ++ CXGB4_ACTION_NATMODE_DIP = (1 << 0),
13399 ++ CXGB4_ACTION_NATMODE_SIP = (1 << 1),
13400 ++ CXGB4_ACTION_NATMODE_DPORT = (1 << 2),
13401 ++ CXGB4_ACTION_NATMODE_SPORT = (1 << 3),
13402 ++};
13403 ++
13404 ++/* TC PEDIT action to NATMODE translation entry */
13405 ++struct cxgb4_natmode_config {
13406 ++ enum chip_type chip;
13407 ++ u8 flags;
13408 ++ u8 natmode;
13409 ++};
13410 ++
13411 + void cxgb4_process_flow_actions(struct net_device *in,
13412 + struct flow_action *actions,
13413 + struct ch_filter_specification *fs);
13414 +diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
13415 +index 18f3aeb88f22a..c67a16a48d624 100644
13416 +--- a/drivers/net/ethernet/cisco/enic/enic.h
13417 ++++ b/drivers/net/ethernet/cisco/enic/enic.h
13418 +@@ -169,6 +169,7 @@ struct enic {
13419 + u16 num_vfs;
13420 + #endif
13421 + spinlock_t enic_api_lock;
13422 ++ bool enic_api_busy;
13423 + struct enic_port_profile *pp;
13424 +
13425 + /* work queue cache line section */
13426 +diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
13427 +index b161f24522b87..b028ea2dec2b9 100644
13428 +--- a/drivers/net/ethernet/cisco/enic/enic_api.c
13429 ++++ b/drivers/net/ethernet/cisco/enic/enic_api.c
13430 +@@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
13431 + struct vnic_dev *vdev = enic->vdev;
13432 +
13433 + spin_lock(&enic->enic_api_lock);
13434 ++ while (enic->enic_api_busy) {
13435 ++ spin_unlock(&enic->enic_api_lock);
13436 ++ cpu_relax();
13437 ++ spin_lock(&enic->enic_api_lock);
13438 ++ }
13439 ++
13440 + spin_lock_bh(&enic->devcmd_lock);
13441 +
13442 + vnic_dev_cmd_proxy_by_index_start(vdev, vf);
13443 +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
13444 +index 552d89fdf54a5..988c0a72e6836 100644
13445 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c
13446 ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
13447 +@@ -2106,8 +2106,6 @@ static int enic_dev_wait(struct vnic_dev *vdev,
13448 + int done;
13449 + int err;
13450 +
13451 +- BUG_ON(in_interrupt());
13452 +-
13453 + err = start(vdev, arg);
13454 + if (err)
13455 + return err;
13456 +@@ -2295,6 +2293,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
13457 + rss_hash_bits, rss_base_cpu, rss_enable);
13458 + }
13459 +
13460 ++static void enic_set_api_busy(struct enic *enic, bool busy)
13461 ++{
13462 ++ spin_lock(&enic->enic_api_lock);
13463 ++ enic->enic_api_busy = busy;
13464 ++ spin_unlock(&enic->enic_api_lock);
13465 ++}
13466 ++
13467 + static void enic_reset(struct work_struct *work)
13468 + {
13469 + struct enic *enic = container_of(work, struct enic, reset);
13470 +@@ -2304,7 +2309,9 @@ static void enic_reset(struct work_struct *work)
13471 +
13472 + rtnl_lock();
13473 +
13474 +- spin_lock(&enic->enic_api_lock);
13475 ++ /* Stop any activity from infiniband */
13476 ++ enic_set_api_busy(enic, true);
13477 ++
13478 + enic_stop(enic->netdev);
13479 + enic_dev_soft_reset(enic);
13480 + enic_reset_addr_lists(enic);
13481 +@@ -2312,7 +2319,10 @@ static void enic_reset(struct work_struct *work)
13482 + enic_set_rss_nic_cfg(enic);
13483 + enic_dev_set_ig_vlan_rewrite_mode(enic);
13484 + enic_open(enic->netdev);
13485 +- spin_unlock(&enic->enic_api_lock);
13486 ++
13487 ++ /* Allow infiniband to fiddle with the device again */
13488 ++ enic_set_api_busy(enic, false);
13489 ++
13490 + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
13491 +
13492 + rtnl_unlock();
13493 +@@ -2324,7 +2334,9 @@ static void enic_tx_hang_reset(struct work_struct *work)
13494 +
13495 + rtnl_lock();
13496 +
13497 +- spin_lock(&enic->enic_api_lock);
13498 ++ /* Stop any activity from infiniband */
13499 ++ enic_set_api_busy(enic, true);
13500 ++
13501 + enic_dev_hang_notify(enic);
13502 + enic_stop(enic->netdev);
13503 + enic_dev_hang_reset(enic);
13504 +@@ -2333,7 +2345,10 @@ static void enic_tx_hang_reset(struct work_struct *work)
13505 + enic_set_rss_nic_cfg(enic);
13506 + enic_dev_set_ig_vlan_rewrite_mode(enic);
13507 + enic_open(enic->netdev);
13508 +- spin_unlock(&enic->enic_api_lock);
13509 ++
13510 ++ /* Allow infiniband to fiddle with the device again */
13511 ++ enic_set_api_busy(enic, false);
13512 ++
13513 + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
13514 +
13515 + rtnl_unlock();
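Taken together, the three enic hunks replace "hold enic_api_lock across the whole reset" with a busy flag: the reset path cannot keep a spinlock held while enic_stop()/enic_open() sleep, so it publishes enic_api_busy under the lock and the proxied devcmd path polls until the flag clears. A userspace sketch of that handshake, with pthread mutexes standing in for spinlocks:

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t api_lock = PTHREAD_MUTEX_INITIALIZER;
static bool api_busy;

static void set_api_busy(bool busy)        /* called by the reset path */
{
	pthread_mutex_lock(&api_lock);
	api_busy = busy;
	pthread_mutex_unlock(&api_lock);
}

static void api_devcmd(void)               /* called by the proxy path */
{
	pthread_mutex_lock(&api_lock);
	while (api_busy) {                 /* reset in flight: back off */
		pthread_mutex_unlock(&api_lock);
		sched_yield();             /* cpu_relax() in the driver */
		pthread_mutex_lock(&api_lock);
	}
	/* ... issue the device command under the lock ... */
	pthread_mutex_unlock(&api_lock);
}

The reset path then brackets its sleeping work with set_api_busy(true) and set_api_busy(false) instead of holding the lock throughout.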
13516 +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
13517 +index 87236206366fd..00024dd411471 100644
13518 +--- a/drivers/net/ethernet/faraday/ftgmac100.c
13519 ++++ b/drivers/net/ethernet/faraday/ftgmac100.c
13520 +@@ -1817,6 +1817,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
13521 + priv->rxdes0_edorr_mask = BIT(30);
13522 + priv->txdes0_edotr_mask = BIT(30);
13523 + priv->is_aspeed = true;
13524 ++ /* Disable ast2600 problematic HW arbitration */
13525 ++ if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
13526 ++ iowrite32(FTGMAC100_TM_DEFAULT,
13527 ++ priv->base + FTGMAC100_OFFSET_TM);
13528 ++ }
13529 + } else {
13530 + priv->rxdes0_edorr_mask = BIT(15);
13531 + priv->txdes0_edotr_mask = BIT(15);
13532 +diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
13533 +index e5876a3fda91d..63b3e02fab162 100644
13534 +--- a/drivers/net/ethernet/faraday/ftgmac100.h
13535 ++++ b/drivers/net/ethernet/faraday/ftgmac100.h
13536 +@@ -169,6 +169,14 @@
13537 + #define FTGMAC100_MACCR_FAST_MODE (1 << 19)
13538 + #define FTGMAC100_MACCR_SW_RST (1 << 31)
13539 +
13540 ++/*
13541 ++ * test mode control register
13542 ++ */
13543 ++#define FTGMAC100_TM_RQ_TX_VALID_DIS (1 << 28)
13544 ++#define FTGMAC100_TM_RQ_RR_IDLE_PREV (1 << 27)
13545 ++#define FTGMAC100_TM_DEFAULT \
13546 ++ (FTGMAC100_TM_RQ_TX_VALID_DIS | FTGMAC100_TM_RQ_RR_IDLE_PREV)
13547 ++
13548 + /*
13549 + * PHY control register
13550 + */
13551 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
13552 +index fb37816a74db9..31f60b542feb4 100644
13553 +--- a/drivers/net/ethernet/freescale/fec_main.c
13554 ++++ b/drivers/net/ethernet/freescale/fec_main.c
13555 +@@ -1912,6 +1912,27 @@ out:
13556 + return ret;
13557 + }
13558 +
13559 ++static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
13560 ++{
13561 ++ struct fec_enet_private *fep = netdev_priv(ndev);
13562 ++ struct phy_device *phy_dev = ndev->phydev;
13563 ++
13564 ++ if (phy_dev) {
13565 ++ phy_reset_after_clk_enable(phy_dev);
13566 ++ } else if (fep->phy_node) {
13567 ++ /*
13568 ++ * If the PHY still is not bound to the MAC, but there is
13569 ++ * OF PHY node and a matching PHY device instance already,
13570 ++ * use the OF PHY node to obtain the PHY device instance,
13571 ++ * and then use that PHY device instance when triggering
13572 ++ * the PHY reset.
13573 ++ */
13574 ++ phy_dev = of_phy_find_device(fep->phy_node);
13575 ++ phy_reset_after_clk_enable(phy_dev);
13576 ++ put_device(&phy_dev->mdio.dev);
13577 ++ }
13578 ++}
13579 ++
13580 + static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
13581 + {
13582 + struct fec_enet_private *fep = netdev_priv(ndev);
13583 +@@ -1938,7 +1959,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
13584 + if (ret)
13585 + goto failed_clk_ref;
13586 +
13587 +- phy_reset_after_clk_enable(ndev->phydev);
13588 ++ fec_enet_phy_reset_after_clk_enable(ndev);
13589 + } else {
13590 + clk_disable_unprepare(fep->clk_enet_out);
13591 + if (fep->clk_ptp) {
13592 +@@ -2984,16 +3005,16 @@ fec_enet_open(struct net_device *ndev)
13593 + /* Init MAC prior to mii bus probe */
13594 + fec_restart(ndev);
13595 +
13596 +- /* Probe and connect to PHY when open the interface */
13597 +- ret = fec_enet_mii_probe(ndev);
13598 +- if (ret)
13599 +- goto err_enet_mii_probe;
13600 +-
13601 + /* Call phy_reset_after_clk_enable() again if it failed during
13602 + * phy_reset_after_clk_enable() before because the PHY wasn't probed.
13603 + */
13604 + if (reset_again)
13605 +- phy_reset_after_clk_enable(ndev->phydev);
13606 ++ fec_enet_phy_reset_after_clk_enable(ndev);
13607 ++
13608 ++ /* Probe and connect to PHY when open the interface */
13609 ++ ret = fec_enet_mii_probe(ndev);
13610 ++ if (ret)
13611 ++ goto err_enet_mii_probe;
13612 +
13613 + if (fep->quirks & FEC_QUIRK_ERR006687)
13614 + imx6q_cpuidle_fec_irqs_used();
13615 +diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
13616 +index c5c732601e35e..7ef3369953b6a 100644
13617 +--- a/drivers/net/ethernet/ibm/ibmveth.c
13618 ++++ b/drivers/net/ethernet/ibm/ibmveth.c
13619 +@@ -1349,6 +1349,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
13620 + int offset = ibmveth_rxq_frame_offset(adapter);
13621 + int csum_good = ibmveth_rxq_csum_good(adapter);
13622 + int lrg_pkt = ibmveth_rxq_large_packet(adapter);
13623 ++ __sum16 iph_check = 0;
13624 +
13625 + skb = ibmveth_rxq_get_buffer(adapter);
13626 +
13627 +@@ -1385,16 +1386,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
13628 + skb_put(skb, length);
13629 + skb->protocol = eth_type_trans(skb, netdev);
13630 +
13631 +- if (csum_good) {
13632 +- skb->ip_summed = CHECKSUM_UNNECESSARY;
13633 +- ibmveth_rx_csum_helper(skb, adapter);
13634 ++ /* PHYP without PLSO support places a -1 in the ip
13635 ++ * checksum for large send frames.
13636 ++ */
13637 ++ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
13638 ++ struct iphdr *iph = (struct iphdr *)skb->data;
13639 ++
13640 ++ iph_check = iph->check;
13641 + }
13642 +
13643 +- if (length > netdev->mtu + ETH_HLEN) {
13644 ++ if ((length > netdev->mtu + ETH_HLEN) ||
13645 ++ lrg_pkt || iph_check == 0xffff) {
13646 + ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
13647 + adapter->rx_large_packets++;
13648 + }
13649 +
13650 ++ if (csum_good) {
13651 ++ skb->ip_summed = CHECKSUM_UNNECESSARY;
13652 ++ ibmveth_rx_csum_helper(skb, adapter);
13653 ++ }
13654 ++
13655 + napi_gro_receive(napi, skb); /* send it up */
13656 +
13657 + netdev->stats.rx_packets++;
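Two things change in the ibmveth receive path above: large-send detection now also triggers on the lrg_pkt flag or on an IP header checksum of 0xffff (older PHYP firmware without PLSO support marks large-send frames by storing -1 there), and the MSS fixup runs before the checksum handling, since the checksum helper may rewrite header fields. A stand-alone sketch of the widened detection, with an abbreviated header layout:

#include <stdbool.h>
#include <stdint.h>

struct ip_hdr {                 /* abbreviated IPv4 header */
	uint8_t  ver_ihl, tos;
	uint16_t tot_len, id, frag_off;
	uint8_t  ttl, protocol;
	uint16_t check;
	uint32_t saddr, daddr;
};

#define ETH_HLEN 14

static bool is_large_send(const struct ip_hdr *iph, int frame_len,
			  int mtu, bool lrg_pkt_flag)
{
	/* PHYP without PLSO support places -1 (0xffff) in the IP header
	 * checksum of large-send frames, so treat that as a marker too. */
	return frame_len > mtu + ETH_HLEN || lrg_pkt_flag ||
	       iph->check == 0xffff;
}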
13658 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
13659 +index 1b702a43a5d01..3e0aab04d86fb 100644
13660 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
13661 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
13662 +@@ -4194,8 +4194,13 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
13663 + dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
13664 + goto out;
13665 + }
13666 ++ /* crq->change_mac_addr.mac_addr is the requested one
13667 ++ * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
13668 ++ */
13669 + ether_addr_copy(netdev->dev_addr,
13670 + &crq->change_mac_addr_rsp.mac_addr[0]);
13671 ++ ether_addr_copy(adapter->mac_addr,
13672 ++ &crq->change_mac_addr_rsp.mac_addr[0]);
13673 + out:
13674 + complete(&adapter->fw_done);
13675 + return rc;
13676 +@@ -4605,7 +4610,7 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
13677 + case IBMVNIC_1GBPS:
13678 + adapter->speed = SPEED_1000;
13679 + break;
13680 +- case IBMVNIC_10GBP:
13681 ++ case IBMVNIC_10GBPS:
13682 + adapter->speed = SPEED_10000;
13683 + break;
13684 + case IBMVNIC_25GBPS:
13685 +@@ -4620,6 +4625,9 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
13686 + case IBMVNIC_100GBPS:
13687 + adapter->speed = SPEED_100000;
13688 + break;
13689 ++ case IBMVNIC_200GBPS:
13690 ++ adapter->speed = SPEED_200000;
13691 ++ break;
13692 + default:
13693 + if (netif_carrier_ok(netdev))
13694 + netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
13695 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
13696 +index f8416e1d4cf09..43feb96b0a68a 100644
13697 +--- a/drivers/net/ethernet/ibm/ibmvnic.h
13698 ++++ b/drivers/net/ethernet/ibm/ibmvnic.h
13699 +@@ -373,7 +373,7 @@ struct ibmvnic_phys_parms {
13700 + #define IBMVNIC_10MBPS 0x40000000
13701 + #define IBMVNIC_100MBPS 0x20000000
13702 + #define IBMVNIC_1GBPS 0x10000000
13703 +-#define IBMVNIC_10GBP 0x08000000
13704 ++#define IBMVNIC_10GBPS 0x08000000
13705 + #define IBMVNIC_40GBPS 0x04000000
13706 + #define IBMVNIC_100GBPS 0x02000000
13707 + #define IBMVNIC_25GBPS 0x01000000
13708 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
13709 +index 7980d7265e106..d26f40c0aff01 100644
13710 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
13711 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
13712 +@@ -901,15 +901,13 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
13713 + **/
13714 + s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
13715 + {
13716 ++ s32 (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
13717 ++ s32 (*read)(struct mii_bus *bus, int addr, int regnum);
13718 + struct ixgbe_adapter *adapter = hw->back;
13719 + struct pci_dev *pdev = adapter->pdev;
13720 + struct device *dev = &adapter->netdev->dev;
13721 + struct mii_bus *bus;
13722 +
13723 +- bus = devm_mdiobus_alloc(dev);
13724 +- if (!bus)
13725 +- return -ENOMEM;
13726 +-
13727 + switch (hw->device_id) {
13728 + /* C3000 SoCs */
13729 + case IXGBE_DEV_ID_X550EM_A_KR:
13730 +@@ -922,16 +920,23 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
13731 + case IXGBE_DEV_ID_X550EM_A_1G_T:
13732 + case IXGBE_DEV_ID_X550EM_A_1G_T_L:
13733 + if (!ixgbe_x550em_a_has_mii(hw))
13734 +- return -ENODEV;
13735 +- bus->read = &ixgbe_x550em_a_mii_bus_read;
13736 +- bus->write = &ixgbe_x550em_a_mii_bus_write;
13737 ++ return 0;
13738 ++ read = &ixgbe_x550em_a_mii_bus_read;
13739 ++ write = &ixgbe_x550em_a_mii_bus_write;
13740 + break;
13741 + default:
13742 +- bus->read = &ixgbe_mii_bus_read;
13743 +- bus->write = &ixgbe_mii_bus_write;
13744 ++ read = &ixgbe_mii_bus_read;
13745 ++ write = &ixgbe_mii_bus_write;
13746 + break;
13747 + }
13748 +
13749 ++ bus = devm_mdiobus_alloc(dev);
13750 ++ if (!bus)
13751 ++ return -ENOMEM;
13752 ++
13753 ++ bus->read = read;
13754 ++ bus->write = write;
13755 ++
13756 + /* Use the position of the device in the PCI hierarchy as the id */
13757 + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name,
13758 + pci_name(pdev));
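The ixgbe hunk reorders ixgbe_mii_bus_init() so the read/write callbacks are chosen, and the no-MII case handled, before anything is allocated; "no MII bus present" also becomes a successful no-op (return 0) rather than -ENODEV. A generic sketch of that decide-then-allocate shape, with hypothetical names:

#include <stdlib.h>

struct mii_bus_sketch {
	int (*read)(int reg);
	int (*write)(int reg, int val);
};

static int soc_read(int reg)           { return reg; }
static int soc_write(int reg, int val) { (void)reg; (void)val; return 0; }

static int mii_bus_init(struct mii_bus_sketch **out, int is_soc, int has_mii)
{
	int (*read)(int);
	int (*write)(int, int);
	struct mii_bus_sketch *bus;

	if (is_soc) {
		if (!has_mii)
			return 0;      /* nothing to set up: success, no alloc */
		read = soc_read;
		write = soc_write;
	} else {
		read = soc_read;       /* generic ops in the real driver */
		write = soc_write;
	}

	bus = malloc(sizeof(*bus));    /* allocate only once it is needed */
	if (!bus)
		return -1;
	bus->read = read;
	bus->write = write;
	*out = bus;
	return 0;
}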
13759 +diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
13760 +index 03e034918d147..bf48f0ded9c7d 100644
13761 +--- a/drivers/net/ethernet/korina.c
13762 ++++ b/drivers/net/ethernet/korina.c
13763 +@@ -1113,7 +1113,7 @@ out:
13764 + return rc;
13765 +
13766 + probe_err_register:
13767 +- kfree(lp->td_ring);
13768 ++ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
13769 + probe_err_td_ring:
13770 + iounmap(lp->tx_dma_regs);
13771 + probe_err_dma_tx:
13772 +@@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev)
13773 + iounmap(lp->eth_regs);
13774 + iounmap(lp->rx_dma_regs);
13775 + iounmap(lp->tx_dma_regs);
13776 ++ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
13777 +
13778 + unregister_netdev(bif->dev);
13779 + free_netdev(bif->dev);
13780 +diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
13781 +index 62a820b1eb163..3362b148de23c 100644
13782 +--- a/drivers/net/ethernet/mediatek/Kconfig
13783 ++++ b/drivers/net/ethernet/mediatek/Kconfig
13784 +@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC
13785 + config NET_MEDIATEK_STAR_EMAC
13786 + tristate "MediaTek STAR Ethernet MAC support"
13787 + select PHYLIB
13788 ++ select REGMAP_MMIO
13789 + help
13790 + This driver supports the ethernet MAC IP first used on
13791 + MediaTek MT85** SoCs.
13792 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
13793 +index b50c567ef508e..24006440e86e2 100644
13794 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
13795 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
13796 +@@ -943,6 +943,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
13797 + bool clean_complete = true;
13798 + int done;
13799 +
13800 ++ if (!budget)
13801 ++ return 0;
13802 ++
13803 + if (priv->tx_ring_num[TX_XDP]) {
13804 + xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
13805 + if (xdp_tx_cq->xdp_busy) {
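Both mlx4 hunks handle the budget == 0 case that NAPI allows (netpoll does a TX-only poll): the RX poll must return immediately without touching RX work, and the TX-completion path must not recycle pages into the RX ring from that context. The required shape of such a poll callback, sketched with an opaque type:

struct napi_struct;                    /* opaque here */

/* A NAPI poll callback can be invoked with budget == 0 (netpoll doing
 * TX-only processing); it must then do no RX work and return 0. */
static int en_poll_rx(struct napi_struct *napi, int budget)
{
	int done = 0;

	if (!budget)
		return 0;

	/* ... process at most 'budget' RX completions into 'done' ... */
	return done;
}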
13806 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
13807 +index 9dff7b086c9fb..1f11379ad5b64 100644
13808 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
13809 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
13810 +@@ -350,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
13811 + .dma = tx_info->map0_dma,
13812 + };
13813 +
13814 +- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
13815 ++ if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
13816 + dma_unmap_page(priv->ddev, tx_info->map0_dma,
13817 + PAGE_SIZE, priv->dma_dir);
13818 + put_page(tx_info->page);
13819 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
13820 +index 3dc200bcfabde..69a05da0e3e3d 100644
13821 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
13822 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
13823 +@@ -242,8 +242,8 @@ static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
13824 +
13825 + {
13826 + u32 data_size;
13827 ++ int err = 0;
13828 + u32 offset;
13829 +- int err;
13830 +
13831 + for (offset = 0; offset < value_len; offset += data_size) {
13832 + data_size = value_len - offset;
13833 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
13834 +index 429428bbc903c..b974f3cd10058 100644
13835 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
13836 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
13837 +@@ -228,8 +228,8 @@ static int rx_fs_create(struct mlx5e_priv *priv,
13838 + fs_prot->miss_rule = miss_rule;
13839 +
13840 + out:
13841 +- kfree(flow_group_in);
13842 +- kfree(spec);
13843 ++ kvfree(flow_group_in);
13844 ++ kvfree(spec);
13845 + return err;
13846 + }
13847 +
13848 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
13849 +index 2d55b7c22c034..4e7cfa22b3d2f 100644
13850 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
13851 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
13852 +@@ -550,8 +550,9 @@ static int mlx5_pps_event(struct notifier_block *nb,
13853 + switch (clock->ptp_info.pin_config[pin].func) {
13854 + case PTP_PF_EXTTS:
13855 + ptp_event.index = pin;
13856 +- ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
13857 +- be64_to_cpu(eqe->data.pps.time_stamp));
13858 ++ ptp_event.timestamp =
13859 ++ mlx5_timecounter_cyc2time(clock,
13860 ++ be64_to_cpu(eqe->data.pps.time_stamp));
13861 + if (clock->pps_info.enabled) {
13862 + ptp_event.type = PTP_CLOCK_PPSUSR;
13863 + ptp_event.pps_times.ts_real =
13864 +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
13865 +index 11e6962a18e42..88b4b17ea22c9 100644
13866 +--- a/drivers/net/ethernet/realtek/r8169_main.c
13867 ++++ b/drivers/net/ethernet/realtek/r8169_main.c
13868 +@@ -4686,7 +4686,7 @@ static int rtl8169_close(struct net_device *dev)
13869 +
13870 + phy_disconnect(tp->phydev);
13871 +
13872 +- pci_free_irq(pdev, 0, tp);
13873 ++ free_irq(pci_irq_vector(pdev, 0), tp);
13874 +
13875 + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
13876 + tp->RxPhyAddr);
13877 +@@ -4737,8 +4737,8 @@ static int rtl_open(struct net_device *dev)
13878 +
13879 + rtl_request_firmware(tp);
13880 +
13881 +- retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
13882 +- dev->name);
13883 ++ retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
13884 ++ IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
13885 + if (retval < 0)
13886 + goto err_release_fw_2;
13887 +
13888 +@@ -4755,7 +4755,7 @@ out:
13889 + return retval;
13890 +
13891 + err_free_irq:
13892 +- pci_free_irq(pdev, 0, tp);
13893 ++ free_irq(pci_irq_vector(pdev, 0), tp);
13894 + err_release_fw_2:
13895 + rtl_release_firmware(tp);
13896 + rtl8169_rx_clear(tp);
13897 +diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
13898 +index 19fe86b3b3169..9cf5b8f8fab9a 100644
13899 +--- a/drivers/net/ethernet/sfc/ef100_nic.c
13900 ++++ b/drivers/net/ethernet/sfc/ef100_nic.c
13901 +@@ -428,24 +428,12 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type)
13902 + __clear_bit(reset_type, &efx->reset_pending);
13903 + rc = dev_open(efx->net_dev, NULL);
13904 + } else if (reset_type == RESET_TYPE_ALL) {
13905 +- /* A RESET_TYPE_ALL will cause filters to be removed, so we remove filters
13906 +- * and reprobe after reset to avoid removing filters twice
13907 +- */
13908 +- down_write(&efx->filter_sem);
13909 +- ef100_filter_table_down(efx);
13910 +- up_write(&efx->filter_sem);
13911 + rc = efx_mcdi_reset(efx, reset_type);
13912 + if (rc)
13913 + return rc;
13914 +
13915 + netif_device_attach(efx->net_dev);
13916 +
13917 +- down_write(&efx->filter_sem);
13918 +- rc = ef100_filter_table_up(efx);
13919 +- up_write(&efx->filter_sem);
13920 +- if (rc)
13921 +- return rc;
13922 +-
13923 + rc = dev_open(efx->net_dev, NULL);
13924 + } else {
13925 + rc = 1; /* Leave the device closed */
13926 +diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
13927 +index dfc6032e75f48..ea0f8eb036ae5 100644
13928 +--- a/drivers/net/ethernet/sfc/efx_common.c
13929 ++++ b/drivers/net/ethernet/sfc/efx_common.c
13930 +@@ -1030,6 +1030,7 @@ int efx_init_struct(struct efx_nic *efx,
13931 + efx->num_mac_stats = MC_CMD_MAC_NSTATS;
13932 + BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
13933 + mutex_init(&efx->mac_lock);
13934 ++ init_rwsem(&efx->filter_sem);
13935 + #ifdef CONFIG_RFS_ACCEL
13936 + mutex_init(&efx->rps_mutex);
13937 + spin_lock_init(&efx->rps_hash_lock);
13938 +diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
13939 +index 5e29284c89c98..19cf7cac1e6e9 100644
13940 +--- a/drivers/net/ethernet/sfc/rx_common.c
13941 ++++ b/drivers/net/ethernet/sfc/rx_common.c
13942 +@@ -797,7 +797,6 @@ int efx_probe_filters(struct efx_nic *efx)
13943 + {
13944 + int rc;
13945 +
13946 +- init_rwsem(&efx->filter_sem);
13947 + mutex_lock(&efx->mac_lock);
13948 + down_write(&efx->filter_sem);
13949 + rc = efx->type->filter_table_probe(efx);
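The three sfc hunks move one-time state out of paths that rerun on reset: ef100_reset() no longer tears down and re-probes the filter table itself, and init_rwsem(&efx->filter_sem) moves from efx_probe_filters(), which can be called again after a reset, into efx_init_struct(), where it runs exactly once. A sketch of that placement rule, with a pthread rwlock standing in for the kernel rwsem:

#include <pthread.h>

struct efx_nic_sketch {
	pthread_rwlock_t filter_sem;
	/* ... */
};

static void init_struct(struct efx_nic_sketch *efx)
{
	/* Locks are initialized once, where the structure is created ... */
	pthread_rwlock_init(&efx->filter_sem, NULL);
}

static void probe_filters(struct efx_nic_sketch *efx)
{
	/* ... never in a probe path that may run again after a reset,
	 * where re-initializing a live lock would corrupt its state. */
	pthread_rwlock_wrlock(&efx->filter_sem);
	/* ... probe the filter table ... */
	pthread_rwlock_unlock(&efx->filter_sem);
}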
13950 +diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
13951 +index 806eb651cea30..1503cc9ec6e2d 100644
13952 +--- a/drivers/net/ethernet/socionext/netsec.c
13953 ++++ b/drivers/net/ethernet/socionext/netsec.c
13954 +@@ -6,6 +6,7 @@
13955 + #include <linux/pm_runtime.h>
13956 + #include <linux/acpi.h>
13957 + #include <linux/of_mdio.h>
13958 ++#include <linux/of_net.h>
13959 + #include <linux/etherdevice.h>
13960 + #include <linux/interrupt.h>
13961 + #include <linux/io.h>
13962 +@@ -1833,6 +1834,14 @@ static const struct net_device_ops netsec_netdev_ops = {
13963 + static int netsec_of_probe(struct platform_device *pdev,
13964 + struct netsec_priv *priv, u32 *phy_addr)
13965 + {
13966 ++ int err;
13967 ++
13968 ++ err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface);
13969 ++ if (err) {
13970 ++ dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
13971 ++ return err;
13972 ++ }
13973 ++
13974 + priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
13975 + if (!priv->phy_np) {
13976 + dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
13977 +@@ -1859,6 +1868,14 @@ static int netsec_acpi_probe(struct platform_device *pdev,
13978 + if (!IS_ENABLED(CONFIG_ACPI))
13979 + return -ENODEV;
13980 +
13981 ++ /* ACPI systems are assumed to configure the PHY in firmware, so
13982 ++ * there is really no need to discover the PHY mode from the DSDT.
13983 ++ * Since firmware is known to exist in the field that configures the
13984 ++ * PHY correctly but passes the wrong mode string in the phy-mode
13985 ++ * device property, we have no choice but to ignore it.
13986 ++ */
13987 ++ priv->phy_interface = PHY_INTERFACE_MODE_NA;
13988 ++
13989 + ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
13990 + if (ret) {
13991 + dev_err(&pdev->dev,
13992 +@@ -1995,13 +2012,6 @@ static int netsec_probe(struct platform_device *pdev)
13993 + priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
13994 + NETIF_MSG_LINK | NETIF_MSG_PROBE;
13995 +
13996 +- priv->phy_interface = device_get_phy_mode(&pdev->dev);
13997 +- if ((int)priv->phy_interface < 0) {
13998 +- dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
13999 +- ret = -ENODEV;
14000 +- goto free_ndev;
14001 +- }
14002 +-
14003 + priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
14004 + resource_size(mmio_res));
14005 + if (!priv->ioaddr) {
14006 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
14007 +index b56b13d64ab48..122a0697229af 100644
14008 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
14009 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
14010 +@@ -176,32 +176,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
14011 + }
14012 + }
14013 +
14014 +-/**
14015 +- * stmmac_stop_all_queues - Stop all queues
14016 +- * @priv: driver private structure
14017 +- */
14018 +-static void stmmac_stop_all_queues(struct stmmac_priv *priv)
14019 +-{
14020 +- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
14021 +- u32 queue;
14022 +-
14023 +- for (queue = 0; queue < tx_queues_cnt; queue++)
14024 +- netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
14025 +-}
14026 +-
14027 +-/**
14028 +- * stmmac_start_all_queues - Start all queues
14029 +- * @priv: driver private structure
14030 +- */
14031 +-static void stmmac_start_all_queues(struct stmmac_priv *priv)
14032 +-{
14033 +- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
14034 +- u32 queue;
14035 +-
14036 +- for (queue = 0; queue < tx_queues_cnt; queue++)
14037 +- netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
14038 +-}
14039 +-
14040 + static void stmmac_service_event_schedule(struct stmmac_priv *priv)
14041 + {
14042 + if (!test_bit(STMMAC_DOWN, &priv->state) &&
14043 +@@ -2740,6 +2714,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
14044 + stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
14045 + }
14046 +
14047 ++ /* Configure real RX and TX queues */
14048 ++ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
14049 ++ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
14050 ++
14051 + /* Start the ball rolling... */
14052 + stmmac_start_all_dma(priv);
14053 +
14054 +@@ -2868,7 +2846,7 @@ static int stmmac_open(struct net_device *dev)
14055 + }
14056 +
14057 + stmmac_enable_all_queues(priv);
14058 +- stmmac_start_all_queues(priv);
14059 ++ netif_tx_start_all_queues(priv->dev);
14060 +
14061 + return 0;
14062 +
14063 +@@ -2911,8 +2889,6 @@ static int stmmac_release(struct net_device *dev)
14064 + phylink_stop(priv->phylink);
14065 + phylink_disconnect_phy(priv->phylink);
14066 +
14067 +- stmmac_stop_all_queues(priv);
14068 +-
14069 + stmmac_disable_all_queues(priv);
14070 +
14071 + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
14072 +@@ -4827,10 +4803,6 @@ int stmmac_dvr_probe(struct device *device,
14073 +
14074 + stmmac_check_ether_addr(priv);
14075 +
14076 +- /* Configure real RX and TX queues */
14077 +- netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
14078 +- netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
14079 +-
14080 + ndev->netdev_ops = &stmmac_netdev_ops;
14081 +
14082 + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
14083 +@@ -5086,7 +5058,6 @@ int stmmac_suspend(struct device *dev)
14084 + mutex_lock(&priv->lock);
14085 +
14086 + netif_device_detach(ndev);
14087 +- stmmac_stop_all_queues(priv);
14088 +
14089 + stmmac_disable_all_queues(priv);
14090 +
14091 +@@ -5213,8 +5184,6 @@ int stmmac_resume(struct device *dev)
14092 +
14093 + stmmac_enable_all_queues(priv);
14094 +
14095 +- stmmac_start_all_queues(priv);
14096 +-
14097 + mutex_unlock(&priv->lock);
14098 +
14099 + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
14100 +diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
14101 +index b7efd7c95e9c8..ed60fa5bcdaca 100644
14102 +--- a/drivers/net/ipa/ipa_endpoint.c
14103 ++++ b/drivers/net/ipa/ipa_endpoint.c
14104 +@@ -1471,6 +1471,9 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
14105 +
14106 + void ipa_endpoint_suspend(struct ipa *ipa)
14107 + {
14108 ++ if (!ipa->setup_complete)
14109 ++ return;
14110 ++
14111 + if (ipa->modem_netdev)
14112 + ipa_modem_suspend(ipa->modem_netdev);
14113 +
14114 +@@ -1482,6 +1485,9 @@ void ipa_endpoint_suspend(struct ipa *ipa)
14115 +
14116 + void ipa_endpoint_resume(struct ipa *ipa)
14117 + {
14118 ++ if (!ipa->setup_complete)
14119 ++ return;
14120 ++
14121 + ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
14122 + ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
14123 +
14124 +diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
14125 +index 9b00708676cf7..1bdd3df0867a5 100644
14126 +--- a/drivers/net/wan/hdlc.c
14127 ++++ b/drivers/net/wan/hdlc.c
14128 +@@ -46,7 +46,15 @@ static struct hdlc_proto *first_proto;
14129 + static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
14130 + struct packet_type *p, struct net_device *orig_dev)
14131 + {
14132 +- struct hdlc_device *hdlc = dev_to_hdlc(dev);
14133 ++ struct hdlc_device *hdlc;
14134 ++
14135 ++ /* First make sure "dev" is an HDLC device */
14136 ++ if (!(dev->priv_flags & IFF_WAN_HDLC)) {
14137 ++ kfree_skb(skb);
14138 ++ return NET_RX_SUCCESS;
14139 ++ }
14140 ++
14141 ++ hdlc = dev_to_hdlc(dev);
14142 +
14143 + if (!net_eq(dev_net(dev), &init_net)) {
14144 + kfree_skb(skb);
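hdlc_rcv() is registered for the ETH_P_HDLC packet type, so it can be handed frames from any interface; dev_to_hdlc() blindly interprets device-private data, which misbehaves for non-HDLC devices. The fix checks a type flag before the downcast. A small stand-alone sketch of the guard, with an illustrative flag value and hypothetical type names:

#include <stdbool.h>
#include <stddef.h>

#define IFF_WAN_HDLC_SKETCH 0x200      /* illustrative flag value */

struct net_device_sketch {
	unsigned int priv_flags;
	void *priv;
};

struct hdlc_device_sketch {
	int proto;
};

/* Only trust the private area once the flag proves the device type;
 * a NULL return tells the caller to drop the packet. */
static struct hdlc_device_sketch *
dev_to_hdlc_checked(struct net_device_sketch *dev)
{
	if (!(dev->priv_flags & IFF_WAN_HDLC_SKETCH))
		return NULL;
	return (struct hdlc_device_sketch *)dev->priv;
}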
14145 +diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
14146 +index 08e0a46501dec..c70a518b8b478 100644
14147 +--- a/drivers/net/wan/hdlc_raw_eth.c
14148 ++++ b/drivers/net/wan/hdlc_raw_eth.c
14149 +@@ -99,6 +99,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
14150 + old_qlen = dev->tx_queue_len;
14151 + ether_setup(dev);
14152 + dev->tx_queue_len = old_qlen;
14153 ++ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
14154 + eth_hw_addr_random(dev);
14155 + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
14156 + netif_dormant_off(dev);
14157 +diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
14158 +index 294fbc1e89ab8..e6e0284e47837 100644
14159 +--- a/drivers/net/wireless/ath/ath10k/ce.c
14160 ++++ b/drivers/net/wireless/ath/ath10k/ce.c
14161 +@@ -1555,7 +1555,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
14162 + ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
14163 + if (ret) {
14164 + dma_free_coherent(ar->dev,
14165 +- (nentries * sizeof(struct ce_desc_64) +
14166 ++ (nentries * sizeof(struct ce_desc) +
14167 + CE_DESC_RING_ALIGN),
14168 + src_ring->base_addr_owner_space_unaligned,
14169 + base_addr);
14170 +diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
14171 +index d787cbead56ab..215ade6faf328 100644
14172 +--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
14173 ++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
14174 +@@ -142,6 +142,14 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
14175 + BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
14176 +
14177 + idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
14178 ++
14179 ++ if (idx < 0 || idx >= htt->rx_ring.size) {
14180 ++ ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
14181 ++ idx &= htt->rx_ring.size_mask;
14182 ++ ret = -ENOMEM;
14183 ++ goto fail;
14184 ++ }
14185 ++
14186 + while (num > 0) {
14187 + skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
14188 + if (!skb) {
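The index in the ath10k hunk is read from memory the firmware writes over DMA, so it is untrusted input; the patch range-checks it before it can index the ring. A runnable stand-alone version of the check, with illustrative constants:

#include <stdio.h>

#define RING_SIZE 512                  /* power of two, illustrative */
#define RING_MASK (RING_SIZE - 1)

static int ring_fill(const volatile int *alloc_idx /* firmware-written */)
{
	int idx = *alloc_idx;

	if (idx < 0 || idx >= RING_SIZE) {
		fprintf(stderr, "rx ring index invalid, firmware bug?\n");
		/* clamp so later accounting stays in range, then fail */
		idx &= RING_MASK;
		return -1;
	}
	/* ... refill buffers starting at idx ... */
	return 0;
}

int main(void)
{
	int bogus = RING_SIZE + 7;
	return ring_fill(&bogus) == -1 ? 0 : 1;   /* must be rejected */
}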
14189 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
14190 +index 3c0c33a9f30cb..2177e9d92bdff 100644
14191 +--- a/drivers/net/wireless/ath/ath10k/mac.c
14192 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
14193 +@@ -7278,7 +7278,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
14194 + struct ieee80211_channel *channel)
14195 + {
14196 + int ret;
14197 +- enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
14198 ++ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
14199 +
14200 + lockdep_assert_held(&ar->conf_mutex);
14201 +
14202 +diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
14203 +index 30092841ac464..a0314c1c84653 100644
14204 +--- a/drivers/net/wireless/ath/ath11k/ahb.c
14205 ++++ b/drivers/net/wireless/ath/ath11k/ahb.c
14206 +@@ -981,12 +981,16 @@ err_core_free:
14207 + static int ath11k_ahb_remove(struct platform_device *pdev)
14208 + {
14209 + struct ath11k_base *ab = platform_get_drvdata(pdev);
14210 ++ unsigned long left;
14211 +
14212 + reinit_completion(&ab->driver_recovery);
14213 +
14214 +- if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags))
14215 +- wait_for_completion_timeout(&ab->driver_recovery,
14216 +- ATH11K_AHB_RECOVERY_TIMEOUT);
14217 ++ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
14218 ++ left = wait_for_completion_timeout(&ab->driver_recovery,
14219 ++ ATH11K_AHB_RECOVERY_TIMEOUT);
14220 ++ if (!left)
14221 ++ ath11k_warn(ab, "failed to receive recovery response completion\n");
14222 ++ }
14223 +
14224 + set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
14225 + cancel_work_sync(&ab->restart_work);
14226 +diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
14227 +index 94ae2b9ea6635..4674f0aca8e9b 100644
14228 +--- a/drivers/net/wireless/ath/ath11k/mac.c
14229 ++++ b/drivers/net/wireless/ath/ath11k/mac.c
14230 +@@ -6006,7 +6006,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
14231 + ret = ath11k_mac_setup_channels_rates(ar,
14232 + cap->supported_bands);
14233 + if (ret)
14234 +- goto err_free;
14235 ++ goto err;
14236 +
14237 + ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
14238 + ath11k_mac_setup_he_cap(ar, cap);
14239 +@@ -6120,7 +6120,9 @@ static int __ath11k_mac_register(struct ath11k *ar)
14240 + err_free:
14241 + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
14242 + kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
14243 ++ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
14244 +
14245 ++err:
14246 + SET_IEEE80211_DEV(ar->hw, NULL);
14247 + return ret;
14248 + }
14249 +diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
14250 +index c00a99ad8dbc1..497cff7e64cc5 100644
14251 +--- a/drivers/net/wireless/ath/ath11k/qmi.c
14252 ++++ b/drivers/net/wireless/ath/ath11k/qmi.c
14253 +@@ -2419,6 +2419,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab)
14254 + ATH11K_QMI_WLFW_SERVICE_INS_ID_V01);
14255 + if (ret < 0) {
14256 + ath11k_warn(ab, "failed to add qmi lookup\n");
14257 ++ destroy_workqueue(ab->qmi.event_wq);
14258 + return ret;
14259 + }
14260 +
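The qmi change is a plain error-unwind fix: the workqueue created earlier in ath11k_qmi_init_service() leaked when the subsequent lookup registration failed. The general shape, sketched with stub resources so it links and runs:

#include <stdlib.h>

struct wq { int dummy; };

static struct wq *wq_create(void)      { return malloc(sizeof(struct wq)); }
static void wq_destroy(struct wq *wq)  { free(wq); }
static int register_lookup(void)       { return -1; /* pretend it fails */ }

static int init_service(struct wq **out)
{
	struct wq *wq = wq_create();
	int ret;

	if (!wq)
		return -1;

	ret = register_lookup();
	if (ret < 0) {
		wq_destroy(wq);        /* the fix: undo the earlier step */
		return ret;
	}

	*out = wq;
	return 0;
}

int main(void)
{
	struct wq *wq = NULL;
	return init_service(&wq) < 0 && !wq ? 0 : 1;
}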
14261 +diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
14262 +index 1c5d65bb411f7..6d6a7e34645f2 100644
14263 +--- a/drivers/net/wireless/ath/ath11k/spectral.c
14264 ++++ b/drivers/net/wireless/ath/ath11k/spectral.c
14265 +@@ -773,6 +773,8 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
14266 + i += sizeof(*tlv) + tlv_len;
14267 + }
14268 +
14269 ++ ret = 0;
14270 ++
14271 + err:
14272 + kfree(fft_sample);
14273 + unlock:
14274 +diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
14275 +index 5e7ea838a9218..814131a0680a4 100644
14276 +--- a/drivers/net/wireless/ath/ath6kl/main.c
14277 ++++ b/drivers/net/wireless/ath/ath6kl/main.c
14278 +@@ -430,6 +430,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
14279 +
14280 + ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
14281 +
14282 ++ if (aid < 1 || aid > AP_MAX_NUM_STA)
14283 ++ return;
14284 ++
14285 + if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
14286 + struct ieee80211_mgmt *mgmt =
14287 + (struct ieee80211_mgmt *) assoc_info;
14288 +diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
14289 +index 6885d2ded53a8..3d5db84d64650 100644
14290 +--- a/drivers/net/wireless/ath/ath6kl/wmi.c
14291 ++++ b/drivers/net/wireless/ath/ath6kl/wmi.c
14292 +@@ -2645,6 +2645,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
14293 + return -EINVAL;
14294 + }
14295 +
14296 ++ if (tsid >= 16) {
14297 ++ ath6kl_err("invalid tsid: %d\n", tsid);
14298 ++ return -EINVAL;
14299 ++ }
14300 ++
14301 + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
14302 + if (!skb)
14303 + return -ENOMEM;
14304 +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
14305 +index 3f563e02d17da..2ed98aaed6fb5 100644
14306 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
14307 ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
14308 +@@ -449,10 +449,19 @@ static void hif_usb_stop(void *hif_handle)
14309 + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
14310 +
14311 + /* The pending URBs have to be canceled. */
14312 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
14313 + list_for_each_entry_safe(tx_buf, tx_buf_tmp,
14314 + &hif_dev->tx.tx_pending, list) {
14315 ++ usb_get_urb(tx_buf->urb);
14316 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
14317 + usb_kill_urb(tx_buf->urb);
14318 ++ list_del(&tx_buf->list);
14319 ++ usb_free_urb(tx_buf->urb);
14320 ++ kfree(tx_buf->buf);
14321 ++ kfree(tx_buf);
14322 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
14323 + }
14324 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
14325 +
14326 + usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
14327 + }
14328 +@@ -762,27 +771,37 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
14329 + struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
14330 + unsigned long flags;
14331 +
14332 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
14333 + list_for_each_entry_safe(tx_buf, tx_buf_tmp,
14334 + &hif_dev->tx.tx_buf, list) {
14335 ++ usb_get_urb(tx_buf->urb);
14336 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
14337 + usb_kill_urb(tx_buf->urb);
14338 + list_del(&tx_buf->list);
14339 + usb_free_urb(tx_buf->urb);
14340 + kfree(tx_buf->buf);
14341 + kfree(tx_buf);
14342 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
14343 + }
14344 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
14345 +
14346 + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
14347 + hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
14348 + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
14349 +
14350 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
14351 + list_for_each_entry_safe(tx_buf, tx_buf_tmp,
14352 + &hif_dev->tx.tx_pending, list) {
14353 ++ usb_get_urb(tx_buf->urb);
14354 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
14355 + usb_kill_urb(tx_buf->urb);
14356 + list_del(&tx_buf->list);
14357 + usb_free_urb(tx_buf->urb);
14358 + kfree(tx_buf->buf);
14359 + kfree(tx_buf);
14360 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
14361 + }
14362 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
14363 +
14364 + usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
14365 + }
14366 +diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
14367 +index d2e062eaf5614..510e61e97dbcb 100644
14368 +--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
14369 ++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
14370 +@@ -339,6 +339,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
14371 +
14372 + if (skb) {
14373 + htc_hdr = (struct htc_frame_hdr *) skb->data;
14374 ++ if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint))
14375 ++ goto ret;
14376 + endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
14377 + skb_pull(skb, sizeof(struct htc_frame_hdr));
14378 +
14379 +diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
14380 +index 702b689c06df3..f3ea629764fa8 100644
14381 +--- a/drivers/net/wireless/ath/wcn36xx/main.c
14382 ++++ b/drivers/net/wireless/ath/wcn36xx/main.c
14383 +@@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
14384 + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
14385 + .mcs = {
14386 + .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
14387 +- .rx_highest = cpu_to_le16(72),
14388 ++ .rx_highest = cpu_to_le16(150),
14389 + .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
14390 + }
14391 + }
14392 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
14393 +index f89010a81ffbe..aa9ced3c86fbd 100644
14394 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
14395 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
14396 +@@ -486,7 +486,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
14397 + ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
14398 +
14399 + if (ret || !(*ifp) || !(*ifp)->ndev) {
14400 +- if (ret != -ENODATA && *ifp)
14401 ++ if (ret != -ENODATA && *ifp && (*ifp)->ndev)
14402 + (*ifp)->ndev->stats.rx_errors++;
14403 + brcmu_pkt_buf_free_skb(skb);
14404 + return -ENODATA;
14405 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
14406 +index f1a20db8daab9..bfddb851e386e 100644
14407 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
14408 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
14409 +@@ -1620,6 +1620,8 @@ fail:
14410 + BRCMF_TX_IOCTL_MAX_MSG_SIZE,
14411 + msgbuf->ioctbuf,
14412 + msgbuf->ioctbuf_handle);
14413 ++ if (msgbuf->txflow_wq)
14414 ++ destroy_workqueue(msgbuf->txflow_wq);
14415 + kfree(msgbuf);
14416 + }
14417 + return -ENOMEM;
14418 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
14419 +index 7ef36234a25dc..66797dc5e90d5 100644
14420 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
14421 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
14422 +@@ -5065,8 +5065,10 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
14423 + pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft;
14424 + pi->pi_fptr.detach = wlc_phy_detach_lcnphy;
14425 +
14426 +- if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
14427 ++ if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) {
14428 ++ kfree(pi->u.pi_lcnphy);
14429 + return false;
14430 ++ }
14431 +
14432 + if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
14433 + if (pi_lcn->lcnphy_tempsense_option == 3) {
14434 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
14435 +index 9ce7207d9ec5b..83caaa3c60a95 100644
14436 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
14437 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
14438 +@@ -947,9 +947,8 @@ static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
14439 + struct iwl_rx_packet *pkt = tp_data->fw_pkt;
14440 + struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
14441 +
14442 +- if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
14443 +- (pkt->hdr.cmd == wanted_hdr->cmd &&
14444 +- pkt->hdr.group_id == wanted_hdr->group_id))) {
14445 ++ if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
14446 ++ pkt->hdr.group_id == wanted_hdr->group_id)) {
14447 + struct iwl_rx_packet *fw_pkt =
14448 + kmemdup(pkt,
14449 + sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
14450 +@@ -1012,6 +1011,9 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
14451 + enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
14452 + int ret, i;
14453 +
14454 ++ if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
14455 ++ return;
14456 ++
14457 + IWL_DEBUG_FW(fwrt,
14458 + "WRT: Generating active triggers list, domain 0x%x\n",
14459 + fwrt->trans->dbg.domains_bitmap);
14460 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
14461 +index 9374c85c5caf9..c918c0887ed01 100644
14462 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
14463 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
14464 +@@ -3693,9 +3693,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
14465 + tail->apply_time_max_delay = cpu_to_le32(delay);
14466 +
14467 + IWL_DEBUG_TE(mvm,
14468 +- "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
14469 +- channel->hw_value, req_dur, duration, delay,
14470 +- dtim_interval);
14471 ++ "ROC: Requesting to remain on channel %u for %ums\n",
14472 ++ channel->hw_value, req_dur);
14473 ++ IWL_DEBUG_TE(mvm,
14474 ++ "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
14475 ++ duration, delay, dtim_interval);
14476 ++
14477 + /* Set the node address */
14478 + memcpy(tail->node_addr, vif->addr, ETH_ALEN);
14479 +
14480 +diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
14481 +index ff932627a46c1..2fb69a590bd8e 100644
14482 +--- a/drivers/net/wireless/marvell/mwifiex/scan.c
14483 ++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
14484 +@@ -1889,7 +1889,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
14485 + chan, CFG80211_BSS_FTYPE_UNKNOWN,
14486 + bssid, timestamp,
14487 + cap_info_bitmap, beacon_period,
14488 +- ie_buf, ie_len, rssi, GFP_KERNEL);
14489 ++ ie_buf, ie_len, rssi, GFP_ATOMIC);
14490 + if (bss) {
14491 + bss_priv = (struct mwifiex_bss_priv *)bss->priv;
14492 + bss_priv->band = band;
14493 +diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
14494 +index a042965962a2d..1b6bee5465288 100644
14495 +--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
14496 ++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
14497 +@@ -1976,6 +1976,8 @@ error:
14498 + kfree(card->mpa_rx.buf);
14499 + card->mpa_tx.buf_size = 0;
14500 + card->mpa_rx.buf_size = 0;
14501 ++ card->mpa_tx.buf = NULL;
14502 ++ card->mpa_rx.buf = NULL;
14503 + }
14504 +
14505 + return ret;
14506 +diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
14507 +index 6f3cfde4654cc..426e39d4ccf0f 100644
14508 +--- a/drivers/net/wireless/marvell/mwifiex/usb.c
14509 ++++ b/drivers/net/wireless/marvell/mwifiex/usb.c
14510 +@@ -1353,7 +1353,8 @@ static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter)
14511 + skb_dequeue(&port->tx_aggr.aggr_list)))
14512 + mwifiex_write_data_complete(adapter, skb_tmp,
14513 + 0, -1);
14514 +- del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
14515 ++ if (port->tx_aggr.timer_cnxt.hold_timer.function)
14516 ++ del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
14517 + port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
14518 + port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
14519 + }
14520 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
14521 +index 88931658a9fbb..937cb71bed642 100644
14522 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
14523 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
14524 +@@ -165,15 +165,14 @@ mt7615_reset_test_set(void *data, u64 val)
14525 + if (!mt7615_wait_for_mcu_init(dev))
14526 + return 0;
14527 +
14528 +- mt7615_mutex_acquire(dev);
14529 +-
14530 + skb = alloc_skb(1, GFP_KERNEL);
14531 + if (!skb)
14532 + return -ENOMEM;
14533 +
14534 + skb_put(skb, 1);
14535 +- mt76_tx_queue_skb_raw(dev, 0, skb, 0);
14536 +
14537 ++ mt7615_mutex_acquire(dev);
14538 ++ mt76_tx_queue_skb_raw(dev, 0, skb, 0);
14539 + mt7615_mutex_release(dev);
14540 +
14541 + return 0;
14542 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
14543 +index 3dd8dd28690ed..019031d436de8 100644
14544 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
14545 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
14546 +@@ -1845,7 +1845,7 @@ void mt7615_pm_wake_work(struct work_struct *work)
14547 + pm.wake_work);
14548 + mphy = dev->phy.mt76;
14549 +
14550 +- if (mt7615_driver_own(dev)) {
14551 ++ if (mt7615_mcu_set_drv_ctrl(dev)) {
14552 + dev_err(mphy->dev->dev, "failed to wake device\n");
14553 + goto out;
14554 + }
14555 +@@ -1853,12 +1853,13 @@ void mt7615_pm_wake_work(struct work_struct *work)
14556 + spin_lock_bh(&dev->pm.txq_lock);
14557 + for (i = 0; i < IEEE80211_NUM_ACS; i++) {
14558 + struct mt7615_sta *msta = dev->pm.tx_q[i].msta;
14559 +- struct mt76_wcid *wcid = msta ? &msta->wcid : NULL;
14560 + struct ieee80211_sta *sta = NULL;
14561 ++ struct mt76_wcid *wcid;
14562 +
14563 + if (!dev->pm.tx_q[i].skb)
14564 + continue;
14565 +
14566 ++ wcid = msta ? &msta->wcid : &dev->mt76.global_wcid;
14567 + if (msta && wcid->sta)
14568 + sta = container_of((void *)msta, struct ieee80211_sta,
14569 + drv_priv);
14570 +@@ -1943,7 +1944,7 @@ void mt7615_pm_power_save_work(struct work_struct *work)
14571 + goto out;
14572 + }
14573 +
14574 +- if (!mt7615_firmware_own(dev))
14575 ++ if (!mt7615_mcu_set_fw_ctrl(dev))
14576 + return;
14577 + out:
14578 + queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
14579 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
14580 +index 2d0b1f49fdbcf..bafe2bdeb5eb4 100644
14581 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
14582 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
14583 +@@ -361,7 +361,10 @@ mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd,
14584 + wd->key.keylen = key->keylen;
14585 + wd->key.cmd = cmd;
14586 +
14587 ++ spin_lock_bh(&dev->mt76.lock);
14588 + list_add_tail(&wd->node, &dev->wd_head);
14589 ++ spin_unlock_bh(&dev->mt76.lock);
14590 ++
14591 + queue_work(dev->mt76.wq, &dev->wtbl_work);
14592 +
14593 + return 0;
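[Editor's note: the main.c hunk above puts list_add_tail() under dev->mt76.lock because the wtbl work item walks and unlinks the same list concurrently. The locking idiom reduced to essentials, hypothetical names.]

/*
 * Editorial sketch: every writer and the consuming worker must take
 * the same lock around the list, or concurrent list_add_tail() and
 * list_del() can corrupt the links.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(demo_head);
static DEFINE_SPINLOCK(demo_list_lock);

struct demo_item {
        struct list_head node;
};

static void demo_enqueue(struct demo_item *item)
{
        spin_lock_bh(&demo_list_lock);
        list_add_tail(&item->node, &demo_head);
        spin_unlock_bh(&demo_list_lock);
}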
14594 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
14595 +index bd316dbd9041d..f42a69ee5635a 100644
14596 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
14597 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
14598 +@@ -324,6 +324,97 @@ int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
14599 + sizeof(req), false);
14600 + }
14601 +
14602 ++static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
14603 ++{
14604 ++ if (!is_mt7622(&dev->mt76))
14605 ++ return;
14606 ++
14607 ++ regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
14608 ++ MT_INFRACFG_MISC_AP2CONN_WAKE,
14609 ++ !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
14610 ++}
14611 ++
14612 ++static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
14613 ++{
14614 ++ struct mt76_phy *mphy = &dev->mt76.phy;
14615 ++ struct mt76_dev *mdev = &dev->mt76;
14616 ++ u32 addr;
14617 ++ int err;
14618 ++
14619 ++ addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
14620 ++ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
14621 ++
14622 ++ mt7622_trigger_hif_int(dev, true);
14623 ++
14624 ++ addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
14625 ++ err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
14626 ++
14627 ++ mt7622_trigger_hif_int(dev, false);
14628 ++
14629 ++ if (err) {
14630 ++ dev_err(mdev->dev, "driver own failed\n");
14631 ++ return -ETIMEDOUT;
14632 ++ }
14633 ++
14634 ++ clear_bit(MT76_STATE_PM, &mphy->state);
14635 ++
14636 ++ return 0;
14637 ++}
14638 ++
14639 ++static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev)
14640 ++{
14641 ++ struct mt76_phy *mphy = &dev->mt76.phy;
14642 ++ int i;
14643 ++
14644 ++ if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
14645 ++ goto out;
14646 ++
14647 ++ for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
14648 ++ mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
14649 ++ if (mt76_poll_msec(dev, MT_CONN_HIF_ON_LPCTL,
14650 ++ MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
14651 ++ break;
14652 ++ }
14653 ++
14654 ++ if (i == MT7615_DRV_OWN_RETRY_COUNT) {
14655 ++ dev_err(dev->mt76.dev, "driver own failed\n");
14656 ++ set_bit(MT76_STATE_PM, &mphy->state);
14657 ++ return -EIO;
14658 ++ }
14659 ++
14660 ++out:
14661 ++ dev->pm.last_activity = jiffies;
14662 ++
14663 ++ return 0;
14664 ++}
14665 ++
14666 ++static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
14667 ++{
14668 ++ struct mt76_phy *mphy = &dev->mt76.phy;
14669 ++ int err = 0;
14670 ++ u32 addr;
14671 ++
14672 ++ if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
14673 ++ return 0;
14674 ++
14675 ++ mt7622_trigger_hif_int(dev, true);
14676 ++
14677 ++ addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
14678 ++ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
14679 ++
14680 ++ if (is_mt7622(&dev->mt76) &&
14681 ++ !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
14682 ++ MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
14683 ++ dev_err(dev->mt76.dev, "Timeout for firmware own\n");
14684 ++ clear_bit(MT76_STATE_PM, &mphy->state);
14685 ++ err = -EIO;
14686 ++ }
14687 ++
14688 ++ mt7622_trigger_hif_int(dev, false);
14689 ++
14690 ++ return err;
14691 ++}
14692 ++
14693 + static void
14694 + mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
14695 + {
14696 +@@ -1314,6 +1405,8 @@ static const struct mt7615_mcu_ops wtbl_update_ops = {
14697 + .add_tx_ba = mt7615_mcu_wtbl_tx_ba,
14698 + .add_rx_ba = mt7615_mcu_wtbl_rx_ba,
14699 + .sta_add = mt7615_mcu_wtbl_sta_add,
14700 ++ .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
14701 ++ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
14702 + };
14703 +
14704 + static int
14705 +@@ -1410,6 +1503,8 @@ static const struct mt7615_mcu_ops sta_update_ops = {
14706 + .add_tx_ba = mt7615_mcu_sta_tx_ba,
14707 + .add_rx_ba = mt7615_mcu_sta_rx_ba,
14708 + .sta_add = mt7615_mcu_add_sta,
14709 ++ .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
14710 ++ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
14711 + };
14712 +
14713 + static int
14714 +@@ -1823,6 +1918,8 @@ static const struct mt7615_mcu_ops uni_update_ops = {
14715 + .add_tx_ba = mt7615_mcu_uni_tx_ba,
14716 + .add_rx_ba = mt7615_mcu_uni_rx_ba,
14717 + .sta_add = mt7615_mcu_uni_add_sta,
14718 ++ .set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl,
14719 ++ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
14720 + };
14721 +
14722 + static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
14723 +@@ -1895,81 +1992,6 @@ static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
14724 + &req, sizeof(req), true);
14725 + }
14726 +
14727 +-static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
14728 +-{
14729 +- if (!is_mt7622(&dev->mt76))
14730 +- return;
14731 +-
14732 +- regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
14733 +- MT_INFRACFG_MISC_AP2CONN_WAKE,
14734 +- !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
14735 +-}
14736 +-
14737 +-int mt7615_driver_own(struct mt7615_dev *dev)
14738 +-{
14739 +- struct mt76_phy *mphy = &dev->mt76.phy;
14740 +- struct mt76_dev *mdev = &dev->mt76;
14741 +- int i;
14742 +-
14743 +- if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
14744 +- goto out;
14745 +-
14746 +- mt7622_trigger_hif_int(dev, true);
14747 +-
14748 +- for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
14749 +- u32 addr;
14750 +-
14751 +- addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
14752 +- mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
14753 +-
14754 +- addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
14755 +- if (mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
14756 +- break;
14757 +- }
14758 +-
14759 +- mt7622_trigger_hif_int(dev, false);
14760 +-
14761 +- if (i == MT7615_DRV_OWN_RETRY_COUNT) {
14762 +- dev_err(mdev->dev, "driver own failed\n");
14763 +- set_bit(MT76_STATE_PM, &mphy->state);
14764 +- return -EIO;
14765 +- }
14766 +-
14767 +-out:
14768 +- dev->pm.last_activity = jiffies;
14769 +-
14770 +- return 0;
14771 +-}
14772 +-EXPORT_SYMBOL_GPL(mt7615_driver_own);
14773 +-
14774 +-int mt7615_firmware_own(struct mt7615_dev *dev)
14775 +-{
14776 +- struct mt76_phy *mphy = &dev->mt76.phy;
14777 +- int err = 0;
14778 +- u32 addr;
14779 +-
14780 +- if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
14781 +- return 0;
14782 +-
14783 +- mt7622_trigger_hif_int(dev, true);
14784 +-
14785 +- addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
14786 +- mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
14787 +-
14788 +- if (is_mt7622(&dev->mt76) &&
14789 +- !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
14790 +- MT_CFG_LPCR_HOST_FW_OWN, 300)) {
14791 +- dev_err(dev->mt76.dev, "Timeout for firmware own\n");
14792 +- clear_bit(MT76_STATE_PM, &mphy->state);
14793 +- err = -EIO;
14794 +- }
14795 +-
14796 +- mt7622_trigger_hif_int(dev, false);
14797 +-
14798 +- return err;
14799 +-}
14800 +-EXPORT_SYMBOL_GPL(mt7615_firmware_own);
14801 +-
14802 + static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
14803 + {
14804 + const struct mt7615_patch_hdr *hdr;
14805 +@@ -2452,7 +2474,7 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
14806 +
14807 + dev->mt76.mcu_ops = &mt7615_mcu_ops,
14808 +
14809 +- ret = mt7615_driver_own(dev);
14810 ++ ret = mt7615_mcu_drv_pmctrl(dev);
14811 + if (ret)
14812 + return ret;
14813 +
14814 +@@ -2482,7 +2504,7 @@ EXPORT_SYMBOL_GPL(mt7615_mcu_init);
14815 + void mt7615_mcu_exit(struct mt7615_dev *dev)
14816 + {
14817 + __mt76_mcu_restart(&dev->mt76);
14818 +- mt7615_firmware_own(dev);
14819 ++ mt7615_mcu_set_fw_ctrl(dev);
14820 + skb_queue_purge(&dev->mt76.mcu.res_q);
14821 + }
14822 + EXPORT_SYMBOL_GPL(mt7615_mcu_exit);
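[Editor's note: the mcu.c refactor above demotes the exported mt7615_driver_own()/mt7615_firmware_own() helpers to per-variant ops (set_drv_ctrl/set_fw_ctrl), letting mt7663-class chips use the lighter doorbell sequence. The indirection stripped to its shape; simplified, hypothetical types.]

/*
 * Editorial sketch of the ops-table shape used above: power-management
 * entry points become function pointers that each chip variant fills
 * in at probe time.
 */
struct demo_dev;

struct demo_mcu_ops {
        int (*set_drv_ctrl)(struct demo_dev *dev);
        int (*set_fw_ctrl)(struct demo_dev *dev);
};

struct demo_dev {
        const struct demo_mcu_ops *mcu_ops;
};

static inline int demo_set_drv_ctrl(struct demo_dev *dev)
{
        return dev->mcu_ops->set_drv_ctrl(dev);
}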
14823 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
14824 +index 571eadc033a3b..c2e1cfb071a82 100644
14825 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
14826 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
14827 +@@ -220,6 +220,8 @@ struct mt7615_phy {
14828 + #define mt7615_mcu_add_bss_info(phy, ...) (phy->dev)->mcu_ops->add_bss_info((phy), __VA_ARGS__)
14829 + #define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__)
14830 + #define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__)
14831 ++#define mt7615_mcu_set_drv_ctrl(dev) (dev)->mcu_ops->set_drv_ctrl((dev))
14832 ++#define mt7615_mcu_set_fw_ctrl(dev) (dev)->mcu_ops->set_fw_ctrl((dev))
14833 + struct mt7615_mcu_ops {
14834 + int (*add_tx_ba)(struct mt7615_dev *dev,
14835 + struct ieee80211_ampdu_params *params,
14836 +@@ -238,6 +240,8 @@ struct mt7615_mcu_ops {
14837 + struct ieee80211_hw *hw,
14838 + struct ieee80211_vif *vif, bool enable);
14839 + int (*set_pm_state)(struct mt7615_dev *dev, int band, int state);
14840 ++ int (*set_drv_ctrl)(struct mt7615_dev *dev);
14841 ++ int (*set_fw_ctrl)(struct mt7615_dev *dev);
14842 + };
14843 +
14844 + struct mt7615_dev {
14845 +@@ -638,8 +642,6 @@ int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
14846 + struct ieee80211_vif *vif);
14847 + int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
14848 + struct ieee80211_channel *chan, int duration);
14849 +-int mt7615_firmware_own(struct mt7615_dev *dev);
14850 +-int mt7615_driver_own(struct mt7615_dev *dev);
14851 +
14852 + int mt7615_init_debugfs(struct mt7615_dev *dev);
14853 + int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
14854 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
14855 +index 2328d78e06a10..b9794f8a8df41 100644
14856 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
14857 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
14858 +@@ -118,7 +118,7 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
14859 + if (err)
14860 + goto restore;
14861 +
14862 +- err = mt7615_firmware_own(dev);
14863 ++ err = mt7615_mcu_set_fw_ctrl(dev);
14864 + if (err)
14865 + goto restore;
14866 +
14867 +@@ -142,7 +142,7 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
14868 + bool pdma_reset;
14869 + int i, err;
14870 +
14871 +- err = mt7615_driver_own(dev);
14872 ++ err = mt7615_mcu_set_drv_ctrl(dev);
14873 + if (err < 0)
14874 + return err;
14875 +
14876 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
14877 +index dabce51117b0a..57d60876db544 100644
14878 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
14879 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
14880 +@@ -426,6 +426,8 @@ static int mt7663s_suspend(struct device *dev)
14881 + return err;
14882 + }
14883 +
14884 ++ sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
14885 ++
14886 + mt76s_stop_txrx(&mdev->mt76);
14887 +
14888 + return mt7663s_firmware_own(mdev);
14889 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
14890 +index 1730751133aa2..2cfa58d49832f 100644
14891 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
14892 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
14893 +@@ -70,7 +70,7 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
14894 + if (dev->mt76.test.state != MT76_TM_STATE_OFF)
14895 + tx_power = dev->mt76.test.tx_power;
14896 +
14897 +- len = sizeof(req_hdr) + MT7615_EE_MAX - MT_EE_NIC_CONF_0;
14898 ++ len = MT7615_EE_MAX - MT_EE_NIC_CONF_0;
14899 + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + len);
14900 + if (!skb)
14901 + return -ENOMEM;
14902 +@@ -83,8 +83,10 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
14903 + int index;
14904 +
14905 + ret = mt7615_eeprom_get_target_power_index(dev, chandef->chan, i);
14906 +- if (ret < 0)
14907 ++ if (ret < 0) {
14908 ++ dev_kfree_skb(skb);
14909 + return -EINVAL;
14910 ++ }
14911 +
14912 + index = ret - MT_EE_NIC_CONF_0;
14913 + if (tx_power && tx_power[i])
14914 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
14915 +index 0b33df3e3bfec..adbed373798e8 100644
14916 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
14917 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
14918 +@@ -19,6 +19,7 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
14919 + {
14920 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
14921 + int ret, seq, ep;
14922 ++ u32 len;
14923 +
14924 + mutex_lock(&mdev->mcu.mutex);
14925 +
14926 +@@ -28,7 +29,8 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
14927 + else
14928 + ep = MT_EP_OUT_AC_BE;
14929 +
14930 +- put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
14931 ++ len = skb->len;
14932 ++ put_unaligned_le32(len, skb_push(skb, sizeof(len)));
14933 + ret = mt76_skb_adjust_pad(skb);
14934 + if (ret < 0)
14935 + goto out;
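[Editor's note: the usb_mcu.c change above looks cosmetic but fixes an evaluation-order bug — skb_push() updates skb->len, and C leaves the order of argument evaluation unspecified, so the old code could write the post-push length. Sketch, not from the patch.]

/*
 * Editorial sketch: snapshot skb->len before skb_push() runs, because
 * the two arguments of put_unaligned_le32() are not sequenced against
 * each other and skb_push() modifies skb->len.
 */
#include <linux/skbuff.h>
#include <asm/unaligned.h>

static void demo_prepend_len(struct sk_buff *skb)
{
        u32 len = skb->len;     /* read before the push */

        put_unaligned_le32(len, skb_push(skb, sizeof(len)));
}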
14936 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
14937 +index 6dffdaaa9ad53..294276e2280d2 100644
14938 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
14939 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
14940 +@@ -259,8 +259,11 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
14941 + }
14942 +
14943 + mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
14944 +- if (mt76_is_usb(mdev))
14945 +- put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
14946 ++ if (mt76_is_usb(mdev)) {
14947 ++ u32 len = skb->len;
14948 ++
14949 ++ put_unaligned_le32(len, skb_push(skb, sizeof(len)));
14950 ++ }
14951 +
14952 + return mt76_skb_adjust_pad(skb);
14953 + }
14954 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
14955 +index a8832c5e60041..8a1ae08d9572e 100644
14956 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
14957 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
14958 +@@ -95,16 +95,13 @@ static int mt7915_poll_tx(struct napi_struct *napi, int budget)
14959 + dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);
14960 +
14961 + mt7915_tx_cleanup(dev);
14962 +-
14963 +- if (napi_complete_done(napi, 0))
14964 +- mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL);
14965 +-
14966 +- mt7915_tx_cleanup(dev);
14967 +-
14968 + mt7915_mac_sta_poll(dev);
14969 +
14970 + tasklet_schedule(&dev->mt76.tx_tasklet);
14971 +
14972 ++ if (napi_complete_done(napi, 0))
14973 ++ mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL);
14974 ++
14975 + return 0;
14976 + }
14977 +
14978 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
14979 +index eaed5ef054016..bfd87974a5796 100644
14980 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
14981 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
14982 +@@ -2335,14 +2335,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
14983 + struct bss_info_bcn *bcn;
14984 + int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
14985 +
14986 +- rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
14987 +- if (IS_ERR(rskb))
14988 +- return PTR_ERR(rskb);
14989 +-
14990 +- tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
14991 +- bcn = (struct bss_info_bcn *)tlv;
14992 +- bcn->enable = en;
14993 +-
14994 + skb = ieee80211_beacon_get_template(hw, vif, &offs);
14995 + if (!skb)
14996 + return -EINVAL;
14997 +@@ -2353,6 +2345,16 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
14998 + return -EINVAL;
14999 + }
15000 +
15001 ++ rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
15002 ++ if (IS_ERR(rskb)) {
15003 ++ dev_kfree_skb(skb);
15004 ++ return PTR_ERR(rskb);
15005 ++ }
15006 ++
15007 ++ tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
15008 ++ bcn = (struct bss_info_bcn *)tlv;
15009 ++ bcn->enable = en;
15010 ++
15011 + if (mvif->band_idx) {
15012 + info = IEEE80211_SKB_CB(skb);
15013 + info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
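[Editor's note: the mt7915 beacon hunk above reorders the two acquisitions — validate the beacon template first, then allocate the request, and free the template if that allocation fails — so neither skb can leak. The shape of the fix; the demo_* helpers are hypothetical.]

/*
 * Editorial sketch: take resource B only after resource A has been
 * validated, and release A on every early exit.
 */
#include <linux/err.h>
#include <linux/skbuff.h>

struct demo_dev;
struct sk_buff *demo_get_template(struct demo_dev *dev);        /* hypothetical */
struct sk_buff *demo_alloc_request(struct demo_dev *dev);       /* hypothetical */

static int demo_build_beacon(struct demo_dev *dev)
{
        struct sk_buff *skb, *rskb;

        skb = demo_get_template(dev);
        if (!skb)
                return -EINVAL;

        rskb = demo_alloc_request(dev);
        if (IS_ERR(rskb)) {
                dev_kfree_skb(skb);     /* don't leak the template */
                return PTR_ERR(rskb);
        }

        /* ... fill rskb from skb, then queue rskb ... */
        dev_kfree_skb(skb);
        return 0;
}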
15014 +diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
15015 +index 75bb02cdfdae4..5bd6ac1ba3b5b 100644
15016 +--- a/drivers/net/wireless/mediatek/mt76/testmode.c
15017 ++++ b/drivers/net/wireless/mediatek/mt76/testmode.c
15018 +@@ -442,9 +442,13 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
15019 + mutex_lock(&dev->mutex);
15020 +
15021 + if (tb[MT76_TM_ATTR_STATS]) {
15022 ++ err = -EINVAL;
15023 ++
15024 + a = nla_nest_start(msg, MT76_TM_ATTR_STATS);
15025 +- err = mt76_testmode_dump_stats(dev, msg);
15026 +- nla_nest_end(msg, a);
15027 ++ if (a) {
15028 ++ err = mt76_testmode_dump_stats(dev, msg);
15029 ++ nla_nest_end(msg, a);
15030 ++ }
15031 +
15032 + goto out;
15033 + }
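[Editor's note: the testmode fix above handles nla_nest_start() returning NULL when the netlink message is out of room; closing a nest that never opened would corrupt the message. A minimal sketch of the check.]

/*
 * Editorial sketch: nla_nest_start() returns NULL on a full message,
 * so only call nla_nest_end() for a nest that actually opened.
 */
#include <net/netlink.h>

static int demo_dump(struct sk_buff *msg, int attrtype)
{
        struct nlattr *nest;

        nest = nla_nest_start(msg, attrtype);
        if (!nest)
                return -EMSGSIZE;

        /* ... nla_put() the payload attributes here ... */

        nla_nest_end(msg, nest);
        return 0;
}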
15034 +diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
15035 +index 358ac86013338..b5a1b65c087ca 100644
15036 +--- a/drivers/net/wireless/microchip/wilc1000/mon.c
15037 ++++ b/drivers/net/wireless/microchip/wilc1000/mon.c
15038 +@@ -235,11 +235,10 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
15039 +
15040 + if (register_netdevice(wl->monitor_dev)) {
15041 + netdev_err(real_dev, "register_netdevice failed\n");
15042 ++ free_netdev(wl->monitor_dev);
15043 + return NULL;
15044 + }
15045 + priv = netdev_priv(wl->monitor_dev);
15046 +- if (!priv)
15047 +- return NULL;
15048 +
15049 + priv->real_ndev = real_dev;
15050 +
15051 +diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
15052 +index 3ece7b0b03929..351ff909ab1c7 100644
15053 +--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
15054 ++++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
15055 +@@ -149,9 +149,10 @@ static int wilc_sdio_probe(struct sdio_func *func,
15056 + wilc->dev = &func->dev;
15057 +
15058 + wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc");
15059 +- if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
15060 ++ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
15061 ++ kfree(sdio_priv);
15062 + return -EPROBE_DEFER;
15063 +- else if (!IS_ERR(wilc->rtc_clk))
15064 ++ } else if (!IS_ERR(wilc->rtc_clk))
15065 + clk_prepare_enable(wilc->rtc_clk);
15066 +
15067 + dev_info(&func->dev, "Driver Initializing success\n");
15068 +diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
15069 +index 3f19e3f38a397..a18dac0aa6b67 100644
15070 +--- a/drivers/net/wireless/microchip/wilc1000/spi.c
15071 ++++ b/drivers/net/wireless/microchip/wilc1000/spi.c
15072 +@@ -112,9 +112,10 @@ static int wilc_bus_probe(struct spi_device *spi)
15073 + wilc->dev_irq_num = spi->irq;
15074 +
15075 + wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk");
15076 +- if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
15077 ++ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
15078 ++ kfree(spi_priv);
15079 + return -EPROBE_DEFER;
15080 +- else if (!IS_ERR(wilc->rtc_clk))
15081 ++ } else if (!IS_ERR(wilc->rtc_clk))
15082 + clk_prepare_enable(wilc->rtc_clk);
15083 +
15084 + return 0;
15085 +diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
15086 +index f40d8c3c3d9e5..f3ccbd2b10847 100644
15087 +--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
15088 ++++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
15089 +@@ -869,6 +869,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
15090 + default:
15091 + pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid,
15092 + vif->vifid, vif->wdev.iftype);
15093 ++ dev_kfree_skb(cmd_skb);
15094 + ret = -EINVAL;
15095 + goto out;
15096 + }
15097 +@@ -1924,6 +1925,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
15098 + break;
15099 + default:
15100 + pr_err("unsupported iftype %d\n", vif->wdev.iftype);
15101 ++ dev_kfree_skb(cmd_skb);
15102 + ret = -EINVAL;
15103 + goto out;
15104 + }
15105 +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
15106 +index 19efae462a242..5cd7ef3625c5e 100644
15107 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
15108 ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
15109 +@@ -5795,7 +5795,6 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
15110 + ret = usb_submit_urb(urb, GFP_KERNEL);
15111 + if (ret) {
15112 + usb_unanchor_urb(urb);
15113 +- usb_free_urb(urb);
15114 + goto error;
15115 + }
15116 +
15117 +@@ -5804,6 +5803,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
15118 + rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
15119 +
15120 + error:
15121 ++ usb_free_urb(urb);
15122 + return ret;
15123 + }
15124 +
15125 +@@ -6318,6 +6318,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
15126 + struct rtl8xxxu_priv *priv = hw->priv;
15127 + struct rtl8xxxu_rx_urb *rx_urb;
15128 + struct rtl8xxxu_tx_urb *tx_urb;
15129 ++ struct sk_buff *skb;
15130 + unsigned long flags;
15131 + int ret, i;
15132 +
15133 +@@ -6368,6 +6369,13 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
15134 + rx_urb->hw = hw;
15135 +
15136 + ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
15137 ++ if (ret) {
15138 ++ if (ret != -ENOMEM) {
15139 ++ skb = (struct sk_buff *)rx_urb->urb.context;
15140 ++ dev_kfree_skb(skb);
15141 ++ }
15142 ++ rtl8xxxu_queue_rx_urb(priv, rx_urb);
15143 ++ }
15144 + }
15145 +
15146 + schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
15147 +diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
15148 +index 54044abf30d7c..d69e4c6fc680a 100644
15149 +--- a/drivers/net/wireless/realtek/rtw88/main.c
15150 ++++ b/drivers/net/wireless/realtek/rtw88/main.c
15151 +@@ -1473,6 +1473,9 @@ int rtw_core_init(struct rtw_dev *rtwdev)
15152 + ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW);
15153 + if (ret) {
15154 + rtw_warn(rtwdev, "no wow firmware loaded\n");
15155 ++ wait_for_completion(&rtwdev->fw.completion);
15156 ++ if (rtwdev->fw.firmware)
15157 ++ release_firmware(rtwdev->fw.firmware);
15158 + return ret;
15159 + }
15160 + }
15161 +@@ -1487,6 +1490,8 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
15162 + struct rtw_rsvd_page *rsvd_pkt, *tmp;
15163 + unsigned long flags;
15164 +
15165 ++ rtw_wait_firmware_completion(rtwdev);
15166 ++
15167 + if (fw->firmware)
15168 + release_firmware(fw->firmware);
15169 +
15170 +diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
15171 +index 3413973bc4750..7f1f5073b9f4d 100644
15172 +--- a/drivers/net/wireless/realtek/rtw88/pci.c
15173 ++++ b/drivers/net/wireless/realtek/rtw88/pci.c
15174 +@@ -1599,6 +1599,8 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
15175 +
15176 + if (chip->ops->shutdown)
15177 + chip->ops->shutdown(rtwdev);
15178 ++
15179 ++ pci_set_power_state(pdev, PCI_D3hot);
15180 + }
15181 + EXPORT_SYMBOL(rtw_pci_shutdown);
15182 +
15183 +diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
15184 +index 024c2bc275cbe..ca17aa9cf7dc7 100644
15185 +--- a/drivers/net/wireless/realtek/rtw88/pci.h
15186 ++++ b/drivers/net/wireless/realtek/rtw88/pci.h
15187 +@@ -9,8 +9,8 @@
15188 + #define RTK_BEQ_TX_DESC_NUM 256
15189 +
15190 + #define RTK_MAX_RX_DESC_NUM 512
15191 +-/* 8K + rx desc size */
15192 +-#define RTK_PCI_RX_BUF_SIZE (8192 + 24)
15193 ++/* 11K + rx desc size */
15194 ++#define RTK_PCI_RX_BUF_SIZE (11454 + 24)
15195 +
15196 + #define RTK_PCI_CTRL 0x300
15197 + #define BIT_RST_TRXDMA_INTF BIT(20)
15198 +diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
15199 +index 8d93f31597469..9687b376d221b 100644
15200 +--- a/drivers/net/wireless/realtek/rtw88/phy.c
15201 ++++ b/drivers/net/wireless/realtek/rtw88/phy.c
15202 +@@ -147,12 +147,13 @@ void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
15203 + {
15204 + struct rtw_chip_info *chip = rtwdev->chip;
15205 + struct rtw_hal *hal = &rtwdev->hal;
15206 +- const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
15207 + u32 addr, mask;
15208 + u8 path;
15209 +
15210 +- if (dig_cck)
15211 ++ if (chip->dig_cck) {
15212 ++ const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
15213 + rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1);
15214 ++ }
15215 +
15216 + for (path = 0; path < hal->rf_path_num; path++) {
15217 + addr = chip->dig[path].addr;
15218 +diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
15219 +index 88e1db65be02c..71428d8cbcfc5 100644
15220 +--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
15221 ++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
15222 +@@ -1203,6 +1203,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
15223 +
15224 + err_dma_mask:
15225 + pci_clear_master(pdev);
15226 ++ pci_release_regions(pdev);
15227 + err_pci_regions:
15228 + pci_disable_device(pdev);
15229 + err_pci_enable:
15230 +diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
15231 +index 3185efeab487b..093dd20057b92 100644
15232 +--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
15233 ++++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
15234 +@@ -1893,7 +1893,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
15235 + goto err_init_dev;
15236 + } else {
15237 + rc = -EINVAL;
15238 +- goto err_ndev;
15239 ++ goto err_init_pci;
15240 + }
15241 +
15242 + ndev_reset_unsafe_flags(ndev);
15243 +diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
15244 +index 57cfd78731fbb..53efecb678983 100644
15245 +--- a/drivers/nvme/host/zns.c
15246 ++++ b/drivers/nvme/host/zns.c
15247 +@@ -133,28 +133,6 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
15248 + return NULL;
15249 + }
15250 +
15251 +-static int __nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
15252 +- struct nvme_zone_report *report,
15253 +- size_t buflen)
15254 +-{
15255 +- struct nvme_command c = { };
15256 +- int ret;
15257 +-
15258 +- c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
15259 +- c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
15260 +- c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
15261 +- c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
15262 +- c.zmr.zra = NVME_ZRA_ZONE_REPORT;
15263 +- c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
15264 +- c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
15265 +-
15266 +- ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
15267 +- if (ret)
15268 +- return ret;
15269 +-
15270 +- return le64_to_cpu(report->nr_zones);
15271 +-}
15272 +-
15273 + static int nvme_zone_parse_entry(struct nvme_ns *ns,
15274 + struct nvme_zone_descriptor *entry,
15275 + unsigned int idx, report_zones_cb cb,
15276 +@@ -182,6 +160,7 @@ static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
15277 + unsigned int nr_zones, report_zones_cb cb, void *data)
15278 + {
15279 + struct nvme_zone_report *report;
15280 ++ struct nvme_command c = { };
15281 + int ret, zone_idx = 0;
15282 + unsigned int nz, i;
15283 + size_t buflen;
15284 +@@ -190,14 +169,26 @@ static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
15285 + if (!report)
15286 + return -ENOMEM;
15287 +
15288 ++ c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
15289 ++ c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
15290 ++ c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
15291 ++ c.zmr.zra = NVME_ZRA_ZONE_REPORT;
15292 ++ c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
15293 ++ c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
15294 ++
15295 + sector &= ~(ns->zsze - 1);
15296 + while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
15297 + memset(report, 0, buflen);
15298 +- ret = __nvme_ns_report_zones(ns, sector, report, buflen);
15299 +- if (ret < 0)
15300 ++
15301 ++ c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
15302 ++ ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
15303 ++ if (ret) {
15304 ++ if (ret > 0)
15305 ++ ret = -EIO;
15306 + goto out_free;
15307 ++ }
15308 +
15309 +- nz = min_t(unsigned int, ret, nr_zones);
15310 ++ nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
15311 + if (!nz)
15312 + break;
15313 +
15314 +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
15315 +index b7b63330b5efd..90e0c84df2af9 100644
15316 +--- a/drivers/nvme/target/core.c
15317 ++++ b/drivers/nvme/target/core.c
15318 +@@ -1126,7 +1126,8 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
15319 + * in case a host died before it enabled the controller. Hence, simply
15320 + * reset the keep alive timer when the controller is enabled.
15321 + */
15322 +- mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
15323 ++ if (ctrl->kato)
15324 ++ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
15325 + }
15326 +
15327 + static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
15328 +diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
15329 +index dacfa7435d0b2..1ab88df3310f6 100644
15330 +--- a/drivers/nvme/target/passthru.c
15331 ++++ b/drivers/nvme/target/passthru.c
15332 +@@ -26,7 +26,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
15333 + struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
15334 + u16 status = NVME_SC_SUCCESS;
15335 + struct nvme_id_ctrl *id;
15336 +- u32 max_hw_sectors;
15337 ++ int max_hw_sectors;
15338 + int page_shift;
15339 +
15340 + id = kzalloc(sizeof(*id), GFP_KERNEL);
15341 +@@ -48,6 +48,13 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
15342 + max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
15343 + pctrl->max_hw_sectors);
15344 +
15345 ++ /*
15346 ++ * nvmet_passthru_map_sg is limited to using a single bio, so limit
15347 ++ * the mdts based on BIO_MAX_PAGES as well
15348 ++ */
15349 ++ max_hw_sectors = min_not_zero(BIO_MAX_PAGES << (PAGE_SHIFT - 9),
15350 ++ max_hw_sectors);
15351 ++
15352 + page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
15353 +
15354 + id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;
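[Editor's note: for reference, the MDTS arithmetic in the hunk above — MDTS is the log2 of the maximum transfer size in units of the controller's minimum page size, while max_hw_sectors counts 512-byte sectors. A worked example, not from the patch.]

/*
 * Editorial sketch: bytes = max_hw_sectors << 9, so
 * mdts = ilog2(bytes) - page_shift = ilog2(max_hw_sectors) + 9 - page_shift.
 * E.g. max_hw_sectors = 1024 (512 KiB) with a 4 KiB minimum page
 * (page_shift = 12) gives mdts = 10 + 9 - 12 = 7, i.e. 2^7 pages.
 */
#include <linux/log2.h>
#include <linux/types.h>

static u8 demo_mdts(unsigned int max_hw_sectors, int page_shift)
{
        return ilog2(max_hw_sectors) + 9 - page_shift;
}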
15355 +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
15356 +index 6cd3edb2eaf65..29a51cd795609 100644
15357 +--- a/drivers/nvmem/core.c
15358 ++++ b/drivers/nvmem/core.c
15359 +@@ -361,16 +361,14 @@ static void nvmem_cell_add(struct nvmem_cell *cell)
15360 + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
15361 + }
15362 +
15363 +-static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
15364 +- const struct nvmem_cell_info *info,
15365 +- struct nvmem_cell *cell)
15366 ++static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
15367 ++ const struct nvmem_cell_info *info,
15368 ++ struct nvmem_cell *cell)
15369 + {
15370 + cell->nvmem = nvmem;
15371 + cell->offset = info->offset;
15372 + cell->bytes = info->bytes;
15373 +- cell->name = kstrdup_const(info->name, GFP_KERNEL);
15374 +- if (!cell->name)
15375 +- return -ENOMEM;
15376 ++ cell->name = info->name;
15377 +
15378 + cell->bit_offset = info->bit_offset;
15379 + cell->nbits = info->nbits;
15380 +@@ -382,13 +380,30 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
15381 + if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
15382 + dev_err(&nvmem->dev,
15383 + "cell %s unaligned to nvmem stride %d\n",
15384 +- cell->name, nvmem->stride);
15385 ++ cell->name ?: "<unknown>", nvmem->stride);
15386 + return -EINVAL;
15387 + }
15388 +
15389 + return 0;
15390 + }
15391 +
15392 ++static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
15393 ++ const struct nvmem_cell_info *info,
15394 ++ struct nvmem_cell *cell)
15395 ++{
15396 ++ int err;
15397 ++
15398 ++ err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
15399 ++ if (err)
15400 ++ return err;
15401 ++
15402 ++ cell->name = kstrdup_const(info->name, GFP_KERNEL);
15403 ++ if (!cell->name)
15404 ++ return -ENOMEM;
15405 ++
15406 ++ return 0;
15407 ++}
15408 ++
15409 + /**
15410 + * nvmem_add_cells() - Add cell information to an nvmem device
15411 + *
15412 +@@ -835,6 +850,7 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
15413 + {
15414 +
15415 + struct device_node *nvmem_np;
15416 ++ struct nvmem_device *nvmem;
15417 + int index = 0;
15418 +
15419 + if (id)
15420 +@@ -844,7 +860,9 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
15421 + if (!nvmem_np)
15422 + return ERR_PTR(-ENOENT);
15423 +
15424 +- return __nvmem_device_get(nvmem_np, device_match_of_node);
15425 ++ nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
15426 ++ of_node_put(nvmem_np);
15427 ++ return nvmem;
15428 + }
15429 + EXPORT_SYMBOL_GPL(of_nvmem_device_get);
15430 + #endif
15431 +@@ -1460,7 +1478,7 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
15432 + if (!nvmem)
15433 + return -EINVAL;
15434 +
15435 +- rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
15436 ++ rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
15437 + if (rc)
15438 + return rc;
15439 +
15440 +@@ -1490,7 +1508,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
15441 + if (!nvmem)
15442 + return -EINVAL;
15443 +
15444 +- rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
15445 ++ rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
15446 + if (rc)
15447 + return rc;
15448 +
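[Editor's note: the of_nvmem_device_get() fix above is a classic OF refcount repair — of_parse_phandle() returns the node with its refcount raised, and every path out of the function must drop it. Sketch of the idiom; demo_find() is hypothetical.]

/*
 * Editorial sketch: of_parse_phandle()/of_get_child_by_name() hand
 * back a node with an elevated refcount; of_node_put() must run on
 * every exit path once the node is no longer needed.
 */
#include <linux/err.h>
#include <linux/of.h>

struct demo;
struct demo *demo_find(struct device_node *np); /* hypothetical */

static struct demo *demo_lookup(struct device_node *np)
{
        struct device_node *child;
        struct demo *d;

        child = of_get_child_by_name(np, "cell");       /* +1 refcount */
        if (!child)
                return ERR_PTR(-ENOENT);

        d = demo_find(child);
        of_node_put(child);                             /* -1 on all paths */
        return d;
}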
15449 +diff --git a/drivers/opp/core.c b/drivers/opp/core.c
15450 +index 3ca7543142bf3..1a95ad40795be 100644
15451 +--- a/drivers/opp/core.c
15452 ++++ b/drivers/opp/core.c
15453 +@@ -1949,6 +1949,9 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
15454 + {
15455 + int index;
15456 +
15457 ++ if (!opp_table->genpd_virt_devs)
15458 ++ return;
15459 ++
15460 + for (index = 0; index < opp_table->required_opp_count; index++) {
15461 + if (!opp_table->genpd_virt_devs[index])
15462 + continue;
15463 +@@ -1995,6 +1998,9 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
15464 + if (!opp_table)
15465 + return ERR_PTR(-ENOMEM);
15466 +
15467 ++ if (opp_table->genpd_virt_devs)
15468 ++ return opp_table;
15469 ++
15470 + /*
15471 + * If the genpd's OPP table isn't already initialized, parsing of the
15472 + * required-opps fail for dev. We should retry this after genpd's OPP
15473 +diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
15474 +index 305bfec2424d8..29f5c616c3bc6 100644
15475 +--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
15476 ++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
15477 +@@ -505,7 +505,8 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
15478 + u32 reg;
15479 + int i;
15480 +
15481 +- hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
15482 ++ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
15483 ++ PCI_HEADER_TYPE_MASK;
15484 + if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
15485 + dev_err(pci->dev,
15486 + "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
15487 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
15488 +index 1559f79e63b6f..2e2e2a2ff51d3 100644
15489 +--- a/drivers/pci/controller/pci-aardvark.c
15490 ++++ b/drivers/pci/controller/pci-aardvark.c
15491 +@@ -9,7 +9,7 @@
15492 + */
15493 +
15494 + #include <linux/delay.h>
15495 +-#include <linux/gpio.h>
15496 ++#include <linux/gpio/consumer.h>
15497 + #include <linux/interrupt.h>
15498 + #include <linux/irq.h>
15499 + #include <linux/irqdomain.h>
15500 +@@ -607,7 +607,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
15501 + * Initialize the configuration space of the PCI-to-PCI bridge
15502 + * associated with the given PCIe interface.
15503 + */
15504 +-static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
15505 ++static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
15506 + {
15507 + struct pci_bridge_emul *bridge = &pcie->bridge;
15508 +
15509 +@@ -633,8 +633,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
15510 + bridge->data = pcie;
15511 + bridge->ops = &advk_pci_bridge_emul_ops;
15512 +
15513 +- pci_bridge_emul_init(bridge, 0);
15514 +-
15515 ++ return pci_bridge_emul_init(bridge, 0);
15516 + }
15517 +
15518 + static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
15519 +@@ -1167,7 +1166,11 @@ static int advk_pcie_probe(struct platform_device *pdev)
15520 +
15521 + advk_pcie_setup_hw(pcie);
15522 +
15523 +- advk_sw_pci_bridge_init(pcie);
15524 ++ ret = advk_sw_pci_bridge_init(pcie);
15525 ++ if (ret) {
15526 ++ dev_err(dev, "Failed to register emulated root PCI bridge\n");
15527 ++ return ret;
15528 ++ }
15529 +
15530 + ret = advk_pcie_init_irq_domain(pcie);
15531 + if (ret) {
15532 +diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
15533 +index fc4c3a15e5707..a9df492fbffa2 100644
15534 +--- a/drivers/pci/controller/pci-hyperv.c
15535 ++++ b/drivers/pci/controller/pci-hyperv.c
15536 +@@ -1276,11 +1276,25 @@ static void hv_irq_unmask(struct irq_data *data)
15537 + exit_unlock:
15538 + spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
15539 +
15540 +- if (res) {
15541 ++ /*
15542 ++ * During hibernation, when a CPU is offlined, the kernel tries
15543 ++ * to move the interrupt to the remaining CPUs that haven't
15544 ++ * been offlined yet. In this case, the below hv_do_hypercall()
15545 ++ * always fails since the vmbus channel has been closed:
15546 ++ * refer to cpu_disable_common() -> fixup_irqs() ->
15547 ++ * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
15548 ++ *
15549 ++ * Suppress the error message for hibernation because the failure
15550 ++ * during hibernation does not matter (at this time all the devices
15551 ++ * have been frozen). Note: the correct affinity info is still updated
15552 ++ * into the irqdata data structure in migrate_one_irq() ->
15553 ++ * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
15554 ++ * resumes, hv_pci_restore_msi_state() is able to correctly restore
15555 ++ * the interrupt with the correct affinity.
15556 ++ */
15557 ++ if (res && hbus->state != hv_pcibus_removing)
15558 + dev_err(&hbus->hdev->device,
15559 + "%s() failed: %#llx", __func__, res);
15560 +- return;
15561 +- }
15562 +
15563 + pci_msi_unmask_irq(data);
15564 + }
15565 +@@ -3372,6 +3386,34 @@ static int hv_pci_suspend(struct hv_device *hdev)
15566 + return 0;
15567 + }
15568 +
15569 ++static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
15570 ++{
15571 ++ struct msi_desc *entry;
15572 ++ struct irq_data *irq_data;
15573 ++
15574 ++ for_each_pci_msi_entry(entry, pdev) {
15575 ++ irq_data = irq_get_irq_data(entry->irq);
15576 ++ if (WARN_ON_ONCE(!irq_data))
15577 ++ return -EINVAL;
15578 ++
15579 ++ hv_compose_msi_msg(irq_data, &entry->msg);
15580 ++ }
15581 ++
15582 ++ return 0;
15583 ++}
15584 ++
15585 ++/*
15586 ++ * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
15587 ++ * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
15588 ++ * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
15589 ++ * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
15590 ++ * Table entries.
15591 ++ */
15592 ++static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
15593 ++{
15594 ++ pci_walk_bus(hbus->pci_bus, hv_pci_restore_msi_msg, NULL);
15595 ++}
15596 ++
15597 + static int hv_pci_resume(struct hv_device *hdev)
15598 + {
15599 + struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
15600 +@@ -3405,6 +3447,8 @@ static int hv_pci_resume(struct hv_device *hdev)
15601 +
15602 + prepopulate_bars(hbus);
15603 +
15604 ++ hv_pci_restore_msi_state(hbus);
15605 ++
15606 + hbus->state = hv_pcibus_installed;
15607 + return 0;
15608 + out:
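[Editor's note: the resume path above uses pci_walk_bus() to re-run hv_compose_msi_msg() for every device, since Hyper-V does not emulate the MMIO writes pci_restore_msi_state() would otherwise rely on. The walk-callback pattern itself, in miniature.]

/*
 * Editorial sketch of pci_walk_bus(): the callback runs once per
 * device under the bus; returning non-zero stops the walk early.
 */
#include <linux/pci.h>

static int demo_count_cb(struct pci_dev *pdev, void *arg)
{
        int *count = arg;

        (*count)++;
        return 0;       /* keep walking */
}

static int demo_count_devices(struct pci_bus *bus)
{
        int count = 0;

        pci_walk_bus(bus, demo_count_cb, &count);
        return count;
}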
15609 +diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
15610 +index 3176ad3ab0e52..908475d27e0e7 100644
15611 +--- a/drivers/pci/controller/pcie-iproc-msi.c
15612 ++++ b/drivers/pci/controller/pcie-iproc-msi.c
15613 +@@ -209,15 +209,20 @@ static int iproc_msi_irq_set_affinity(struct irq_data *data,
15614 + struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
15615 + int target_cpu = cpumask_first(mask);
15616 + int curr_cpu;
15617 ++ int ret;
15618 +
15619 + curr_cpu = hwirq_to_cpu(msi, data->hwirq);
15620 + if (curr_cpu == target_cpu)
15621 +- return IRQ_SET_MASK_OK_DONE;
15622 ++ ret = IRQ_SET_MASK_OK_DONE;
15623 ++ else {
15624 ++ /* steer MSI to the target CPU */
15625 ++ data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
15626 ++ ret = IRQ_SET_MASK_OK;
15627 ++ }
15628 +
15629 +- /* steer MSI to the target CPU */
15630 +- data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
15631 ++ irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
15632 +
15633 +- return IRQ_SET_MASK_OK;
15634 ++ return ret;
15635 + }
15636 +
15637 + static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
15638 +diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
15639 +index b37e08c4f9d1a..4afd4ee4f7f04 100644
15640 +--- a/drivers/pci/iov.c
15641 ++++ b/drivers/pci/iov.c
15642 +@@ -180,6 +180,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
15643 + virtfn->device = iov->vf_device;
15644 + virtfn->is_virtfn = 1;
15645 + virtfn->physfn = pci_dev_get(dev);
15646 ++ virtfn->no_command_memory = 1;
15647 +
15648 + if (id == 0)
15649 + pci_read_vf_config_common(virtfn);
15650 +diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
15651 +index aac9823b0c6bb..e116815fa8092 100644
15652 +--- a/drivers/perf/thunderx2_pmu.c
15653 ++++ b/drivers/perf/thunderx2_pmu.c
15654 +@@ -805,14 +805,17 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
15655 + list_for_each_entry(rentry, &list, node) {
15656 + if (resource_type(rentry->res) == IORESOURCE_MEM) {
15657 + res = *rentry->res;
15658 ++ rentry = NULL;
15659 + break;
15660 + }
15661 + }
15662 ++ acpi_dev_free_resource_list(&list);
15663 +
15664 +- if (!rentry->res)
15665 ++ if (rentry) {
15666 ++ dev_err(dev, "PMU type %d: Fail to find resource\n", type);
15667 + return NULL;
15668 ++ }
15669 +
15670 +- acpi_dev_free_resource_list(&list);
15671 + base = devm_ioremap_resource(dev, &res);
15672 + if (IS_ERR(base)) {
15673 + dev_err(dev, "PMU type %d: Fail to map resource\n", type);
15674 +diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
15675 +index edac28cd25ddc..633cf07ba6723 100644
15676 +--- a/drivers/perf/xgene_pmu.c
15677 ++++ b/drivers/perf/xgene_pmu.c
15678 +@@ -1453,17 +1453,6 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
15679 + }
15680 +
15681 + #if defined(CONFIG_ACPI)
15682 +-static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
15683 +-{
15684 +- struct resource *res = data;
15685 +-
15686 +- if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
15687 +- acpi_dev_resource_memory(ares, res);
15688 +-
15689 +- /* Always tell the ACPI core to skip this resource */
15690 +- return 1;
15691 +-}
15692 +-
15693 + static struct
15694 + xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
15695 + struct acpi_device *adev, u32 type)
15696 +@@ -1475,6 +1464,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
15697 + struct hw_pmu_info *inf;
15698 + void __iomem *dev_csr;
15699 + struct resource res;
15700 ++ struct resource_entry *rentry;
15701 + int enable_bit;
15702 + int rc;
15703 +
15704 +@@ -1483,11 +1473,23 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
15705 + return NULL;
15706 +
15707 + INIT_LIST_HEAD(&resource_list);
15708 +- rc = acpi_dev_get_resources(adev, &resource_list,
15709 +- acpi_pmu_dev_add_resource, &res);
15710 ++ rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
15711 ++ if (rc <= 0) {
15712 ++ dev_err(dev, "PMU type %d: No resources found\n", type);
15713 ++ return NULL;
15714 ++ }
15715 ++
15716 ++ list_for_each_entry(rentry, &resource_list, node) {
15717 ++ if (resource_type(rentry->res) == IORESOURCE_MEM) {
15718 ++ res = *rentry->res;
15719 ++ rentry = NULL;
15720 ++ break;
15721 ++ }
15722 ++ }
15723 + acpi_dev_free_resource_list(&resource_list);
15724 +- if (rc < 0) {
15725 +- dev_err(dev, "PMU type %d: No resource address found\n", type);
15726 ++
15727 ++ if (rentry) {
15728 ++ dev_err(dev, "PMU type %d: No memory resource found\n", type);
15729 + return NULL;
15730 + }
15731 +
15732 +diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
15733 +index 53f3f8aec6956..3e6567355d97d 100644
15734 +--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
15735 ++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
15736 +@@ -534,7 +534,7 @@ int aspeed_pin_config_set(struct pinctrl_dev *pctldev, unsigned int offset,
15737 + val = pmap->val << __ffs(pconf->mask);
15738 +
15739 + rc = regmap_update_bits(pdata->scu, pconf->reg,
15740 +- pmap->mask, val);
15741 ++ pconf->mask, val);
15742 +
15743 + if (rc < 0)
15744 + return rc;
15745 +diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
15746 +index dcf7df797af75..0ed14de0134cf 100644
15747 +--- a/drivers/pinctrl/bcm/Kconfig
15748 ++++ b/drivers/pinctrl/bcm/Kconfig
15749 +@@ -23,6 +23,7 @@ config PINCTRL_BCM2835
15750 + select PINMUX
15751 + select PINCONF
15752 + select GENERIC_PINCONF
15753 ++ select GPIOLIB
15754 + select GPIOLIB_IRQCHIP
15755 + default ARCH_BCM2835 || ARCH_BRCMSTB
15756 + help
15757 +diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
15758 +index 5eff8c2965528..3fb2387147189 100644
15759 +--- a/drivers/pinctrl/devicetree.c
15760 ++++ b/drivers/pinctrl/devicetree.c
15761 +@@ -130,9 +130,8 @@ static int dt_to_map_one_config(struct pinctrl *p,
15762 + if (!np_pctldev || of_node_is_root(np_pctldev)) {
15763 + of_node_put(np_pctldev);
15764 + ret = driver_deferred_probe_check_state(p->dev);
15765 +- /* keep deferring if modules are enabled unless we've timed out */
15766 +- if (IS_ENABLED(CONFIG_MODULES) && !allow_default &&
15767 +- (ret == -ENODEV))
15768 ++ /* keep deferring if modules are enabled */
15769 ++ if (IS_ENABLED(CONFIG_MODULES) && !allow_default && ret < 0)
15770 + ret = -EPROBE_DEFER;
15771 + return ret;
15772 + }
15773 +diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
15774 +index 8c162dd5f5a10..3e354e02f4084 100644
15775 +--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
15776 ++++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
15777 +@@ -15,11 +15,13 @@
15778 +
15779 + #include "pinctrl-intel.h"
15780 +
15781 +-#define TGL_PAD_OWN 0x020
15782 +-#define TGL_PADCFGLOCK 0x080
15783 +-#define TGL_HOSTSW_OWN 0x0b0
15784 +-#define TGL_GPI_IS 0x100
15785 +-#define TGL_GPI_IE 0x120
15786 ++#define TGL_PAD_OWN 0x020
15787 ++#define TGL_LP_PADCFGLOCK 0x080
15788 ++#define TGL_H_PADCFGLOCK 0x090
15789 ++#define TGL_LP_HOSTSW_OWN 0x0b0
15790 ++#define TGL_H_HOSTSW_OWN 0x0c0
15791 ++#define TGL_GPI_IS 0x100
15792 ++#define TGL_GPI_IE 0x120
15793 +
15794 + #define TGL_GPP(r, s, e, g) \
15795 + { \
15796 +@@ -29,12 +31,12 @@
15797 + .gpio_base = (g), \
15798 + }
15799 +
15800 +-#define TGL_COMMUNITY(b, s, e, g) \
15801 ++#define TGL_COMMUNITY(b, s, e, pl, ho, g) \
15802 + { \
15803 + .barno = (b), \
15804 + .padown_offset = TGL_PAD_OWN, \
15805 +- .padcfglock_offset = TGL_PADCFGLOCK, \
15806 +- .hostown_offset = TGL_HOSTSW_OWN, \
15807 ++ .padcfglock_offset = (pl), \
15808 ++ .hostown_offset = (ho), \
15809 + .is_offset = TGL_GPI_IS, \
15810 + .ie_offset = TGL_GPI_IE, \
15811 + .pin_base = (s), \
15812 +@@ -43,6 +45,12 @@
15813 + .ngpps = ARRAY_SIZE(g), \
15814 + }
15815 +
15816 ++#define TGL_LP_COMMUNITY(b, s, e, g) \
15817 ++ TGL_COMMUNITY(b, s, e, TGL_LP_PADCFGLOCK, TGL_LP_HOSTSW_OWN, g)
15818 ++
15819 ++#define TGL_H_COMMUNITY(b, s, e, g) \
15820 ++ TGL_COMMUNITY(b, s, e, TGL_H_PADCFGLOCK, TGL_H_HOSTSW_OWN, g)
15821 ++
15822 + /* Tiger Lake-LP */
15823 + static const struct pinctrl_pin_desc tgllp_pins[] = {
15824 + /* GPP_B */
15825 +@@ -367,10 +375,10 @@ static const struct intel_padgroup tgllp_community5_gpps[] = {
15826 + };
15827 +
15828 + static const struct intel_community tgllp_communities[] = {
15829 +- TGL_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
15830 +- TGL_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
15831 +- TGL_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
15832 +- TGL_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
15833 ++ TGL_LP_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
15834 ++ TGL_LP_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
15835 ++ TGL_LP_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
15836 ++ TGL_LP_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
15837 + };
15838 +
15839 + static const struct intel_pinctrl_soc_data tgllp_soc_data = {
15840 +@@ -723,11 +731,11 @@ static const struct intel_padgroup tglh_community5_gpps[] = {
15841 + };
15842 +
15843 + static const struct intel_community tglh_communities[] = {
15844 +- TGL_COMMUNITY(0, 0, 78, tglh_community0_gpps),
15845 +- TGL_COMMUNITY(1, 79, 180, tglh_community1_gpps),
15846 +- TGL_COMMUNITY(2, 181, 217, tglh_community3_gpps),
15847 +- TGL_COMMUNITY(3, 218, 266, tglh_community4_gpps),
15848 +- TGL_COMMUNITY(4, 267, 290, tglh_community5_gpps),
15849 ++ TGL_H_COMMUNITY(0, 0, 78, tglh_community0_gpps),
15850 ++ TGL_H_COMMUNITY(1, 79, 180, tglh_community1_gpps),
15851 ++ TGL_H_COMMUNITY(2, 181, 217, tglh_community3_gpps),
15852 ++ TGL_H_COMMUNITY(3, 218, 266, tglh_community4_gpps),
15853 ++ TGL_H_COMMUNITY(4, 267, 290, tglh_community5_gpps),
15854 + };
15855 +
15856 + static const struct intel_pinctrl_soc_data tglh_soc_data = {
15857 +diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
15858 +index 42b12ea14d6be..7edb067f5e76a 100644
15859 +--- a/drivers/pinctrl/pinctrl-mcp23s08.c
15860 ++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
15861 +@@ -87,7 +87,7 @@ const struct regmap_config mcp23x08_regmap = {
15862 + };
15863 + EXPORT_SYMBOL_GPL(mcp23x08_regmap);
15864 +
15865 +-static const struct reg_default mcp23x16_defaults[] = {
15866 ++static const struct reg_default mcp23x17_defaults[] = {
15867 + {.reg = MCP_IODIR << 1, .def = 0xffff},
15868 + {.reg = MCP_IPOL << 1, .def = 0x0000},
15869 + {.reg = MCP_GPINTEN << 1, .def = 0x0000},
15870 +@@ -98,23 +98,23 @@ static const struct reg_default mcp23x16_defaults[] = {
15871 + {.reg = MCP_OLAT << 1, .def = 0x0000},
15872 + };
15873 +
15874 +-static const struct regmap_range mcp23x16_volatile_range = {
15875 ++static const struct regmap_range mcp23x17_volatile_range = {
15876 + .range_min = MCP_INTF << 1,
15877 + .range_max = MCP_GPIO << 1,
15878 + };
15879 +
15880 +-static const struct regmap_access_table mcp23x16_volatile_table = {
15881 +- .yes_ranges = &mcp23x16_volatile_range,
15882 ++static const struct regmap_access_table mcp23x17_volatile_table = {
15883 ++ .yes_ranges = &mcp23x17_volatile_range,
15884 + .n_yes_ranges = 1,
15885 + };
15886 +
15887 +-static const struct regmap_range mcp23x16_precious_range = {
15888 +- .range_min = MCP_GPIO << 1,
15889 ++static const struct regmap_range mcp23x17_precious_range = {
15890 ++ .range_min = MCP_INTCAP << 1,
15891 + .range_max = MCP_GPIO << 1,
15892 + };
15893 +
15894 +-static const struct regmap_access_table mcp23x16_precious_table = {
15895 +- .yes_ranges = &mcp23x16_precious_range,
15896 ++static const struct regmap_access_table mcp23x17_precious_table = {
15897 ++ .yes_ranges = &mcp23x17_precious_range,
15898 + .n_yes_ranges = 1,
15899 + };
15900 +
15901 +@@ -124,10 +124,10 @@ const struct regmap_config mcp23x17_regmap = {
15902 +
15903 + .reg_stride = 2,
15904 + .max_register = MCP_OLAT << 1,
15905 +- .volatile_table = &mcp23x16_volatile_table,
15906 +- .precious_table = &mcp23x16_precious_table,
15907 +- .reg_defaults = mcp23x16_defaults,
15908 +- .num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults),
15909 ++ .volatile_table = &mcp23x17_volatile_table,
15910 ++ .precious_table = &mcp23x17_precious_table,
15911 ++ .reg_defaults = mcp23x17_defaults,
15912 ++ .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
15913 + .cache_type = REGCACHE_FLAT,
15914 + .val_format_endian = REGMAP_ENDIAN_LITTLE,
15915 + };
15916 +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
15917 +index efe41abc5d472..f3cd7e2967126 100644
15918 +--- a/drivers/pinctrl/pinctrl-single.c
15919 ++++ b/drivers/pinctrl/pinctrl-single.c
15920 +@@ -1014,7 +1014,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
15921 + if (res)
15922 + return res;
15923 +
15924 +- if (pinctrl_spec.args_count < 2) {
15925 ++ if (pinctrl_spec.args_count < 2 || pinctrl_spec.args_count > 3) {
15926 + dev_err(pcs->dev, "invalid args_count for spec: %i\n",
15927 + pinctrl_spec.args_count);
15928 + break;
15929 +@@ -1033,7 +1033,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
15930 + }
15931 +
15932 + dev_dbg(pcs->dev, "%pOFn index: 0x%x value: 0x%x\n",
15933 +- pinctrl_spec.np, offset, pinctrl_spec.args[1]);
15934 ++ pinctrl_spec.np, offset, vals[found].val);
15935 +
15936 + pin = pcs_get_pin_by_offset(pcs, offset);
15937 + if (pin < 0) {
15938 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
15939 +index a2567e772cd57..1df232266f63a 100644
15940 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c
15941 ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
15942 +@@ -1077,12 +1077,10 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
15943 + * when TLMM is powered on. To allow that, enable the GPIO
15944 + * summary line to be wakeup capable at GIC.
15945 + */
15946 +- if (d->parent_data)
15947 +- irq_chip_set_wake_parent(d, on);
15948 +-
15949 +- irq_set_irq_wake(pctrl->irq, on);
15950 ++ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
15951 ++ return irq_chip_set_wake_parent(d, on);
15952 +
15953 +- return 0;
15954 ++ return irq_set_irq_wake(pctrl->irq, on);
15955 + }
15956 +
15957 + static int msm_gpio_irq_reqres(struct irq_data *d)
15958 +@@ -1243,6 +1241,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
15959 + pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
15960 + pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity;
15961 + pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
15962 ++ pctrl->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND |
15963 ++ IRQCHIP_SET_TYPE_MASKED;
15964 +
15965 + np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
15966 + if (np) {
15967 +diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
15968 +index b59180bff5a3e..ef61298c30bdd 100644
15969 +--- a/drivers/platform/chrome/cros_ec_lightbar.c
15970 ++++ b/drivers/platform/chrome/cros_ec_lightbar.c
15971 +@@ -116,6 +116,8 @@ static int get_lightbar_version(struct cros_ec_dev *ec,
15972 +
15973 + param = (struct ec_params_lightbar *)msg->data;
15974 + param->cmd = LIGHTBAR_CMD_VERSION;
15975 ++ msg->outsize = sizeof(param->cmd);
15976 ++ msg->result = sizeof(resp->version);
15977 + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
15978 + if (ret < 0) {
15979 + ret = 0;
15980 +diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
15981 +index 3fcd27ec9ad8f..10ef1fc75c0e1 100644
15982 +--- a/drivers/platform/chrome/cros_ec_typec.c
15983 ++++ b/drivers/platform/chrome/cros_ec_typec.c
15984 +@@ -591,7 +591,8 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
15985 + dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
15986 +
15987 + return usb_role_switch_set_role(typec->ports[port_num]->role_sw,
15988 +- !!(resp.role & PD_CTRL_RESP_ROLE_DATA));
15989 ++ resp.role & PD_CTRL_RESP_ROLE_DATA
15990 ++ ? USB_ROLE_HOST : USB_ROLE_DEVICE);
15991 + }
15992 +
15993 + static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
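
The cros_ec_typec change above is worth dwelling on: !!(x) folds any set bit down to 0 or 1, and with mainline's enum usb_role ordering (USB_ROLE_NONE = 0, USB_ROLE_DEVICE = 1, USB_ROLE_HOST = 2) the old code could never select the host role. A small self-contained illustration; the bit position of the flag is made up for the demo:

#include <stdio.h>

enum usb_role { USB_ROLE_NONE, USB_ROLE_DEVICE, USB_ROLE_HOST };

#define PD_CTRL_RESP_ROLE_DATA (1u << 6)  /* bit position is illustrative */

int main(void)
{
        unsigned int resp_role = PD_CTRL_RESP_ROLE_DATA; /* EC: data host */

        /* Old code: !! yields 1 == USB_ROLE_DEVICE, never USB_ROLE_HOST. */
        enum usb_role before = !!(resp_role & PD_CTRL_RESP_ROLE_DATA);

        /* Fixed code: map the flag onto the intended enumerators. */
        enum usb_role after = (resp_role & PD_CTRL_RESP_ROLE_DATA)
                              ? USB_ROLE_HOST : USB_ROLE_DEVICE;

        printf("before=%d after=%d\n", before, after); /* before=1 after=2 */
        return 0;
}
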
15994 +diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
15995 +index 1506ec0a47771..04a745095c379 100644
15996 +--- a/drivers/platform/x86/mlx-platform.c
15997 ++++ b/drivers/platform/x86/mlx-platform.c
15998 +@@ -328,15 +328,6 @@ static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
15999 + },
16000 + };
16001 +
16002 +-static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = {
16003 +- {
16004 +- I2C_BOARD_INFO("24c32", 0x51),
16005 +- },
16006 +- {
16007 +- I2C_BOARD_INFO("24c32", 0x50),
16008 +- },
16009 +-};
16010 +-
16011 + static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
16012 + {
16013 + I2C_BOARD_INFO("dps460", 0x59),
16014 +@@ -770,15 +761,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = {
16015 + .label = "psu1",
16016 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
16017 + .mask = BIT(0),
16018 +- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0],
16019 +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
16020 ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
16021 + },
16022 + {
16023 + .label = "psu2",
16024 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
16025 + .mask = BIT(1),
16026 +- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1],
16027 +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
16028 ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
16029 + },
16030 + };
16031 +
16032 +diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
16033 +index 599a0f66a3845..a34d95ed70b20 100644
16034 +--- a/drivers/pwm/pwm-img.c
16035 ++++ b/drivers/pwm/pwm-img.c
16036 +@@ -277,6 +277,8 @@ static int img_pwm_probe(struct platform_device *pdev)
16037 + return PTR_ERR(pwm->pwm_clk);
16038 + }
16039 +
16040 ++ platform_set_drvdata(pdev, pwm);
16041 ++
16042 + pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
16043 + pm_runtime_use_autosuspend(&pdev->dev);
16044 + pm_runtime_enable(&pdev->dev);
16045 +@@ -313,7 +315,6 @@ static int img_pwm_probe(struct platform_device *pdev)
16046 + goto err_suspend;
16047 + }
16048 +
16049 +- platform_set_drvdata(pdev, pwm);
16050 + return 0;
16051 +
16052 + err_suspend:
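
Moving platform_set_drvdata() ahead of the runtime-PM calls closes a window in which a runtime-PM callback could run and dereference NULL driver data. The general ordering rule, sketched with a hypothetical driver:

static int example_probe(struct platform_device *pdev)
{
        struct example_pwm *pwm;        /* hypothetical driver state */

        pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
        if (!pwm)
                return -ENOMEM;

        /*
         * Publish driver data *before* runtime PM is enabled: once
         * pm_runtime_enable() runs, suspend/resume callbacks may fire
         * and will dereference dev_get_drvdata().
         */
        platform_set_drvdata(pdev, pwm);
        pm_runtime_enable(&pdev->dev);
        return 0;
}
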
16053 +diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
16054 +index 9d965ffe66d1e..da9bc3d10104a 100644
16055 +--- a/drivers/pwm/pwm-lpss.c
16056 ++++ b/drivers/pwm/pwm-lpss.c
16057 +@@ -93,10 +93,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
16058 + * The equation is:
16059 + * base_unit = round(base_unit_range * freq / c)
16060 + */
16061 +- base_unit_range = BIT(lpwm->info->base_unit_bits) - 1;
16062 ++ base_unit_range = BIT(lpwm->info->base_unit_bits);
16063 + freq *= base_unit_range;
16064 +
16065 + base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
16066 ++ /* base_unit must not be 0 and we also want to avoid overflowing it */
16067 ++ base_unit = clamp_val(base_unit, 1, base_unit_range - 1);
16068 +
16069 + on_time_div = 255ULL * duty_ns;
16070 + do_div(on_time_div, period_ns);
16071 +@@ -104,8 +106,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
16072 +
16073 + orig_ctrl = ctrl = pwm_lpss_read(pwm);
16074 + ctrl &= ~PWM_ON_TIME_DIV_MASK;
16075 +- ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT);
16076 +- base_unit &= base_unit_range;
16077 ++ ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT);
16078 + ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
16079 + ctrl |= on_time_div;
16080 +
16081 +diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
16082 +index eb8c9cb645a6c..098e94335cb5b 100644
16083 +--- a/drivers/pwm/pwm-rockchip.c
16084 ++++ b/drivers/pwm/pwm-rockchip.c
16085 +@@ -288,6 +288,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
16086 + const struct of_device_id *id;
16087 + struct rockchip_pwm_chip *pc;
16088 + struct resource *r;
16089 ++ u32 enable_conf, ctrl;
16090 + int ret, count;
16091 +
16092 + id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev);
16093 +@@ -362,7 +363,9 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
16094 + }
16095 +
16096 + /* Keep the PWM clk enabled if the PWM appears to be up and running. */
16097 +- if (!pwm_is_enabled(pc->chip.pwms))
16098 ++ enable_conf = pc->data->enable_conf;
16099 ++ ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
16100 ++ if ((ctrl & enable_conf) != enable_conf)
16101 + clk_disable(pc->clk);
16102 +
16103 + return 0;
16104 +diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
16105 +index a30342942e26f..94331d999d273 100644
16106 +--- a/drivers/rapidio/devices/rio_mport_cdev.c
16107 ++++ b/drivers/rapidio/devices/rio_mport_cdev.c
16108 +@@ -871,15 +871,16 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
16109 + rmcd_error("pin_user_pages_fast err=%ld",
16110 + pinned);
16111 + nr_pages = 0;
16112 +- } else
16113 ++ } else {
16114 + rmcd_error("pinned %ld out of %ld pages",
16115 + pinned, nr_pages);
16116 ++ /*
16117 ++ * Set nr_pages up to mean "how many pages to unpin" in
16118 ++ * the error handler:

16119 ++ */
16120 ++ nr_pages = pinned;
16121 ++ }
16122 + ret = -EFAULT;
16123 +- /*
16124 +- * Set nr_pages up to mean "how many pages to unpin, in
16125 +- * the error handler:
16126 +- */
16127 +- nr_pages = pinned;
16128 + goto err_pg;
16129 + }
16130 +
16131 +@@ -1679,6 +1680,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
16132 + struct rio_dev *rdev;
16133 + struct rio_switch *rswitch = NULL;
16134 + struct rio_mport *mport;
16135 ++ struct device *dev;
16136 + size_t size;
16137 + u32 rval;
16138 + u32 swpinfo = 0;
16139 +@@ -1693,8 +1695,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
16140 + rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
16141 + dev_info.comptag, dev_info.destid, dev_info.hopcount);
16142 +
16143 +- if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
16144 ++ dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
16145 ++ if (dev) {
16146 + rmcd_debug(RDEV, "device %s already exists", dev_info.name);
16147 ++ put_device(dev);
16148 + return -EEXIST;
16149 + }
16150 +
16151 +diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
16152 +index 569d9ad2c5942..6939aa5b3dc7f 100644
16153 +--- a/drivers/ras/cec.c
16154 ++++ b/drivers/ras/cec.c
16155 +@@ -553,20 +553,20 @@ static struct notifier_block cec_nb = {
16156 + .priority = MCE_PRIO_CEC,
16157 + };
16158 +
16159 +-static void __init cec_init(void)
16160 ++static int __init cec_init(void)
16161 + {
16162 + if (ce_arr.disabled)
16163 +- return;
16164 ++ return -ENODEV;
16165 +
16166 + ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL);
16167 + if (!ce_arr.array) {
16168 + pr_err("Error allocating CE array page!\n");
16169 +- return;
16170 ++ return -ENOMEM;
16171 + }
16172 +
16173 + if (create_debugfs_nodes()) {
16174 + free_page((unsigned long)ce_arr.array);
16175 +- return;
16176 ++ return -ENOMEM;
16177 + }
16178 +
16179 + INIT_DELAYED_WORK(&cec_work, cec_work_fn);
16180 +@@ -575,6 +575,7 @@ static void __init cec_init(void)
16181 + mce_register_decode_chain(&cec_nb);
16182 +
16183 + pr_info("Correctable Errors collector initialized.\n");
16184 ++ return 0;
16185 + }
16186 + late_initcall(cec_init);
16187 +
16188 +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
16189 +index 7ff507ec875a8..4859cf84c0b2f 100644
16190 +--- a/drivers/regulator/core.c
16191 ++++ b/drivers/regulator/core.c
16192 +@@ -5256,15 +5256,20 @@ regulator_register(const struct regulator_desc *regulator_desc,
16193 + else if (regulator_desc->supply_name)
16194 + rdev->supply_name = regulator_desc->supply_name;
16195 +
16196 +- /*
16197 +- * Attempt to resolve the regulator supply, if specified,
16198 +- * but don't return an error if we fail because we will try
16199 +- * to resolve it again later as more regulators are added.
16200 +- */
16201 +- if (regulator_resolve_supply(rdev))
16202 +- rdev_dbg(rdev, "unable to resolve supply\n");
16203 +-
16204 + ret = set_machine_constraints(rdev, constraints);
16205 ++ if (ret == -EPROBE_DEFER) {
16206 ++ /* Regulator might be in bypass mode and so needs its supply
16207 ++ * to set the constraints */
16208 ++ /* FIXME: this currently triggers a chicken-and-egg problem
16209 ++ * when creating -SUPPLY symlink in sysfs to a regulator
16210 ++ * that is just being created */
16211 ++ ret = regulator_resolve_supply(rdev);
16212 ++ if (!ret)
16213 ++ ret = set_machine_constraints(rdev, constraints);
16214 ++ else
16215 ++ rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
16216 ++ ERR_PTR(ret));
16217 ++ }
16218 + if (ret < 0)
16219 + goto wash;
16220 +
16221 +diff --git a/drivers/regulator/qcom_usb_vbus-regulator.c b/drivers/regulator/qcom_usb_vbus-regulator.c
16222 +index 8ba947f3585f5..457788b505720 100644
16223 +--- a/drivers/regulator/qcom_usb_vbus-regulator.c
16224 ++++ b/drivers/regulator/qcom_usb_vbus-regulator.c
16225 +@@ -63,6 +63,7 @@ static int qcom_usb_vbus_regulator_probe(struct platform_device *pdev)
16226 + qcom_usb_vbus_rdesc.enable_mask = OTG_EN;
16227 + config.dev = dev;
16228 + config.init_data = init_data;
16229 ++ config.of_node = dev->of_node;
16230 + config.regmap = regmap;
16231 +
16232 + rdev = devm_regulator_register(dev, &qcom_usb_vbus_rdesc, &config);
16233 +diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
16234 +index 3d3d87210ef2c..58d1d7e571d66 100644
16235 +--- a/drivers/remoteproc/mtk_scp_ipi.c
16236 ++++ b/drivers/remoteproc/mtk_scp_ipi.c
16237 +@@ -30,10 +30,8 @@ int scp_ipi_register(struct mtk_scp *scp,
16238 + scp_ipi_handler_t handler,
16239 + void *priv)
16240 + {
16241 +- if (!scp) {
16242 +- dev_err(scp->dev, "scp device is not ready\n");
16243 ++ if (!scp)
16244 + return -EPROBE_DEFER;
16245 +- }
16246 +
16247 + if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL))
16248 + return -EINVAL;
16249 +diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
16250 +index f4da42fc0eeb1..d2414cc1d90d6 100644
16251 +--- a/drivers/remoteproc/stm32_rproc.c
16252 ++++ b/drivers/remoteproc/stm32_rproc.c
16253 +@@ -685,7 +685,7 @@ static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata,
16254 + * We couldn't get the coprocessor's state, assume
16255 + * it is not running.
16256 + */
16257 +- state = M4_STATE_OFF;
16258 ++ *state = M4_STATE_OFF;
16259 + return 0;
16260 + }
16261 +
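
The stm32_rproc one-liner above is the classic out-parameter slip: assigning to the local pointer variable instead of storing through it leaves the caller's variable untouched. A self-contained illustration (M4_STATE_OFF given an illustrative value):

#include <stdio.h>

#define M4_STATE_OFF 0  /* illustrative value */

static int get_status_broken(unsigned int *state)
{
        state = M4_STATE_OFF;   /* BUG: overwrites the local pointer only;
                                 * the caller's variable never changes */
        return 0;
}

static int get_status_fixed(unsigned int *state)
{
        *state = M4_STATE_OFF;  /* writes through the pointer, as intended */
        return 0;
}

int main(void)
{
        unsigned int s = 42;

        get_status_broken(&s);
        printf("broken: s = %u\n", s);  /* still 42 */
        get_status_fixed(&s);
        printf("fixed:  s = %u\n", s);  /* now 0 */
        return 0;
}
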
16262 +diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
16263 +index 83f2b8804ee98..96a17ec291401 100644
16264 +--- a/drivers/rpmsg/mtk_rpmsg.c
16265 ++++ b/drivers/rpmsg/mtk_rpmsg.c
16266 +@@ -200,7 +200,6 @@ static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
16267 + struct rpmsg_device *rpdev;
16268 + struct mtk_rpmsg_device *mdev;
16269 + struct platform_device *pdev = mtk_subdev->pdev;
16270 +- int ret;
16271 +
16272 + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
16273 + if (!mdev)
16274 +@@ -219,13 +218,7 @@ static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
16275 + rpdev->dev.parent = &pdev->dev;
16276 + rpdev->dev.release = mtk_rpmsg_release_device;
16277 +
16278 +- ret = rpmsg_register_device(rpdev);
16279 +- if (ret) {
16280 +- kfree(mdev);
16281 +- return ret;
16282 +- }
16283 +-
16284 +- return 0;
16285 ++ return rpmsg_register_device(rpdev);
16286 + }
16287 +
16288 + static void mtk_register_device_work_function(struct work_struct *register_work)
16289 +diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
16290 +index 4abbeea782fa4..19903de6268db 100644
16291 +--- a/drivers/rpmsg/qcom_smd.c
16292 ++++ b/drivers/rpmsg/qcom_smd.c
16293 +@@ -1338,7 +1338,7 @@ static int qcom_smd_parse_edge(struct device *dev,
16294 + ret = of_property_read_u32(node, key, &edge->edge_id);
16295 + if (ret) {
16296 + dev_err(dev, "edge missing %s property\n", key);
16297 +- return -EINVAL;
16298 ++ goto put_node;
16299 + }
16300 +
16301 + edge->remote_pid = QCOM_SMEM_HOST_ANY;
16302 +@@ -1349,32 +1349,37 @@ static int qcom_smd_parse_edge(struct device *dev,
16303 + edge->mbox_client.knows_txdone = true;
16304 + edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
16305 + if (IS_ERR(edge->mbox_chan)) {
16306 +- if (PTR_ERR(edge->mbox_chan) != -ENODEV)
16307 +- return PTR_ERR(edge->mbox_chan);
16308 ++ if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
16309 ++ ret = PTR_ERR(edge->mbox_chan);
16310 ++ goto put_node;
16311 ++ }
16312 +
16313 + edge->mbox_chan = NULL;
16314 +
16315 + syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
16316 + if (!syscon_np) {
16317 + dev_err(dev, "no qcom,ipc node\n");
16318 +- return -ENODEV;
16319 ++ ret = -ENODEV;
16320 ++ goto put_node;
16321 + }
16322 +
16323 + edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
16324 +- if (IS_ERR(edge->ipc_regmap))
16325 +- return PTR_ERR(edge->ipc_regmap);
16326 ++ if (IS_ERR(edge->ipc_regmap)) {
16327 ++ ret = PTR_ERR(edge->ipc_regmap);
16328 ++ goto put_node;
16329 ++ }
16330 +
16331 + key = "qcom,ipc";
16332 + ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
16333 + if (ret < 0) {
16334 + dev_err(dev, "no offset in %s\n", key);
16335 +- return -EINVAL;
16336 ++ goto put_node;
16337 + }
16338 +
16339 + ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
16340 + if (ret < 0) {
16341 + dev_err(dev, "no bit in %s\n", key);
16342 +- return -EINVAL;
16343 ++ goto put_node;
16344 + }
16345 + }
16346 +
16347 +@@ -1385,7 +1390,8 @@ static int qcom_smd_parse_edge(struct device *dev,
16348 + irq = irq_of_parse_and_map(node, 0);
16349 + if (irq < 0) {
16350 + dev_err(dev, "required smd interrupt missing\n");
16351 +- return -EINVAL;
16352 ++ ret = irq;
16353 ++ goto put_node;
16354 + }
16355 +
16356 + ret = devm_request_irq(dev, irq,
16357 +@@ -1393,12 +1399,18 @@ static int qcom_smd_parse_edge(struct device *dev,
16358 + node->name, edge);
16359 + if (ret) {
16360 + dev_err(dev, "failed to request smd irq\n");
16361 +- return ret;
16362 ++ goto put_node;
16363 + }
16364 +
16365 + edge->irq = irq;
16366 +
16367 + return 0;
16368 ++
16369 ++put_node:
16370 ++ of_node_put(node);
16371 ++ edge->of_node = NULL;
16372 ++
16373 ++ return ret;
16374 + }
16375 +
16376 + /*
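
The qcom_smd rework funnels every early return through goto put_node so the reference taken on the device-tree node is dropped exactly once on failure. The same shape in self-contained form, with an allocation standing in for the refcounted node:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/*
 * Stand-in for of_parse_phandle()/of_node_put(): any refcounted or
 * allocated resource that must be released on every error path.
 */
static int parse_edge(int fail_step)
{
        char *node = malloc(16);
        int ret;

        if (!node)
                return -ENOMEM;

        if (fail_step == 1) {           /* e.g. missing property */
                ret = -EINVAL;
                goto put_node;
        }
        if (fail_step == 2) {           /* e.g. irq request failed */
                ret = -ENODEV;
                goto put_node;
        }

        free(node);     /* success path: the driver would keep the node */
        return 0;

put_node:
        free(node);     /* single, shared release point */
        return ret;
}

int main(void)
{
        printf("%d %d %d\n", parse_edge(0), parse_edge(1), parse_edge(2));
        return 0;
}
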
16377 +diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
16378 +index 54c85cdd019dd..c9c3de14bc62f 100644
16379 +--- a/drivers/rtc/rtc-ds1307.c
16380 ++++ b/drivers/rtc/rtc-ds1307.c
16381 +@@ -352,6 +352,10 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
16382 + regmap_update_bits(ds1307->regmap, DS1340_REG_FLAG,
16383 + DS1340_BIT_OSF, 0);
16384 + break;
16385 ++ case ds_1388:
16386 ++ regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG,
16387 ++ DS1388_BIT_OSF, 0);
16388 ++ break;
16389 + case mcp794xx:
16390 + /*
16391 + * these bits were cleared when preparing the date/time
16392 +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
16393 +index ecfd6d152e862..6b5cf9ba03e5b 100644
16394 +--- a/drivers/s390/net/qeth_core.h
16395 ++++ b/drivers/s390/net/qeth_core.h
16396 +@@ -680,6 +680,11 @@ struct qeth_card_blkt {
16397 + int inter_packet_jumbo;
16398 + };
16399 +
16400 ++enum qeth_pnso_mode {
16401 ++ QETH_PNSO_NONE,
16402 ++ QETH_PNSO_BRIDGEPORT,
16403 ++};
16404 ++
16405 + #define QETH_BROADCAST_WITH_ECHO 0x01
16406 + #define QETH_BROADCAST_WITHOUT_ECHO 0x02
16407 + struct qeth_card_info {
16408 +@@ -696,6 +701,7 @@ struct qeth_card_info {
16409 + /* no bitfield, we take a pointer on these two: */
16410 + u8 has_lp2lp_cso_v6;
16411 + u8 has_lp2lp_cso_v4;
16412 ++ enum qeth_pnso_mode pnso_mode;
16413 + enum qeth_card_types type;
16414 + enum qeth_link_types link_type;
16415 + int broadcast_capable;
16416 +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
16417 +index 6384f7adba660..4af7b5d57b4e4 100644
16418 +--- a/drivers/s390/net/qeth_l2_main.c
16419 ++++ b/drivers/s390/net/qeth_l2_main.c
16420 +@@ -273,6 +273,17 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
16421 + return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
16422 + }
16423 +
16424 ++static void qeth_l2_set_pnso_mode(struct qeth_card *card,
16425 ++ enum qeth_pnso_mode mode)
16426 ++{
16427 ++ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
16428 ++ WRITE_ONCE(card->info.pnso_mode, mode);
16429 ++ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
16430 ++
16431 ++ if (mode == QETH_PNSO_NONE)
16432 ++ drain_workqueue(card->event_wq);
16433 ++}
16434 ++
16435 + static void qeth_l2_stop_card(struct qeth_card *card)
16436 + {
16437 + QETH_CARD_TEXT(card, 2, "stopcard");
16438 +@@ -290,7 +301,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
16439 + qeth_qdio_clear_card(card, 0);
16440 + qeth_drain_output_queues(card);
16441 + qeth_clear_working_pool_list(card);
16442 +- flush_workqueue(card->event_wq);
16443 ++ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
16444 + qeth_flush_local_addrs(card);
16445 + card->info.promisc_mode = 0;
16446 + }
16447 +@@ -1109,12 +1120,6 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
16448 + NULL
16449 + };
16450 +
16451 +- /* Role should not change by itself, but if it did, */
16452 +- /* information from the hardware is authoritative. */
16453 +- mutex_lock(&data->card->sbp_lock);
16454 +- data->card->options.sbp.role = entry->role;
16455 +- mutex_unlock(&data->card->sbp_lock);
16456 +-
16457 + snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
16458 + snprintf(env_role, sizeof(env_role), "ROLE=%s",
16459 + (entry->role == QETH_SBP_ROLE_NONE) ? "none" :
16460 +@@ -1163,19 +1168,34 @@ static void qeth_bridge_state_change(struct qeth_card *card,
16461 + }
16462 +
16463 + struct qeth_addr_change_data {
16464 +- struct work_struct worker;
16465 ++ struct delayed_work dwork;
16466 + struct qeth_card *card;
16467 + struct qeth_ipacmd_addr_change ac_event;
16468 + };
16469 +
16470 + static void qeth_addr_change_event_worker(struct work_struct *work)
16471 + {
16472 +- struct qeth_addr_change_data *data =
16473 +- container_of(work, struct qeth_addr_change_data, worker);
16474 ++ struct delayed_work *dwork = to_delayed_work(work);
16475 ++ struct qeth_addr_change_data *data;
16476 ++ struct qeth_card *card;
16477 + int i;
16478 +
16479 ++ data = container_of(dwork, struct qeth_addr_change_data, dwork);
16480 ++ card = data->card;
16481 ++
16482 + QETH_CARD_TEXT(data->card, 4, "adrchgew");
16483 ++
16484 ++ if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
16485 ++ goto free;
16486 ++
16487 + if (data->ac_event.lost_event_mask) {
16488 ++ /* Potential re-config in progress, try again later: */
16489 ++ if (!mutex_trylock(&card->sbp_lock)) {
16490 ++ queue_delayed_work(card->event_wq, dwork,
16491 ++ msecs_to_jiffies(100));
16492 ++ return;
16493 ++ }
16494 ++
16495 + dev_info(&data->card->gdev->dev,
16496 + "Address change notification stopped on %s (%s)\n",
16497 + data->card->dev->name,
16498 +@@ -1184,8 +1204,9 @@ static void qeth_addr_change_event_worker(struct work_struct *work)
16499 + : (data->ac_event.lost_event_mask == 0x02)
16500 + ? "Bridge port state change"
16501 + : "Unknown reason");
16502 +- mutex_lock(&data->card->sbp_lock);
16503 ++
16504 + data->card->options.sbp.hostnotification = 0;
16505 ++ card->info.pnso_mode = QETH_PNSO_NONE;
16506 + mutex_unlock(&data->card->sbp_lock);
16507 + qeth_bridge_emit_host_event(data->card, anev_abort,
16508 + 0, NULL, NULL);
16509 +@@ -1199,6 +1220,8 @@ static void qeth_addr_change_event_worker(struct work_struct *work)
16510 + &entry->token,
16511 + &entry->addr_lnid);
16512 + }
16513 ++
16514 ++free:
16515 + kfree(data);
16516 + }
16517 +
16518 +@@ -1210,6 +1233,9 @@ static void qeth_addr_change_event(struct qeth_card *card,
16519 + struct qeth_addr_change_data *data;
16520 + int extrasize;
16521 +
16522 ++ if (card->info.pnso_mode == QETH_PNSO_NONE)
16523 ++ return;
16524 ++
16525 + QETH_CARD_TEXT(card, 4, "adrchgev");
16526 + if (cmd->hdr.return_code != 0x0000) {
16527 + if (cmd->hdr.return_code == 0x0010) {
16528 +@@ -1229,11 +1255,11 @@ static void qeth_addr_change_event(struct qeth_card *card,
16529 + QETH_CARD_TEXT(card, 2, "ACNalloc");
16530 + return;
16531 + }
16532 +- INIT_WORK(&data->worker, qeth_addr_change_event_worker);
16533 ++ INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
16534 + data->card = card;
16535 + memcpy(&data->ac_event, hostevs,
16536 + sizeof(struct qeth_ipacmd_addr_change) + extrasize);
16537 +- queue_work(card->event_wq, &data->worker);
16538 ++ queue_delayed_work(card->event_wq, &data->dwork, 0);
16539 + }
16540 +
16541 + /* SETBRIDGEPORT support; sending commands */
16542 +@@ -1554,9 +1580,14 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
16543 +
16544 + if (enable) {
16545 + qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
16546 ++ qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
16547 + rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card);
16548 +- } else
16549 ++ if (rc)
16550 ++ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
16551 ++ } else {
16552 + rc = qeth_l2_pnso(card, 0, NULL, NULL);
16553 ++ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
16554 ++ }
16555 + return rc;
16556 + }
16557 +
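
The qeth worker above avoids blocking on sbp_lock inside the event workqueue: it takes the mutex opportunistically and, on contention, re-queues itself with a short delay. The idiom, sketched with a hypothetical container struct:

static void example_event_worker(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct example_data *data =             /* hypothetical container */
                container_of(dwork, struct example_data, dwork);

        /*
         * A re-configuration may hold the lock for a long time; instead
         * of blocking the workqueue, back off and try again later.
         */
        if (!mutex_trylock(&data->lock)) {
                queue_delayed_work(data->wq, dwork, msecs_to_jiffies(100));
                return;
        }

        /* ... handle the event under data->lock ... */
        mutex_unlock(&data->lock);
        kfree(data);
}
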
16558 +diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
16559 +index 86bcae992f725..4695d25e54f24 100644
16560 +--- a/drivers/s390/net/qeth_l2_sys.c
16561 ++++ b/drivers/s390/net/qeth_l2_sys.c
16562 +@@ -157,6 +157,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
16563 + rc = -EBUSY;
16564 + else if (qeth_card_hw_is_reachable(card)) {
16565 + rc = qeth_bridgeport_an_set(card, enable);
16566 ++ /* sbp_lock ensures ordering vs notifications-stopped events */
16567 + if (!rc)
16568 + card->options.sbp.hostnotification = enable;
16569 + } else
16570 +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
16571 +index 5c3513a4b450e..202ba925c4940 100644
16572 +--- a/drivers/scsi/be2iscsi/be_main.c
16573 ++++ b/drivers/scsi/be2iscsi/be_main.c
16574 +@@ -3020,6 +3020,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
16575 + goto create_eq_error;
16576 + }
16577 +
16578 ++ mem->dma = paddr;
16579 + mem->va = eq_vaddress;
16580 + ret = be_fill_queue(eq, phba->params.num_eq_entries,
16581 + sizeof(struct be_eq_entry), eq_vaddress);
16582 +@@ -3029,7 +3030,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
16583 + goto create_eq_error;
16584 + }
16585 +
16586 +- mem->dma = paddr;
16587 + ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
16588 + BEISCSI_EQ_DELAY_DEF);
16589 + if (ret) {
16590 +@@ -3086,6 +3086,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
16591 + goto create_cq_error;
16592 + }
16593 +
16594 ++ mem->dma = paddr;
16595 + ret = be_fill_queue(cq, phba->params.num_cq_entries,
16596 + sizeof(struct sol_cqe), cq_vaddress);
16597 + if (ret) {
16598 +@@ -3095,7 +3096,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
16599 + goto create_cq_error;
16600 + }
16601 +
16602 +- mem->dma = paddr;
16603 + ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
16604 + false, 0);
16605 + if (ret) {
16606 +diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
16607 +index bc5d84f87d8fc..440ef32be048f 100644
16608 +--- a/drivers/scsi/bfa/bfad.c
16609 ++++ b/drivers/scsi/bfa/bfad.c
16610 +@@ -749,6 +749,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
16611 +
16612 + if (bfad->pci_bar0_kva == NULL) {
16613 + printk(KERN_ERR "Fail to map bar0\n");
16614 ++ rc = -ENODEV;
16615 + goto out_release_region;
16616 + }
16617 +
16618 +diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
16619 +index 7fa20609d5e7f..e43c5413ce29b 100644
16620 +--- a/drivers/scsi/csiostor/csio_hw.c
16621 ++++ b/drivers/scsi/csiostor/csio_hw.c
16622 +@@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
16623 + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
16624 + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
16625 + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
16626 +- ret = EINVAL;
16627 ++ ret = -EINVAL;
16628 + goto bye;
16629 + }
16630 +
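
Several hunks in this release (csiostor above, qla2xxx below) fix the same slip: kernel code signals failure with negative errno values, so a bare EINVAL (positive 22) sails through "if (ret < 0)" checks and can even be mistaken for success or a byte count. A tiny demonstration:

#include <stdio.h>
#include <errno.h>

static int do_op(int broken)
{
        /* Simulate a validation failure. */
        return broken ? EINVAL : -EINVAL;       /* +22 vs. the conventional -22 */
}

int main(void)
{
        int ret;

        ret = do_op(1);
        if (ret < 0)
                printf("broken variant: error caught\n");
        else
                printf("broken variant: error MISSED (ret=%d)\n", ret);

        ret = do_op(0);
        if (ret < 0)
                printf("fixed variant: error caught (ret=%d)\n", ret);
        return 0;
}
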
16631 +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
16632 +index ea7c8930592dc..70daa0605082d 100644
16633 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c
16634 ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
16635 +@@ -4928,6 +4928,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
16636 + if (IS_ERR(vhost->work_thread)) {
16637 + dev_err(dev, "Couldn't create kernel thread: %ld\n",
16638 + PTR_ERR(vhost->work_thread));
16639 ++ rc = PTR_ERR(vhost->work_thread);
16640 + goto free_host_mem;
16641 + }
16642 +
16643 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
16644 +index 8062bd99add85..e86682dc34eca 100644
16645 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
16646 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
16647 +@@ -1809,18 +1809,22 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
16648 + /* TMs are on msix_index == 0 */
16649 + if (reply_q->msix_index == 0)
16650 + continue;
16651 ++ synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
16652 + if (reply_q->irq_poll_scheduled) {
16653 + /* Calling irq_poll_disable will wait for any pending
16654 + * callbacks to have completed.
16655 + */
16656 + irq_poll_disable(&reply_q->irqpoll);
16657 + irq_poll_enable(&reply_q->irqpoll);
16658 +- reply_q->irq_poll_scheduled = false;
16659 +- reply_q->irq_line_enable = true;
16660 +- enable_irq(reply_q->os_irq);
16661 +- continue;
16662 ++ /* check how the scheduled poll has ended,
16663 ++ * clean up only if necessary
16664 ++ */
16665 ++ if (reply_q->irq_poll_scheduled) {
16666 ++ reply_q->irq_poll_scheduled = false;
16667 ++ reply_q->irq_line_enable = true;
16668 ++ enable_irq(reply_q->os_irq);
16669 ++ }
16670 + }
16671 +- synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
16672 + }
16673 + }
16674 +
16675 +diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
16676 +index 8906aceda4c43..0354898d7cac1 100644
16677 +--- a/drivers/scsi/mvumi.c
16678 ++++ b/drivers/scsi/mvumi.c
16679 +@@ -2425,6 +2425,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
16680 + if (IS_ERR(mhba->dm_thread)) {
16681 + dev_err(&mhba->pdev->dev,
16682 + "failed to create device scan thread\n");
16683 ++ ret = PTR_ERR(mhba->dm_thread);
16684 + mutex_unlock(&mhba->sas_discovery_mutex);
16685 + goto fail_create_thread;
16686 + }
16687 +diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
16688 +index 5ca424df355c1..bc30e3e039dd2 100644
16689 +--- a/drivers/scsi/qedf/qedf_main.c
16690 ++++ b/drivers/scsi/qedf/qedf_main.c
16691 +@@ -726,7 +726,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
16692 + rdata = fcport->rdata;
16693 + if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
16694 + QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
16695 +- rc = 1;
16696 ++ rc = SUCCESS;
16697 + goto out;
16698 + }
16699 +
16700 +diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
16701 +index 6ed74583b1b9b..f158fde0a43c1 100644
16702 +--- a/drivers/scsi/qedi/qedi_fw.c
16703 ++++ b/drivers/scsi/qedi/qedi_fw.c
16704 +@@ -59,6 +59,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
16705 + "Freeing tid=0x%x for cid=0x%x\n",
16706 + cmd->task_id, qedi_conn->iscsi_conn_id);
16707 +
16708 ++ spin_lock(&qedi_conn->list_lock);
16709 + if (likely(cmd->io_cmd_in_list)) {
16710 + cmd->io_cmd_in_list = false;
16711 + list_del_init(&cmd->io_cmd);
16712 +@@ -69,6 +70,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
16713 + cmd->task_id, qedi_conn->iscsi_conn_id,
16714 + &cmd->io_cmd);
16715 + }
16716 ++ spin_unlock(&qedi_conn->list_lock);
16717 +
16718 + cmd->state = RESPONSE_RECEIVED;
16719 + qedi_clear_task_idx(qedi, cmd->task_id);
16720 +@@ -122,6 +124,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
16721 + "Freeing tid=0x%x for cid=0x%x\n",
16722 + cmd->task_id, qedi_conn->iscsi_conn_id);
16723 +
16724 ++ spin_lock(&qedi_conn->list_lock);
16725 + if (likely(cmd->io_cmd_in_list)) {
16726 + cmd->io_cmd_in_list = false;
16727 + list_del_init(&cmd->io_cmd);
16728 +@@ -132,6 +135,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
16729 + cmd->task_id, qedi_conn->iscsi_conn_id,
16730 + &cmd->io_cmd);
16731 + }
16732 ++ spin_unlock(&qedi_conn->list_lock);
16733 +
16734 + cmd->state = RESPONSE_RECEIVED;
16735 + qedi_clear_task_idx(qedi, cmd->task_id);
16736 +@@ -222,11 +226,13 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
16737 +
16738 + tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
16739 +
16740 ++ spin_lock(&qedi_conn->list_lock);
16741 + if (likely(qedi_cmd->io_cmd_in_list)) {
16742 + qedi_cmd->io_cmd_in_list = false;
16743 + list_del_init(&qedi_cmd->io_cmd);
16744 + qedi_conn->active_cmd_count--;
16745 + }
16746 ++ spin_unlock(&qedi_conn->list_lock);
16747 +
16748 + if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
16749 + ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
16750 +@@ -288,11 +294,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
16751 + ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
16752 + qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
16753 +
16754 ++ spin_lock(&qedi_conn->list_lock);
16755 + if (likely(cmd->io_cmd_in_list)) {
16756 + cmd->io_cmd_in_list = false;
16757 + list_del_init(&cmd->io_cmd);
16758 + qedi_conn->active_cmd_count--;
16759 + }
16760 ++ spin_unlock(&qedi_conn->list_lock);
16761 +
16762 + memset(task_ctx, '\0', sizeof(*task_ctx));
16763 +
16764 +@@ -816,8 +824,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
16765 + qedi_clear_task_idx(qedi_conn->qedi, rtid);
16766 +
16767 + spin_lock(&qedi_conn->list_lock);
16768 +- list_del_init(&dbg_cmd->io_cmd);
16769 +- qedi_conn->active_cmd_count--;
16770 ++ if (likely(dbg_cmd->io_cmd_in_list)) {
16771 ++ dbg_cmd->io_cmd_in_list = false;
16772 ++ list_del_init(&dbg_cmd->io_cmd);
16773 ++ qedi_conn->active_cmd_count--;
16774 ++ }
16775 + spin_unlock(&qedi_conn->list_lock);
16776 + qedi_cmd->state = CLEANUP_RECV;
16777 + wake_up_interruptible(&qedi_conn->wait_queue);
16778 +@@ -1235,6 +1246,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
16779 + qedi_conn->cmd_cleanup_req++;
16780 + qedi_iscsi_cleanup_task(ctask, true);
16781 +
16782 ++ cmd->io_cmd_in_list = false;
16783 + list_del_init(&cmd->io_cmd);
16784 + qedi_conn->active_cmd_count--;
16785 + QEDI_WARN(&qedi->dbg_ctx,
16786 +@@ -1446,8 +1458,11 @@ ldel_exit:
16787 + spin_unlock_bh(&qedi_conn->tmf_work_lock);
16788 +
16789 + spin_lock(&qedi_conn->list_lock);
16790 +- list_del_init(&cmd->io_cmd);
16791 +- qedi_conn->active_cmd_count--;
16792 ++ if (likely(cmd->io_cmd_in_list)) {
16793 ++ cmd->io_cmd_in_list = false;
16794 ++ list_del_init(&cmd->io_cmd);
16795 ++ qedi_conn->active_cmd_count--;
16796 ++ }
16797 + spin_unlock(&qedi_conn->list_lock);
16798 +
16799 + clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
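
The qedi changes all apply one rule: any unlink from active_cmd_list happens under list_lock and only after checking io_cmd_in_list, so a command racing between completion and cleanup cannot be removed (and the counter decremented) twice. The shape, as the patch applies it:

spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
        cmd->io_cmd_in_list = false;    /* claim the removal exactly once */
        list_del_init(&cmd->io_cmd);
        qedi_conn->active_cmd_count--;
}
spin_unlock(&qedi_conn->list_lock);
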
16800 +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
16801 +index c14ac7882afac..10b9a986a41dc 100644
16802 +--- a/drivers/scsi/qedi/qedi_iscsi.c
16803 ++++ b/drivers/scsi/qedi/qedi_iscsi.c
16804 +@@ -975,11 +975,13 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
16805 + {
16806 + struct qedi_cmd *cmd, *cmd_tmp;
16807 +
16808 ++ spin_lock(&qedi_conn->list_lock);
16809 + list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
16810 + io_cmd) {
16811 + list_del_init(&cmd->io_cmd);
16812 + qedi_conn->active_cmd_count--;
16813 + }
16814 ++ spin_unlock(&qedi_conn->list_lock);
16815 + }
16816 +
16817 + static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
16818 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
16819 +index 6f038ae5efcaf..dfe24b505b402 100644
16820 +--- a/drivers/scsi/qedi/qedi_main.c
16821 ++++ b/drivers/scsi/qedi/qedi_main.c
16822 +@@ -1127,6 +1127,15 @@ static void qedi_schedule_recovery_handler(void *dev)
16823 + schedule_delayed_work(&qedi->recovery_work, 0);
16824 + }
16825 +
16826 ++static void qedi_set_conn_recovery(struct iscsi_cls_session *cls_session)
16827 ++{
16828 ++ struct iscsi_session *session = cls_session->dd_data;
16829 ++ struct iscsi_conn *conn = session->leadconn;
16830 ++ struct qedi_conn *qedi_conn = conn->dd_data;
16831 ++
16832 ++ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
16833 ++}
16834 ++
16835 + static void qedi_link_update(void *dev, struct qed_link_output *link)
16836 + {
16837 + struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
16838 +@@ -1138,6 +1147,7 @@ static void qedi_link_update(void *dev, struct qed_link_output *link)
16839 + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
16840 + "Link Down event.\n");
16841 + atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
16842 ++ iscsi_host_for_each_session(qedi->shost, qedi_set_conn_recovery);
16843 + }
16844 + }
16845 +
16846 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
16847 +index 0bd04a62af836..8d4b651e14422 100644
16848 +--- a/drivers/scsi/qla2xxx/qla_init.c
16849 ++++ b/drivers/scsi/qla2xxx/qla_init.c
16850 +@@ -63,6 +63,16 @@ void qla2x00_sp_free(srb_t *sp)
16851 + qla2x00_rel_sp(sp);
16852 + }
16853 +
16854 ++void qla2xxx_rel_done_warning(srb_t *sp, int res)
16855 ++{
16856 ++ WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
16857 ++}
16858 ++
16859 ++void qla2xxx_rel_free_warning(srb_t *sp)
16860 ++{
16861 ++ WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
16862 ++}
16863 ++
16864 + /* Asynchronous Login/Logout Routines -------------------------------------- */
16865 +
16866 + unsigned long
16867 +diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
16868 +index 861dc522723ce..2aa6f81f87c43 100644
16869 +--- a/drivers/scsi/qla2xxx/qla_inline.h
16870 ++++ b/drivers/scsi/qla2xxx/qla_inline.h
16871 +@@ -207,10 +207,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
16872 + return sp;
16873 + }
16874 +
16875 ++void qla2xxx_rel_done_warning(srb_t *sp, int res);
16876 ++void qla2xxx_rel_free_warning(srb_t *sp);
16877 ++
16878 + static inline void
16879 + qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
16880 + {
16881 + sp->qpair = NULL;
16882 ++ sp->done = qla2xxx_rel_done_warning;
16883 ++ sp->free = qla2xxx_rel_free_warning;
16884 + mempool_free(sp, qpair->srb_mempool);
16885 + QLA_QPAIR_MARK_NOT_BUSY(qpair);
16886 + }
16887 +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
16888 +index 226f1428d3e52..78ad9827bbb98 100644
16889 +--- a/drivers/scsi/qla2xxx/qla_mbx.c
16890 ++++ b/drivers/scsi/qla2xxx/qla_mbx.c
16891 +@@ -4958,7 +4958,7 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
16892 + "Done %s.\n", __func__);
16893 + }
16894 +
16895 +- dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
16896 ++ dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
16897 + els_cmd_map, els_cmd_map_dma);
16898 +
16899 + return rval;
16900 +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
16901 +index 90bbc61f361b9..0ded9a778bb0d 100644
16902 +--- a/drivers/scsi/qla2xxx/qla_nvme.c
16903 ++++ b/drivers/scsi/qla2xxx/qla_nvme.c
16904 +@@ -683,7 +683,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
16905 + struct nvme_fc_port_template *tmpl;
16906 + struct qla_hw_data *ha;
16907 + struct nvme_fc_port_info pinfo;
16908 +- int ret = EINVAL;
16909 ++ int ret = -EINVAL;
16910 +
16911 + if (!IS_ENABLED(CONFIG_NVME_FC))
16912 + return ret;
16913 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
16914 +index 2d445bdb21290..2a88e7e79bd50 100644
16915 +--- a/drivers/scsi/qla2xxx/qla_target.c
16916 ++++ b/drivers/scsi/qla2xxx/qla_target.c
16917 +@@ -5668,7 +5668,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
16918 + /* found existing exchange */
16919 + qpair->retry_term_cnt++;
16920 + if (qpair->retry_term_cnt >= 5) {
16921 +- rc = EIO;
16922 ++ rc = -EIO;
16923 + qpair->retry_term_cnt = 0;
16924 + ql_log(ql_log_warn, vha, 0xffff,
16925 + "Unable to send ABTS Respond. Dumping firmware.\n");
16926 +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
16927 +index 676778cbc5509..4775baac43c29 100644
16928 +--- a/drivers/scsi/qla4xxx/ql4_os.c
16929 ++++ b/drivers/scsi/qla4xxx/ql4_os.c
16930 +@@ -1254,7 +1254,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
16931 + le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
16932 + exit_host_stats:
16933 + if (ql_iscsi_stats)
16934 +- dma_free_coherent(&ha->pdev->dev, host_stats_size,
16935 ++ dma_free_coherent(&ha->pdev->dev, stats_size,
16936 + ql_iscsi_stats, iscsi_stats_dma);
16937 +
16938 + ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
16939 +diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
16940 +index 1129fe7a27edd..ee069a8b442a7 100644
16941 +--- a/drivers/scsi/smartpqi/smartpqi.h
16942 ++++ b/drivers/scsi/smartpqi/smartpqi.h
16943 +@@ -359,7 +359,7 @@ struct pqi_event_response {
16944 + struct pqi_iu_header header;
16945 + u8 event_type;
16946 + u8 reserved2 : 7;
16947 +- u8 request_acknowlege : 1;
16948 ++ u8 request_acknowledge : 1;
16949 + __le16 event_id;
16950 + __le32 additional_event_id;
16951 + union {
16952 +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
16953 +index ca1e6cf6a38ef..714a3d38fc431 100644
16954 +--- a/drivers/scsi/smartpqi/smartpqi_init.c
16955 ++++ b/drivers/scsi/smartpqi/smartpqi_init.c
16956 +@@ -542,8 +542,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
16957 + put_unaligned_be16(cdb_length, &cdb[7]);
16958 + break;
16959 + default:
16960 +- dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
16961 +- cmd);
16962 ++ dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
16963 + break;
16964 + }
16965 +
16966 +@@ -2462,7 +2461,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
16967 + offload_to_mirror =
16968 + (offload_to_mirror >= layout_map_count - 1) ?
16969 + 0 : offload_to_mirror + 1;
16970 +- WARN_ON(offload_to_mirror >= layout_map_count);
16971 + device->offload_to_mirror = offload_to_mirror;
16972 + /*
16973 + * Avoid direct use of device->offload_to_mirror within this
16974 +@@ -2915,10 +2913,14 @@ static int pqi_interpret_task_management_response(
16975 + return rc;
16976 + }
16977 +
16978 +-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
16979 +- struct pqi_queue_group *queue_group)
16980 ++static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
16981 ++{
16982 ++ pqi_take_ctrl_offline(ctrl_info);
16983 ++}
16984 ++
16985 ++static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
16986 + {
16987 +- unsigned int num_responses;
16988 ++ int num_responses;
16989 + pqi_index_t oq_pi;
16990 + pqi_index_t oq_ci;
16991 + struct pqi_io_request *io_request;
16992 +@@ -2930,6 +2932,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
16993 +
16994 + while (1) {
16995 + oq_pi = readl(queue_group->oq_pi);
16996 ++ if (oq_pi >= ctrl_info->num_elements_per_oq) {
16997 ++ pqi_invalid_response(ctrl_info);
16998 ++ dev_err(&ctrl_info->pci_dev->dev,
16999 ++ "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
17000 ++ oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
17001 ++ return -1;
17002 ++ }
17003 + if (oq_pi == oq_ci)
17004 + break;
17005 +
17006 +@@ -2938,10 +2947,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
17007 + (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
17008 +
17009 + request_id = get_unaligned_le16(&response->request_id);
17010 +- WARN_ON(request_id >= ctrl_info->max_io_slots);
17011 ++ if (request_id >= ctrl_info->max_io_slots) {
17012 ++ pqi_invalid_response(ctrl_info);
17013 ++ dev_err(&ctrl_info->pci_dev->dev,
17014 ++ "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
17015 ++ request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
17016 ++ return -1;
17017 ++ }
17018 +
17019 + io_request = &ctrl_info->io_request_pool[request_id];
17020 +- WARN_ON(atomic_read(&io_request->refcount) == 0);
17021 ++ if (atomic_read(&io_request->refcount) == 0) {
17022 ++ pqi_invalid_response(ctrl_info);
17023 ++ dev_err(&ctrl_info->pci_dev->dev,
17024 ++ "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
17025 ++ request_id, oq_pi, oq_ci);
17026 ++ return -1;
17027 ++ }
17028 +
17029 + switch (response->header.iu_type) {
17030 + case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
17031 +@@ -2971,24 +2992,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
17032 + io_request->error_info = ctrl_info->error_buffer +
17033 + (get_unaligned_le16(&response->error_index) *
17034 + PQI_ERROR_BUFFER_ELEMENT_LENGTH);
17035 +- pqi_process_io_error(response->header.iu_type,
17036 +- io_request);
17037 ++ pqi_process_io_error(response->header.iu_type, io_request);
17038 + break;
17039 + default:
17040 ++ pqi_invalid_response(ctrl_info);
17041 + dev_err(&ctrl_info->pci_dev->dev,
17042 +- "unexpected IU type: 0x%x\n",
17043 +- response->header.iu_type);
17044 +- break;
17045 ++ "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
17046 ++ response->header.iu_type, oq_pi, oq_ci);
17047 ++ return -1;
17048 + }
17049 +
17050 +- io_request->io_complete_callback(io_request,
17051 +- io_request->context);
17052 ++ io_request->io_complete_callback(io_request, io_request->context);
17053 +
17054 + /*
17055 + * Note that the I/O request structure CANNOT BE TOUCHED after
17056 + * returning from the I/O completion callback!
17057 + */
17058 +-
17059 + oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
17060 + }
17061 +
17062 +@@ -3300,9 +3319,9 @@ static void pqi_ofa_capture_event_payload(struct pqi_event *event,
17063 + }
17064 + }
17065 +
17066 +-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
17067 ++static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
17068 + {
17069 +- unsigned int num_events;
17070 ++ int num_events;
17071 + pqi_index_t oq_pi;
17072 + pqi_index_t oq_ci;
17073 + struct pqi_event_queue *event_queue;
17074 +@@ -3316,26 +3335,31 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
17075 +
17076 + while (1) {
17077 + oq_pi = readl(event_queue->oq_pi);
17078 ++ if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
17079 ++ pqi_invalid_response(ctrl_info);
17080 ++ dev_err(&ctrl_info->pci_dev->dev,
17081 ++ "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
17082 ++ oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
17083 ++ return -1;
17084 ++ }
17085 ++
17086 + if (oq_pi == oq_ci)
17087 + break;
17088 +
17089 + num_events++;
17090 +- response = event_queue->oq_element_array +
17091 +- (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
17092 ++ response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
17093 +
17094 + event_index =
17095 + pqi_event_type_to_event_index(response->event_type);
17096 +
17097 +- if (event_index >= 0) {
17098 +- if (response->request_acknowlege) {
17099 +- event = &ctrl_info->events[event_index];
17100 +- event->pending = true;
17101 +- event->event_type = response->event_type;
17102 +- event->event_id = response->event_id;
17103 +- event->additional_event_id =
17104 +- response->additional_event_id;
17105 ++ if (event_index >= 0 && response->request_acknowledge) {
17106 ++ event = &ctrl_info->events[event_index];
17107 ++ event->pending = true;
17108 ++ event->event_type = response->event_type;
17109 ++ event->event_id = response->event_id;
17110 ++ event->additional_event_id = response->additional_event_id;
17111 ++ if (event->event_type == PQI_EVENT_TYPE_OFA)
17112 + pqi_ofa_capture_event_payload(event, response);
17113 +- }
17114 + }
17115 +
17116 + oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
17117 +@@ -3450,7 +3474,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
17118 + {
17119 + struct pqi_ctrl_info *ctrl_info;
17120 + struct pqi_queue_group *queue_group;
17121 +- unsigned int num_responses_handled;
17122 ++ int num_io_responses_handled;
17123 ++ int num_events_handled;
17124 +
17125 + queue_group = data;
17126 + ctrl_info = queue_group->ctrl_info;
17127 +@@ -3458,17 +3483,25 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
17128 + if (!pqi_is_valid_irq(ctrl_info))
17129 + return IRQ_NONE;
17130 +
17131 +- num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
17132 ++ num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
17133 ++ if (num_io_responses_handled < 0)
17134 ++ goto out;
17135 +
17136 +- if (irq == ctrl_info->event_irq)
17137 +- num_responses_handled += pqi_process_event_intr(ctrl_info);
17138 ++ if (irq == ctrl_info->event_irq) {
17139 ++ num_events_handled = pqi_process_event_intr(ctrl_info);
17140 ++ if (num_events_handled < 0)
17141 ++ goto out;
17142 ++ } else {
17143 ++ num_events_handled = 0;
17144 ++ }
17145 +
17146 +- if (num_responses_handled)
17147 ++ if (num_io_responses_handled + num_events_handled > 0)
17148 + atomic_inc(&ctrl_info->num_interrupts);
17149 +
17150 + pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
17151 + pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
17152 +
17153 ++out:
17154 + return IRQ_HANDLED;
17155 + }
17156 +
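
The smartpqi rework above replaces WARN_ON() on controller-supplied indices with hard validation: an out-of-range producer index or request ID now takes the controller offline and returns -1 instead of indexing past the end of an array. The general shape, with hypothetical types:

/* Sketch: treat indices read from hardware as untrusted input. */
static int process_queue(struct example_ctrl *c)        /* hypothetical */
{
        u32 pi = readl(c->oq_pi);       /* producer index from the device */

        if (pi >= c->num_elements) {
                dev_err(c->dev, "producer index %u out of range (0-%u)\n",
                        pi, c->num_elements - 1);
                take_ctrl_offline(c);   /* fail fast, never index OOB */
                return -1;
        }
        /* ... pi is now safe to use ... */
        return 0;
}
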
17157 +diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
17158 +index 1755dd6b04aec..6b661135c03b5 100644
17159 +--- a/drivers/scsi/ufs/ufs-mediatek.c
17160 ++++ b/drivers/scsi/ufs/ufs-mediatek.c
17161 +@@ -129,7 +129,10 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
17162 + __func__, err);
17163 + } else if (IS_ERR(host->mphy)) {
17164 + err = PTR_ERR(host->mphy);
17165 +- dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
17166 ++ if (err != -ENODEV) {
17167 ++ dev_info(dev, "%s: PHY get failed %d\n", __func__,
17168 ++ err);
17169 ++ }
17170 + }
17171 +
17172 + if (err)
17173 +@@ -669,13 +672,7 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
17174 +
17175 + static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
17176 + {
17177 +- struct ufs_dev_info *dev_info = &hba->dev_info;
17178 +- u16 mid = dev_info->wmanufacturerid;
17179 +-
17180 + ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
17181 +-
17182 +- if (mid == UFS_VENDOR_SAMSUNG)
17183 +- hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
17184 + }
17185 +
17186 + /**
17187 +diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
17188 +index d0d75527830e9..823eccfdd00af 100644
17189 +--- a/drivers/scsi/ufs/ufs-qcom.c
17190 ++++ b/drivers/scsi/ufs/ufs-qcom.c
17191 +@@ -1614,9 +1614,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
17192 + */
17193 + }
17194 + mask <<= offset;
17195 +-
17196 +- pm_runtime_get_sync(host->hba->dev);
17197 +- ufshcd_hold(host->hba, false);
17198 + ufshcd_rmwl(host->hba, TEST_BUS_SEL,
17199 + (u32)host->testbus.select_major << 19,
17200 + REG_UFS_CFG1);
17201 +@@ -1629,8 +1626,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
17202 + * committed before returning.
17203 + */
17204 + mb();
17205 +- ufshcd_release(host->hba);
17206 +- pm_runtime_put_sync(host->hba->dev);
17207 +
17208 + return 0;
17209 + }
17210 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
17211 +index 1d157ff58d817..316b861305eae 100644
17212 +--- a/drivers/scsi/ufs/ufshcd.c
17213 ++++ b/drivers/scsi/ufs/ufshcd.c
17214 +@@ -474,6 +474,9 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
17215 +
17216 + prdt_length = le16_to_cpu(
17217 + lrbp->utr_descriptor_ptr->prd_table_length);
17218 ++ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
17219 ++ prdt_length /= sizeof(struct ufshcd_sg_entry);
17220 ++
17221 + dev_err(hba->dev,
17222 + "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
17223 + tag, prdt_length,
17224 +diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
17225 +index ae1e248a8fb8a..1d2bc181da050 100644
17226 +--- a/drivers/slimbus/core.c
17227 ++++ b/drivers/slimbus/core.c
17228 +@@ -301,8 +301,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
17229 + {
17230 + /* Remove all clients */
17231 + device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
17232 +- /* Enter Clock Pause */
17233 +- slim_ctrl_clk_pause(ctrl, false, 0);
17234 + ida_simple_remove(&ctrl_ida, ctrl->id);
17235 +
17236 + return 0;
17237 +@@ -326,8 +324,8 @@ void slim_report_absent(struct slim_device *sbdev)
17238 + mutex_lock(&ctrl->lock);
17239 + sbdev->is_laddr_valid = false;
17240 + mutex_unlock(&ctrl->lock);
17241 +-
17242 +- ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
17243 ++ if (!ctrl->get_laddr)
17244 ++ ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
17245 + slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
17246 + }
17247 + EXPORT_SYMBOL_GPL(slim_report_absent);
17248 +diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
17249 +index 743ee7b4e63f2..218aefc3531cd 100644
17250 +--- a/drivers/slimbus/qcom-ngd-ctrl.c
17251 ++++ b/drivers/slimbus/qcom-ngd-ctrl.c
17252 +@@ -1277,9 +1277,13 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
17253 + {
17254 + struct qcom_slim_ngd_qmi *qmi =
17255 + container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
17256 ++ struct qcom_slim_ngd_ctrl *ctrl =
17257 ++ container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
17258 +
17259 + qmi->svc_info.sq_node = 0;
17260 + qmi->svc_info.sq_port = 0;
17261 ++
17262 ++ qcom_slim_ngd_enable(ctrl, false);
17263 + }
17264 +
17265 + static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
17266 +diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
17267 +index f4fb527d83018..c5dd026fe889f 100644
17268 +--- a/drivers/soc/fsl/qbman/bman.c
17269 ++++ b/drivers/soc/fsl/qbman/bman.c
17270 +@@ -660,7 +660,7 @@ int bm_shutdown_pool(u32 bpid)
17271 + }
17272 + done:
17273 + put_affine_portal();
17274 +- return 0;
17275 ++ return err;
17276 + }
17277 +
17278 + struct gen_pool *bm_bpalloc;
17279 +diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
17280 +index dc644cfb6419e..c4609cd562ac4 100644
17281 +--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
17282 ++++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
17283 +@@ -223,15 +223,16 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
17284 + }
17285 + EXPORT_SYMBOL(cmdq_pkt_write_mask);
17286 +
17287 +-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
17288 ++int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
17289 + {
17290 + struct cmdq_instruction inst = { {0} };
17291 ++ u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;
17292 +
17293 + if (event >= CMDQ_MAX_EVENT)
17294 + return -EINVAL;
17295 +
17296 + inst.op = CMDQ_CODE_WFE;
17297 +- inst.value = CMDQ_WFE_OPTION;
17298 ++ inst.value = CMDQ_WFE_OPTION | clear_option;
17299 + inst.event = event;
17300 +
17301 + return cmdq_pkt_append_command(pkt, inst);
17302 +diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
17303 +index 1f35b097c6356..7abfc8c4fdc72 100644
17304 +--- a/drivers/soc/qcom/apr.c
17305 ++++ b/drivers/soc/qcom/apr.c
17306 +@@ -328,7 +328,7 @@ static int of_apr_add_pd_lookups(struct device *dev)
17307 +
17308 + pds = pdr_add_lookup(apr->pdr, service_name, service_path);
17309 + if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
17310 +- dev_err(dev, "pdr add lookup failed: %d\n", ret);
17311 ++ dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
17312 + return PTR_ERR(pds);
17313 + }
17314 + }
17315 +diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
17316 +index 15b5002e4127b..ab9ae8cdfa54c 100644
17317 +--- a/drivers/soc/qcom/pdr_internal.h
17318 ++++ b/drivers/soc/qcom/pdr_internal.h
17319 +@@ -185,7 +185,7 @@ struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
17320 + .data_type = QMI_STRUCT,
17321 + .elem_len = SERVREG_DOMAIN_LIST_LENGTH,
17322 + .elem_size = sizeof(struct servreg_location_entry),
17323 +- .array_type = NO_ARRAY,
17324 ++ .array_type = VAR_LEN_ARRAY,
17325 + .tlv_type = 0x12,
17326 + .offset = offsetof(struct servreg_get_domain_list_resp,
17327 + domain_list),
17328 +diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
17329 +index 31ff49fcd078b..c556623dae024 100644
17330 +--- a/drivers/soc/xilinx/zynqmp_power.c
17331 ++++ b/drivers/soc/xilinx/zynqmp_power.c
17332 +@@ -205,7 +205,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
17333 + rx_chan = mbox_request_channel_byname(client, "rx");
17334 + if (IS_ERR(rx_chan)) {
17335 + dev_err(&pdev->dev, "Failed to request rx channel\n");
17336 +- return IS_ERR(rx_chan);
17337 ++ return PTR_ERR(rx_chan);
17338 + }
17339 + } else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) {
17340 + irq = platform_get_irq(pdev, 0);
17341 +diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
17342 +index 24eafe0aa1c3e..1330ffc475960 100644
17343 +--- a/drivers/soundwire/cadence_master.c
17344 ++++ b/drivers/soundwire/cadence_master.c
17345 +@@ -791,7 +791,16 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
17346 + CDNS_MCP_INT_SLAVE_MASK, 0);
17347 +
17348 + int_status &= ~CDNS_MCP_INT_SLAVE_MASK;
17349 +- schedule_work(&cdns->work);
17350 ++
17351 ++ /*
17352 ++ * Deal with possible race condition between interrupt
17353 ++ * handling and disabling interrupts on suspend.
17354 ++ *
17355 ++ * If the master is in the process of disabling
17356 ++ * interrupts, don't queue new work.
17357 ++ */
17358 ++ if (cdns->interrupt_enabled)
17359 ++ schedule_work(&cdns->work);
17360 + }
17361 +
17362 + cdns_writel(cdns, CDNS_MCP_INTSTAT, int_status);
17363 +@@ -924,6 +933,19 @@ update_masks:
17364 + slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
17365 + cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave_state);
17366 + }
17367 ++ cdns->interrupt_enabled = state;
17368 ++
17369 ++ /*
17370 ++ * Complete any on-going status updates before updating masks,
17371 ++ * and cancel queued status updates.
17372 ++ *
17373 ++ * There could be a race with a new interrupt thrown before
17374 ++ * the 3 mask updates below are complete, so in the interrupt
17375 ++ * we use the 'interrupt_enabled' status to prevent new work
17376 ++ * from being queued.
17377 ++ */
17378 ++ if (!state)
17379 ++ cancel_work_sync(&cdns->work);
17380 +
17381 + cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0);
17382 + cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1);
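
The cadence changes pair a software flag with cancel_work_sync(): the IRQ handler only queues work while interrupt_enabled is set, and the disable path clears the flag and then flushes anything already queued, so no status update can race the mask writes. Condensed from the hunks above:

/* In the interrupt handler: */
if (cdns->interrupt_enabled)
        schedule_work(&cdns->work);

/* In the enable/disable path: */
cdns->interrupt_enabled = state;
if (!state)
        cancel_work_sync(&cdns->work);  /* wait out and discard queued work */
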
17383 +diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
17384 +index 7638858397df9..4d1aab5b5ec2d 100644
17385 +--- a/drivers/soundwire/cadence_master.h
17386 ++++ b/drivers/soundwire/cadence_master.h
17387 +@@ -84,6 +84,8 @@ struct sdw_cdns_stream_config {
17388 + * @bus: Bus handle
17389 + * @stream_type: Stream type
17390 + * @link_id: Master link id
17391 ++ * @hw_params: hw_params to be applied in .prepare step
17392 ++ * @suspended: status set when suspended, to be used in .prepare
17393 + */
17394 + struct sdw_cdns_dma_data {
17395 + char *name;
17396 +@@ -92,6 +94,8 @@ struct sdw_cdns_dma_data {
17397 + struct sdw_bus *bus;
17398 + enum sdw_stream_type stream_type;
17399 + int link_id;
17400 ++ struct snd_pcm_hw_params *hw_params;
17401 ++ bool suspended;
17402 + };
17403 +
17404 + /**
17405 +@@ -129,6 +133,7 @@ struct sdw_cdns {
17406 +
17407 + bool link_up;
17408 + unsigned int msg_count;
17409 ++ bool interrupt_enabled;
17410 +
17411 + struct work_struct work;
17412 +
17413 +diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
17414 +index a283670659a92..50b9bad8fba7f 100644
17415 +--- a/drivers/soundwire/intel.c
17416 ++++ b/drivers/soundwire/intel.c
17417 +@@ -856,6 +856,10 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
17418 + intel_pdi_alh_configure(sdw, pdi);
17419 + sdw_cdns_config_stream(cdns, ch, dir, pdi);
17420 +
17421 ++ /* store pdi and hw_params, may be needed in prepare step */
17422 ++ dma->suspended = false;
17423 ++ dma->pdi = pdi;
17424 ++ dma->hw_params = params;
17425 +
17426 + /* Inform DSP about PDI stream number */
17427 + ret = intel_params_stream(sdw, substream, dai, params,
17428 +@@ -899,7 +903,11 @@ error:
17429 + static int intel_prepare(struct snd_pcm_substream *substream,
17430 + struct snd_soc_dai *dai)
17431 + {
17432 ++ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
17433 ++ struct sdw_intel *sdw = cdns_to_intel(cdns);
17434 + struct sdw_cdns_dma_data *dma;
17435 ++ int ch, dir;
17436 ++ int ret;
17437 +
17438 + dma = snd_soc_dai_get_dma_data(dai, substream);
17439 + if (!dma) {
17440 +@@ -908,7 +916,41 @@ static int intel_prepare(struct snd_pcm_substream *substream,
17441 + return -EIO;
17442 + }
17443 +
17444 +- return sdw_prepare_stream(dma->stream);
17445 ++ if (dma->suspended) {
17446 ++ dma->suspended = false;
17447 ++
17448 ++ /*
17449 ++ * .prepare() is called after system resume, where we
17450 ++ * need to reinitialize the SHIM/ALH/Cadence IP.
17451 ++ * .prepare() is also called to deal with underflows,
17452 ++ * but in those cases we cannot touch ALH/SHIM
17453 ++ * registers
17454 ++ */
17455 ++
17456 ++ /* configure stream */
17457 ++ ch = params_channels(dma->hw_params);
17458 ++ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
17459 ++ dir = SDW_DATA_DIR_RX;
17460 ++ else
17461 ++ dir = SDW_DATA_DIR_TX;
17462 ++
17463 ++ intel_pdi_shim_configure(sdw, dma->pdi);
17464 ++ intel_pdi_alh_configure(sdw, dma->pdi);
17465 ++ sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
17466 ++
17467 ++ /* Inform DSP about PDI stream number */
17468 ++ ret = intel_params_stream(sdw, substream, dai,
17469 ++ dma->hw_params,
17470 ++ sdw->instance,
17471 ++ dma->pdi->intel_alh_id);
17472 ++ if (ret)
17473 ++ goto err;
17474 ++ }
17475 ++
17476 ++ ret = sdw_prepare_stream(dma->stream);
17477 ++
17478 ++err:
17479 ++ return ret;
17480 + }
17481 +
17482 + static int intel_trigger(struct snd_pcm_substream *substream, int cmd,
17483 +@@ -979,6 +1021,9 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
17484 + return ret;
17485 + }
17486 +
17487 ++ dma->hw_params = NULL;
17488 ++ dma->pdi = NULL;
17489 ++
17490 + return 0;
17491 + }
17492 +
17493 +@@ -988,6 +1033,29 @@ static void intel_shutdown(struct snd_pcm_substream *substream,
17494 +
17495 + }
17496 +
17497 ++static int intel_component_dais_suspend(struct snd_soc_component *component)
17498 ++{
17499 ++ struct sdw_cdns_dma_data *dma;
17500 ++ struct snd_soc_dai *dai;
17501 ++
17502 ++ for_each_component_dais(component, dai) {
17503 ++ /*
17504 ++ * we don't have a .suspend dai_ops, and we don't have access
17505 ++ * to the substream, so let's mark both capture and playback
17506 ++ * DMA contexts as suspended
17507 ++ */
17508 ++ dma = dai->playback_dma_data;
17509 ++ if (dma)
17510 ++ dma->suspended = true;
17511 ++
17512 ++ dma = dai->capture_dma_data;
17513 ++ if (dma)
17514 ++ dma->suspended = true;
17515 ++ }
17516 ++
17517 ++ return 0;
17518 ++}
17519 ++
17520 + static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
17521 + void *stream, int direction)
17522 + {
17523 +@@ -1011,7 +1079,7 @@ static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
17524 + dma = dai->capture_dma_data;
17525 +
17526 + if (!dma)
17527 +- return NULL;
17528 ++ return ERR_PTR(-EINVAL);
17529 +
17530 + return dma->stream;
17531 + }
17532 +@@ -1040,6 +1108,7 @@ static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
17533 +
17534 + static const struct snd_soc_component_driver dai_component = {
17535 + .name = "soundwire",
17536 ++ .suspend = intel_component_dais_suspend
17537 + };
17538 +
17539 + static int intel_create_dai(struct sdw_cdns *cdns,
17540 +diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
17541 +index 6e36deb505b1e..610957f82b39c 100644
17542 +--- a/drivers/soundwire/stream.c
17543 ++++ b/drivers/soundwire/stream.c
17544 +@@ -1913,7 +1913,7 @@ void sdw_shutdown_stream(void *sdw_substream)
17545 +
17546 + sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream);
17547 +
17548 +- if (!sdw_stream) {
17549 ++ if (IS_ERR(sdw_stream)) {
17550 + dev_err(rtd->dev, "no stream found for DAI %s", dai->name);
17551 + return;
17552 + }
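
Together, the intel.c and stream.c hunks above change the stream lookup contract from "NULL on failure" to "ERR_PTR(-EINVAL) on failure", so every caller has to test with IS_ERR() instead of a NULL check. A minimal sketch of the idiom:

#include <linux/err.h>

static void *lookup_stream(void *dma)
{
        if (!dma)
                return ERR_PTR(-EINVAL);        /* was: return NULL */

        return dma;
}

static int use_stream(void *dma)
{
        void *stream = lookup_stream(dma);

        if (IS_ERR(stream))                     /* was: if (!stream) */
                return PTR_ERR(stream);

        /* ... operate on stream ... */
        return 0;
}
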
17553 +diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
17554 +index 2ea73809ca345..271839a8add0e 100644
17555 +--- a/drivers/spi/spi-dw-pci.c
17556 ++++ b/drivers/spi/spi-dw-pci.c
17557 +@@ -127,18 +127,16 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
17558 + if (desc->setup) {
17559 + ret = desc->setup(dws);
17560 + if (ret)
17561 +- return ret;
17562 ++ goto err_free_irq_vectors;
17563 + }
17564 + } else {
17565 +- pci_free_irq_vectors(pdev);
17566 +- return -ENODEV;
17567 ++ ret = -ENODEV;
17568 ++ goto err_free_irq_vectors;
17569 + }
17570 +
17571 + ret = dw_spi_add_host(&pdev->dev, dws);
17572 +- if (ret) {
17573 +- pci_free_irq_vectors(pdev);
17574 +- return ret;
17575 +- }
17576 ++ if (ret)
17577 ++ goto err_free_irq_vectors;
17578 +
17579 + /* PCI hook and SPI hook use the same drv data */
17580 + pci_set_drvdata(pdev, dws);
17581 +@@ -152,6 +150,10 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
17582 + pm_runtime_allow(&pdev->dev);
17583 +
17584 + return 0;
17585 ++
17586 ++err_free_irq_vectors:
17587 ++ pci_free_irq_vectors(pdev);
17588 ++ return ret;
17589 + }
17590 +
17591 + static void spi_pci_remove(struct pci_dev *pdev)
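
The spi-dw-pci.c hunk above replaces duplicated cleanup calls with a single exit label, so pci_free_irq_vectors() runs exactly once on every failure after the vectors are allocated. A reduced sketch of that unwinding shape; setup_hw() and register_host() are hypothetical stand-ins:

#include <linux/pci.h>

static int setup_hw(struct pci_dev *pdev);      /* hypothetical */
static int register_host(struct pci_dev *pdev); /* hypothetical */

static int probe(struct pci_dev *pdev)
{
        int ret;

        ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (ret < 0)
                return ret;

        ret = setup_hw(pdev);
        if (ret)
                goto err_free_irq_vectors;

        ret = register_host(pdev);
        if (ret)
                goto err_free_irq_vectors;

        return 0;

err_free_irq_vectors:
        pci_free_irq_vectors(pdev);
        return ret;
}
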
17592 +diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
17593 +index 37a3e0f8e7526..a702e9d7d68c0 100644
17594 +--- a/drivers/spi/spi-fsi.c
17595 ++++ b/drivers/spi/spi-fsi.c
17596 +@@ -24,11 +24,16 @@
17597 +
17598 + #define SPI_FSI_BASE 0x70000
17599 + #define SPI_FSI_INIT_TIMEOUT_MS 1000
17600 +-#define SPI_FSI_MAX_TRANSFER_SIZE 2048
17601 ++#define SPI_FSI_MAX_XFR_SIZE 2048
17602 ++#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED 32
17603 +
17604 + #define SPI_FSI_ERROR 0x0
17605 + #define SPI_FSI_COUNTER_CFG 0x1
17606 + #define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32)
17607 ++#define SPI_FSI_COUNTER_CFG_N2_RX BIT_ULL(8)
17608 ++#define SPI_FSI_COUNTER_CFG_N2_TX BIT_ULL(9)
17609 ++#define SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
17610 ++#define SPI_FSI_COUNTER_CFG_N2_RELOAD BIT_ULL(11)
17611 + #define SPI_FSI_CFG1 0x2
17612 + #define SPI_FSI_CLOCK_CFG 0x3
17613 + #define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
17614 +@@ -61,7 +66,7 @@
17615 + #define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62)
17616 + #define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63)
17617 + #define SPI_FSI_STATUS_ANY_ERROR \
17618 +- (SPI_FSI_STATUS_ERROR | SPI_FSI_STATUS_TDR_UNDERRUN | \
17619 ++ (SPI_FSI_STATUS_ERROR | \
17620 + SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
17621 + SPI_FSI_STATUS_RDR_OVERRUN)
17622 + #define SPI_FSI_PORT_CTRL 0x9
17623 +@@ -70,6 +75,8 @@ struct fsi_spi {
17624 + struct device *dev; /* SPI controller device */
17625 + struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
17626 + u32 base;
17627 ++ size_t max_xfr_size;
17628 ++ bool restricted;
17629 + };
17630 +
17631 + struct fsi_spi_sequence {
17632 +@@ -205,8 +212,12 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
17633 + if (rc)
17634 + return rc;
17635 +
17636 +- return fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
17637 +- SPI_FSI_CLOCK_CFG_RESET2);
17638 ++ rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
17639 ++ SPI_FSI_CLOCK_CFG_RESET2);
17640 ++ if (rc)
17641 ++ return rc;
17642 ++
17643 ++ return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
17644 + }
17645 +
17646 + static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
17647 +@@ -214,8 +225,8 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
17648 + /*
17649 + * Add the next byte of instruction to the 8-byte sequence register.
17650 + * Then decrement the counter so that the next instruction will go in
17651 +- * the right place. Return the number of "slots" left in the sequence
17652 +- * register.
17653 ++ * the right place. Return the index of the slot we just filled in the
17654 ++ * sequence register.
17655 + */
17656 + seq->data |= (u64)val << seq->bit;
17657 + seq->bit -= 8;
17658 +@@ -233,40 +244,71 @@ static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
17659 + struct fsi_spi_sequence *seq,
17660 + struct spi_transfer *transfer)
17661 + {
17662 ++ bool docfg = false;
17663 + int loops;
17664 + int idx;
17665 + int rc;
17666 ++ u8 val = 0;
17667 + u8 len = min(transfer->len, 8U);
17668 + u8 rem = transfer->len % len;
17669 ++ u64 cfg = 0ULL;
17670 +
17671 + loops = transfer->len / len;
17672 +
17673 + if (transfer->tx_buf) {
17674 +- idx = fsi_spi_sequence_add(seq,
17675 +- SPI_FSI_SEQUENCE_SHIFT_OUT(len));
17676 ++ val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
17677 ++ idx = fsi_spi_sequence_add(seq, val);
17678 ++
17679 + if (rem)
17680 + rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
17681 + } else if (transfer->rx_buf) {
17682 +- idx = fsi_spi_sequence_add(seq,
17683 +- SPI_FSI_SEQUENCE_SHIFT_IN(len));
17684 ++ val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
17685 ++ idx = fsi_spi_sequence_add(seq, val);
17686 ++
17687 + if (rem)
17688 + rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
17689 + } else {
17690 + return -EINVAL;
17691 + }
17692 +
17693 ++ if (ctx->restricted) {
17694 ++ const int eidx = rem ? 5 : 6;
17695 ++
17696 ++ while (loops > 1 && idx <= eidx) {
17697 ++ idx = fsi_spi_sequence_add(seq, val);
17698 ++ loops--;
17699 ++ docfg = true;
17700 ++ }
17701 ++
17702 ++ if (loops > 1) {
17703 ++ dev_warn(ctx->dev, "No sequencer slots; aborting.\n");
17704 ++ return -EINVAL;
17705 ++ }
17706 ++ }
17707 ++
17708 + if (loops > 1) {
17709 + fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
17710 ++ docfg = true;
17711 ++ }
17712 +
17713 +- if (rem)
17714 +- fsi_spi_sequence_add(seq, rem);
17715 ++ if (docfg) {
17716 ++ cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
17717 ++ if (transfer->rx_buf)
17718 ++ cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
17719 ++ SPI_FSI_COUNTER_CFG_N2_TX |
17720 ++ SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
17721 ++ SPI_FSI_COUNTER_CFG_N2_RELOAD;
17722 +
17723 +- rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG,
17724 +- SPI_FSI_COUNTER_CFG_LOOPS(loops - 1));
17725 ++ rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
17726 + if (rc)
17727 + return rc;
17728 ++ } else {
17729 ++ fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
17730 + }
17731 +
17732 ++ if (rem)
17733 ++ fsi_spi_sequence_add(seq, rem);
17734 ++
17735 + return 0;
17736 + }
17737 +
17738 +@@ -275,6 +317,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
17739 + {
17740 + int rc = 0;
17741 + u64 status = 0ULL;
17742 ++ u64 cfg = 0ULL;
17743 +
17744 + if (transfer->tx_buf) {
17745 + int nb;
17746 +@@ -312,6 +355,16 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
17747 + u64 in = 0ULL;
17748 + u8 *rx = transfer->rx_buf;
17749 +
17750 ++ rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
17751 ++ if (rc)
17752 ++ return rc;
17753 ++
17754 ++ if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
17755 ++ rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
17756 ++ if (rc)
17757 ++ return rc;
17758 ++ }
17759 ++
17760 + while (transfer->len > recv) {
17761 + do {
17762 + rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
17763 +@@ -350,7 +403,7 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
17764 + u64 status = 0ULL;
17765 + u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
17766 + SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
17767 +- FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 4);
17768 ++ FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);
17769 +
17770 + end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS);
17771 + do {
17772 +@@ -407,7 +460,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
17773 +
17774 + /* Sequencer must do shift out (tx) first. */
17775 + if (!transfer->tx_buf ||
17776 +- transfer->len > SPI_FSI_MAX_TRANSFER_SIZE) {
17777 ++ transfer->len > (ctx->max_xfr_size + 8)) {
17778 + rc = -EINVAL;
17779 + goto error;
17780 + }
17781 +@@ -431,7 +484,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
17782 +
17783 + /* Sequencer can only do shift in (rx) after tx. */
17784 + if (next->rx_buf) {
17785 +- if (next->len > SPI_FSI_MAX_TRANSFER_SIZE) {
17786 ++ if (next->len > ctx->max_xfr_size) {
17787 + rc = -EINVAL;
17788 + goto error;
17789 + }
17790 +@@ -476,7 +529,9 @@ error:
17791 +
17792 + static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
17793 + {
17794 +- return SPI_FSI_MAX_TRANSFER_SIZE;
17795 ++ struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
17796 ++
17797 ++ return ctx->max_xfr_size;
17798 + }
17799 +
17800 + static int fsi_spi_probe(struct device *dev)
17801 +@@ -524,6 +579,14 @@ static int fsi_spi_probe(struct device *dev)
17802 + ctx->fsi = fsi;
17803 + ctx->base = base + SPI_FSI_BASE;
17804 +
17805 ++ if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
17806 ++ ctx->restricted = true;
17807 ++ ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
17808 ++ } else {
17809 ++ ctx->restricted = false;
17810 ++ ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
17811 ++ }
17812 ++
17813 + rc = devm_spi_register_controller(dev, ctlr);
17814 + if (rc)
17815 + spi_controller_put(ctlr);
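
The spi-fsi.c changes above key the sequencer behaviour and transfer limit off the compatible string at probe time rather than a compile-time constant. A sketch of that capability split, under the assumption that only the two limits differ:

#include <linux/of.h>
#include <linux/types.h>

#define MAX_XFR_SIZE                    2048
#define MAX_XFR_SIZE_RESTRICTED         32

struct fsi_ctx {
        bool restricted;
        size_t max_xfr_size;
};

static void fsi_ctx_init(struct fsi_ctx *ctx, struct device_node *np)
{
        ctx->restricted = of_device_is_compatible(np,
                                                  "ibm,fsi2spi-restricted");
        ctx->max_xfr_size = ctx->restricted ? MAX_XFR_SIZE_RESTRICTED
                                            : MAX_XFR_SIZE;
}

Reporting the limit through the controller's max_transfer_size hook, as the last hunk does, then lets the SPI core reject oversized transfers before they reach the driver.
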
17816 +diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
17817 +index 38a5f1304cec4..e38e5ad3c7068 100644
17818 +--- a/drivers/spi/spi-imx.c
17819 ++++ b/drivers/spi/spi-imx.c
17820 +@@ -1707,7 +1707,7 @@ static int spi_imx_probe(struct platform_device *pdev)
17821 + ret = spi_bitbang_start(&spi_imx->bitbang);
17822 + if (ret) {
17823 + dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
17824 +- goto out_runtime_pm_put;
17825 ++ goto out_bitbang_start;
17826 + }
17827 +
17828 + dev_info(&pdev->dev, "probed\n");
17829 +@@ -1717,6 +1717,9 @@ static int spi_imx_probe(struct platform_device *pdev)
17830 +
17831 + return ret;
17832 +
17833 ++out_bitbang_start:
17834 ++ if (spi_imx->devtype_data->has_dmamode)
17835 ++ spi_imx_sdma_exit(spi_imx);
17836 + out_runtime_pm_put:
17837 + pm_runtime_dont_use_autosuspend(spi_imx->dev);
17838 + pm_runtime_put_sync(spi_imx->dev);
17839 +diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
17840 +index 1c9478e6e5d99..d4c9510af3931 100644
17841 +--- a/drivers/spi/spi-omap2-mcspi.c
17842 ++++ b/drivers/spi/spi-omap2-mcspi.c
17843 +@@ -24,7 +24,6 @@
17844 + #include <linux/of.h>
17845 + #include <linux/of_device.h>
17846 + #include <linux/gcd.h>
17847 +-#include <linux/iopoll.h>
17848 +
17849 + #include <linux/spi/spi.h>
17850 +
17851 +@@ -348,9 +347,19 @@ disable_fifo:
17852 +
17853 + static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
17854 + {
17855 +- u32 val;
17856 +-
17857 +- return readl_poll_timeout(reg, val, val & bit, 1, MSEC_PER_SEC);
17858 ++ unsigned long timeout;
17859 ++
17860 ++ timeout = jiffies + msecs_to_jiffies(1000);
17861 ++ while (!(readl_relaxed(reg) & bit)) {
17862 ++ if (time_after(jiffies, timeout)) {
17863 ++ if (!(readl_relaxed(reg) & bit))
17864 ++ return -ETIMEDOUT;
17865 ++ else
17866 ++ return 0;
17867 ++ }
17868 ++ cpu_relax();
17869 ++ }
17870 ++ return 0;
17871 + }
17872 +
17873 + static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
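
The spi-omap2-mcspi.c hunk reverts to an open-coded poll loop; the detail worth noting is the second readl_relaxed() after the deadline, which avoids a spurious -ETIMEDOUT when the task was scheduled out between the last read and the time_after() check. A sketch:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

static int wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(1000);

        while (!(readl_relaxed(reg) & bit)) {
                if (time_after(jiffies, timeout))
                        /* re-check: we may have slept past the deadline */
                        return (readl_relaxed(reg) & bit) ? 0 : -ETIMEDOUT;
                cpu_relax();
        }
        return 0;
}
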
17874 +diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
17875 +index 924b24441789a..1f08e32a10fe2 100644
17876 +--- a/drivers/spi/spi-s3c64xx.c
17877 ++++ b/drivers/spi/spi-s3c64xx.c
17878 +@@ -122,6 +122,7 @@
17879 +
17880 + struct s3c64xx_spi_dma_data {
17881 + struct dma_chan *ch;
17882 ++ dma_cookie_t cookie;
17883 + enum dma_transfer_direction direction;
17884 + };
17885 +
17886 +@@ -271,12 +272,13 @@ static void s3c64xx_spi_dmacb(void *data)
17887 + spin_unlock_irqrestore(&sdd->lock, flags);
17888 + }
17889 +
17890 +-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
17891 ++static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
17892 + struct sg_table *sgt)
17893 + {
17894 + struct s3c64xx_spi_driver_data *sdd;
17895 + struct dma_slave_config config;
17896 + struct dma_async_tx_descriptor *desc;
17897 ++ int ret;
17898 +
17899 + memset(&config, 0, sizeof(config));
17900 +
17901 +@@ -300,12 +302,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
17902 +
17903 + desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
17904 + dma->direction, DMA_PREP_INTERRUPT);
17905 ++ if (!desc) {
17906 ++ dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
17907 ++ dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
17908 ++ return -ENOMEM;
17909 ++ }
17910 +
17911 + desc->callback = s3c64xx_spi_dmacb;
17912 + desc->callback_param = dma;
17913 +
17914 +- dmaengine_submit(desc);
17915 ++ dma->cookie = dmaengine_submit(desc);
17916 ++ ret = dma_submit_error(dma->cookie);
17917 ++ if (ret) {
17918 ++ dev_err(&sdd->pdev->dev, "DMA submission failed");
17919 ++ return -EIO;
17920 ++ }
17921 ++
17922 + dma_async_issue_pending(dma->ch);
17923 ++ return 0;
17924 + }
17925 +
17926 + static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
17927 +@@ -355,11 +369,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
17928 + return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
17929 + }
17930 +
17931 +-static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
17932 ++static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
17933 + struct spi_transfer *xfer, int dma_mode)
17934 + {
17935 + void __iomem *regs = sdd->regs;
17936 + u32 modecfg, chcfg;
17937 ++ int ret = 0;
17938 +
17939 + modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
17940 + modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
17941 +@@ -385,7 +400,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
17942 + chcfg |= S3C64XX_SPI_CH_TXCH_ON;
17943 + if (dma_mode) {
17944 + modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
17945 +- prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
17946 ++ ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
17947 + } else {
17948 + switch (sdd->cur_bpw) {
17949 + case 32:
17950 +@@ -417,12 +432,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
17951 + writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
17952 + | S3C64XX_SPI_PACKET_CNT_EN,
17953 + regs + S3C64XX_SPI_PACKET_CNT);
17954 +- prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
17955 ++ ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
17956 + }
17957 + }
17958 +
17959 ++ if (ret)
17960 ++ return ret;
17961 ++
17962 + writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
17963 + writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
17964 ++
17965 ++ return 0;
17966 + }
17967 +
17968 + static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
17969 +@@ -555,9 +575,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
17970 + return 0;
17971 + }
17972 +
17973 +-static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
17974 ++static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
17975 + {
17976 + void __iomem *regs = sdd->regs;
17977 ++ int ret;
17978 + u32 val;
17979 +
17980 + /* Disable Clock */
17981 +@@ -605,7 +626,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
17982 +
17983 + if (sdd->port_conf->clk_from_cmu) {
17984 + /* The src_clk clock is divided internally by 2 */
17985 +- clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
17986 ++ ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
17987 ++ if (ret)
17988 ++ return ret;
17989 + } else {
17990 + /* Configure Clock */
17991 + val = readl(regs + S3C64XX_SPI_CLK_CFG);
17992 +@@ -619,6 +642,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
17993 + val |= S3C64XX_SPI_ENCLK_ENABLE;
17994 + writel(val, regs + S3C64XX_SPI_CLK_CFG);
17995 + }
17996 ++
17997 ++ return 0;
17998 + }
17999 +
18000 + #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
18001 +@@ -661,7 +686,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
18002 + sdd->cur_bpw = bpw;
18003 + sdd->cur_speed = speed;
18004 + sdd->cur_mode = spi->mode;
18005 +- s3c64xx_spi_config(sdd);
18006 ++ status = s3c64xx_spi_config(sdd);
18007 ++ if (status)
18008 ++ return status;
18009 + }
18010 +
18011 + if (!is_polling(sdd) && (xfer->len > fifo_len) &&
18012 +@@ -685,13 +712,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
18013 + sdd->state &= ~RXBUSY;
18014 + sdd->state &= ~TXBUSY;
18015 +
18016 +- s3c64xx_enable_datapath(sdd, xfer, use_dma);
18017 +-
18018 + /* Start the signals */
18019 + s3c64xx_spi_set_cs(spi, true);
18020 +
18021 ++ status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
18022 ++
18023 + spin_unlock_irqrestore(&sdd->lock, flags);
18024 +
18025 ++ if (status) {
18026 ++ dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
18027 ++ break;
18028 ++ }
18029 ++
18030 + if (use_dma)
18031 + status = s3c64xx_wait_for_dma(sdd, xfer);
18032 + else
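
The spi-s3c64xx.c hunks above make DMA preparation failures visible instead of silently starting a transfer that can never complete. Both dmaengine_prep_slave_sg() and the cookie returned by dmaengine_submit() can fail; a condensed sketch of the checks:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int submit_slave_sg(struct dma_chan *ch, struct sg_table *sgt,
                           enum dma_transfer_direction dir,
                           dma_async_tx_callback cb, void *cb_param)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        desc = dmaengine_prep_slave_sg(ch, sgt->sgl, sgt->nents, dir,
                                       DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        desc->callback = cb;
        desc->callback_param = cb_param;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(ch);
        return 0;
}
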
18033 +diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
18034 +index 03929b9d3a8bc..d0725bc8b48a4 100644
18035 +--- a/drivers/staging/emxx_udc/emxx_udc.c
18036 ++++ b/drivers/staging/emxx_udc/emxx_udc.c
18037 +@@ -2593,7 +2593,7 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
18038 +
18039 + if (req->unaligned) {
18040 + if (!ep->virt_buf)
18041 +- ep->virt_buf = dma_alloc_coherent(NULL, PAGE_SIZE,
18042 ++ ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
18043 + &ep->phys_buf,
18044 + GFP_ATOMIC | GFP_DMA);
18045 + if (ep->epnum > 0) {
18046 +@@ -3148,7 +3148,7 @@ static int nbu2ss_drv_remove(struct platform_device *pdev)
18047 + for (i = 0; i < NUM_ENDPOINTS; i++) {
18048 + ep = &udc->ep[i];
18049 + if (ep->virt_buf)
18050 +- dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf,
18051 ++ dma_free_coherent(udc->dev, PAGE_SIZE, (void *)ep->virt_buf,
18052 + ep->phys_buf);
18053 + }
18054 +
18055 +diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
18056 +index a68cbb4995f0f..33a0f8ff82aa8 100644
18057 +--- a/drivers/staging/media/atomisp/pci/sh_css.c
18058 ++++ b/drivers/staging/media/atomisp/pci/sh_css.c
18059 +@@ -9521,7 +9521,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
18060 + if (err)
18061 + {
18062 + IA_CSS_LEAVE_ERR(err);
18063 +- return err;
18064 ++ goto ERR;
18065 + }
18066 + #endif
18067 + for (i = 0; i < num_pipes; i++)
18068 +diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
18069 +index 194d058480777..6dcd47bd9ed3f 100644
18070 +--- a/drivers/staging/media/hantro/hantro_h264.c
18071 ++++ b/drivers/staging/media/hantro/hantro_h264.c
18072 +@@ -325,7 +325,7 @@ dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
18073 + */
18074 + dst_buf = hantro_get_dst_buf(ctx);
18075 + buf = &dst_buf->vb2_buf;
18076 +- dma_addr = vb2_dma_contig_plane_dma_addr(buf, 0);
18077 ++ dma_addr = hantro_get_dec_buf_addr(ctx, buf);
18078 + }
18079 +
18080 + return dma_addr;
18081 +diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c
18082 +index 44062ffceaea7..6d2a8f2a8f0bb 100644
18083 +--- a/drivers/staging/media/hantro/hantro_postproc.c
18084 ++++ b/drivers/staging/media/hantro/hantro_postproc.c
18085 +@@ -118,7 +118,9 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx)
18086 + unsigned int num_buffers = cap_queue->num_buffers;
18087 + unsigned int i, buf_size;
18088 +
18089 +- buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage;
18090 ++ buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage +
18091 ++ hantro_h264_mv_size(ctx->dst_fmt.width,
18092 ++ ctx->dst_fmt.height);
18093 +
18094 + for (i = 0; i < num_buffers; ++i) {
18095 + struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
18096 +diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
18097 +index fbd53d7c097cd..e9d6bd9e9332a 100644
18098 +--- a/drivers/staging/media/ipu3/ipu3-css-params.c
18099 ++++ b/drivers/staging/media/ipu3/ipu3-css-params.c
18100 +@@ -159,7 +159,7 @@ imgu_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
18101 +
18102 + memset(&cfg->scaler_coeffs_chroma, 0,
18103 + sizeof(cfg->scaler_coeffs_chroma));
18104 +- memset(&cfg->scaler_coeffs_luma, 0, sizeof(*cfg->scaler_coeffs_luma));
18105 ++ memset(&cfg->scaler_coeffs_luma, 0, sizeof(cfg->scaler_coeffs_luma));
18106 + do {
18107 + phase_step_correction++;
18108 +
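
The one-character ipu3 fix above is the classic sizeof pitfall: for an array member, sizeof(*cfg->scaler_coeffs_luma) is the size of a single element, so the memset cleared only the first coefficient. A reduced userspace illustration:

#include <stdio.h>
#include <string.h>

struct cfg {
        int coeffs[32];
};

int main(void)
{
        struct cfg c;

        memset(&c.coeffs, 0xff, sizeof(c.coeffs));      /* poison */

        memset(&c.coeffs, 0, sizeof(*c.coeffs));        /* buggy: 4 bytes */
        printf("buggy: coeffs[1] = %d\n", c.coeffs[1]); /* still -1 */

        memset(&c.coeffs, 0, sizeof(c.coeffs));         /* fixed: all bytes */
        printf("fixed: coeffs[1] = %d\n", c.coeffs[1]); /* 0 */
        return 0;
}
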
18109 +diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
18110 +index 7c4df6d48c43d..4df9476ef2a9b 100644
18111 +--- a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
18112 ++++ b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
18113 +@@ -16,6 +16,7 @@
18114 + */
18115 +
18116 + #include <linux/clk.h>
18117 ++#include <linux/delay.h>
18118 + #include <linux/io.h>
18119 + #include <linux/mfd/syscon.h>
18120 + #include <linux/module.h>
18121 +diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
18122 +index 483ce04789ed0..7f6798b223ef8 100644
18123 +--- a/drivers/staging/qlge/qlge.h
18124 ++++ b/drivers/staging/qlge/qlge.h
18125 +@@ -2338,21 +2338,21 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
18126 + #endif
18127 +
18128 + #ifdef QL_OB_DUMP
18129 +-void ql_dump_tx_desc(struct tx_buf_desc *tbd);
18130 +-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
18131 +-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
18132 +-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
18133 +-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
18134 ++void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd);
18135 ++void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb);
18136 ++void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp);
18137 ++#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb) ql_dump_ob_mac_iocb(qdev, ob_mac_iocb)
18138 ++#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp) ql_dump_ob_mac_rsp(qdev, ob_mac_rsp)
18139 + #else
18140 +-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
18141 +-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
18142 ++#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb)
18143 ++#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp)
18144 + #endif
18145 +
18146 + #ifdef QL_IB_DUMP
18147 +-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
18148 +-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
18149 ++void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp);
18150 ++#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp) ql_dump_ib_mac_rsp(qdev, ib_mac_rsp)
18151 + #else
18152 +-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
18153 ++#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp)
18154 + #endif
18155 +
18156 + #ifdef QL_ALL_DUMP
18157 +diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
18158 +index a55bf0b3e9dcc..42fd13990f3a8 100644
18159 +--- a/drivers/staging/qlge/qlge_dbg.c
18160 ++++ b/drivers/staging/qlge/qlge_dbg.c
18161 +@@ -1431,7 +1431,7 @@ void ql_dump_routing_entries(struct ql_adapter *qdev)
18162 + }
18163 + if (value)
18164 + netdev_err(qdev->ndev,
18165 +- "%s: Routing Mask %d = 0x%.08x\n",
18166 ++ "Routing Mask %d = 0x%.08x\n",
18167 + i, value);
18168 + }
18169 + ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
18170 +@@ -1617,6 +1617,9 @@ void ql_dump_qdev(struct ql_adapter *qdev)
18171 + #ifdef QL_CB_DUMP
18172 + void ql_dump_wqicb(struct wqicb *wqicb)
18173 + {
18174 ++ struct tx_ring *tx_ring = container_of(wqicb, struct tx_ring, wqicb);
18175 ++ struct ql_adapter *qdev = tx_ring->qdev;
18176 ++
18177 + netdev_err(qdev->ndev, "Dumping wqicb stuff...\n");
18178 + netdev_err(qdev->ndev, "wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
18179 + netdev_err(qdev->ndev, "wqicb->flags = %x\n",
18180 +@@ -1632,8 +1635,8 @@ void ql_dump_wqicb(struct wqicb *wqicb)
18181 +
18182 + void ql_dump_tx_ring(struct tx_ring *tx_ring)
18183 + {
18184 +- if (!tx_ring)
18185 +- return;
18186 ++ struct ql_adapter *qdev = tx_ring->qdev;
18187 ++
18188 + netdev_err(qdev->ndev, "===================== Dumping tx_ring %d ===============\n",
18189 + tx_ring->wq_id);
18190 + netdev_err(qdev->ndev, "tx_ring->base = %p\n", tx_ring->wq_base);
18191 +@@ -1657,6 +1660,8 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring)
18192 + void ql_dump_ricb(struct ricb *ricb)
18193 + {
18194 + int i;
18195 ++ struct ql_adapter *qdev =
18196 ++ container_of(ricb, struct ql_adapter, ricb);
18197 +
18198 + netdev_err(qdev->ndev, "===================== Dumping ricb ===============\n");
18199 + netdev_err(qdev->ndev, "Dumping ricb stuff...\n");
18200 +@@ -1686,6 +1691,9 @@ void ql_dump_ricb(struct ricb *ricb)
18201 +
18202 + void ql_dump_cqicb(struct cqicb *cqicb)
18203 + {
18204 ++ struct rx_ring *rx_ring = container_of(cqicb, struct rx_ring, cqicb);
18205 ++ struct ql_adapter *qdev = rx_ring->qdev;
18206 ++
18207 + netdev_err(qdev->ndev, "Dumping cqicb stuff...\n");
18208 +
18209 + netdev_err(qdev->ndev, "cqicb->msix_vect = %d\n", cqicb->msix_vect);
18210 +@@ -1725,8 +1733,8 @@ static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
18211 +
18212 + void ql_dump_rx_ring(struct rx_ring *rx_ring)
18213 + {
18214 +- if (!rx_ring)
18215 +- return;
18216 ++ struct ql_adapter *qdev = rx_ring->qdev;
18217 ++
18218 + netdev_err(qdev->ndev,
18219 + "===================== Dumping rx_ring %d ===============\n",
18220 + rx_ring->cq_id);
18221 +@@ -1816,7 +1824,7 @@ fail_it:
18222 + #endif
18223 +
18224 + #ifdef QL_OB_DUMP
18225 +-void ql_dump_tx_desc(struct tx_buf_desc *tbd)
18226 ++void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd)
18227 + {
18228 + netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n",
18229 + le64_to_cpu((u64)tbd->addr));
18230 +@@ -1843,7 +1851,7 @@ void ql_dump_tx_desc(struct tx_buf_desc *tbd)
18231 + tbd->len & TX_DESC_E ? "E" : ".");
18232 + }
18233 +
18234 +-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
18235 ++void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb)
18236 + {
18237 + struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
18238 + (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
18239 +@@ -1886,10 +1894,10 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
18240 + frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
18241 + }
18242 + tbd = &ob_mac_iocb->tbd[0];
18243 +- ql_dump_tx_desc(tbd);
18244 ++ ql_dump_tx_desc(qdev, tbd);
18245 + }
18246 +
18247 +-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
18248 ++void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp)
18249 + {
18250 + netdev_err(qdev->ndev, "%s\n", __func__);
18251 + netdev_err(qdev->ndev, "opcode = %d\n", ob_mac_rsp->opcode);
18252 +@@ -1906,7 +1914,7 @@ void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
18253 + #endif
18254 +
18255 + #ifdef QL_IB_DUMP
18256 +-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
18257 ++void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp)
18258 + {
18259 + netdev_err(qdev->ndev, "%s\n", __func__);
18260 + netdev_err(qdev->ndev, "opcode = 0x%x\n", ib_mac_rsp->opcode);
18261 +diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
18262 +index 2028458bea6f0..b351a7eb7a897 100644
18263 +--- a/drivers/staging/qlge/qlge_main.c
18264 ++++ b/drivers/staging/qlge/qlge_main.c
18265 +@@ -1856,7 +1856,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
18266 + struct net_device *ndev = qdev->ndev;
18267 + struct sk_buff *skb = NULL;
18268 +
18269 +- QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
18270 ++ QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp);
18271 +
18272 + skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
18273 + if (unlikely(!skb)) {
18274 +@@ -1954,7 +1954,7 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
18275 + ((le16_to_cpu(ib_mac_rsp->vlan_id) &
18276 + IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
18277 +
18278 +- QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
18279 ++ QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp);
18280 +
18281 + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
18282 + /* The data and headers are split into
18283 +@@ -2001,7 +2001,7 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
18284 + struct tx_ring *tx_ring;
18285 + struct tx_ring_desc *tx_ring_desc;
18286 +
18287 +- QL_DUMP_OB_MAC_RSP(mac_rsp);
18288 ++ QL_DUMP_OB_MAC_RSP(qdev, mac_rsp);
18289 + tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
18290 + tx_ring_desc = &tx_ring->q[mac_rsp->tid];
18291 + ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
18292 +@@ -2593,7 +2593,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
18293 + tx_ring->tx_errors++;
18294 + return NETDEV_TX_BUSY;
18295 + }
18296 +- QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
18297 ++ QL_DUMP_OB_MAC_IOCB(qdev, mac_iocb_ptr);
18298 + tx_ring->prod_idx++;
18299 + if (tx_ring->prod_idx == tx_ring->wq_len)
18300 + tx_ring->prod_idx = 0;
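
The qlge changes above thread qdev through the dump helpers explicitly; where a signature cannot change, they instead recover it with container_of(), which walks from a pointer to an embedded member back to its enclosing structure. A reduced example with hypothetical types:

#include <linux/kernel.h>

struct adapter;

struct ring_cfg {
        int depth;
};

struct ring {
        struct adapter *adapter;
        struct ring_cfg cfg;            /* embedded, not a pointer */
};

static struct adapter *cfg_to_adapter(struct ring_cfg *cfg)
{
        struct ring *ring = container_of(cfg, struct ring, cfg);

        return ring->adapter;
}

This only works because cfg is embedded in struct ring; using container_of() on a separately allocated object would be undefined behaviour.
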
18301 +diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
18302 +index 195d963c4fbb4..b6fee7230ce05 100644
18303 +--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
18304 ++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
18305 +@@ -597,7 +597,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
18306 +
18307 + prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
18308 + sizeof(struct ieee80211_rxb *),
18309 +- GFP_KERNEL);
18310 ++ GFP_ATOMIC);
18311 + if (!prxbIndicateArray)
18312 + return;
18313 +
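
The rtl8192u hunk swaps GFP_KERNEL for GFP_ATOMIC because this receive/reorder path runs in atomic context, where sleeping allocations are forbidden. GFP_ATOMIC never sleeps but fails more readily, so the NULL check stays mandatory. A sketch:

#include <linux/slab.h>

static void *alloc_in_atomic_path(size_t n, size_t size)
{
        /* called from softirq / under a spinlock: must not sleep */
        return kmalloc_array(n, size, GFP_ATOMIC);
}
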
18314 +diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
18315 +index d83f421acfc1e..a397dc6231f13 100644
18316 +--- a/drivers/staging/rtl8712/rtl8712_recv.c
18317 ++++ b/drivers/staging/rtl8712/rtl8712_recv.c
18318 +@@ -477,11 +477,14 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
18319 + while (!end_of_queue_search(phead, plist)) {
18320 + pnextrframe = container_of(plist, union recv_frame, u.list);
18321 + pnextattrib = &pnextrframe->u.hdr.attrib;
18322 ++
18323 ++ if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
18324 ++ return false;
18325 ++
18326 + if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
18327 + plist = plist->next;
18328 +- else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
18329 +- return false;
18330 +- break;
18331 ++ else
18332 ++ break;
18333 + }
18334 + list_del_init(&(prframe->u.hdr.list));
18335 + list_add_tail(&(prframe->u.hdr.list), plist);
18336 +diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
18337 +index 6fb0788807426..ef0cc1e474ae6 100644
18338 +--- a/drivers/staging/wfx/data_rx.c
18339 ++++ b/drivers/staging/wfx/data_rx.c
18340 +@@ -17,6 +17,9 @@ static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt)
18341 + {
18342 + int params, tid;
18343 +
18344 ++ if (wfx_api_older_than(wvif->wdev, 3, 6))
18345 ++ return;
18346 ++
18347 + switch (mgmt->u.action.u.addba_req.action_code) {
18348 + case WLAN_ACTION_ADDBA_REQ:
18349 + params = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
18350 +@@ -41,7 +44,7 @@ void wfx_rx_cb(struct wfx_vif *wvif,
18351 + memset(hdr, 0, sizeof(*hdr));
18352 +
18353 + if (arg->status == HIF_STATUS_RX_FAIL_MIC)
18354 +- hdr->flag |= RX_FLAG_MMIC_ERROR;
18355 ++ hdr->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_IV_STRIPPED;
18356 + else if (arg->status)
18357 + goto drop;
18358 +
18359 +diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
18360 +index 4e30ab17a93d4..7dace7c17bf5c 100644
18361 +--- a/drivers/staging/wfx/sta.c
18362 ++++ b/drivers/staging/wfx/sta.c
18363 +@@ -682,15 +682,16 @@ int wfx_ampdu_action(struct ieee80211_hw *hw,
18364 + struct ieee80211_vif *vif,
18365 + struct ieee80211_ampdu_params *params)
18366 + {
18367 +- /* Aggregation is implemented fully in firmware,
18368 +- * including block ack negotiation. Do not allow
18369 +- * mac80211 stack to do anything: it interferes with
18370 +- * the firmware.
18371 +- */
18372 +-
18373 +- /* Note that we still need this function stubbed. */
18374 +-
18375 +- return -ENOTSUPP;
18376 ++ // Aggregation is implemented fully in firmware
18377 ++ switch (params->action) {
18378 ++ case IEEE80211_AMPDU_RX_START:
18379 ++ case IEEE80211_AMPDU_RX_STOP:
18380 ++ // Just acknowledge it to enable frame re-ordering
18381 ++ return 0;
18382 ++ default:
18383 ++ // Leave the firmware doing its business for tx aggregation
18384 ++ return -ENOTSUPP;
18385 ++ }
18386 + }
18387 +
18388 + int wfx_add_chanctx(struct ieee80211_hw *hw,
18389 +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
18390 +index 9b75923505020..86b28117787ec 100644
18391 +--- a/drivers/target/target_core_user.c
18392 ++++ b/drivers/target/target_core_user.c
18393 +@@ -681,7 +681,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
18394 + void *from, *to = NULL;
18395 + size_t copy_bytes, to_offset, offset;
18396 + struct scatterlist *sg;
18397 +- struct page *page;
18398 ++ struct page *page = NULL;
18399 +
18400 + for_each_sg(data_sg, sg, data_nents, i) {
18401 + int sg_remaining = sg->length;
18402 +diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
18403 +index af7b2383e8f6b..019f4812def6c 100644
18404 +--- a/drivers/thermal/thermal_netlink.c
18405 ++++ b/drivers/thermal/thermal_netlink.c
18406 +@@ -78,7 +78,7 @@ int thermal_genl_sampling_temp(int id, int temp)
18407 + hdr = genlmsg_put(skb, 0, 0, &thermal_gnl_family, 0,
18408 + THERMAL_GENL_SAMPLING_TEMP);
18409 + if (!hdr)
18410 +- return -EMSGSIZE;
18411 ++ goto out_free;
18412 +
18413 + if (nla_put_u32(skb, THERMAL_GENL_ATTR_TZ_ID, id))
18414 + goto out_cancel;
18415 +@@ -93,6 +93,7 @@ int thermal_genl_sampling_temp(int id, int temp)
18416 + return 0;
18417 + out_cancel:
18418 + genlmsg_cancel(skb, hdr);
18419 ++out_free:
18420 + nlmsg_free(skb);
18421 +
18422 + return -EMSGSIZE;
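
The thermal_netlink.c hunk plugs an skb leak: once genlmsg_new() has succeeded, every error path, including a failing genlmsg_put(), must free the buffer. A condensed sketch of the construction/unwind shape; the attribute ids are hypothetical:

#include <net/genetlink.h>

static int send_sample(struct genl_family *family, u8 cmd,
                       int id_attr, u32 id, int temp_attr, u32 temp)
{
        struct sk_buff *skb;
        void *hdr;

        skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        hdr = genlmsg_put(skb, 0, 0, family, 0, cmd);
        if (!hdr)
                goto out_free;          /* the old code leaked skb here */

        if (nla_put_u32(skb, id_attr, id))
                goto out_cancel;
        if (nla_put_u32(skb, temp_attr, temp))
                goto out_cancel;

        genlmsg_end(skb, hdr);
        /* ... hand skb to genlmsg_multicast() or similar ... */
        return 0;

out_cancel:
        genlmsg_cancel(skb, hdr);
out_free:
        nlmsg_free(skb);
        return -EMSGSIZE;
}
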
18423 +diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
18424 +index d1b27b0522a3c..8d60e0ff67b4d 100644
18425 +--- a/drivers/tty/hvc/Kconfig
18426 ++++ b/drivers/tty/hvc/Kconfig
18427 +@@ -81,6 +81,7 @@ config HVC_DCC
18428 + bool "ARM JTAG DCC console"
18429 + depends on ARM || ARM64
18430 + select HVC_DRIVER
18431 ++ select SERIAL_CORE_CONSOLE
18432 + help
18433 + This console uses the JTAG DCC on ARM to create a console under the HVC
18434 + driver. This console is used through a JTAG only on ARM. If you don't have
18435 +diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
18436 +index 55105ac38f89b..509d1042825a1 100644
18437 +--- a/drivers/tty/hvc/hvcs.c
18438 ++++ b/drivers/tty/hvc/hvcs.c
18439 +@@ -1216,13 +1216,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
18440 +
18441 + tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
18442 +
18443 +- /*
18444 +- * This line is important because it tells hvcs_open that this
18445 +- * device needs to be re-configured the next time hvcs_open is
18446 +- * called.
18447 +- */
18448 +- tty->driver_data = NULL;
18449 +-
18450 + free_irq(irq, hvcsd);
18451 + return;
18452 + } else if (hvcsd->port.count < 0) {
18453 +@@ -1237,6 +1230,13 @@ static void hvcs_cleanup(struct tty_struct * tty)
18454 + {
18455 + struct hvcs_struct *hvcsd = tty->driver_data;
18456 +
18457 ++ /*
18458 ++ * This line is important because it tells hvcs_open that this
18459 ++ * device needs to be re-configured the next time hvcs_open is
18460 ++ * called.
18461 ++ */
18462 ++ tty->driver_data = NULL;
18463 ++
18464 + tty_port_put(&hvcsd->port);
18465 + }
18466 +
18467 +diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c
18468 +index cf20616340a1a..fe569f6294a24 100644
18469 +--- a/drivers/tty/ipwireless/network.c
18470 ++++ b/drivers/tty/ipwireless/network.c
18471 +@@ -117,7 +117,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
18472 + skb->len,
18473 + notify_packet_sent,
18474 + network);
18475 +- if (ret == -1) {
18476 ++ if (ret < 0) {
18477 + skb_pull(skb, 2);
18478 + return 0;
18479 + }
18480 +@@ -134,7 +134,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
18481 + notify_packet_sent,
18482 + network);
18483 + kfree(buf);
18484 +- if (ret == -1)
18485 ++ if (ret < 0)
18486 + return 0;
18487 + }
18488 + kfree_skb(skb);
18489 +diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
18490 +index fad3401e604d9..23584769fc292 100644
18491 +--- a/drivers/tty/ipwireless/tty.c
18492 ++++ b/drivers/tty/ipwireless/tty.c
18493 +@@ -218,7 +218,7 @@ static int ipw_write(struct tty_struct *linux_tty,
18494 + ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS,
18495 + buf, count,
18496 + ipw_write_packet_sent_callback, tty);
18497 +- if (ret == -1) {
18498 ++ if (ret < 0) {
18499 + mutex_unlock(&tty->ipw_tty_mutex);
18500 + return 0;
18501 + }
18502 +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
18503 +index 00099a8439d21..c6a1d8c4e6894 100644
18504 +--- a/drivers/tty/pty.c
18505 ++++ b/drivers/tty/pty.c
18506 +@@ -120,10 +120,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
18507 + spin_lock_irqsave(&to->port->lock, flags);
18508 + /* Stuff the data into the input queue of the other end */
18509 + c = tty_insert_flip_string(to->port, buf, c);
18510 ++ spin_unlock_irqrestore(&to->port->lock, flags);
18511 + /* And shovel */
18512 + if (c)
18513 + tty_flip_buffer_push(to->port);
18514 +- spin_unlock_irqrestore(&to->port->lock, flags);
18515 + }
18516 + return c;
18517 + }
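
The pty.c hunk narrows the critical section so tty_flip_buffer_push() runs after the port lock is dropped; pushing the flip buffer can kick processing that takes port state again, so calling it under the lock risks recursion. A sketch:

#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

static int forward_bytes(struct tty_port *port, const unsigned char *buf,
                         int count)
{
        unsigned long flags;
        int c;

        spin_lock_irqsave(&port->lock, flags);
        c = tty_insert_flip_string(port, buf, count);
        spin_unlock_irqrestore(&port->lock, flags);

        /* push outside the lock: it may re-enter port handling */
        if (c)
                tty_flip_buffer_push(port);

        return c;
}
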
18518 +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
18519 +index 87f450b7c1779..9e204f9b799a1 100644
18520 +--- a/drivers/tty/serial/8250/8250_dw.c
18521 ++++ b/drivers/tty/serial/8250/8250_dw.c
18522 +@@ -373,39 +373,6 @@ static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios)
18523 + serial8250_do_set_ldisc(p, termios);
18524 + }
18525 +
18526 +-static int dw8250_startup(struct uart_port *p)
18527 +-{
18528 +- struct dw8250_data *d = to_dw8250_data(p->private_data);
18529 +- int ret;
18530 +-
18531 +- /*
18532 +- * Some platforms may provide a reference clock shared between several
18533 +- * devices. In this case before using the serial port first we have to
18534 +- * make sure that any clock state change is known to the UART port at
18535 +- * least post factum.
18536 +- */
18537 +- if (d->clk) {
18538 +- ret = clk_notifier_register(d->clk, &d->clk_notifier);
18539 +- if (ret)
18540 +- dev_warn(p->dev, "Failed to set the clock notifier\n");
18541 +- }
18542 +-
18543 +- return serial8250_do_startup(p);
18544 +-}
18545 +-
18546 +-static void dw8250_shutdown(struct uart_port *p)
18547 +-{
18548 +- struct dw8250_data *d = to_dw8250_data(p->private_data);
18549 +-
18550 +- serial8250_do_shutdown(p);
18551 +-
18552 +- if (d->clk) {
18553 +- clk_notifier_unregister(d->clk, &d->clk_notifier);
18554 +-
18555 +- flush_work(&d->clk_work);
18556 +- }
18557 +-}
18558 +-
18559 + /*
18560 + * dw8250_fallback_dma_filter will prevent the UART from getting just any free
18561 + * channel on platforms that have DMA engines, but don't have any channels
18562 +@@ -501,8 +468,6 @@ static int dw8250_probe(struct platform_device *pdev)
18563 + p->serial_out = dw8250_serial_out;
18564 + p->set_ldisc = dw8250_set_ldisc;
18565 + p->set_termios = dw8250_set_termios;
18566 +- p->startup = dw8250_startup;
18567 +- p->shutdown = dw8250_shutdown;
18568 +
18569 + p->membase = devm_ioremap(dev, regs->start, resource_size(regs));
18570 + if (!p->membase)
18571 +@@ -622,6 +587,19 @@ static int dw8250_probe(struct platform_device *pdev)
18572 + goto err_reset;
18573 + }
18574 +
18575 ++ /*
18576 ++ * Some platforms may provide a reference clock shared between several
18577 ++ * devices. In this case any clock state change must be known to the
18578 ++ * UART port at least post factum.
18579 ++ */
18580 ++ if (data->clk) {
18581 ++ err = clk_notifier_register(data->clk, &data->clk_notifier);
18582 ++ if (err)
18583 ++ dev_warn(p->dev, "Failed to set the clock notifier\n");
18584 ++ else
18585 ++ queue_work(system_unbound_wq, &data->clk_work);
18586 ++ }
18587 ++
18588 + platform_set_drvdata(pdev, data);
18589 +
18590 + pm_runtime_set_active(dev);
18591 +@@ -648,6 +626,12 @@ static int dw8250_remove(struct platform_device *pdev)
18592 +
18593 + pm_runtime_get_sync(dev);
18594 +
18595 ++ if (data->clk) {
18596 ++ clk_notifier_unregister(data->clk, &data->clk_notifier);
18597 ++
18598 ++ flush_work(&data->clk_work);
18599 ++ }
18600 ++
18601 + serial8250_unregister_port(data->data.line);
18602 +
18603 + reset_control_assert(data->rst);
18604 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
18605 +index c71d647eb87a0..b0af13074cd36 100644
18606 +--- a/drivers/tty/serial/8250/8250_port.c
18607 ++++ b/drivers/tty/serial/8250/8250_port.c
18608 +@@ -2653,6 +2653,10 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
18609 + goto out_lock;
18610 +
18611 + port->uartclk = uartclk;
18612 ++
18613 ++ if (!tty_port_initialized(&port->state->port))
18614 ++ goto out_lock;
18615 ++
18616 + termios = &port->state->port.tty->termios;
18617 +
18618 + baud = serial8250_get_baud_rate(port, termios, NULL);
18619 +@@ -2665,7 +2669,6 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
18620 +
18621 + serial8250_set_divisor(port, baud, quot, frac);
18622 + serial_port_out(port, UART_LCR, up->lcr);
18623 +- serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
18624 +
18625 + spin_unlock_irqrestore(&port->lock, flags);
18626 + serial8250_rpm_put(up);
18627 +diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
18628 +index 9409be982aa64..20b98a3ba0466 100644
18629 +--- a/drivers/tty/serial/Kconfig
18630 ++++ b/drivers/tty/serial/Kconfig
18631 +@@ -8,6 +8,7 @@ menu "Serial drivers"
18632 +
18633 + config SERIAL_EARLYCON
18634 + bool
18635 ++ depends on SERIAL_CORE
18636 + help
18637 + Support for early consoles with the earlycon parameter. This enables
18638 + the console before standard serial driver is probed. The console is
18639 +@@ -520,6 +521,7 @@ config SERIAL_IMX_EARLYCON
18640 + depends on ARCH_MXC || COMPILE_TEST
18641 + depends on OF
18642 + select SERIAL_EARLYCON
18643 ++ select SERIAL_CORE_CONSOLE
18644 + help
18645 + If you have enabled the earlycon on the Freescale IMX
18646 + CPU you can make it the earlycon by answering Y to this option.
18647 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
18648 +index 7ca6422492241..e17465a8a773c 100644
18649 +--- a/drivers/tty/serial/fsl_lpuart.c
18650 ++++ b/drivers/tty/serial/fsl_lpuart.c
18651 +@@ -649,26 +649,24 @@ static int lpuart32_poll_init(struct uart_port *port)
18652 + spin_lock_irqsave(&sport->port.lock, flags);
18653 +
18654 + /* Disable Rx & Tx */
18655 +- lpuart32_write(&sport->port, UARTCTRL, 0);
18656 ++ lpuart32_write(&sport->port, 0, UARTCTRL);
18657 +
18658 + temp = lpuart32_read(&sport->port, UARTFIFO);
18659 +
18660 + /* Enable Rx and Tx FIFO */
18661 +- lpuart32_write(&sport->port, UARTFIFO,
18662 +- temp | UARTFIFO_RXFE | UARTFIFO_TXFE);
18663 ++ lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
18664 +
18665 + /* flush Tx and Rx FIFO */
18666 +- lpuart32_write(&sport->port, UARTFIFO,
18667 +- UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH);
18668 ++ lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
18669 +
18670 + /* explicitly clear RDRF */
18671 + if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
18672 + lpuart32_read(&sport->port, UARTDATA);
18673 +- lpuart32_write(&sport->port, UARTFIFO, UARTFIFO_RXUF);
18674 ++ lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO);
18675 + }
18676 +
18677 + /* Enable Rx and Tx */
18678 +- lpuart32_write(&sport->port, UARTCTRL, UARTCTRL_RE | UARTCTRL_TE);
18679 ++ lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
18680 + spin_unlock_irqrestore(&sport->port.lock, flags);
18681 +
18682 + return 0;
18683 +@@ -677,12 +675,12 @@ static int lpuart32_poll_init(struct uart_port *port)
18684 + static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c)
18685 + {
18686 + lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
18687 +- lpuart32_write(port, UARTDATA, c);
18688 ++ lpuart32_write(port, c, UARTDATA);
18689 + }
18690 +
18691 + static int lpuart32_poll_get_char(struct uart_port *port)
18692 + {
18693 +- if (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF))
18694 ++ if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF))
18695 + return NO_POLL_CHAR;
18696 +
18697 + return lpuart32_read(port, UARTDATA);
18698 +diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
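
The fsl_lpuart polling fixes above are all one bug: the driver's 32-bit accessor takes (port, value, offset), the reverse of the familiar writel(value, address) ordering, so the old calls wrote register offsets into the data register. A defensive sketch of such an accessor with the ordering spelled out; the in-tree helper also handles big-endian register layouts, which this sketch omits:

#include <linux/io.h>
#include <linux/serial_core.h>

/* note the argument order: value before register offset */
static void uart32_write(struct uart_port *port, u32 val, u32 off)
{
        writel(val, port->membase + off);
}
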
18699 +index dea649ee173ba..02a69e20014b1 100644
18700 +--- a/drivers/usb/cdns3/gadget.c
18701 ++++ b/drivers/usb/cdns3/gadget.c
18702 +@@ -2990,12 +2990,12 @@ void cdns3_gadget_exit(struct cdns3 *cdns)
18703 +
18704 + priv_dev = cdns->gadget_dev;
18705 +
18706 +- devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
18707 +
18708 + pm_runtime_mark_last_busy(cdns->dev);
18709 + pm_runtime_put_autosuspend(cdns->dev);
18710 +
18711 + usb_del_gadget_udc(&priv_dev->gadget);
18712 ++ devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
18713 +
18714 + cdns3_free_all_eps(priv_dev);
18715 +
18716 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
18717 +index 7f6f3ab5b8a67..24d79eec6654e 100644
18718 +--- a/drivers/usb/class/cdc-acm.c
18719 ++++ b/drivers/usb/class/cdc-acm.c
18720 +@@ -1243,9 +1243,21 @@ static int acm_probe(struct usb_interface *intf,
18721 + }
18722 + }
18723 + } else {
18724 ++ int class = -1;
18725 ++
18726 + data_intf_num = union_header->bSlaveInterface0;
18727 + control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
18728 + data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
18729 ++
18730 ++ if (control_interface)
18731 ++ class = control_interface->cur_altsetting->desc.bInterfaceClass;
18732 ++
18733 ++ if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) {
18734 ++ dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n");
18735 ++ combined_interfaces = 1;
18736 ++ control_interface = data_interface = intf;
18737 ++ goto look_for_collapsed_interface;
18738 ++ }
18739 + }
18740 +
18741 + if (!control_interface || !data_interface) {
18742 +@@ -1906,6 +1918,17 @@ static const struct usb_device_id acm_ids[] = {
18743 + .driver_info = IGNORE_DEVICE,
18744 + },
18745 +
18746 ++ /* Exclude ETAS ES58x */
18747 ++ { USB_DEVICE(0x108c, 0x0159), /* ES581.4 */
18748 ++ .driver_info = IGNORE_DEVICE,
18749 ++ },
18750 ++ { USB_DEVICE(0x108c, 0x0168), /* ES582.1 */
18751 ++ .driver_info = IGNORE_DEVICE,
18752 ++ },
18753 ++ { USB_DEVICE(0x108c, 0x0169), /* ES584.1 */
18754 ++ .driver_info = IGNORE_DEVICE,
18755 ++ },
18756 ++
18757 + { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
18758 + .driver_info = SEND_ZERO_PACKET,
18759 + },
18760 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
18761 +index 7f5de956a2fc8..02d0cfd23bb29 100644
18762 +--- a/drivers/usb/class/cdc-wdm.c
18763 ++++ b/drivers/usb/class/cdc-wdm.c
18764 +@@ -58,6 +58,9 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
18765 +
18766 + #define WDM_MAX 16
18767 +
18768 ++/* we cannot wait forever at flush() */
18769 ++#define WDM_FLUSH_TIMEOUT (30 * HZ)
18770 ++
18771 + /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
18772 + #define WDM_DEFAULT_BUFSIZE 256
18773 +
18774 +@@ -151,7 +154,7 @@ static void wdm_out_callback(struct urb *urb)
18775 + kfree(desc->outbuf);
18776 + desc->outbuf = NULL;
18777 + clear_bit(WDM_IN_USE, &desc->flags);
18778 +- wake_up(&desc->wait);
18779 ++ wake_up_all(&desc->wait);
18780 + }
18781 +
18782 + static void wdm_in_callback(struct urb *urb)
18783 +@@ -393,6 +396,9 @@ static ssize_t wdm_write
18784 + if (test_bit(WDM_RESETTING, &desc->flags))
18785 + r = -EIO;
18786 +
18787 ++ if (test_bit(WDM_DISCONNECTING, &desc->flags))
18788 ++ r = -ENODEV;
18789 ++
18790 + if (r < 0) {
18791 + rv = r;
18792 + goto out_free_mem_pm;
18793 +@@ -424,6 +430,7 @@ static ssize_t wdm_write
18794 + if (rv < 0) {
18795 + desc->outbuf = NULL;
18796 + clear_bit(WDM_IN_USE, &desc->flags);
18797 ++ wake_up_all(&desc->wait); /* for wdm_wait_for_response() */
18798 + dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
18799 + rv = usb_translate_errors(rv);
18800 + goto out_free_mem_pm;
18801 +@@ -583,28 +590,58 @@ err:
18802 + return rv;
18803 + }
18804 +
18805 +-static int wdm_flush(struct file *file, fl_owner_t id)
18806 ++static int wdm_wait_for_response(struct file *file, long timeout)
18807 + {
18808 + struct wdm_device *desc = file->private_data;
18809 ++ long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */
18810 ++
18811 ++ /*
18812 ++ * Needs both flags. We cannot do with one because resetting it would
18813 ++ * cause a race with write() yet we need to signal a disconnect.
18814 ++ */
18815 ++ rv = wait_event_interruptible_timeout(desc->wait,
18816 ++ !test_bit(WDM_IN_USE, &desc->flags) ||
18817 ++ test_bit(WDM_DISCONNECTING, &desc->flags),
18818 ++ timeout);
18819 +
18820 +- wait_event(desc->wait,
18821 +- /*
18822 +- * needs both flags. We cannot do with one
18823 +- * because resetting it would cause a race
18824 +- * with write() yet we need to signal
18825 +- * a disconnect
18826 +- */
18827 +- !test_bit(WDM_IN_USE, &desc->flags) ||
18828 +- test_bit(WDM_DISCONNECTING, &desc->flags));
18829 +-
18830 +- /* cannot dereference desc->intf if WDM_DISCONNECTING */
18831 ++ /*
18832 ++ * To report the correct error. This is best effort.
18833 ++ * We are inevitably racing with the hardware.
18834 ++ */
18835 + if (test_bit(WDM_DISCONNECTING, &desc->flags))
18836 + return -ENODEV;
18837 +- if (desc->werr < 0)
18838 +- dev_err(&desc->intf->dev, "Error in flush path: %d\n",
18839 +- desc->werr);
18840 ++ if (!rv)
18841 ++ return -EIO;
18842 ++ if (rv < 0)
18843 ++ return -EINTR;
18844 ++
18845 ++ spin_lock_irq(&desc->iuspin);
18846 ++ rv = desc->werr;
18847 ++ desc->werr = 0;
18848 ++ spin_unlock_irq(&desc->iuspin);
18849 ++
18850 ++ return usb_translate_errors(rv);
18851 ++
18852 ++}
18853 ++
18854 ++/*
18855 ++ * You need to send a signal when you react to malicious or defective hardware.
18856 ++ * Also, don't abort when fsync() returned -EINVAL, for older kernels which do
18857 ++ * not implement wdm_flush() will return -EINVAL.
18858 ++ */
18859 ++static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
18860 ++{
18861 ++ return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT);
18862 ++}
18863 +
18864 +- return usb_translate_errors(desc->werr);
18865 ++/*
18866 ++ * Same with wdm_fsync(), except it uses finite timeout in order to react to
18867 ++ * malicious or defective hardware which ceased communication after close() was
18868 ++ * implicitly called due to process termination.
18869 ++ */
18870 ++static int wdm_flush(struct file *file, fl_owner_t id)
18871 ++{
18872 ++ return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT);
18873 + }
18874 +
18875 + static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
18876 +@@ -729,6 +766,7 @@ static const struct file_operations wdm_fops = {
18877 + .owner = THIS_MODULE,
18878 + .read = wdm_read,
18879 + .write = wdm_write,
18880 ++ .fsync = wdm_fsync,
18881 + .open = wdm_open,
18882 + .flush = wdm_flush,
18883 + .release = wdm_release,
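[Editor's note on the cdc-wdm hunks above: the old uninterruptible wait in wdm_flush() becomes a shared helper that distinguishes disconnect, timeout, signal delivery, and a stored write error — wdm_fsync() waits with MAX_SCHEDULE_TIMEOUT, wdm_flush() with the finite WDM_FLUSH_TIMEOUT. A stand-alone sketch of that errno mapping; the helper name and plain-int parameters are illustrative, not kernel symbols.]

    #include <errno.h>
    #include <stdio.h>

    /* Model of wdm_wait_for_response(): rv is the long result of
     * wait_event_interruptible_timeout() (0 = timed out, <0 = signal,
     * >0 = condition met), werr is the write error saved by the URB
     * completion handler, disconnected mirrors WDM_DISCONNECTING.
     */
    static int map_wait_result(long rv, int disconnected, int werr)
    {
        if (disconnected)
            return -ENODEV;  /* best effort; we race with the hardware */
        if (rv == 0)
            return -EIO;     /* finite timeout expired (the flush path) */
        if (rv < 0)
            return -EINTR;   /* signal arrived: the wait is now interruptible */
        return werr;         /* condition met: surface any stored write error */
    }

    int main(void)
    {
        printf("%d\n", map_wait_result(0, 0, 0));  /* -5, i.e. -EIO on timeout */
        return 0;
    }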
18884 +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
18885 +index 7bc23469f4e4e..27e83e55a5901 100644
18886 +--- a/drivers/usb/core/urb.c
18887 ++++ b/drivers/usb/core/urb.c
18888 +@@ -772,11 +772,12 @@ void usb_block_urb(struct urb *urb)
18889 + EXPORT_SYMBOL_GPL(usb_block_urb);
18890 +
18891 + /**
18892 +- * usb_kill_anchored_urbs - cancel transfer requests en masse
18893 ++ * usb_kill_anchored_urbs - kill all URBs associated with an anchor
18894 + * @anchor: anchor the requests are bound to
18895 + *
18896 +- * this allows all outstanding URBs to be killed starting
18897 +- * from the back of the queue
18898 ++ * This kills all outstanding URBs starting from the back of the queue,
18899 ++ * with guarantee that no completer callbacks will take place from the
18900 ++ * anchor after this function returns.
18901 + *
18902 + * This routine should not be called by a driver after its disconnect
18903 + * method has returned.
18904 +@@ -784,20 +785,26 @@ EXPORT_SYMBOL_GPL(usb_block_urb);
18905 + void usb_kill_anchored_urbs(struct usb_anchor *anchor)
18906 + {
18907 + struct urb *victim;
18908 ++ int surely_empty;
18909 +
18910 +- spin_lock_irq(&anchor->lock);
18911 +- while (!list_empty(&anchor->urb_list)) {
18912 +- victim = list_entry(anchor->urb_list.prev, struct urb,
18913 +- anchor_list);
18914 +- /* we must make sure the URB isn't freed before we kill it*/
18915 +- usb_get_urb(victim);
18916 +- spin_unlock_irq(&anchor->lock);
18917 +- /* this will unanchor the URB */
18918 +- usb_kill_urb(victim);
18919 +- usb_put_urb(victim);
18920 ++ do {
18921 + spin_lock_irq(&anchor->lock);
18922 +- }
18923 +- spin_unlock_irq(&anchor->lock);
18924 ++ while (!list_empty(&anchor->urb_list)) {
18925 ++ victim = list_entry(anchor->urb_list.prev,
18926 ++ struct urb, anchor_list);
18927 ++ /* make sure the URB isn't freed before we kill it */
18928 ++ usb_get_urb(victim);
18929 ++ spin_unlock_irq(&anchor->lock);
18930 ++ /* this will unanchor the URB */
18931 ++ usb_kill_urb(victim);
18932 ++ usb_put_urb(victim);
18933 ++ spin_lock_irq(&anchor->lock);
18934 ++ }
18935 ++ surely_empty = usb_anchor_check_wakeup(anchor);
18936 ++
18937 ++ spin_unlock_irq(&anchor->lock);
18938 ++ cpu_relax();
18939 ++ } while (!surely_empty);
18940 + }
18941 + EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
18942 +
18943 +@@ -816,21 +823,27 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
18944 + void usb_poison_anchored_urbs(struct usb_anchor *anchor)
18945 + {
18946 + struct urb *victim;
18947 ++ int surely_empty;
18948 +
18949 +- spin_lock_irq(&anchor->lock);
18950 +- anchor->poisoned = 1;
18951 +- while (!list_empty(&anchor->urb_list)) {
18952 +- victim = list_entry(anchor->urb_list.prev, struct urb,
18953 +- anchor_list);
18954 +- /* we must make sure the URB isn't freed before we kill it*/
18955 +- usb_get_urb(victim);
18956 +- spin_unlock_irq(&anchor->lock);
18957 +- /* this will unanchor the URB */
18958 +- usb_poison_urb(victim);
18959 +- usb_put_urb(victim);
18960 ++ do {
18961 + spin_lock_irq(&anchor->lock);
18962 +- }
18963 +- spin_unlock_irq(&anchor->lock);
18964 ++ anchor->poisoned = 1;
18965 ++ while (!list_empty(&anchor->urb_list)) {
18966 ++ victim = list_entry(anchor->urb_list.prev,
18967 ++ struct urb, anchor_list);
18968 ++ /* make sure the URB isn't freed before we kill it */
18969 ++ usb_get_urb(victim);
18970 ++ spin_unlock_irq(&anchor->lock);
18971 ++ /* this will unanchor the URB */
18972 ++ usb_poison_urb(victim);
18973 ++ usb_put_urb(victim);
18974 ++ spin_lock_irq(&anchor->lock);
18975 ++ }
18976 ++ surely_empty = usb_anchor_check_wakeup(anchor);
18977 ++
18978 ++ spin_unlock_irq(&anchor->lock);
18979 ++ cpu_relax();
18980 ++ } while (!surely_empty);
18981 + }
18982 + EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
18983 +
18984 +@@ -970,14 +983,20 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
18985 + {
18986 + struct urb *victim;
18987 + unsigned long flags;
18988 ++ int surely_empty;
18989 ++
18990 ++ do {
18991 ++ spin_lock_irqsave(&anchor->lock, flags);
18992 ++ while (!list_empty(&anchor->urb_list)) {
18993 ++ victim = list_entry(anchor->urb_list.prev,
18994 ++ struct urb, anchor_list);
18995 ++ __usb_unanchor_urb(victim, anchor);
18996 ++ }
18997 ++ surely_empty = usb_anchor_check_wakeup(anchor);
18998 +
18999 +- spin_lock_irqsave(&anchor->lock, flags);
19000 +- while (!list_empty(&anchor->urb_list)) {
19001 +- victim = list_entry(anchor->urb_list.prev, struct urb,
19002 +- anchor_list);
19003 +- __usb_unanchor_urb(victim, anchor);
19004 +- }
19005 +- spin_unlock_irqrestore(&anchor->lock, flags);
19006 ++ spin_unlock_irqrestore(&anchor->lock, flags);
19007 ++ cpu_relax();
19008 ++ } while (!surely_empty);
19009 + }
19010 +
19011 + EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
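[Editor's note: all three anchor cleanup routines in the urb.c hunks converge on one shape — drain the list, then re-check usb_anchor_check_wakeup() under the lock, because an URB can already be off the list while its completion handler is still running and may re-anchor or wake waiters. A condensed kernel-style restatement of that shape (a sketch, not compilable on its own):]

    do {
            spin_lock_irq(&anchor->lock);
            while (!list_empty(&anchor->urb_list)) {
                    victim = list_entry(anchor->urb_list.prev,
                                        struct urb, anchor_list);
                    usb_get_urb(victim);          /* pin it across the unlock */
                    spin_unlock_irq(&anchor->lock);
                    usb_kill_urb(victim);         /* or usb_poison_urb() */
                    usb_put_urb(victim);
                    spin_lock_irq(&anchor->lock);
            }
            /* only this check proves no completion is still in flight */
            surely_empty = usb_anchor_check_wakeup(anchor);
            spin_unlock_irq(&anchor->lock);
            cpu_relax();                          /* polite busy-wait */
    } while (!surely_empty);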
19012 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
19013 +index 5b9d23991c99d..d367da4c6f850 100644
19014 +--- a/drivers/usb/dwc2/gadget.c
19015 ++++ b/drivers/usb/dwc2/gadget.c
19016 +@@ -713,8 +713,11 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
19017 + */
19018 + static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
19019 + {
19020 ++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
19021 + int is_isoc = hs_ep->isochronous;
19022 + unsigned int maxsize;
19023 ++ u32 mps = hs_ep->ep.maxpacket;
19024 ++ int dir_in = hs_ep->dir_in;
19025 +
19026 + if (is_isoc)
19027 + maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
19028 +@@ -723,6 +726,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
19029 + else
19030 + maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
19031 +
19032 ++ /* Interrupt OUT EP with mps not multiple of 4 */
19033 ++ if (hs_ep->index)
19034 ++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
19035 ++ maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
19036 ++
19037 + return maxsize;
19038 + }
19039 +
19040 +@@ -738,11 +746,14 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
19041 + * Isochronous - descriptor rx/tx bytes bitfield limit,
19042 + * Control In/Bulk/Interrupt - multiple of mps. This will allow to not
19043 + * have concatenations from various descriptors within one packet.
19044 ++ * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds
19045 ++ * to a single descriptor.
19046 + *
19047 + * Selects corresponding mask for RX/TX bytes as well.
19048 + */
19049 + static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
19050 + {
19051 ++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
19052 + u32 mps = hs_ep->ep.maxpacket;
19053 + int dir_in = hs_ep->dir_in;
19054 + u32 desc_size = 0;
19055 +@@ -766,6 +777,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
19056 + desc_size -= desc_size % mps;
19057 + }
19058 +
19059 ++ /* Interrupt OUT EP with mps not multiple of 4 */
19060 ++ if (hs_ep->index)
19061 ++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
19062 ++ desc_size = mps;
19063 ++ *mask = DEV_DMA_NBYTES_MASK;
19064 ++ }
19065 ++
19066 + return desc_size;
19067 + }
19068 +
19069 +@@ -1123,13 +1141,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
19070 + length += (mps - (length % mps));
19071 + }
19072 +
19073 +- /*
19074 +- * If more data to send, adjust DMA for EP0 out data stage.
19075 +- * ureq->dma stays unchanged, hence increment it by already
19076 +- * passed passed data count before starting new transaction.
19077 +- */
19078 +- if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
19079 +- continuing)
19080 ++ if (continuing)
19081 + offset = ureq->actual;
19082 +
19083 + /* Fill DDMA chain entries */
19084 +@@ -2320,22 +2332,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
19085 + */
19086 + static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
19087 + {
19088 ++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
19089 + struct dwc2_hsotg *hsotg = hs_ep->parent;
19090 + unsigned int bytes_rem = 0;
19091 ++ unsigned int bytes_rem_correction = 0;
19092 + struct dwc2_dma_desc *desc = hs_ep->desc_list;
19093 + int i;
19094 + u32 status;
19095 ++ u32 mps = hs_ep->ep.maxpacket;
19096 ++ int dir_in = hs_ep->dir_in;
19097 +
19098 + if (!desc)
19099 + return -EINVAL;
19100 +
19101 ++ /* Interrupt OUT EP with mps not multiple of 4 */
19102 ++ if (hs_ep->index)
19103 ++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
19104 ++ bytes_rem_correction = 4 - (mps % 4);
19105 ++
19106 + for (i = 0; i < hs_ep->desc_count; ++i) {
19107 + status = desc->status;
19108 + bytes_rem += status & DEV_DMA_NBYTES_MASK;
19109 ++ bytes_rem -= bytes_rem_correction;
19110 +
19111 + if (status & DEV_DMA_STS_MASK)
19112 + dev_err(hsotg->dev, "descriptor %d closed with %x\n",
19113 + i, status & DEV_DMA_STS_MASK);
19114 ++
19115 ++ if (status & DEV_DMA_L)
19116 ++ break;
19117 ++
19118 + desc++;
19119 + }
19120 +
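[Editor's note: the recurring dwc2 test `usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)` targets interrupt OUT endpoints whose wMaxPacketSize is not 4-byte aligned. Per the patch's own comment, each DDMA descriptor then carries a single packet, and its programmed length is padded to the 4-byte boundary, so the residue read back over-counts by 4 - (mps % 4) bytes per descriptor. A worked stand-alone example; mps = 10 is a made-up value:]

    #include <stdio.h>

    int main(void)
    {
        unsigned int mps = 10;   /* hypothetical wMaxPacketSize, not 4-aligned */
        unsigned int correction = (mps % 4) ? 4 - (mps % 4) : 0;

        /* matches bytes_rem_correction in dwc2_gadget_get_xfersize_ddma() */
        printf("per-descriptor correction: %u bytes\n", correction);  /* 2 */
        return 0;
    }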
19121 +diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
19122 +index 8f9d061c4d5fa..a3611cdd1deaa 100644
19123 +--- a/drivers/usb/dwc2/params.c
19124 ++++ b/drivers/usb/dwc2/params.c
19125 +@@ -860,7 +860,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
19126 + int dwc2_init_params(struct dwc2_hsotg *hsotg)
19127 + {
19128 + const struct of_device_id *match;
19129 +- void (*set_params)(void *data);
19130 ++ void (*set_params)(struct dwc2_hsotg *data);
19131 +
19132 + dwc2_set_default_params(hsotg);
19133 + dwc2_get_device_properties(hsotg);
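[Editor's note: the one-line params.c change is a type-correctness fix — the of_device_id data was being called through void (*)(void *) while every setter is declared with a struct dwc2_hsotg * parameter, and calling a function through a mismatched prototype is undefined behaviour in C (and is what Clang's Control Flow Integrity rejects). A compilable miniature; all names below are stand-ins:]

    struct dwc2_hsotg { int dummy; };

    static void rk_set_params(struct dwc2_hsotg *hsotg) { (void)hsotg; }

    /* Calling through 'bad' is UB; the hunk fixes the declared type instead */
    static void (*bad)(void *) = (void (*)(void *))rk_set_params;
    static void (*good)(struct dwc2_hsotg *) = rk_set_params;

    int main(void)
    {
        struct dwc2_hsotg hs = { 0 };

        good(&hs);   /* well-defined: pointer type matches the definition */
        (void)bad;   /* never call through the mismatched type */
        return 0;
    }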
19134 +diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
19135 +index db9fd4bd1a38c..b28e90e0b685d 100644
19136 +--- a/drivers/usb/dwc2/platform.c
19137 ++++ b/drivers/usb/dwc2/platform.c
19138 +@@ -584,12 +584,16 @@ static int dwc2_driver_probe(struct platform_device *dev)
19139 + if (retval) {
19140 + hsotg->gadget.udc = NULL;
19141 + dwc2_hsotg_remove(hsotg);
19142 +- goto error_init;
19143 ++ goto error_debugfs;
19144 + }
19145 + }
19146 + #endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
19147 + return 0;
19148 +
19149 ++error_debugfs:
19150 ++ dwc2_debugfs_exit(hsotg);
19151 ++ if (hsotg->hcd_enabled)
19152 ++ dwc2_hcd_remove(hsotg);
19153 + error_init:
19154 + if (hsotg->params.activate_stm_id_vb_detection)
19155 + regulator_disable(hsotg->usb33d);
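[Editor's note: the platform.c hunk is a classic unwind-ordering fix — once debugfs (and possibly the HCD) are up, a gadget-registration failure must tear them down before falling through to the base cleanup. The shape in stand-alone form; every helper name here is invented for illustration:]

    #include <stdio.h>

    static int bring_up_debugfs(void) { return 0; }
    static int bring_up_gadget(void)  { return -1; }   /* simulate failure */
    static void tear_down_debugfs(void) { puts("debugfs down"); }
    static void tear_down_base(void)    { puts("base down"); }

    static int probe(void)
    {
        int ret = bring_up_debugfs();
        if (ret)
            goto error_init;
        ret = bring_up_gadget();
        if (ret)
            goto error_debugfs;   /* new label: undo debugfs first */
        return 0;

    error_debugfs:
        tear_down_debugfs();
    error_init:
        tear_down_base();
        return ret;
    }

    int main(void) { return probe() ? 1 : 0; }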
19156 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
19157 +index 2eb34c8b4065f..2f9f4ad562d4e 100644
19158 +--- a/drivers/usb/dwc3/core.c
19159 ++++ b/drivers/usb/dwc3/core.c
19160 +@@ -119,6 +119,7 @@ static void __dwc3_set_mode(struct work_struct *work)
19161 + struct dwc3 *dwc = work_to_dwc(work);
19162 + unsigned long flags;
19163 + int ret;
19164 ++ u32 reg;
19165 +
19166 + if (dwc->dr_mode != USB_DR_MODE_OTG)
19167 + return;
19168 +@@ -172,6 +173,11 @@ static void __dwc3_set_mode(struct work_struct *work)
19169 + otg_set_vbus(dwc->usb2_phy->otg, true);
19170 + phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
19171 + phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
19172 ++ if (dwc->dis_split_quirk) {
19173 ++ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
19174 ++ reg |= DWC3_GUCTL3_SPLITDISABLE;
19175 ++ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
19176 ++ }
19177 + }
19178 + break;
19179 + case DWC3_GCTL_PRTCAP_DEVICE:
19180 +@@ -929,13 +935,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
19181 + */
19182 + dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
19183 +
19184 +- /* Handle USB2.0-only core configuration */
19185 +- if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
19186 +- DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
19187 +- if (dwc->maximum_speed == USB_SPEED_SUPER)
19188 +- dwc->maximum_speed = USB_SPEED_HIGH;
19189 +- }
19190 +-
19191 + ret = dwc3_phy_setup(dwc);
19192 + if (ret)
19193 + goto err0;
19194 +@@ -1356,6 +1355,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
19195 + dwc->dis_metastability_quirk = device_property_read_bool(dev,
19196 + "snps,dis_metastability_quirk");
19197 +
19198 ++ dwc->dis_split_quirk = device_property_read_bool(dev,
19199 ++ "snps,dis-split-quirk");
19200 ++
19201 + dwc->lpm_nyet_threshold = lpm_nyet_threshold;
19202 + dwc->tx_de_emphasis = tx_de_emphasis;
19203 +
19204 +@@ -1381,6 +1383,8 @@ bool dwc3_has_imod(struct dwc3 *dwc)
19205 + static void dwc3_check_params(struct dwc3 *dwc)
19206 + {
19207 + struct device *dev = dwc->dev;
19208 ++ unsigned int hwparam_gen =
19209 ++ DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
19210 +
19211 + /* Check for proper value of imod_interval */
19212 + if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
19213 +@@ -1412,17 +1416,23 @@ static void dwc3_check_params(struct dwc3 *dwc)
19214 + dwc->maximum_speed);
19215 + fallthrough;
19216 + case USB_SPEED_UNKNOWN:
19217 +- /* default to superspeed */
19218 +- dwc->maximum_speed = USB_SPEED_SUPER;
19219 +-
19220 +- /*
19221 +- * default to superspeed plus if we are capable.
19222 +- */
19223 +- if ((DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) &&
19224 +- (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
19225 +- DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
19226 ++ switch (hwparam_gen) {
19227 ++ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
19228 + dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
19229 +-
19230 ++ break;
19231 ++ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
19232 ++ if (DWC3_IP_IS(DWC32))
19233 ++ dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
19234 ++ else
19235 ++ dwc->maximum_speed = USB_SPEED_SUPER;
19236 ++ break;
19237 ++ case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
19238 ++ dwc->maximum_speed = USB_SPEED_HIGH;
19239 ++ break;
19240 ++ default:
19241 ++ dwc->maximum_speed = USB_SPEED_SUPER;
19242 ++ break;
19243 ++ }
19244 + break;
19245 + }
19246 + }
19247 +@@ -1865,10 +1875,26 @@ static int dwc3_resume(struct device *dev)
19248 +
19249 + return 0;
19250 + }
19251 ++
19252 ++static void dwc3_complete(struct device *dev)
19253 ++{
19254 ++ struct dwc3 *dwc = dev_get_drvdata(dev);
19255 ++ u32 reg;
19256 ++
19257 ++ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
19258 ++ dwc->dis_split_quirk) {
19259 ++ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
19260 ++ reg |= DWC3_GUCTL3_SPLITDISABLE;
19261 ++ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
19262 ++ }
19263 ++}
19264 ++#else
19265 ++#define dwc3_complete NULL
19266 + #endif /* CONFIG_PM_SLEEP */
19267 +
19268 + static const struct dev_pm_ops dwc3_dev_pm_ops = {
19269 + SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
19270 ++ .complete = dwc3_complete,
19271 + SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
19272 + dwc3_runtime_idle)
19273 + };
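[Editor's note: the dwc3_check_params() rewrite turns the old two-step default into an explicit mapping from the SSPHY interface field to a default speed, folding in the USB2-only handling deleted from dwc3_core_init(). A stand-alone model of the mapping; the enum values and names are illustrative:]

    #include <stdio.h>

    enum phy_ifc { IFC_DIS, IFC_GEN1, IFC_GEN2, IFC_OTHER };

    static const char *default_speed(enum phy_ifc ifc, int is_dwc32)
    {
        switch (ifc) {
        case IFC_GEN2: return "super-speed-plus";
        case IFC_GEN1: return is_dwc32 ? "super-speed-plus" : "super-speed";
        case IFC_DIS:  return "high-speed";   /* USB2-only core */
        default:       return "super-speed";
        }
    }

    int main(void)
    {
        printf("%s\n", default_speed(IFC_DIS, 0));   /* high-speed */
        printf("%s\n", default_speed(IFC_GEN1, 1));  /* super-speed-plus */
        return 0;
    }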
19274 +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
19275 +index 2f04b3e42bf1c..ba0f743f35528 100644
19276 +--- a/drivers/usb/dwc3/core.h
19277 ++++ b/drivers/usb/dwc3/core.h
19278 +@@ -138,6 +138,7 @@
19279 + #define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10))
19280 +
19281 + #define DWC3_GHWPARAMS8 0xc600
19282 ++#define DWC3_GUCTL3 0xc60c
19283 + #define DWC3_GFLADJ 0xc630
19284 +
19285 + /* Device Registers */
19286 +@@ -380,6 +381,9 @@
19287 + /* Global User Control Register 2 */
19288 + #define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
19289 +
19290 ++/* Global User Control Register 3 */
19291 ++#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
19292 ++
19293 + /* Device Configuration Register */
19294 + #define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
19295 + #define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
19296 +@@ -1052,6 +1056,7 @@ struct dwc3_scratchpad_array {
19297 + * 2 - No de-emphasis
19298 + * 3 - Reserved
19299 + * @dis_metastability_quirk: set to disable metastability quirk.
19300 ++ * @dis_split_quirk: set to disable split boundary.
19301 + * @imod_interval: set the interrupt moderation interval in 250ns
19302 + * increments or 0 to disable.
19303 + */
19304 +@@ -1245,6 +1250,8 @@ struct dwc3 {
19305 +
19306 + unsigned dis_metastability_quirk:1;
19307 +
19308 ++ unsigned dis_split_quirk:1;
19309 ++
19310 + u16 imod_interval;
19311 + };
19312 +
19313 +diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
19314 +index 7df1150129354..2816e4a9813ad 100644
19315 +--- a/drivers/usb/dwc3/dwc3-of-simple.c
19316 ++++ b/drivers/usb/dwc3/dwc3-of-simple.c
19317 +@@ -176,6 +176,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
19318 + { .compatible = "cavium,octeon-7130-usb-uctl" },
19319 + { .compatible = "sprd,sc9860-dwc3" },
19320 + { .compatible = "allwinner,sun50i-h6-dwc3" },
19321 ++ { .compatible = "hisilicon,hi3670-dwc3" },
19322 + { /* Sentinel */ }
19323 + };
19324 + MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
19325 +diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
19326 +index 1f638759a9533..92a7c3a839454 100644
19327 +--- a/drivers/usb/gadget/function/f_ncm.c
19328 ++++ b/drivers/usb/gadget/function/f_ncm.c
19329 +@@ -85,8 +85,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
19330 + /* peak (theoretical) bulk transfer rate in bits-per-second */
19331 + static inline unsigned ncm_bitrate(struct usb_gadget *g)
19332 + {
19333 +- if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
19334 +- return 13 * 1024 * 8 * 1000 * 8;
19335 ++ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
19336 ++ return 4250000000U;
19337 ++ else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
19338 ++ return 3750000000U;
19339 + else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
19340 + return 13 * 512 * 8 * 1000 * 8;
19341 + else
19342 +@@ -1534,7 +1536,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
19343 + fs_ncm_notify_desc.bEndpointAddress;
19344 +
19345 + status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
19346 +- ncm_ss_function, NULL);
19347 ++ ncm_ss_function, ncm_ss_function);
19348 + if (status)
19349 + goto fail;
19350 +
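[Editor's note: the f_ncm bitrate figure for high speed is easy to verify — 13 bulk packets of 512 bytes per 125 us microframe, 8000 microframes per second. The SuperSpeed and SuperSpeed Plus figures (3750000000 and 4250000000) are fixed constants introduced by the patch. A quick check:]

    #include <stdio.h>

    int main(void)
    {
        /* written in the code as 13 * 512 * 8 * 1000 * 8 */
        unsigned long hs = 13UL * 512 * 8 * 1000 * 8;

        printf("high-speed peak: %lu bps\n", hs);   /* 425984000 */
        return 0;
    }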
19351 +diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
19352 +index 68697f596066c..64a4112068fc8 100644
19353 +--- a/drivers/usb/gadget/function/f_printer.c
19354 ++++ b/drivers/usb/gadget/function/f_printer.c
19355 +@@ -31,6 +31,7 @@
19356 + #include <linux/types.h>
19357 + #include <linux/ctype.h>
19358 + #include <linux/cdev.h>
19359 ++#include <linux/kref.h>
19360 +
19361 + #include <asm/byteorder.h>
19362 + #include <linux/io.h>
19363 +@@ -64,7 +65,7 @@ struct printer_dev {
19364 + struct usb_gadget *gadget;
19365 + s8 interface;
19366 + struct usb_ep *in_ep, *out_ep;
19367 +-
19368 ++ struct kref kref;
19369 + struct list_head rx_reqs; /* List of free RX structs */
19370 + struct list_head rx_reqs_active; /* List of Active RX xfers */
19371 + struct list_head rx_buffers; /* List of completed xfers */
19372 +@@ -218,6 +219,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
19373 +
19374 + /*-------------------------------------------------------------------------*/
19375 +
19376 ++static void printer_dev_free(struct kref *kref)
19377 ++{
19378 ++ struct printer_dev *dev = container_of(kref, struct printer_dev, kref);
19379 ++
19380 ++ kfree(dev);
19381 ++}
19382 ++
19383 + static struct usb_request *
19384 + printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags)
19385 + {
19386 +@@ -353,6 +361,7 @@ printer_open(struct inode *inode, struct file *fd)
19387 +
19388 + spin_unlock_irqrestore(&dev->lock, flags);
19389 +
19390 ++ kref_get(&dev->kref);
19391 + DBG(dev, "printer_open returned %x\n", ret);
19392 + return ret;
19393 + }
19394 +@@ -370,6 +379,7 @@ printer_close(struct inode *inode, struct file *fd)
19395 + dev->printer_status &= ~PRINTER_SELECTED;
19396 + spin_unlock_irqrestore(&dev->lock, flags);
19397 +
19398 ++ kref_put(&dev->kref, printer_dev_free);
19399 + DBG(dev, "printer_close\n");
19400 +
19401 + return 0;
19402 +@@ -1386,7 +1396,8 @@ static void gprinter_free(struct usb_function *f)
19403 + struct f_printer_opts *opts;
19404 +
19405 + opts = container_of(f->fi, struct f_printer_opts, func_inst);
19406 +- kfree(dev);
19407 ++
19408 ++ kref_put(&dev->kref, printer_dev_free);
19409 + mutex_lock(&opts->lock);
19410 + --opts->refcnt;
19411 + mutex_unlock(&opts->lock);
19412 +@@ -1455,6 +1466,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi)
19413 + return ERR_PTR(-ENOMEM);
19414 + }
19415 +
19416 ++ kref_init(&dev->kref);
19417 + ++opts->refcnt;
19418 + dev->minor = opts->minor;
19419 + dev->pnp_string = opts->pnp_string;
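[Editor's note: the f_printer change is a standard kref conversion — the char-device node can outlive gprinter_free(), so the structure must be freed by whichever of the last close() or the function teardown drops the final reference. The lifecycle in miniature, a kernel-style sketch using the patch's own symbols:]

    kref_init(&dev->kref);                  /* gprinter_alloc(): count = 1 */
    kref_get(&dev->kref);                   /* printer_open():   count++   */
    kref_put(&dev->kref, printer_dev_free); /* printer_close():  count--   */
    kref_put(&dev->kref, printer_dev_free); /* gprinter_free():  count--   */

    /* printer_dev_free() runs exactly once, when the count hits zero, so
     * an open file descriptor keeps the device structure alive even after
     * the USB function itself has been torn down.
     */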
19420 +diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
19421 +index c3cc6bd14e615..31ea76adcc0db 100644
19422 +--- a/drivers/usb/gadget/function/u_ether.c
19423 ++++ b/drivers/usb/gadget/function/u_ether.c
19424 +@@ -93,7 +93,7 @@ struct eth_dev {
19425 + static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
19426 + {
19427 + if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
19428 +- gadget->speed == USB_SPEED_SUPER))
19429 ++ gadget->speed >= USB_SPEED_SUPER))
19430 + return qmult * DEFAULT_QLEN;
19431 + else
19432 + return DEFAULT_QLEN;
19433 +diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
19434 +index 127ecc2b43176..2caccbb6e0140 100644
19435 +--- a/drivers/usb/gadget/function/u_serial.c
19436 ++++ b/drivers/usb/gadget/function/u_serial.c
19437 +@@ -1391,6 +1391,7 @@ void gserial_disconnect(struct gserial *gser)
19438 + if (port->port.tty)
19439 + tty_hangup(port->port.tty);
19440 + }
19441 ++ port->suspended = false;
19442 + spin_unlock_irqrestore(&port->port_lock, flags);
19443 +
19444 + /* disable endpoints, aborting down any active I/O */
19445 +diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
19446 +index feaec00a3c169..9cd4a70ccdd6d 100644
19447 +--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
19448 ++++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
19449 +@@ -26,6 +26,7 @@
19450 + #include <linux/seq_file.h>
19451 + #include <linux/slab.h>
19452 + #include <linux/timer.h>
19453 ++#include <linux/usb.h>
19454 + #include <linux/usb/ch9.h>
19455 + #include <linux/usb/gadget.h>
19456 + #include <linux/workqueue.h>
19457 +diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
19458 +index dd37e77dae001..2845ea328a064 100644
19459 +--- a/drivers/usb/host/ohci-hcd.c
19460 ++++ b/drivers/usb/host/ohci-hcd.c
19461 +@@ -673,20 +673,24 @@ retry:
19462 +
19463 + /* handle root hub init quirks ... */
19464 + val = roothub_a (ohci);
19465 +- val &= ~(RH_A_PSM | RH_A_OCPM);
19466 ++ /* Configure for per-port over-current protection by default */
19467 ++ val &= ~RH_A_NOCP;
19468 ++ val |= RH_A_OCPM;
19469 + if (ohci->flags & OHCI_QUIRK_SUPERIO) {
19470 +- /* NSC 87560 and maybe others */
19471 ++ /* NSC 87560 and maybe others.
19472 ++ * Ganged power switching, no over-current protection.
19473 ++ */
19474 + val |= RH_A_NOCP;
19475 +- val &= ~(RH_A_POTPGT | RH_A_NPS);
19476 +- ohci_writel (ohci, val, &ohci->regs->roothub.a);
19477 ++ val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM);
19478 + } else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
19479 + (ohci->flags & OHCI_QUIRK_HUB_POWER)) {
19480 + /* hub power always on; required for AMD-756 and some
19481 +- * Mac platforms. ganged overcurrent reporting, if any.
19482 ++ * Mac platforms.
19483 + */
19484 + val |= RH_A_NPS;
19485 +- ohci_writel (ohci, val, &ohci->regs->roothub.a);
19486 + }
19487 ++ ohci_writel(ohci, val, &ohci->regs->roothub.a);
19488 ++
19489 + ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
19490 + ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
19491 + &ohci->regs->roothub.b);
19492 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
19493 +index f4cedcaee14b3..e534f524b7f87 100644
19494 +--- a/drivers/usb/host/xhci.c
19495 ++++ b/drivers/usb/host/xhci.c
19496 +@@ -1915,8 +1915,6 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
19497 + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
19498 + trace_xhci_add_endpoint(ep_ctx);
19499 +
19500 +- xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
19501 +-
19502 + xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
19503 + (unsigned int) ep->desc.bEndpointAddress,
19504 + udev->slot_id,
19505 +@@ -2949,6 +2947,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
19506 + xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
19507 + virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
19508 + virt_dev->eps[i].new_ring = NULL;
19509 ++ xhci_debugfs_create_endpoint(xhci, virt_dev, i);
19510 + }
19511 + command_cleanup:
19512 + kfree(command->completion);
19513 +diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
19514 +index 74264e5906951..1fa6fcac82992 100644
19515 +--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
19516 ++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
19517 +@@ -1522,6 +1522,11 @@ static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
19518 + (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
19519 + }
19520 +
19521 ++static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
19522 ++{
19523 ++ return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
19524 ++}
19525 ++
19526 + static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
19527 + {
19528 + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
19529 +@@ -1535,8 +1540,8 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
19530 + return err;
19531 +
19532 + ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
19533 +- ndev->config.mtu = __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev),
19534 +- ndev->mtu);
19535 ++ ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
19536 ++ ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
19537 + return err;
19538 + }
19539 +
19540 +@@ -1653,6 +1658,9 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *
19541 + if (err)
19542 + goto err_mr;
19543 +
19544 ++ if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
19545 ++ return 0;
19546 ++
19547 + restore_channels_info(ndev);
19548 + err = setup_driver(ndev);
19549 + if (err)
19550 +diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
19551 +index d98843feddce0..5076d0155bc3f 100644
19552 +--- a/drivers/vfio/pci/vfio_pci_config.c
19553 ++++ b/drivers/vfio/pci/vfio_pci_config.c
19554 +@@ -406,7 +406,7 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
19555 + * PF SR-IOV capability, there's therefore no need to trigger
19556 + * faults based on the virtual value.
19557 + */
19558 +- return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
19559 ++ return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY);
19560 + }
19561 +
19562 + /*
19563 +@@ -520,8 +520,8 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
19564 +
19565 + count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
19566 +
19567 +- /* Mask in virtual memory enable for SR-IOV devices */
19568 +- if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) {
19569 ++ /* Mask in virtual memory enable */
19570 ++ if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) {
19571 + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
19572 + u32 tmp_val = le32_to_cpu(*val);
19573 +
19574 +@@ -589,9 +589,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
19575 + * shows it disabled (phys_mem/io, then the device has
19576 + * undergone some kind of backdoor reset and needs to be
19577 + * restored before we allow it to enable the bars.
19578 +- * SR-IOV devices will trigger this, but we catch them later
19579 ++ * SR-IOV devices will trigger this - for mem enable let's
19580 ++ * catch this now and for io enable it will be caught later
19581 + */
19582 +- if ((new_mem && virt_mem && !phys_mem) ||
19583 ++ if ((new_mem && virt_mem && !phys_mem &&
19584 ++ !pdev->no_command_memory) ||
19585 + (new_io && virt_io && !phys_io) ||
19586 + vfio_need_bar_restore(vdev))
19587 + vfio_bar_restore(vdev);
19588 +@@ -1734,12 +1736,14 @@ int vfio_config_init(struct vfio_pci_device *vdev)
19589 + vconfig[PCI_INTERRUPT_PIN]);
19590 +
19591 + vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
19592 +-
19593 ++ }
19594 ++ if (pdev->no_command_memory) {
19595 + /*
19596 +- * VFs do no implement the memory enable bit of the COMMAND
19597 +- * register therefore we'll not have it set in our initial
19598 +- * copy of config space after pci_enable_device(). For
19599 +- * consistency with PFs, set the virtual enable bit here.
19600 ++ * VFs and devices that set pdev->no_command_memory do not
19601 ++ * implement the memory enable bit of the COMMAND register
19602 ++ * therefore we'll not have it set in our initial copy of
19603 ++ * config space after pci_enable_device(). For consistency
19604 ++ * with PFs, set the virtual enable bit here.
19605 + */
19606 + *(__le16 *)&vconfig[PCI_COMMAND] |=
19607 + cpu_to_le16(PCI_COMMAND_MEMORY);
19608 +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
19609 +index 1d9fb25929459..869dce5f134dd 100644
19610 +--- a/drivers/vfio/pci/vfio_pci_intrs.c
19611 ++++ b/drivers/vfio/pci/vfio_pci_intrs.c
19612 +@@ -352,11 +352,13 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
19613 + vdev->ctx[vector].producer.token = trigger;
19614 + vdev->ctx[vector].producer.irq = irq;
19615 + ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
19616 +- if (unlikely(ret))
19617 ++ if (unlikely(ret)) {
19618 + dev_info(&pdev->dev,
19619 + "irq bypass producer (token %p) registration fails: %d\n",
19620 + vdev->ctx[vector].producer.token, ret);
19621 +
19622 ++ vdev->ctx[vector].producer.token = NULL;
19623 ++ }
19624 + vdev->ctx[vector].trigger = trigger;
19625 +
19626 + return 0;
19627 +diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
19628 +index 262ab0efd06c6..2151bc7f87ab1 100644
19629 +--- a/drivers/vfio/vfio.c
19630 ++++ b/drivers/vfio/vfio.c
19631 +@@ -1949,8 +1949,10 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
19632 + if (!group)
19633 + return -ENODEV;
19634 +
19635 +- if (group->dev_counter > 1)
19636 +- return -EINVAL;
19637 ++ if (group->dev_counter > 1) {
19638 ++ ret = -EINVAL;
19639 ++ goto err_pin_pages;
19640 ++ }
19641 +
19642 + ret = vfio_group_add_container_user(group);
19643 + if (ret)
19644 +@@ -2051,6 +2053,9 @@ int vfio_group_pin_pages(struct vfio_group *group,
19645 + if (!group || !user_iova_pfn || !phys_pfn || !npage)
19646 + return -EINVAL;
19647 +
19648 ++ if (group->dev_counter > 1)
19649 ++ return -EINVAL;
19650 ++
19651 + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
19652 + return -E2BIG;
19653 +
19654 +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
19655 +index 5fbf0c1f74338..9dde5ed852fd0 100644
19656 +--- a/drivers/vfio/vfio_iommu_type1.c
19657 ++++ b/drivers/vfio/vfio_iommu_type1.c
19658 +@@ -693,7 +693,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
19659 +
19660 + ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
19661 + if (ret) {
19662 +- vfio_unpin_page_external(dma, iova, do_accounting);
19663 ++ if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
19664 ++ vfio_lock_acct(dma, -1, true);
19665 + goto pin_unwind;
19666 + }
19667 +
19668 +@@ -2933,7 +2934,8 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
19669 + * size
19670 + */
19671 + bitmap_set(dma->bitmap, offset >> pgshift,
19672 +- *copied >> pgshift);
19673 ++ ((offset + *copied - 1) >> pgshift) -
19674 ++ (offset >> pgshift) + 1);
19675 + }
19676 + } else
19677 + *copied = copy_from_user(data, (void __user *)vaddr,
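[Editor's note: the vfio_iommu_type1 bitmap hunk fixes an undercount — a copy that is not page-aligned spans more pages than its byte length divided by the page size, so dirty bits were missed at the edges. A worked stand-alone example with 4 KiB pages (pgshift = 12):]

    #include <stdio.h>

    int main(void)
    {
        unsigned int pgshift = 12;
        unsigned long offset = 4095, copied = 8192;  /* unaligned copy */

        unsigned long old_bits = copied >> pgshift;                  /* 2 */
        unsigned long new_bits = ((offset + copied - 1) >> pgshift)
                               - (offset >> pgshift) + 1;            /* 3 */

        /* the copy touches three pages; the old expression marked two */
        printf("old=%lu new=%lu\n", old_bits, new_bits);
        return 0;
    }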
19678 +diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
19679 +index 0ce1815850080..8268ac43d54f7 100644
19680 +--- a/drivers/video/backlight/sky81452-backlight.c
19681 ++++ b/drivers/video/backlight/sky81452-backlight.c
19682 +@@ -217,6 +217,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
19683 + num_entry);
19684 + if (ret < 0) {
19685 + dev_err(dev, "led-sources node is invalid.\n");
19686 ++ of_node_put(np);
19687 + return ERR_PTR(-EINVAL);
19688 + }
19689 +
19690 +diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
19691 +index 3fe509cb9b874..13bd2bd5c043a 100644
19692 +--- a/drivers/video/fbdev/aty/radeon_base.c
19693 ++++ b/drivers/video/fbdev/aty/radeon_base.c
19694 +@@ -2307,7 +2307,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
19695 +
19696 + ret = radeon_kick_out_firmware_fb(pdev);
19697 + if (ret)
19698 +- return ret;
19699 ++ goto err_release_fb;
19700 +
19701 + /* request the mem regions */
19702 + ret = pci_request_region(pdev, 0, "radeonfb framebuffer");
19703 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
19704 +index 6815bfb7f5724..e33bf1c386926 100644
19705 +--- a/drivers/video/fbdev/core/fbmem.c
19706 ++++ b/drivers/video/fbdev/core/fbmem.c
19707 +@@ -1006,6 +1006,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
19708 + return 0;
19709 + }
19710 +
19711 ++ /* bitfill_aligned() assumes that it's at least 8x8 */
19712 ++ if (var->xres < 8 || var->yres < 8)
19713 ++ return -EINVAL;
19714 ++
19715 + ret = info->fbops->fb_check_var(var, info);
19716 +
19717 + if (ret)
19718 +diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
19719 +index dfe3eb769638b..fde27feae5d0c 100644
19720 +--- a/drivers/video/fbdev/sis/init.c
19721 ++++ b/drivers/video/fbdev/sis/init.c
19722 +@@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
19723 +
19724 + i = 0;
19725 +
19726 ++ if (SiS_Pr->ChipType == SIS_730)
19727 ++ queuedata = &FQBQData730[0];
19728 ++ else
19729 ++ queuedata = &FQBQData[0];
19730 ++
19731 + if(ModeNo > 0x13) {
19732 +
19733 + /* Get VCLK */
19734 +@@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
19735 + /* Get half colordepth */
19736 + colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)];
19737 +
19738 +- if(SiS_Pr->ChipType == SIS_730) {
19739 +- queuedata = &FQBQData730[0];
19740 +- } else {
19741 +- queuedata = &FQBQData[0];
19742 +- }
19743 +-
19744 + do {
19745 + templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth;
19746 +
19747 +diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
19748 +index 578d3541e3d6f..1e8a38a7967d8 100644
19749 +--- a/drivers/video/fbdev/vga16fb.c
19750 ++++ b/drivers/video/fbdev/vga16fb.c
19751 +@@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info)
19752 + }
19753 +
19754 + static void vga16fb_clock_chip(struct vga16fb_par *par,
19755 +- unsigned int pixclock,
19756 ++ unsigned int *pixclock,
19757 + const struct fb_info *info,
19758 + int mul, int div)
19759 + {
19760 +@@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
19761 + { 0 /* bad */, 0x00, 0x00}};
19762 + int err;
19763 +
19764 +- pixclock = (pixclock * mul) / div;
19765 ++ *pixclock = (*pixclock * mul) / div;
19766 + best = vgaclocks;
19767 +- err = pixclock - best->pixclock;
19768 ++ err = *pixclock - best->pixclock;
19769 + if (err < 0) err = -err;
19770 + for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) {
19771 + int tmp;
19772 +
19773 +- tmp = pixclock - ptr->pixclock;
19774 ++ tmp = *pixclock - ptr->pixclock;
19775 + if (tmp < 0) tmp = -tmp;
19776 + if (tmp < err) {
19777 + err = tmp;
19778 +@@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
19779 + }
19780 + par->misc |= best->misc;
19781 + par->clkdiv = best->seq_clock_mode;
19782 +- pixclock = (best->pixclock * div) / mul;
19783 ++ *pixclock = (best->pixclock * div) / mul;
19784 + }
19785 +
19786 + #define FAIL(X) return -EINVAL
19787 +@@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var,
19788 +
19789 + if (mode & MODE_8BPP)
19790 + /* pixel clock == vga clock / 2 */
19791 +- vga16fb_clock_chip(par, var->pixclock, info, 1, 2);
19792 ++ vga16fb_clock_chip(par, &var->pixclock, info, 1, 2);
19793 + else
19794 + /* pixel clock == vga clock */
19795 +- vga16fb_clock_chip(par, var->pixclock, info, 1, 1);
19796 ++ vga16fb_clock_chip(par, &var->pixclock, info, 1, 1);
19797 +
19798 + var->red.offset = var->green.offset = var->blue.offset =
19799 + var->transp.offset = 0;
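[Editor's note: the vga16fb change is a pass-by-value bug — vga16fb_clock_chip() was supposed to write the adjusted clock back into var->pixclock, but it modified only its local copy, so mode setting never saw the snapped value. The bug in miniature:]

    #include <stdio.h>

    static void clock_by_value(unsigned int pixclock)    { pixclock /= 2; }
    static void clock_by_pointer(unsigned int *pixclock) { *pixclock /= 2; }

    int main(void)
    {
        unsigned int a = 40000, b = 40000;

        clock_by_value(a);     /* a is still 40000: caller never sees it */
        clock_by_pointer(&b);  /* b is now 20000: the fix's approach     */
        printf("%u %u\n", a, b);
        return 0;
    }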
19800 +diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
19801 +index 1b0b11b55d2a0..46ee0a0998b6f 100644
19802 +--- a/drivers/virt/fsl_hypervisor.c
19803 ++++ b/drivers/virt/fsl_hypervisor.c
19804 +@@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
19805 +
19806 + unsigned int i;
19807 + long ret = 0;
19808 +- int num_pinned; /* return value from get_user_pages() */
19809 ++ int num_pinned = 0; /* return value from get_user_pages_fast() */
19810 + phys_addr_t remote_paddr; /* The next address in the remote buffer */
19811 + uint32_t count; /* The number of bytes left to copy */
19812 +
19813 +@@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
19814 + return -EINVAL;
19815 +
19816 + /*
19817 +- * The array of pages returned by get_user_pages() covers only
19818 ++ * The array of pages returned by get_user_pages_fast() covers only
19819 + * page-aligned memory. Since the user buffer is probably not
19820 + * page-aligned, we need to handle the discrepancy.
19821 + *
19822 +@@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
19823 +
19824 + /*
19825 + * 'pages' is an array of struct page pointers that's initialized by
19826 +- * get_user_pages().
19827 ++ * get_user_pages_fast().
19828 + */
19829 + pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
19830 + if (!pages) {
19831 +@@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
19832 + if (!sg_list_unaligned) {
19833 + pr_debug("fsl-hv: could not allocate S/G list\n");
19834 + ret = -ENOMEM;
19835 +- goto exit;
19836 ++ goto free_pages;
19837 + }
19838 + sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
19839 +
19840 +@@ -250,7 +250,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
19841 + num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
19842 +
19843 + if (num_pinned != num_pages) {
19844 +- /* get_user_pages() failed */
19845 + pr_debug("fsl-hv: could not lock source buffer\n");
19846 + ret = (num_pinned < 0) ? num_pinned : -EFAULT;
19847 + goto exit;
19848 +@@ -292,13 +291,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
19849 + virt_to_phys(sg_list), num_pages);
19850 +
19851 + exit:
19852 +- if (pages) {
19853 +- for (i = 0; i < num_pages; i++)
19854 +- if (pages[i])
19855 +- put_page(pages[i]);
19856 ++ if (pages && (num_pinned > 0)) {
19857 ++ for (i = 0; i < num_pinned; i++)
19858 ++ put_page(pages[i]);
19859 + }
19860 +
19861 + kfree(sg_list_unaligned);
19862 ++free_pages:
19863 + kfree(pages);
19864 +
19865 + if (!ret)
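[Editor's note: the fsl_hypervisor hunks tighten the unwind path — num_pinned is now zero-initialised, only the pages actually pinned by get_user_pages_fast() are released, and an S/G list allocation failure jumps past the put_page loop. A compilable miniature of that ordering; all names are stand-ins and -12 models -ENOMEM:]

    #include <stdio.h>
    #include <stdlib.h>

    static void put_page_stub(int i) { printf("put page %d\n", i); }

    static long do_copy(int num_pages, int fail_sg)
    {
        long ret = 0;
        int i, num_pinned = 0;             /* NOT left uninitialised */
        int *pages = calloc(num_pages, sizeof(*pages));

        if (!pages)
            return -12;
        if (fail_sg) {
            ret = -12;
            goto free_pages;               /* nothing pinned yet: skip puts */
        }
        num_pinned = num_pages - 1;        /* simulate a partial pin */
        /* ... work ... */

        for (i = 0; i < num_pinned; i++)   /* release only what we own */
            put_page_stub(i);
    free_pages:
        free(pages);
        return ret;
    }

    int main(void) { return do_copy(4, 0) ? 1 : 0; }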
19866 +diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
19867 +index 87eaf357ae01f..adf015aa4126f 100644
19868 +--- a/drivers/watchdog/sp5100_tco.h
19869 ++++ b/drivers/watchdog/sp5100_tco.h
19870 +@@ -70,7 +70,7 @@
19871 + #define EFCH_PM_DECODEEN_WDT_TMREN BIT(7)
19872 +
19873 +
19874 +-#define EFCH_PM_DECODEEN3 0x00
19875 ++#define EFCH_PM_DECODEEN3 0x03
19876 + #define EFCH_PM_DECODEEN_SECOND_RES GENMASK(1, 0)
19877 + #define EFCH_PM_WATCHDOG_DISABLE ((u8)GENMASK(3, 2))
19878 +
19879 +diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
19880 +index 6798addabd5a0..bcf01af3fa6a8 100644
19881 +--- a/drivers/watchdog/watchdog_dev.c
19882 ++++ b/drivers/watchdog/watchdog_dev.c
19883 +@@ -994,8 +994,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
19884 + wd_data->wdd = wdd;
19885 + wdd->wd_data = wd_data;
19886 +
19887 +- if (IS_ERR_OR_NULL(watchdog_kworker))
19888 ++ if (IS_ERR_OR_NULL(watchdog_kworker)) {
19889 ++ kfree(wd_data);
19890 + return -ENODEV;
19891 ++ }
19892 +
19893 + device_initialize(&wd_data->dev);
19894 + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
19895 +@@ -1021,7 +1023,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
19896 + pr_err("%s: a legacy watchdog module is probably present.\n",
19897 + wdd->info->identity);
19898 + old_wd_data = NULL;
19899 +- kfree(wd_data);
19900 ++ put_device(&wd_data->dev);
19901 + return err;
19902 + }
19903 + }
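[Editor's note: the watchdog_dev fix encodes a driver-core rule — after device_initialize(), the embedded struct device holds a reference, and the memory must be released with put_device() so the release callback runs; a bare kfree() is only legal before that point. The rule as a kernel-style sketch:]

    wd_data = kzalloc(sizeof(*wd_data), GFP_KERNEL);
    if (IS_ERR_OR_NULL(watchdog_kworker)) {
            kfree(wd_data);                  /* fine: no device yet */
            return -ENODEV;
    }

    device_initialize(&wd_data->dev);
    /* ... from here on, only put_device() may free wd_data ... */
    if (err) {
            put_device(&wd_data->dev);       /* runs ->release, which frees */
            return err;
    }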
19904 +diff --git a/fs/afs/cell.c b/fs/afs/cell.c
19905 +index 5b79cdceefa0f..bc7ed46aaca9f 100644
19906 +--- a/fs/afs/cell.c
19907 ++++ b/fs/afs/cell.c
19908 +@@ -19,7 +19,8 @@ static unsigned __read_mostly afs_cell_gc_delay = 10;
19909 + static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
19910 + static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
19911 +
19912 +-static void afs_manage_cell(struct work_struct *);
19913 ++static void afs_queue_cell_manager(struct afs_net *);
19914 ++static void afs_manage_cell_work(struct work_struct *);
19915 +
19916 + static void afs_dec_cells_outstanding(struct afs_net *net)
19917 + {
19918 +@@ -37,19 +38,21 @@ static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
19919 + atomic_inc(&net->cells_outstanding);
19920 + if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
19921 + afs_dec_cells_outstanding(net);
19922 ++ } else {
19923 ++ afs_queue_cell_manager(net);
19924 + }
19925 + }
19926 +
19927 + /*
19928 +- * Look up and get an activation reference on a cell record under RCU
19929 +- * conditions. The caller must hold the RCU read lock.
19930 ++ * Look up and get an activation reference on a cell record. The caller must
19931 ++ * hold net->cells_lock at least read-locked.
19932 + */
19933 +-struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
19934 +- const char *name, unsigned int namesz)
19935 ++static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
19936 ++ const char *name, unsigned int namesz)
19937 + {
19938 + struct afs_cell *cell = NULL;
19939 + struct rb_node *p;
19940 +- int n, seq = 0, ret = 0;
19941 ++ int n;
19942 +
19943 + _enter("%*.*s", namesz, namesz, name);
19944 +
19945 +@@ -58,61 +61,47 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
19946 + if (namesz > AFS_MAXCELLNAME)
19947 + return ERR_PTR(-ENAMETOOLONG);
19948 +
19949 +- do {
19950 +- /* Unfortunately, rbtree walking doesn't give reliable results
19951 +- * under just the RCU read lock, so we have to check for
19952 +- * changes.
19953 +- */
19954 +- if (cell)
19955 +- afs_put_cell(net, cell);
19956 +- cell = NULL;
19957 +- ret = -ENOENT;
19958 +-
19959 +- read_seqbegin_or_lock(&net->cells_lock, &seq);
19960 +-
19961 +- if (!name) {
19962 +- cell = rcu_dereference_raw(net->ws_cell);
19963 +- if (cell) {
19964 +- afs_get_cell(cell);
19965 +- ret = 0;
19966 +- break;
19967 +- }
19968 +- ret = -EDESTADDRREQ;
19969 +- continue;
19970 +- }
19971 ++ if (!name) {
19972 ++ cell = net->ws_cell;
19973 ++ if (!cell)
19974 ++ return ERR_PTR(-EDESTADDRREQ);
19975 ++ goto found;
19976 ++ }
19977 +
19978 +- p = rcu_dereference_raw(net->cells.rb_node);
19979 +- while (p) {
19980 +- cell = rb_entry(p, struct afs_cell, net_node);
19981 +-
19982 +- n = strncasecmp(cell->name, name,
19983 +- min_t(size_t, cell->name_len, namesz));
19984 +- if (n == 0)
19985 +- n = cell->name_len - namesz;
19986 +- if (n < 0) {
19987 +- p = rcu_dereference_raw(p->rb_left);
19988 +- } else if (n > 0) {
19989 +- p = rcu_dereference_raw(p->rb_right);
19990 +- } else {
19991 +- if (atomic_inc_not_zero(&cell->usage)) {
19992 +- ret = 0;
19993 +- break;
19994 +- }
19995 +- /* We want to repeat the search, this time with
19996 +- * the lock properly locked.
19997 +- */
19998 +- }
19999 +- cell = NULL;
20000 +- }
20001 ++ p = net->cells.rb_node;
20002 ++ while (p) {
20003 ++ cell = rb_entry(p, struct afs_cell, net_node);
20004 ++
20005 ++ n = strncasecmp(cell->name, name,
20006 ++ min_t(size_t, cell->name_len, namesz));
20007 ++ if (n == 0)
20008 ++ n = cell->name_len - namesz;
20009 ++ if (n < 0)
20010 ++ p = p->rb_left;
20011 ++ else if (n > 0)
20012 ++ p = p->rb_right;
20013 ++ else
20014 ++ goto found;
20015 ++ }
20016 +
20017 +- } while (need_seqretry(&net->cells_lock, seq));
20018 ++ return ERR_PTR(-ENOENT);
20019 +
20020 +- done_seqretry(&net->cells_lock, seq);
20021 ++found:
20022 ++ return afs_use_cell(cell);
20023 ++}
20024 +
20025 +- if (ret != 0 && cell)
20026 +- afs_put_cell(net, cell);
20027 ++/*
20028 ++ * Look up and get an activation reference on a cell record.
20029 ++ */
20030 ++struct afs_cell *afs_find_cell(struct afs_net *net,
20031 ++ const char *name, unsigned int namesz)
20032 ++{
20033 ++ struct afs_cell *cell;
20034 +
20035 +- return ret == 0 ? cell : ERR_PTR(ret);
20036 ++ down_read(&net->cells_lock);
20037 ++ cell = afs_find_cell_locked(net, name, namesz);
20038 ++ up_read(&net->cells_lock);
20039 ++ return cell;
20040 + }
20041 +
20042 + /*
20043 +@@ -166,8 +155,9 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
20044 + cell->name[i] = tolower(name[i]);
20045 + cell->name[i] = 0;
20046 +
20047 +- atomic_set(&cell->usage, 2);
20048 +- INIT_WORK(&cell->manager, afs_manage_cell);
20049 ++ atomic_set(&cell->ref, 1);
20050 ++ atomic_set(&cell->active, 0);
20051 ++ INIT_WORK(&cell->manager, afs_manage_cell_work);
20052 + cell->volumes = RB_ROOT;
20053 + INIT_HLIST_HEAD(&cell->proc_volumes);
20054 + seqlock_init(&cell->volume_lock);
20055 +@@ -206,6 +196,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
20056 + cell->dns_source = vllist->source;
20057 + cell->dns_status = vllist->status;
20058 + smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
20059 ++ atomic_inc(&net->cells_outstanding);
20060 +
20061 + _leave(" = %p", cell);
20062 + return cell;
20063 +@@ -245,9 +236,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
20064 + _enter("%s,%s", name, vllist);
20065 +
20066 + if (!excl) {
20067 +- rcu_read_lock();
20068 +- cell = afs_lookup_cell_rcu(net, name, namesz);
20069 +- rcu_read_unlock();
20070 ++ cell = afs_find_cell(net, name, namesz);
20071 + if (!IS_ERR(cell))
20072 + goto wait_for_cell;
20073 + }
20074 +@@ -268,7 +257,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
20075 + /* Find the insertion point and check to see if someone else added a
20076 + * cell whilst we were allocating.
20077 + */
20078 +- write_seqlock(&net->cells_lock);
20079 ++ down_write(&net->cells_lock);
20080 +
20081 + pp = &net->cells.rb_node;
20082 + parent = NULL;
20083 +@@ -290,23 +279,23 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
20084 +
20085 + cell = candidate;
20086 + candidate = NULL;
20087 ++ atomic_set(&cell->active, 2);
20088 + rb_link_node_rcu(&cell->net_node, parent, pp);
20089 + rb_insert_color(&cell->net_node, &net->cells);
20090 +- atomic_inc(&net->cells_outstanding);
20091 +- write_sequnlock(&net->cells_lock);
20092 ++ up_write(&net->cells_lock);
20093 +
20094 +- queue_work(afs_wq, &cell->manager);
20095 ++ afs_queue_cell(cell);
20096 +
20097 + wait_for_cell:
20098 + _debug("wait_for_cell");
20099 + wait_var_event(&cell->state,
20100 + ({
20101 + state = smp_load_acquire(&cell->state); /* vs error */
20102 +- state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED;
20103 ++ state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
20104 + }));
20105 +
20106 + /* Check the state obtained from the wait check. */
20107 +- if (state == AFS_CELL_FAILED) {
20108 ++ if (state == AFS_CELL_REMOVED) {
20109 + ret = cell->error;
20110 + goto error;
20111 + }
20112 +@@ -320,16 +309,17 @@ cell_already_exists:
20113 + if (excl) {
20114 + ret = -EEXIST;
20115 + } else {
20116 +- afs_get_cell(cursor);
20117 ++ afs_use_cell(cursor);
20118 + ret = 0;
20119 + }
20120 +- write_sequnlock(&net->cells_lock);
20121 +- kfree(candidate);
20122 ++ up_write(&net->cells_lock);
20123 ++ if (candidate)
20124 ++ afs_put_cell(candidate);
20125 + if (ret == 0)
20126 + goto wait_for_cell;
20127 + goto error_noput;
20128 + error:
20129 +- afs_put_cell(net, cell);
20130 ++ afs_unuse_cell(net, cell);
20131 + error_noput:
20132 + _leave(" = %d [error]", ret);
20133 + return ERR_PTR(ret);
20134 +@@ -374,15 +364,15 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
20135 + }
20136 +
20137 + if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
20138 +- afs_get_cell(new_root);
20139 ++ afs_use_cell(new_root);
20140 +
20141 + /* install the new cell */
20142 +- write_seqlock(&net->cells_lock);
20143 +- old_root = rcu_access_pointer(net->ws_cell);
20144 +- rcu_assign_pointer(net->ws_cell, new_root);
20145 +- write_sequnlock(&net->cells_lock);
20146 ++ down_write(&net->cells_lock);
20147 ++ old_root = net->ws_cell;
20148 ++ net->ws_cell = new_root;
20149 ++ up_write(&net->cells_lock);
20150 +
20151 +- afs_put_cell(net, old_root);
20152 ++ afs_unuse_cell(net, old_root);
20153 + _leave(" = 0");
20154 + return 0;
20155 + }
20156 +@@ -488,18 +478,21 @@ out_wake:
20157 + static void afs_cell_destroy(struct rcu_head *rcu)
20158 + {
20159 + struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
20160 ++ struct afs_net *net = cell->net;
20161 ++ int u;
20162 +
20163 + _enter("%p{%s}", cell, cell->name);
20164 +
20165 +- ASSERTCMP(atomic_read(&cell->usage), ==, 0);
20166 ++ u = atomic_read(&cell->ref);
20167 ++ ASSERTCMP(u, ==, 0);
20168 +
20169 +- afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
20170 +- afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
20171 +- afs_put_cell(cell->net, cell->alias_of);
20172 ++ afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
20173 ++ afs_unuse_cell(net, cell->alias_of);
20174 + key_put(cell->anonymous_key);
20175 + kfree(cell->name);
20176 + kfree(cell);
20177 +
20178 ++ afs_dec_cells_outstanding(net);
20179 + _leave(" [destroyed]");
20180 + }
20181 +
20182 +@@ -534,16 +527,50 @@ void afs_cells_timer(struct timer_list *timer)
20183 + */
20184 + struct afs_cell *afs_get_cell(struct afs_cell *cell)
20185 + {
20186 +- atomic_inc(&cell->usage);
20187 ++ if (atomic_read(&cell->ref) <= 0)
20188 ++ BUG();
20189 ++
20190 ++ atomic_inc(&cell->ref);
20191 + return cell;
20192 + }
20193 +
20194 + /*
20195 + * Drop a reference on a cell record.
20196 + */
20197 +-void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
20198 ++void afs_put_cell(struct afs_cell *cell)
20199 ++{
20200 ++ if (cell) {
20201 ++ unsigned int u, a;
20202 ++
20203 ++ u = atomic_dec_return(&cell->ref);
20204 ++ if (u == 0) {
20205 ++ a = atomic_read(&cell->active);
20206 ++ WARN(a != 0, "Cell active count %u > 0\n", a);
20207 ++ call_rcu(&cell->rcu, afs_cell_destroy);
20208 ++ }
20209 ++ }
20210 ++}
20211 ++
20212 ++/*
20213 ++ * Note a cell becoming more active.
20214 ++ */
20215 ++struct afs_cell *afs_use_cell(struct afs_cell *cell)
20216 ++{
20217 ++ if (atomic_read(&cell->ref) <= 0)
20218 ++ BUG();
20219 ++
20220 ++ atomic_inc(&cell->active);
20221 ++ return cell;
20222 ++}
20223 ++
20224 ++/*
20225 ++ * Record a cell becoming less active. When the active counter reaches 1, it
20226 ++ * is scheduled for destruction, but may get reactivated.
20227 ++ */
20228 ++void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell)
20229 + {
20230 + time64_t now, expire_delay;
20231 ++ int a;
20232 +
20233 + if (!cell)
20234 + return;
20235 +@@ -556,11 +583,21 @@ void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
20236 + if (cell->vl_servers->nr_servers)
20237 + expire_delay = afs_cell_gc_delay;
20238 +
20239 +- if (atomic_dec_return(&cell->usage) > 1)
20240 +- return;
20241 ++ a = atomic_dec_return(&cell->active);
20242 ++ WARN_ON(a == 0);
20243 ++ if (a == 1)
20244 ++ /* 'cell' may now be garbage collected. */
20245 ++ afs_set_cell_timer(net, expire_delay);
20246 ++}
20247 +
20248 +- /* 'cell' may now be garbage collected. */
20249 +- afs_set_cell_timer(net, expire_delay);
20250 ++/*
20251 ++ * Queue a cell for management, giving the workqueue a ref to hold.
20252 ++ */
20253 ++void afs_queue_cell(struct afs_cell *cell)
20254 ++{
20255 ++ afs_get_cell(cell);
20256 ++ if (!queue_work(afs_wq, &cell->manager))
20257 ++ afs_put_cell(cell);
20258 + }
20259 +
20260 + /*
20261 +@@ -660,12 +697,10 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
20262 + * Manage a cell record, initialising and destroying it, maintaining its DNS
20263 + * records.
20264 + */
20265 +-static void afs_manage_cell(struct work_struct *work)
20266 ++static void afs_manage_cell(struct afs_cell *cell)
20267 + {
20268 +- struct afs_cell *cell = container_of(work, struct afs_cell, manager);
20269 + struct afs_net *net = cell->net;
20270 +- bool deleted;
20271 +- int ret, usage;
20272 ++ int ret, active;
20273 +
20274 + _enter("%s", cell->name);
20275 +
20276 +@@ -674,14 +709,17 @@ again:
20277 + switch (cell->state) {
20278 + case AFS_CELL_INACTIVE:
20279 + case AFS_CELL_FAILED:
20280 +- write_seqlock(&net->cells_lock);
20281 +- usage = 1;
20282 +- deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
20283 +- if (deleted)
20284 ++ down_write(&net->cells_lock);
20285 ++ active = 1;
20286 ++ if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
20287 + rb_erase(&cell->net_node, &net->cells);
20288 +- write_sequnlock(&net->cells_lock);
20289 +- if (deleted)
20290 ++ smp_store_release(&cell->state, AFS_CELL_REMOVED);
20291 ++ }
20292 ++ up_write(&net->cells_lock);
20293 ++ if (cell->state == AFS_CELL_REMOVED) {
20294 ++ wake_up_var(&cell->state);
20295 + goto final_destruction;
20296 ++ }
20297 + if (cell->state == AFS_CELL_FAILED)
20298 + goto done;
20299 + smp_store_release(&cell->state, AFS_CELL_UNSET);
20300 +@@ -703,7 +741,7 @@ again:
20301 + goto again;
20302 +
20303 + case AFS_CELL_ACTIVE:
20304 +- if (atomic_read(&cell->usage) > 1) {
20305 ++ if (atomic_read(&cell->active) > 1) {
20306 + if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
20307 + ret = afs_update_cell(cell);
20308 + if (ret < 0)
20309 +@@ -716,13 +754,16 @@ again:
20310 + goto again;
20311 +
20312 + case AFS_CELL_DEACTIVATING:
20313 +- if (atomic_read(&cell->usage) > 1)
20314 ++ if (atomic_read(&cell->active) > 1)
20315 + goto reverse_deactivation;
20316 + afs_deactivate_cell(net, cell);
20317 + smp_store_release(&cell->state, AFS_CELL_INACTIVE);
20318 + wake_up_var(&cell->state);
20319 + goto again;
20320 +
20321 ++ case AFS_CELL_REMOVED:
20322 ++ goto done;
20323 ++
20324 + default:
20325 + break;
20326 + }
20327 +@@ -748,9 +789,18 @@ done:
20328 + return;
20329 +
20330 + final_destruction:
20331 +- call_rcu(&cell->rcu, afs_cell_destroy);
20332 +- afs_dec_cells_outstanding(net);
20333 +- _leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
20334 ++ /* The root volume is pinning the cell */
20335 ++ afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
20336 ++ cell->root_volume = NULL;
20337 ++ afs_put_cell(cell);
20338 ++}
20339 ++
20340 ++static void afs_manage_cell_work(struct work_struct *work)
20341 ++{
20342 ++ struct afs_cell *cell = container_of(work, struct afs_cell, manager);
20343 ++
20344 ++ afs_manage_cell(cell);
20345 ++ afs_put_cell(cell);
20346 + }
20347 +
20348 + /*
20349 +@@ -779,26 +829,25 @@ void afs_manage_cells(struct work_struct *work)
20350 + * lack of use and cells whose DNS results have expired and dispatch
20351 + * their managers.
20352 + */
20353 +- read_seqlock_excl(&net->cells_lock);
20354 ++ down_read(&net->cells_lock);
20355 +
20356 + for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
20357 + struct afs_cell *cell =
20358 + rb_entry(cursor, struct afs_cell, net_node);
20359 +- unsigned usage;
20360 ++ unsigned active;
20361 + bool sched_cell = false;
20362 +
20363 +- usage = atomic_read(&cell->usage);
20364 +- _debug("manage %s %u", cell->name, usage);
20365 ++ active = atomic_read(&cell->active);
20366 ++ _debug("manage %s %u %u", cell->name, atomic_read(&cell->ref), active);
20367 +
20368 +- ASSERTCMP(usage, >=, 1);
20369 ++ ASSERTCMP(active, >=, 1);
20370 +
20371 + if (purging) {
20372 + if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
20373 +- usage = atomic_dec_return(&cell->usage);
20374 +- ASSERTCMP(usage, ==, 1);
20375 ++ atomic_dec(&cell->active);
20376 + }
20377 +
20378 +- if (usage == 1) {
20379 ++ if (active == 1) {
20380 + struct afs_vlserver_list *vllist;
20381 + time64_t expire_at = cell->last_inactive;
20382 +
20383 +@@ -821,10 +870,10 @@ void afs_manage_cells(struct work_struct *work)
20384 + }
20385 +
20386 + if (sched_cell)
20387 +- queue_work(afs_wq, &cell->manager);
20388 ++ afs_queue_cell(cell);
20389 + }
20390 +
20391 +- read_sequnlock_excl(&net->cells_lock);
20392 ++ up_read(&net->cells_lock);
20393 +
20394 + /* Update the timer on the way out. We have to pass an increment on
20395 + * cells_outstanding in the namespace that we are in to the timer or
20396 +@@ -854,11 +903,11 @@ void afs_cell_purge(struct afs_net *net)
20397 +
20398 + _enter("");
20399 +
20400 +- write_seqlock(&net->cells_lock);
20401 +- ws = rcu_access_pointer(net->ws_cell);
20402 +- RCU_INIT_POINTER(net->ws_cell, NULL);
20403 +- write_sequnlock(&net->cells_lock);
20404 +- afs_put_cell(net, ws);
20405 ++ down_write(&net->cells_lock);
20406 ++ ws = net->ws_cell;
20407 ++ net->ws_cell = NULL;
20408 ++ up_write(&net->cells_lock);
20409 ++ afs_unuse_cell(net, ws);
20410 +
20411 + _debug("del timer");
20412 + if (del_timer_sync(&net->cells_timer))
20413 +diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
20414 +index 7b784af604fd9..da32797dd4257 100644
20415 +--- a/fs/afs/dynroot.c
20416 ++++ b/fs/afs/dynroot.c
20417 +@@ -123,9 +123,9 @@ static int afs_probe_cell_name(struct dentry *dentry)
20418 + len--;
20419 + }
20420 +
20421 +- cell = afs_lookup_cell_rcu(net, name, len);
20422 ++ cell = afs_find_cell(net, name, len);
20423 + if (!IS_ERR(cell)) {
20424 +- afs_put_cell(net, cell);
20425 ++ afs_unuse_cell(net, cell);
20426 + return 0;
20427 + }
20428 +
20429 +@@ -179,7 +179,6 @@ static struct dentry *afs_lookup_atcell(struct dentry *dentry)
20430 + struct afs_cell *cell;
20431 + struct afs_net *net = afs_d2net(dentry);
20432 + struct dentry *ret;
20433 +- unsigned int seq = 0;
20434 + char *name;
20435 + int len;
20436 +
20437 +@@ -191,17 +190,13 @@ static struct dentry *afs_lookup_atcell(struct dentry *dentry)
20438 + if (!name)
20439 + goto out_p;
20440 +
20441 +- rcu_read_lock();
20442 +- do {
20443 +- read_seqbegin_or_lock(&net->cells_lock, &seq);
20444 +- cell = rcu_dereference_raw(net->ws_cell);
20445 +- if (cell) {
20446 +- len = cell->name_len;
20447 +- memcpy(name, cell->name, len + 1);
20448 +- }
20449 +- } while (need_seqretry(&net->cells_lock, seq));
20450 +- done_seqretry(&net->cells_lock, seq);
20451 +- rcu_read_unlock();
20452 ++ down_read(&net->cells_lock);
20453 ++ cell = net->ws_cell;
20454 ++ if (cell) {
20455 ++ len = cell->name_len;
20456 ++ memcpy(name, cell->name, len + 1);
20457 ++ }
20458 ++ up_read(&net->cells_lock);
20459 +
20460 + ret = ERR_PTR(-ENOENT);
20461 + if (!cell)
20462 +diff --git a/fs/afs/internal.h b/fs/afs/internal.h
20463 +index e5f0446f27e5f..06e617ee4cd1e 100644
20464 +--- a/fs/afs/internal.h
20465 ++++ b/fs/afs/internal.h
20466 +@@ -263,11 +263,11 @@ struct afs_net {
20467 +
20468 + /* Cell database */
20469 + struct rb_root cells;
20470 +- struct afs_cell __rcu *ws_cell;
20471 ++ struct afs_cell *ws_cell;
20472 + struct work_struct cells_manager;
20473 + struct timer_list cells_timer;
20474 + atomic_t cells_outstanding;
20475 +- seqlock_t cells_lock;
20476 ++ struct rw_semaphore cells_lock;
20477 + struct mutex cells_alias_lock;
20478 +
20479 + struct mutex proc_cells_lock;
20480 +@@ -326,6 +326,7 @@ enum afs_cell_state {
20481 + AFS_CELL_DEACTIVATING,
20482 + AFS_CELL_INACTIVE,
20483 + AFS_CELL_FAILED,
20484 ++ AFS_CELL_REMOVED,
20485 + };
20486 +
20487 + /*
20488 +@@ -363,7 +364,8 @@ struct afs_cell {
20489 + #endif
20490 + time64_t dns_expiry; /* Time AFSDB/SRV record expires */
20491 + time64_t last_inactive; /* Time of last drop of usage count */
20492 +- atomic_t usage;
20493 ++ atomic_t ref; /* Struct refcount */
20494 ++ atomic_t active; /* Active usage counter */
20495 + unsigned long flags;
20496 + #define AFS_CELL_FL_NO_GC 0 /* The cell was added manually, don't auto-gc */
20497 + #define AFS_CELL_FL_DO_LOOKUP 1 /* DNS lookup requested */
20498 +@@ -917,11 +919,14 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
20499 + * cell.c
20500 + */
20501 + extern int afs_cell_init(struct afs_net *, const char *);
20502 +-extern struct afs_cell *afs_lookup_cell_rcu(struct afs_net *, const char *, unsigned);
20503 ++extern struct afs_cell *afs_find_cell(struct afs_net *, const char *, unsigned);
20504 + extern struct afs_cell *afs_lookup_cell(struct afs_net *, const char *, unsigned,
20505 + const char *, bool);
20506 ++extern struct afs_cell *afs_use_cell(struct afs_cell *);
20507 ++extern void afs_unuse_cell(struct afs_net *, struct afs_cell *);
20508 + extern struct afs_cell *afs_get_cell(struct afs_cell *);
20509 +-extern void afs_put_cell(struct afs_net *, struct afs_cell *);
20510 ++extern void afs_put_cell(struct afs_cell *);
20511 ++extern void afs_queue_cell(struct afs_cell *);
20512 + extern void afs_manage_cells(struct work_struct *);
20513 + extern void afs_cells_timer(struct timer_list *);
20514 + extern void __net_exit afs_cell_purge(struct afs_net *);
20515 +diff --git a/fs/afs/main.c b/fs/afs/main.c
20516 +index 31b472f7c734c..accdd8970e7c0 100644
20517 +--- a/fs/afs/main.c
20518 ++++ b/fs/afs/main.c
20519 +@@ -78,7 +78,7 @@ static int __net_init afs_net_init(struct net *net_ns)
20520 + mutex_init(&net->socket_mutex);
20521 +
20522 + net->cells = RB_ROOT;
20523 +- seqlock_init(&net->cells_lock);
20524 ++ init_rwsem(&net->cells_lock);
20525 + INIT_WORK(&net->cells_manager, afs_manage_cells);
20526 + timer_setup(&net->cells_timer, afs_cells_timer, 0);
20527 +
20528 +diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
20529 +index 79bc5f1338edf..c69a0282960cc 100644
20530 +--- a/fs/afs/mntpt.c
20531 ++++ b/fs/afs/mntpt.c
20532 +@@ -88,7 +88,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
20533 + ctx->force = true;
20534 + }
20535 + if (ctx->cell) {
20536 +- afs_put_cell(ctx->net, ctx->cell);
20537 ++ afs_unuse_cell(ctx->net, ctx->cell);
20538 + ctx->cell = NULL;
20539 + }
20540 + if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
20541 +@@ -124,7 +124,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
20542 + char *buf;
20543 +
20544 + if (src_as->cell)
20545 +- ctx->cell = afs_get_cell(src_as->cell);
20546 ++ ctx->cell = afs_use_cell(src_as->cell);
20547 +
20548 + if (size < 2 || size > PAGE_SIZE - 1)
20549 + return -EINVAL;
20550 +diff --git a/fs/afs/proc.c b/fs/afs/proc.c
20551 +index e8babb62ed442..76fbe0560cfb7 100644
20552 +--- a/fs/afs/proc.c
20553 ++++ b/fs/afs/proc.c
20554 +@@ -38,7 +38,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
20555 +
20556 + if (v == SEQ_START_TOKEN) {
20557 + /* display header on line 1 */
20558 +- seq_puts(m, "USE TTL SV ST NAME\n");
20559 ++ seq_puts(m, "USE ACT TTL SV ST NAME\n");
20560 + return 0;
20561 + }
20562 +
20563 +@@ -46,10 +46,11 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
20564 + vllist = rcu_dereference(cell->vl_servers);
20565 +
20566 + /* display one cell per line on subsequent lines */
20567 +- seq_printf(m, "%3u %6lld %2u %2u %s\n",
20568 +- atomic_read(&cell->usage),
20569 ++ seq_printf(m, "%3u %3u %6lld %2u %2u %s\n",
20570 ++ atomic_read(&cell->ref),
20571 ++ atomic_read(&cell->active),
20572 + cell->dns_expiry - ktime_get_real_seconds(),
20573 +- vllist->nr_servers,
20574 ++ vllist ? vllist->nr_servers : 0,
20575 + cell->state,
20576 + cell->name);
20577 + return 0;
20578 +@@ -128,7 +129,7 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
20579 + }
20580 +
20581 + if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
20582 +- afs_put_cell(net, cell);
20583 ++ afs_unuse_cell(net, cell);
20584 + } else {
20585 + goto inval;
20586 + }
20587 +@@ -154,13 +155,11 @@ static int afs_proc_rootcell_show(struct seq_file *m, void *v)
20588 + struct afs_net *net;
20589 +
20590 + net = afs_seq2net_single(m);
20591 +- if (rcu_access_pointer(net->ws_cell)) {
20592 +- rcu_read_lock();
20593 +- cell = rcu_dereference(net->ws_cell);
20594 +- if (cell)
20595 +- seq_printf(m, "%s\n", cell->name);
20596 +- rcu_read_unlock();
20597 +- }
20598 ++ down_read(&net->cells_lock);
20599 ++ cell = net->ws_cell;
20600 ++ if (cell)
20601 ++ seq_printf(m, "%s\n", cell->name);
20602 ++ up_read(&net->cells_lock);
20603 + return 0;
20604 + }
20605 +
20606 +diff --git a/fs/afs/super.c b/fs/afs/super.c
20607 +index b552357b1d137..e72c223f831d2 100644
20608 +--- a/fs/afs/super.c
20609 ++++ b/fs/afs/super.c
20610 +@@ -294,7 +294,7 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param)
20611 + cellnamesz, cellnamesz, cellname ?: "");
20612 + return PTR_ERR(cell);
20613 + }
20614 +- afs_put_cell(ctx->net, ctx->cell);
20615 ++ afs_unuse_cell(ctx->net, ctx->cell);
20616 + ctx->cell = cell;
20617 + }
20618 +
20619 +@@ -389,8 +389,8 @@ static int afs_validate_fc(struct fs_context *fc)
20620 + _debug("switch to alias");
20621 + key_put(ctx->key);
20622 + ctx->key = NULL;
20623 +- cell = afs_get_cell(ctx->cell->alias_of);
20624 +- afs_put_cell(ctx->net, ctx->cell);
20625 ++ cell = afs_use_cell(ctx->cell->alias_of);
20626 ++ afs_unuse_cell(ctx->net, ctx->cell);
20627 + ctx->cell = cell;
20628 + goto reget_key;
20629 + }
20630 +@@ -508,7 +508,7 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
20631 + if (ctx->dyn_root) {
20632 + as->dyn_root = true;
20633 + } else {
20634 +- as->cell = afs_get_cell(ctx->cell);
20635 ++ as->cell = afs_use_cell(ctx->cell);
20636 + as->volume = afs_get_volume(ctx->volume,
20637 + afs_volume_trace_get_alloc_sbi);
20638 + }
20639 +@@ -521,7 +521,7 @@ static void afs_destroy_sbi(struct afs_super_info *as)
20640 + if (as) {
20641 + struct afs_net *net = afs_net(as->net_ns);
20642 + afs_put_volume(net, as->volume, afs_volume_trace_put_destroy_sbi);
20643 +- afs_put_cell(net, as->cell);
20644 ++ afs_unuse_cell(net, as->cell);
20645 + put_net(as->net_ns);
20646 + kfree(as);
20647 + }
20648 +@@ -607,7 +607,7 @@ static void afs_free_fc(struct fs_context *fc)
20649 +
20650 + afs_destroy_sbi(fc->s_fs_info);
20651 + afs_put_volume(ctx->net, ctx->volume, afs_volume_trace_put_free_fc);
20652 +- afs_put_cell(ctx->net, ctx->cell);
20653 ++ afs_unuse_cell(ctx->net, ctx->cell);
20654 + key_put(ctx->key);
20655 + kfree(ctx);
20656 + }
20657 +@@ -634,9 +634,7 @@ static int afs_init_fs_context(struct fs_context *fc)
20658 + ctx->net = afs_net(fc->net_ns);
20659 +
20660 + /* Default to the workstation cell. */
20661 +- rcu_read_lock();
20662 +- cell = afs_lookup_cell_rcu(ctx->net, NULL, 0);
20663 +- rcu_read_unlock();
20664 ++ cell = afs_find_cell(ctx->net, NULL, 0);
20665 + if (IS_ERR(cell))
20666 + cell = NULL;
20667 + ctx->cell = cell;
20668 +diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
20669 +index 5082ef04e99c5..ddb4cb67d0fd9 100644
20670 +--- a/fs/afs/vl_alias.c
20671 ++++ b/fs/afs/vl_alias.c
20672 +@@ -177,7 +177,7 @@ static int afs_compare_cell_roots(struct afs_cell *cell)
20673 +
20674 + is_alias:
20675 + rcu_read_unlock();
20676 +- cell->alias_of = afs_get_cell(p);
20677 ++ cell->alias_of = afs_use_cell(p);
20678 + return 1;
20679 + }
20680 +
20681 +@@ -247,18 +247,18 @@ static int afs_query_for_alias(struct afs_cell *cell, struct key *key)
20682 + continue;
20683 + if (p->root_volume)
20684 + continue; /* Ignore cells that have a root.cell volume. */
20685 +- afs_get_cell(p);
20686 ++ afs_use_cell(p);
20687 + mutex_unlock(&cell->net->proc_cells_lock);
20688 +
20689 + if (afs_query_for_alias_one(cell, key, p) != 0)
20690 + goto is_alias;
20691 +
20692 + if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) {
20693 +- afs_put_cell(cell->net, p);
20694 ++ afs_unuse_cell(cell->net, p);
20695 + return -ERESTARTSYS;
20696 + }
20697 +
20698 +- afs_put_cell(cell->net, p);
20699 ++ afs_unuse_cell(cell->net, p);
20700 + }
20701 +
20702 + mutex_unlock(&cell->net->proc_cells_lock);
20703 +diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
20704 +index c0458c903b310..da3b072d4d638 100644
20705 +--- a/fs/afs/vl_rotate.c
20706 ++++ b/fs/afs/vl_rotate.c
20707 +@@ -45,7 +45,7 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
20708 + cell->dns_expiry <= ktime_get_real_seconds()) {
20709 + dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
20710 + set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
20711 +- queue_work(afs_wq, &cell->manager);
20712 ++ afs_queue_cell(cell);
20713 +
20714 + if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
20715 + if (wait_var_event_interruptible(
20716 +diff --git a/fs/afs/volume.c b/fs/afs/volume.c
20717 +index 9bc0509e3634c..a838030e95634 100644
20718 +--- a/fs/afs/volume.c
20719 ++++ b/fs/afs/volume.c
20720 +@@ -106,7 +106,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
20721 + return volume;
20722 +
20723 + error_1:
20724 +- afs_put_cell(params->net, volume->cell);
20725 ++ afs_put_cell(volume->cell);
20726 + kfree(volume);
20727 + error_0:
20728 + return ERR_PTR(ret);
20729 +@@ -228,7 +228,7 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
20730 +
20731 + afs_remove_volume_from_cell(volume);
20732 + afs_put_serverlist(net, rcu_access_pointer(volume->servers));
20733 +- afs_put_cell(net, volume->cell);
20734 ++ afs_put_cell(volume->cell);
20735 + trace_afs_volume(volume->vid, atomic_read(&volume->usage),
20736 + afs_volume_trace_free);
20737 + kfree_rcu(volume, rcu);
20738 +diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
20739 +index 219a09a2b7340..250b8cbaaf97a 100644
20740 +--- a/fs/btrfs/extent-io-tree.h
20741 ++++ b/fs/btrfs/extent-io-tree.h
20742 +@@ -48,6 +48,7 @@ enum {
20743 + IO_TREE_INODE_FILE_EXTENT,
20744 + IO_TREE_LOG_CSUM_RANGE,
20745 + IO_TREE_SELFTEST,
20746 ++ IO_TREE_DEVICE_ALLOC_STATE,
20747 + };
20748 +
20749 + struct extent_io_tree {
20750 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
20751 +index 1997a7d67f22f..e61c298ce2b42 100644
20752 +--- a/fs/btrfs/volumes.c
20753 ++++ b/fs/btrfs/volumes.c
20754 +@@ -406,7 +406,7 @@ void __exit btrfs_cleanup_fs_uuids(void)
20755 + * Returned struct is not linked onto any lists and must be destroyed using
20756 + * btrfs_free_device.
20757 + */
20758 +-static struct btrfs_device *__alloc_device(void)
20759 ++static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
20760 + {
20761 + struct btrfs_device *dev;
20762 +
20763 +@@ -433,7 +433,8 @@ static struct btrfs_device *__alloc_device(void)
20764 + btrfs_device_data_ordered_init(dev);
20765 + INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
20766 + INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
20767 +- extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL);
20768 ++ extent_io_tree_init(fs_info, &dev->alloc_state,
20769 ++ IO_TREE_DEVICE_ALLOC_STATE, NULL);
20770 +
20771 + return dev;
20772 + }
20773 +@@ -6529,7 +6530,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
20774 + if (WARN_ON(!devid && !fs_info))
20775 + return ERR_PTR(-EINVAL);
20776 +
20777 +- dev = __alloc_device();
20778 ++ dev = __alloc_device(fs_info);
20779 + if (IS_ERR(dev))
20780 + return dev;
20781 +
20782 +diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
20783 +index 689162e2e1755..3150c19cdc2fb 100644
20784 +--- a/fs/cifs/asn1.c
20785 ++++ b/fs/cifs/asn1.c
20786 +@@ -530,8 +530,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
20787 + return 0;
20788 + } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
20789 + || (tag != ASN1_EOC)) {
20790 +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
20791 +- cls, con, tag, end, *end);
20792 ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
20793 ++ cls, con, tag, end);
20794 + return 0;
20795 + }
20796 +
20797 +@@ -541,8 +541,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
20798 + return 0;
20799 + } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
20800 + || (tag != ASN1_SEQ)) {
20801 +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
20802 +- cls, con, tag, end, *end);
20803 ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 1\n",
20804 ++ cls, con, tag, end);
20805 + return 0;
20806 + }
20807 +
20808 +@@ -552,8 +552,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
20809 + return 0;
20810 + } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
20811 + || (tag != ASN1_EOC)) {
20812 +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
20813 +- cls, con, tag, end, *end);
20814 ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
20815 ++ cls, con, tag, end);
20816 + return 0;
20817 + }
20818 +
20819 +@@ -564,8 +564,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
20820 + return 0;
20821 + } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
20822 + || (tag != ASN1_SEQ)) {
20823 +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
20824 +- cls, con, tag, end, *end);
20825 ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d sequence_end = %p exit 1\n",
20826 ++ cls, con, tag, sequence_end);
20827 + return 0;
20828 + }
20829 +
20830 +diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
20831 +index fcff14ef1c701..23b21e9436528 100644
20832 +--- a/fs/cifs/cifsacl.c
20833 ++++ b/fs/cifs/cifsacl.c
20834 +@@ -338,7 +338,7 @@ invalidate_key:
20835 + goto out_key_put;
20836 + }
20837 +
20838 +-static int
20839 ++int
20840 + sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
20841 + struct cifs_fattr *fattr, uint sidtype)
20842 + {
20843 +@@ -359,7 +359,8 @@ sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
20844 + return -EIO;
20845 + }
20846 +
20847 +- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) {
20848 ++ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) ||
20849 ++ (cifs_sb_master_tcon(cifs_sb)->posix_extensions)) {
20850 + uint32_t unix_id;
20851 + bool is_group;
20852 +
20853 +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
20854 +index bb68cbf810740..24c6f36177bac 100644
20855 +--- a/fs/cifs/cifsproto.h
20856 ++++ b/fs/cifs/cifsproto.h
20857 +@@ -209,6 +209,8 @@ extern int cifs_set_file_info(struct inode *inode, struct iattr *attrs,
20858 + extern int cifs_rename_pending_delete(const char *full_path,
20859 + struct dentry *dentry,
20860 + const unsigned int xid);
20861 ++extern int sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
20862 ++ struct cifs_fattr *fattr, uint sidtype);
20863 + extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
20864 + struct cifs_fattr *fattr, struct inode *inode,
20865 + bool get_mode_from_special_sid,
20866 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
20867 +index a5731dd6e6566..9817a31a39db6 100644
20868 +--- a/fs/cifs/connect.c
20869 ++++ b/fs/cifs/connect.c
20870 +@@ -3595,7 +3595,10 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
20871 + */
20872 + tcon->retry = volume_info->retry;
20873 + tcon->nocase = volume_info->nocase;
20874 +- tcon->nohandlecache = volume_info->nohandlecache;
20875 ++ if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
20876 ++ tcon->nohandlecache = volume_info->nohandlecache;
20877 ++ else
20878 ++ tcon->nohandlecache = 1;
20879 + tcon->nodelete = volume_info->nodelete;
20880 + tcon->local_lease = volume_info->local_lease;
20881 + INIT_LIST_HEAD(&tcon->pending_opens);
20882 +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
20883 +index 6df0922e7e304..709fb53e9fee1 100644
20884 +--- a/fs/cifs/readdir.c
20885 ++++ b/fs/cifs/readdir.c
20886 +@@ -267,9 +267,8 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
20887 + if (reparse_file_needs_reval(fattr))
20888 + fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
20889 +
20890 +- /* TODO map SIDs */
20891 +- fattr->cf_uid = cifs_sb->mnt_uid;
20892 +- fattr->cf_gid = cifs_sb->mnt_gid;
20893 ++ sid_to_id(cifs_sb, &parsed.owner, fattr, SIDOWNER);
20894 ++ sid_to_id(cifs_sb, &parsed.group, fattr, SIDGROUP);
20895 + }
20896 +
20897 + static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info)
20898 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
20899 +index d44df8f95bcd4..09e1cd320ee56 100644
20900 +--- a/fs/cifs/smb2ops.c
20901 ++++ b/fs/cifs/smb2ops.c
20902 +@@ -3072,7 +3072,12 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
20903 + oparms.tcon = tcon;
20904 + oparms.desired_access = READ_CONTROL;
20905 + oparms.disposition = FILE_OPEN;
20906 +- oparms.create_options = cifs_create_options(cifs_sb, 0);
20907 ++ /*
20908 ++ * When querying an ACL, even if the file is a symlink we want to open
20909 ++ * the source not the target, and so the protocol requires that the
20910 ++ * client specify this flag when opening a reparse point
20911 ++ */
20912 ++ oparms.create_options = cifs_create_options(cifs_sb, 0) | OPEN_REPARSE_POINT;
20913 + oparms.fid = &fid;
20914 + oparms.reconnect = false;
20915 +
20916 +@@ -3924,7 +3929,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
20917 + if (rc) {
20918 + cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
20919 + enc ? "en" : "de");
20920 +- return 0;
20921 ++ return rc;
20922 + }
20923 +
20924 + rc = smb3_crypto_aead_allocate(server);
20925 +@@ -4103,7 +4108,8 @@ smb3_is_transform_hdr(void *buf)
20926 + static int
20927 + decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
20928 + unsigned int buf_data_size, struct page **pages,
20929 +- unsigned int npages, unsigned int page_data_size)
20930 ++ unsigned int npages, unsigned int page_data_size,
20931 ++ bool is_offloaded)
20932 + {
20933 + struct kvec iov[2];
20934 + struct smb_rqst rqst = {NULL};
20935 +@@ -4129,7 +4135,8 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
20936 +
20937 + memmove(buf, iov[1].iov_base, buf_data_size);
20938 +
20939 +- server->total_read = buf_data_size + page_data_size;
20940 ++ if (!is_offloaded)
20941 ++ server->total_read = buf_data_size + page_data_size;
20942 +
20943 + return rc;
20944 + }
20945 +@@ -4342,7 +4349,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
20946 + struct mid_q_entry *mid;
20947 +
20948 + rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
20949 +- dw->ppages, dw->npages, dw->len);
20950 ++ dw->ppages, dw->npages, dw->len, true);
20951 + if (rc) {
20952 + cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
20953 + goto free_pages;
20954 +@@ -4448,7 +4455,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
20955 +
20956 + non_offloaded_decrypt:
20957 + rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
20958 +- pages, npages, len);
20959 ++ pages, npages, len, false);
20960 + if (rc)
20961 + goto free_pages;
20962 +
20963 +@@ -4504,7 +4511,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
20964 + server->total_read += length;
20965 +
20966 + buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
20967 +- length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
20968 ++ length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
20969 + if (length)
20970 + return length;
20971 +
20972 +diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
20973 +index 2d73fd39ad96f..b92f345231780 100644
20974 +--- a/fs/crypto/policy.c
20975 ++++ b/fs/crypto/policy.c
20976 +@@ -192,10 +192,15 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy,
20977 + 32, 32))
20978 + return false;
20979 +
20980 ++ /*
20981 ++ * IV_INO_LBLK_32 hashes the inode number, so in principle it can
20982 ++ * support any ino_bits. However, currently the inode number is gotten
20983 ++ * from inode::i_ino which is 'unsigned long'. So for now the
20984 ++ * implementation limit is 32 bits.
20985 ++ */
20986 + if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
20987 +- /* This uses hashed inode numbers, so ino_bits doesn't matter. */
20988 + !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32",
20989 +- INT_MAX, 32))
20990 ++ 32, 32))
20991 + return false;
20992 +
20993 + if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) {
20994 +diff --git a/fs/d_path.c b/fs/d_path.c
20995 +index 0f1fc1743302f..a69e2cd36e6e3 100644
20996 +--- a/fs/d_path.c
20997 ++++ b/fs/d_path.c
20998 +@@ -102,6 +102,8 @@ restart:
20999 +
21000 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
21001 + struct mount *parent = READ_ONCE(mnt->mnt_parent);
21002 ++ struct mnt_namespace *mnt_ns;
21003 ++
21004 + /* Escaped? */
21005 + if (dentry != vfsmnt->mnt_root) {
21006 + bptr = *buffer;
21007 +@@ -116,7 +118,9 @@ restart:
21008 + vfsmnt = &mnt->mnt;
21009 + continue;
21010 + }
21011 +- if (is_mounted(vfsmnt) && !is_anon_ns(mnt->mnt_ns))
21012 ++ mnt_ns = READ_ONCE(mnt->mnt_ns);
21013 ++ /* open-coded is_mounted() to use local mnt_ns */
21014 ++ if (!IS_ERR_OR_NULL(mnt_ns) && !is_anon_ns(mnt_ns))
21015 + error = 1; // absolute root
21016 + else
21017 + error = 2; // detached or not attached yet
21018 +diff --git a/fs/dlm/config.c b/fs/dlm/config.c
21019 +index 47f0b98b707f8..f33a7e4ae917b 100644
21020 +--- a/fs/dlm/config.c
21021 ++++ b/fs/dlm/config.c
21022 +@@ -221,6 +221,7 @@ struct dlm_space {
21023 + struct list_head members;
21024 + struct mutex members_lock;
21025 + int members_count;
21026 ++ struct dlm_nodes *nds;
21027 + };
21028 +
21029 + struct dlm_comms {
21030 +@@ -430,6 +431,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
21031 + INIT_LIST_HEAD(&sp->members);
21032 + mutex_init(&sp->members_lock);
21033 + sp->members_count = 0;
21034 ++ sp->nds = nds;
21035 + return &sp->group;
21036 +
21037 + fail:
21038 +@@ -451,6 +453,7 @@ static void drop_space(struct config_group *g, struct config_item *i)
21039 + static void release_space(struct config_item *i)
21040 + {
21041 + struct dlm_space *sp = config_item_to_space(i);
21042 ++ kfree(sp->nds);
21043 + kfree(sp);
21044 + }
21045 +
21046 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
21047 +index 523e00d7b3924..69187b6205b2b 100644
21048 +--- a/fs/ext4/ext4.h
21049 ++++ b/fs/ext4/ext4.h
21050 +@@ -492,7 +492,7 @@ struct flex_groups {
21051 +
21052 + /* Flags which are mutually exclusive to DAX */
21053 + #define EXT4_DAX_MUT_EXCL (EXT4_VERITY_FL | EXT4_ENCRYPT_FL |\
21054 +- EXT4_JOURNAL_DATA_FL)
21055 ++ EXT4_JOURNAL_DATA_FL | EXT4_INLINE_DATA_FL)
21056 +
21057 + /* Mask out flags that are inappropriate for the given type of inode. */
21058 + static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
21059 +diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
21060 +index dbccf46f17709..37347ba868b70 100644
21061 +--- a/fs/ext4/fsmap.c
21062 ++++ b/fs/ext4/fsmap.c
21063 +@@ -108,6 +108,9 @@ static int ext4_getfsmap_helper(struct super_block *sb,
21064 +
21065 + /* Are we just counting mappings? */
21066 + if (info->gfi_head->fmh_count == 0) {
21067 ++ if (info->gfi_head->fmh_entries == UINT_MAX)
21068 ++ return EXT4_QUERY_RANGE_ABORT;
21069 ++
21070 + if (rec_fsblk > info->gfi_next_fsblk)
21071 + info->gfi_head->fmh_entries++;
21072 +
21073 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
21074 +index 132c118d12e15..a8d99f676fb1f 100644
21075 +--- a/fs/ext4/mballoc.c
21076 ++++ b/fs/ext4/mballoc.c
21077 +@@ -4160,7 +4160,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
21078 + struct ext4_buddy e4b;
21079 + int err;
21080 + int busy = 0;
21081 +- int free = 0;
21082 ++ int free, free_total = 0;
21083 +
21084 + mb_debug(sb, "discard preallocation for group %u\n", group);
21085 + if (list_empty(&grp->bb_prealloc_list))
21086 +@@ -4188,8 +4188,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
21087 +
21088 + INIT_LIST_HEAD(&list);
21089 + repeat:
21090 ++ free = 0;
21091 + ext4_lock_group(sb, group);
21092 +- this_cpu_inc(discard_pa_seq);
21093 + list_for_each_entry_safe(pa, tmp,
21094 + &grp->bb_prealloc_list, pa_group_list) {
21095 + spin_lock(&pa->pa_lock);
21096 +@@ -4206,6 +4206,9 @@ repeat:
21097 + /* seems this one can be freed ... */
21098 + ext4_mb_mark_pa_deleted(sb, pa);
21099 +
21100 ++ if (!free)
21101 ++ this_cpu_inc(discard_pa_seq);
21102 ++
21103 + /* we can trust pa_free ... */
21104 + free += pa->pa_free;
21105 +
21106 +@@ -4215,22 +4218,6 @@ repeat:
21107 + list_add(&pa->u.pa_tmp_list, &list);
21108 + }
21109 +
21110 +- /* if we still need more blocks and some PAs were used, try again */
21111 +- if (free < needed && busy) {
21112 +- busy = 0;
21113 +- ext4_unlock_group(sb, group);
21114 +- cond_resched();
21115 +- goto repeat;
21116 +- }
21117 +-
21118 +- /* found anything to free? */
21119 +- if (list_empty(&list)) {
21120 +- BUG_ON(free != 0);
21121 +- mb_debug(sb, "Someone else may have freed PA for this group %u\n",
21122 +- group);
21123 +- goto out;
21124 +- }
21125 +-
21126 + /* now free all selected PAs */
21127 + list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
21128 +
21129 +@@ -4248,14 +4235,22 @@ repeat:
21130 + call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
21131 + }
21132 +
21133 +-out:
21134 ++ free_total += free;
21135 ++
21136 ++ /* if we still need more blocks and some PAs were used, try again */
21137 ++ if (free_total < needed && busy) {
21138 ++ ext4_unlock_group(sb, group);
21139 ++ cond_resched();
21140 ++ busy = 0;
21141 ++ goto repeat;
21142 ++ }
21143 + ext4_unlock_group(sb, group);
21144 + ext4_mb_unload_buddy(&e4b);
21145 + put_bh(bitmap_bh);
21146 + out_dbg:
21147 + mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
21148 +- free, group, grp->bb_free);
21149 +- return free;
21150 ++ free_total, group, grp->bb_free);
21151 ++ return free_total;
21152 + }
21153 +
21154 + /*
21155 +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
21156 +index 66969ae852b97..5195e083fc1e6 100644
21157 +--- a/fs/f2fs/inode.c
21158 ++++ b/fs/f2fs/inode.c
21159 +@@ -287,6 +287,13 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
21160 + return false;
21161 + }
21162 +
21163 ++ if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
21164 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
21165 ++ f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
21166 ++ __func__, inode->i_ino);
21167 ++ return false;
21168 ++ }
21169 ++
21170 + if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
21171 + fi->i_flags & F2FS_COMPR_FL &&
21172 + F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
21173 +diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
21174 +index 88ed9969cc862..5fe7d8fa93801 100644
21175 +--- a/fs/f2fs/sysfs.c
21176 ++++ b/fs/f2fs/sysfs.c
21177 +@@ -968,4 +968,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
21178 + }
21179 + kobject_del(&sbi->s_kobj);
21180 + kobject_put(&sbi->s_kobj);
21181 ++ wait_for_completion(&sbi->s_kobj_unregister);
21182 + }
21183 +diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
21184 +index bcfc288dba3fb..b115e7d47fcec 100644
21185 +--- a/fs/iomap/buffered-io.c
21186 ++++ b/fs/iomap/buffered-io.c
21187 +@@ -49,16 +49,8 @@ iomap_page_create(struct inode *inode, struct page *page)
21188 + if (iop || i_blocksize(inode) == PAGE_SIZE)
21189 + return iop;
21190 +
21191 +- iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
21192 +- atomic_set(&iop->read_count, 0);
21193 +- atomic_set(&iop->write_count, 0);
21194 ++ iop = kzalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
21195 + spin_lock_init(&iop->uptodate_lock);
21196 +- bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
21197 +-
21198 +- /*
21199 +- * migrate_page_move_mapping() assumes that pages with private data have
21200 +- * their count elevated by 1.
21201 +- */
21202 + attach_page_private(page, iop);
21203 + return iop;
21204 + }
21205 +@@ -574,10 +566,10 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
21206 + loff_t block_start = pos & ~(block_size - 1);
21207 + loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
21208 + unsigned from = offset_in_page(pos), to = from + len, poff, plen;
21209 +- int status;
21210 +
21211 + if (PageUptodate(page))
21212 + return 0;
21213 ++ ClearPageError(page);
21214 +
21215 + do {
21216 + iomap_adjust_read_range(inode, iop, &block_start,
21217 +@@ -594,14 +586,13 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
21218 + if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
21219 + return -EIO;
21220 + zero_user_segments(page, poff, from, to, poff + plen);
21221 +- iomap_set_range_uptodate(page, poff, plen);
21222 +- continue;
21223 ++ } else {
21224 ++ int status = iomap_read_page_sync(block_start, page,
21225 ++ poff, plen, srcmap);
21226 ++ if (status)
21227 ++ return status;
21228 + }
21229 +-
21230 +- status = iomap_read_page_sync(block_start, page, poff, plen,
21231 +- srcmap);
21232 +- if (status)
21233 +- return status;
21234 ++ iomap_set_range_uptodate(page, poff, plen);
21235 + } while ((block_start += plen) < block_end);
21236 +
21237 + return 0;
21238 +diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
21239 +index c1aafb2ab9907..9519113ebc352 100644
21240 +--- a/fs/iomap/direct-io.c
21241 ++++ b/fs/iomap/direct-io.c
21242 +@@ -388,6 +388,16 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
21243 + return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
21244 + case IOMAP_INLINE:
21245 + return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
21246 ++ case IOMAP_DELALLOC:
21247 ++ /*
21248 ++ * DIO is not serialised against mmap() access at all, and so
21249 ++ * if the page_mkwrite occurs between the writeback and the
21250 ++ * iomap_apply() call in the DIO path, then it will see the
21251 ++ * DELALLOC block that the page-mkwrite allocated.
21252 ++ */
21253 ++ pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
21254 ++ dio->iocb->ki_filp, current->comm);
21255 ++ return -EIO;
21256 + default:
21257 + WARN_ON_ONCE(1);
21258 + return -EIO;
21259 +diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
21260 +index 524812984e2d4..009987e690207 100644
21261 +--- a/fs/nfs/fs_context.c
21262 ++++ b/fs/nfs/fs_context.c
21263 +@@ -94,6 +94,7 @@ enum {
21264 + static const struct constant_table nfs_param_enums_local_lock[] = {
21265 + { "all", Opt_local_lock_all },
21266 + { "flock", Opt_local_lock_flock },
21267 ++ { "posix", Opt_local_lock_posix },
21268 + { "none", Opt_local_lock_none },
21269 + {}
21270 + };
21271 +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
21272 +index fdfc77486acee..984938024011b 100644
21273 +--- a/fs/nfs/nfs4file.c
21274 ++++ b/fs/nfs/nfs4file.c
21275 +@@ -9,6 +9,7 @@
21276 + #include <linux/falloc.h>
21277 + #include <linux/mount.h>
21278 + #include <linux/nfs_fs.h>
21279 ++#include <linux/nfs_ssc.h>
21280 + #include "delegation.h"
21281 + #include "internal.h"
21282 + #include "iostat.h"
21283 +@@ -314,9 +315,8 @@ out:
21284 + static int read_name_gen = 1;
21285 + #define SSC_READ_NAME_BODY "ssc_read_%d"
21286 +
21287 +-struct file *
21288 +-nfs42_ssc_open(struct vfsmount *ss_mnt, struct nfs_fh *src_fh,
21289 +- nfs4_stateid *stateid)
21290 ++static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
21291 ++ struct nfs_fh *src_fh, nfs4_stateid *stateid)
21292 + {
21293 + struct nfs_fattr fattr;
21294 + struct file *filep, *res;
21295 +@@ -398,14 +398,40 @@ out_filep:
21296 + fput(filep);
21297 + goto out_free_name;
21298 + }
21299 +-EXPORT_SYMBOL_GPL(nfs42_ssc_open);
21300 +-void nfs42_ssc_close(struct file *filep)
21301 ++
21302 ++static void __nfs42_ssc_close(struct file *filep)
21303 + {
21304 + struct nfs_open_context *ctx = nfs_file_open_context(filep);
21305 +
21306 + ctx->state->flags = 0;
21307 + }
21308 +-EXPORT_SYMBOL_GPL(nfs42_ssc_close);
21309 ++
21310 ++static const struct nfs4_ssc_client_ops nfs4_ssc_clnt_ops_tbl = {
21311 ++ .sco_open = __nfs42_ssc_open,
21312 ++ .sco_close = __nfs42_ssc_close,
21313 ++};
21314 ++
21315 ++/**
21316 ++ * nfs42_ssc_register_ops - Wrapper to register NFS_V4 ops in nfs_common
21317 ++ *
21318 ++ * Return values:
21319 ++ * None
21320 ++ */
21321 ++void nfs42_ssc_register_ops(void)
21322 ++{
21323 ++ nfs42_ssc_register(&nfs4_ssc_clnt_ops_tbl);
21324 ++}
21325 ++
21326 ++/**
21327 ++ * nfs42_ssc_unregister_ops - wrapper to un-register NFS_V4 ops in nfs_common
21328 ++ *
21329 ++ * Return values:
21330 ++ * None.
21331 ++ */
21332 ++void nfs42_ssc_unregister_ops(void)
21333 ++{
21334 ++ nfs42_ssc_unregister(&nfs4_ssc_clnt_ops_tbl);
21335 ++}
21336 + #endif /* CONFIG_NFS_V4_2 */
21337 +
21338 + const struct file_operations nfs4_file_operations = {
21339 +diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
21340 +index 0c1ab846b83dd..93f5c1678ec29 100644
21341 +--- a/fs/nfs/nfs4super.c
21342 ++++ b/fs/nfs/nfs4super.c
21343 +@@ -7,6 +7,7 @@
21344 + #include <linux/mount.h>
21345 + #include <linux/nfs4_mount.h>
21346 + #include <linux/nfs_fs.h>
21347 ++#include <linux/nfs_ssc.h>
21348 + #include "delegation.h"
21349 + #include "internal.h"
21350 + #include "nfs4_fs.h"
21351 +@@ -279,6 +280,9 @@ static int __init init_nfs_v4(void)
21352 + if (err)
21353 + goto out2;
21354 +
21355 ++#ifdef CONFIG_NFS_V4_2
21356 ++ nfs42_ssc_register_ops();
21357 ++#endif
21358 + register_nfs_version(&nfs_v4);
21359 + return 0;
21360 + out2:
21361 +@@ -297,6 +301,7 @@ static void __exit exit_nfs_v4(void)
21362 + unregister_nfs_version(&nfs_v4);
21363 + #ifdef CONFIG_NFS_V4_2
21364 + nfs4_xattr_cache_exit();
21365 ++ nfs42_ssc_unregister_ops();
21366 + #endif
21367 + nfs4_unregister_sysctl();
21368 + nfs_idmap_quit();
21369 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
21370 +index 7a70287f21a2c..f7dad8227a5f4 100644
21371 +--- a/fs/nfs/super.c
21372 ++++ b/fs/nfs/super.c
21373 +@@ -57,6 +57,7 @@
21374 + #include <linux/rcupdate.h>
21375 +
21376 + #include <linux/uaccess.h>
21377 ++#include <linux/nfs_ssc.h>
21378 +
21379 + #include "nfs4_fs.h"
21380 + #include "callback.h"
21381 +@@ -85,6 +86,10 @@ const struct super_operations nfs_sops = {
21382 + };
21383 + EXPORT_SYMBOL_GPL(nfs_sops);
21384 +
21385 ++static const struct nfs_ssc_client_ops nfs_ssc_clnt_ops_tbl = {
21386 ++ .sco_sb_deactive = nfs_sb_deactive,
21387 ++};
21388 ++
21389 + #if IS_ENABLED(CONFIG_NFS_V4)
21390 + static int __init register_nfs4_fs(void)
21391 + {
21392 +@@ -106,6 +111,16 @@ static void unregister_nfs4_fs(void)
21393 + }
21394 + #endif
21395 +
21396 ++static void nfs_ssc_register_ops(void)
21397 ++{
21398 ++ nfs_ssc_register(&nfs_ssc_clnt_ops_tbl);
21399 ++}
21400 ++
21401 ++static void nfs_ssc_unregister_ops(void)
21402 ++{
21403 ++ nfs_ssc_unregister(&nfs_ssc_clnt_ops_tbl);
21404 ++}
21405 ++
21406 + static struct shrinker acl_shrinker = {
21407 + .count_objects = nfs_access_cache_count,
21408 + .scan_objects = nfs_access_cache_scan,
21409 +@@ -133,6 +148,7 @@ int __init register_nfs_fs(void)
21410 + ret = register_shrinker(&acl_shrinker);
21411 + if (ret < 0)
21412 + goto error_3;
21413 ++ nfs_ssc_register_ops();
21414 + return 0;
21415 + error_3:
21416 + nfs_unregister_sysctl();
21417 +@@ -152,6 +168,7 @@ void __exit unregister_nfs_fs(void)
21418 + unregister_shrinker(&acl_shrinker);
21419 + nfs_unregister_sysctl();
21420 + unregister_nfs4_fs();
21421 ++ nfs_ssc_unregister_ops();
21422 + unregister_filesystem(&nfs_fs_type);
21423 + }
21424 +
21425 +diff --git a/fs/nfs_common/Makefile b/fs/nfs_common/Makefile
21426 +index 4bebe834c0091..fa82f5aaa6d95 100644
21427 +--- a/fs/nfs_common/Makefile
21428 ++++ b/fs/nfs_common/Makefile
21429 +@@ -7,3 +7,4 @@ obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o
21430 + nfs_acl-objs := nfsacl.o
21431 +
21432 + obj-$(CONFIG_GRACE_PERIOD) += grace.o
21433 ++obj-$(CONFIG_GRACE_PERIOD) += nfs_ssc.o
21434 +diff --git a/fs/nfs_common/nfs_ssc.c b/fs/nfs_common/nfs_ssc.c
21435 +new file mode 100644
21436 +index 0000000000000..f43bbb3739134
21437 +--- /dev/null
21438 ++++ b/fs/nfs_common/nfs_ssc.c
21439 +@@ -0,0 +1,94 @@
21440 ++// SPDX-License-Identifier: GPL-2.0-only
21441 ++/*
21442 ++ * fs/nfs_common/nfs_ssc_comm.c
21443 ++ *
21444 ++ * Helper for knfsd's SSC to access ops in NFS client modules
21445 ++ *
21446 ++ * Author: Dai Ngo <dai.ngo@××××××.com>
21447 ++ *
21448 ++ * Copyright (c) 2020, Oracle and/or its affiliates.
21449 ++ */
21450 ++
21451 ++#include <linux/module.h>
21452 ++#include <linux/fs.h>
21453 ++#include <linux/nfs_ssc.h>
21454 ++#include "../nfs/nfs4_fs.h"
21455 ++
21456 ++MODULE_LICENSE("GPL");
21457 ++
21458 ++struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl;
21459 ++EXPORT_SYMBOL_GPL(nfs_ssc_client_tbl);
21460 ++
21461 ++#ifdef CONFIG_NFS_V4_2
21462 ++/**
21463 ++ * nfs42_ssc_register - install the NFS_V4 client ops in the nfs_ssc_client_tbl
21464 ++ * @ops: NFS_V4 ops to be installed
21465 ++ *
21466 ++ * Return values:
21467 ++ * None
21468 ++ */
21469 ++void nfs42_ssc_register(const struct nfs4_ssc_client_ops *ops)
21470 ++{
21471 ++ nfs_ssc_client_tbl.ssc_nfs4_ops = ops;
21472 ++}
21473 ++EXPORT_SYMBOL_GPL(nfs42_ssc_register);
21474 ++
21475 ++/**
21476 ++ * nfs42_ssc_unregister - uninstall the NFS_V4 client ops from
21477 ++ * the nfs_ssc_client_tbl
21478 ++ * @ops: ops to be uninstalled
21479 ++ *
21480 ++ * Return values:
21481 ++ * None
21482 ++ */
21483 ++void nfs42_ssc_unregister(const struct nfs4_ssc_client_ops *ops)
21484 ++{
21485 ++ if (nfs_ssc_client_tbl.ssc_nfs4_ops != ops)
21486 ++ return;
21487 ++
21488 ++ nfs_ssc_client_tbl.ssc_nfs4_ops = NULL;
21489 ++}
21490 ++EXPORT_SYMBOL_GPL(nfs42_ssc_unregister);
21491 ++#endif /* CONFIG_NFS_V4_2 */
21492 ++
21493 ++#ifdef CONFIG_NFS_V4_2
21494 ++/**
21495 ++ * nfs_ssc_register - install the NFS_FS client ops in the nfs_ssc_client_tbl
21496 ++ * @ops: NFS_FS ops to be installed
21497 ++ *
21498 ++ * Return values:
21499 ++ * None
21500 ++ */
21501 ++void nfs_ssc_register(const struct nfs_ssc_client_ops *ops)
21502 ++{
21503 ++ nfs_ssc_client_tbl.ssc_nfs_ops = ops;
21504 ++}
21505 ++EXPORT_SYMBOL_GPL(nfs_ssc_register);
21506 ++
21507 ++/**
21508 ++ * nfs_ssc_unregister - uninstall the NFS_FS client ops from
21509 ++ * the nfs_ssc_client_tbl
21510 ++ * @ops: ops to be uninstalled
21511 ++ *
21512 ++ * Return values:
21513 ++ * None
21514 ++ */
21515 ++void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops)
21516 ++{
21517 ++ if (nfs_ssc_client_tbl.ssc_nfs_ops != ops)
21518 ++ return;
21519 ++ nfs_ssc_client_tbl.ssc_nfs_ops = NULL;
21520 ++}
21521 ++EXPORT_SYMBOL_GPL(nfs_ssc_unregister);
21522 ++
21523 ++#else
21524 ++void nfs_ssc_register(const struct nfs_ssc_client_ops *ops)
21525 ++{
21526 ++}
21527 ++EXPORT_SYMBOL_GPL(nfs_ssc_register);
21528 ++
21529 ++void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops)
21530 ++{
21531 ++}
21532 ++EXPORT_SYMBOL_GPL(nfs_ssc_unregister);
21533 ++#endif /* CONFIG_NFS_V4_2 */
21534 +diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
21535 +index 99d2cae91bd68..f368f3215f88f 100644
21536 +--- a/fs/nfsd/Kconfig
21537 ++++ b/fs/nfsd/Kconfig
21538 +@@ -136,7 +136,7 @@ config NFSD_FLEXFILELAYOUT
21539 +
21540 + config NFSD_V4_2_INTER_SSC
21541 + bool "NFSv4.2 inter server to server COPY"
21542 +- depends on NFSD_V4 && NFS_V4_1 && NFS_V4_2 && NFS_FS=y
21543 ++ depends on NFSD_V4 && NFS_V4_1 && NFS_V4_2
21544 + help
21545 + This option enables support for NFSv4.2 inter server to
21546 + server copy where the destination server calls the NFSv4.2
21547 +diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
21548 +index c8b9d2667ee6f..3c6c2f7d1688b 100644
21549 +--- a/fs/nfsd/filecache.c
21550 ++++ b/fs/nfsd/filecache.c
21551 +@@ -889,7 +889,7 @@ nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
21552 +
21553 + hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
21554 + nf_node, lockdep_is_held(&nfsd_file_hashtbl[hashval].nfb_lock)) {
21555 +- if ((need & nf->nf_may) != need)
21556 ++ if (nf->nf_may != need)
21557 + continue;
21558 + if (nf->nf_inode != inode)
21559 + continue;
21560 +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
21561 +index eaf50eafa9359..84e10aef14175 100644
21562 +--- a/fs/nfsd/nfs4proc.c
21563 ++++ b/fs/nfsd/nfs4proc.c
21564 +@@ -38,6 +38,7 @@
21565 + #include <linux/slab.h>
21566 + #include <linux/kthread.h>
21567 + #include <linux/sunrpc/addr.h>
21568 ++#include <linux/nfs_ssc.h>
21569 +
21570 + #include "idmap.h"
21571 + #include "cache.h"
21572 +@@ -1247,7 +1248,7 @@ out_err:
21573 + static void
21574 + nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
21575 + {
21576 +- nfs_sb_deactive(ss_mnt->mnt_sb);
21577 ++ nfs_do_sb_deactive(ss_mnt->mnt_sb);
21578 + mntput(ss_mnt);
21579 + }
21580 +
21581 +diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
21582 +index 9bb9f0952b186..caf563981532b 100644
21583 +--- a/fs/ntfs/inode.c
21584 ++++ b/fs/ntfs/inode.c
21585 +@@ -1810,6 +1810,12 @@ int ntfs_read_inode_mount(struct inode *vi)
21586 + brelse(bh);
21587 + }
21588 +
21589 ++ if (le32_to_cpu(m->bytes_allocated) != vol->mft_record_size) {
21590 ++ ntfs_error(sb, "Incorrect mft record size %u in superblock, should be %u.",
21591 ++ le32_to_cpu(m->bytes_allocated), vol->mft_record_size);
21592 ++ goto err_out;
21593 ++ }
21594 ++
21595 + /* Apply the mst fixups. */
21596 + if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) {
21597 + /* FIXME: Try to use the $MFTMirr now. */
21598 +diff --git a/fs/proc/base.c b/fs/proc/base.c
21599 +index 617db4e0faa09..aa69c35d904ca 100644
21600 +--- a/fs/proc/base.c
21601 ++++ b/fs/proc/base.c
21602 +@@ -1055,7 +1055,6 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
21603 +
21604 + static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
21605 + {
21606 +- static DEFINE_MUTEX(oom_adj_mutex);
21607 + struct mm_struct *mm = NULL;
21608 + struct task_struct *task;
21609 + int err = 0;
21610 +@@ -1095,7 +1094,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
21611 + struct task_struct *p = find_lock_task_mm(task);
21612 +
21613 + if (p) {
21614 +- if (atomic_read(&p->mm->mm_users) > 1) {
21615 ++ if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) {
21616 + mm = p->mm;
21617 + mmgrab(mm);
21618 + }
21619 +diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
21620 +index 58fc2a7c7fd19..e69a2bfdd81c0 100644
21621 +--- a/fs/quota/quota_v2.c
21622 ++++ b/fs/quota/quota_v2.c
21623 +@@ -282,6 +282,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
21624 + d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
21625 + d->dqb_btime = cpu_to_le64(m->dqb_btime);
21626 + d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
21627 ++ d->dqb_pad = 0;
21628 + if (qtree_entry_unused(info, dp))
21629 + d->dqb_itime = cpu_to_le64(1);
21630 + }
21631 +diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
21632 +index 4146954549560..355523f4a4bf3 100644
21633 +--- a/fs/ramfs/file-nommu.c
21634 ++++ b/fs/ramfs/file-nommu.c
21635 +@@ -224,7 +224,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
21636 + if (!pages)
21637 + goto out_free;
21638 +
21639 +- nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages);
21640 ++ nr = find_get_pages_contig(inode->i_mapping, pgoff, lpages, pages);
21641 + if (nr != lpages)
21642 + goto out_free_pages; /* leave if some pages were missing */
21643 +
21644 +diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
21645 +index e43fed96704d8..c76d563dec0e1 100644
21646 +--- a/fs/reiserfs/inode.c
21647 ++++ b/fs/reiserfs/inode.c
21648 +@@ -2159,7 +2159,8 @@ out_end_trans:
21649 + out_inserted_sd:
21650 + clear_nlink(inode);
21651 + th->t_trans_id = 0; /* so the caller can't use this handle later */
21652 +- unlock_new_inode(inode); /* OK to do even if we hadn't locked it */
21653 ++ if (inode->i_state & I_NEW)
21654 ++ unlock_new_inode(inode);
21655 + iput(inode);
21656 + return err;
21657 + }
21658 +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
21659 +index a6bce5b1fb1dc..1b9c7a387dc71 100644
21660 +--- a/fs/reiserfs/super.c
21661 ++++ b/fs/reiserfs/super.c
21662 +@@ -1258,6 +1258,10 @@ static int reiserfs_parse_options(struct super_block *s,
21663 + "turned on.");
21664 + return 0;
21665 + }
21666 ++ if (qf_names[qtype] !=
21667 ++ REISERFS_SB(s)->s_qf_names[qtype])
21668 ++ kfree(qf_names[qtype]);
21669 ++ qf_names[qtype] = NULL;
21670 + if (*arg) { /* Some filename specified? */
21671 + if (REISERFS_SB(s)->s_qf_names[qtype]
21672 + && strcmp(REISERFS_SB(s)->s_qf_names[qtype],
21673 +@@ -1287,10 +1291,6 @@ static int reiserfs_parse_options(struct super_block *s,
21674 + else
21675 + *mount_options |= 1 << REISERFS_GRPQUOTA;
21676 + } else {
21677 +- if (qf_names[qtype] !=
21678 +- REISERFS_SB(s)->s_qf_names[qtype])
21679 +- kfree(qf_names[qtype]);
21680 +- qf_names[qtype] = NULL;
21681 + if (qtype == USRQUOTA)
21682 + *mount_options &= ~(1 << REISERFS_USRQUOTA);
21683 + else
21684 +diff --git a/fs/udf/inode.c b/fs/udf/inode.c
21685 +index adaba8e8b326e..566118417e562 100644
21686 +--- a/fs/udf/inode.c
21687 ++++ b/fs/udf/inode.c
21688 +@@ -139,21 +139,24 @@ void udf_evict_inode(struct inode *inode)
21689 + struct udf_inode_info *iinfo = UDF_I(inode);
21690 + int want_delete = 0;
21691 +
21692 +- if (!inode->i_nlink && !is_bad_inode(inode)) {
21693 +- want_delete = 1;
21694 +- udf_setsize(inode, 0);
21695 +- udf_update_inode(inode, IS_SYNC(inode));
21696 ++ if (!is_bad_inode(inode)) {
21697 ++ if (!inode->i_nlink) {
21698 ++ want_delete = 1;
21699 ++ udf_setsize(inode, 0);
21700 ++ udf_update_inode(inode, IS_SYNC(inode));
21701 ++ }
21702 ++ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
21703 ++ inode->i_size != iinfo->i_lenExtents) {
21704 ++ udf_warn(inode->i_sb,
21705 ++ "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
21706 ++ inode->i_ino, inode->i_mode,
21707 ++ (unsigned long long)inode->i_size,
21708 ++ (unsigned long long)iinfo->i_lenExtents);
21709 ++ }
21710 + }
21711 + truncate_inode_pages_final(&inode->i_data);
21712 + invalidate_inode_buffers(inode);
21713 + clear_inode(inode);
21714 +- if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
21715 +- inode->i_size != iinfo->i_lenExtents) {
21716 +- udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
21717 +- inode->i_ino, inode->i_mode,
21718 +- (unsigned long long)inode->i_size,
21719 +- (unsigned long long)iinfo->i_lenExtents);
21720 +- }
21721 + kfree(iinfo->i_ext.i_data);
21722 + iinfo->i_ext.i_data = NULL;
21723 + udf_clear_extent_cache(inode);
21724 +diff --git a/fs/udf/super.c b/fs/udf/super.c
21725 +index 1c42f544096d8..a03b8ce5ef0fd 100644
21726 +--- a/fs/udf/super.c
21727 ++++ b/fs/udf/super.c
21728 +@@ -1353,6 +1353,12 @@ static int udf_load_sparable_map(struct super_block *sb,
21729 + (int)spm->numSparingTables);
21730 + return -EIO;
21731 + }
21732 ++ if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
21733 ++ udf_err(sb, "error loading logical volume descriptor: "
21734 ++ "Too big sparing table size (%u)\n",
21735 ++ le32_to_cpu(spm->sizeSparingTable));
21736 ++ return -EIO;
21737 ++ }
21738 +
21739 + for (i = 0; i < spm->numSparingTables; i++) {
21740 + loc = le32_to_cpu(spm->locSparingTable[i]);
21741 +diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
21742 +index 1d9fa8a300f15..6c1aba16113c5 100644
21743 +--- a/fs/xfs/libxfs/xfs_rtbitmap.c
21744 ++++ b/fs/xfs/libxfs/xfs_rtbitmap.c
21745 +@@ -1018,7 +1018,6 @@ xfs_rtalloc_query_range(
21746 + struct xfs_mount *mp = tp->t_mountp;
21747 + xfs_rtblock_t rtstart;
21748 + xfs_rtblock_t rtend;
21749 +- xfs_rtblock_t rem;
21750 + int is_free;
21751 + int error = 0;
21752 +
21753 +@@ -1027,13 +1026,12 @@ xfs_rtalloc_query_range(
21754 + if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
21755 + low_rec->ar_startext == high_rec->ar_startext)
21756 + return 0;
21757 +- if (high_rec->ar_startext > mp->m_sb.sb_rextents)
21758 +- high_rec->ar_startext = mp->m_sb.sb_rextents;
21759 ++ high_rec->ar_startext = min(high_rec->ar_startext,
21760 ++ mp->m_sb.sb_rextents - 1);
21761 +
21762 + /* Iterate the bitmap, looking for discrepancies. */
21763 + rtstart = low_rec->ar_startext;
21764 +- rem = high_rec->ar_startext - rtstart;
21765 +- while (rem) {
21766 ++ while (rtstart <= high_rec->ar_startext) {
21767 + /* Is the first block free? */
21768 + error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend,
21769 + &is_free);
21770 +@@ -1042,7 +1040,7 @@ xfs_rtalloc_query_range(
21771 +
21772 + /* How long does the extent go for? */
21773 + error = xfs_rtfind_forw(mp, tp, rtstart,
21774 +- high_rec->ar_startext - 1, &rtend);
21775 ++ high_rec->ar_startext, &rtend);
21776 + if (error)
21777 + break;
21778 +
21779 +@@ -1055,7 +1053,6 @@ xfs_rtalloc_query_range(
21780 + break;
21781 + }
21782 +
21783 +- rem -= rtend - rtstart + 1;
21784 + rtstart = rtend + 1;
21785 + }
21786 +
21787 +diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
21788 +index 8f0457d67d779..de2772394de21 100644
21789 +--- a/fs/xfs/xfs_buf_item_recover.c
21790 ++++ b/fs/xfs/xfs_buf_item_recover.c
21791 +@@ -719,6 +719,8 @@ xlog_recover_get_buf_lsn(
21792 + case XFS_ABTC_MAGIC:
21793 + case XFS_RMAP_CRC_MAGIC:
21794 + case XFS_REFC_CRC_MAGIC:
21795 ++ case XFS_FIBT_CRC_MAGIC:
21796 ++ case XFS_FIBT_MAGIC:
21797 + case XFS_IBT_CRC_MAGIC:
21798 + case XFS_IBT_MAGIC: {
21799 + struct xfs_btree_block *btb = blk;
21800 +diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
21801 +index a29f78a663ca5..3d1b951247440 100644
21802 +--- a/fs/xfs/xfs_file.c
21803 ++++ b/fs/xfs/xfs_file.c
21804 +@@ -1008,6 +1008,21 @@ xfs_file_fadvise(
21805 + return ret;
21806 + }
21807 +
21808 ++/* Does this file, inode, or mount want synchronous writes? */
21809 ++static inline bool xfs_file_sync_writes(struct file *filp)
21810 ++{
21811 ++ struct xfs_inode *ip = XFS_I(file_inode(filp));
21812 ++
21813 ++ if (ip->i_mount->m_flags & XFS_MOUNT_WSYNC)
21814 ++ return true;
21815 ++ if (filp->f_flags & (__O_SYNC | O_DSYNC))
21816 ++ return true;
21817 ++ if (IS_SYNC(file_inode(filp)))
21818 ++ return true;
21819 ++
21820 ++ return false;
21821 ++}
21822 ++
21823 + STATIC loff_t
21824 + xfs_file_remap_range(
21825 + struct file *file_in,
21826 +@@ -1065,7 +1080,7 @@ xfs_file_remap_range(
21827 + if (ret)
21828 + goto out_unlock;
21829 +
21830 +- if (mp->m_flags & XFS_MOUNT_WSYNC)
21831 ++ if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
21832 + xfs_log_force_inode(dest);
21833 + out_unlock:
21834 + xfs_iunlock2_io_mmap(src, dest);
21835 +diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
21836 +index 4eebcec4aae6c..9ce5e7d5bf8f2 100644
21837 +--- a/fs/xfs/xfs_fsmap.c
21838 ++++ b/fs/xfs/xfs_fsmap.c
21839 +@@ -26,7 +26,7 @@
21840 + #include "xfs_rtalloc.h"
21841 +
21842 + /* Convert an xfs_fsmap to an fsmap. */
21843 +-void
21844 ++static void
21845 + xfs_fsmap_from_internal(
21846 + struct fsmap *dest,
21847 + struct xfs_fsmap *src)
21848 +@@ -155,8 +155,7 @@ xfs_fsmap_owner_from_rmap(
21849 + /* getfsmap query state */
21850 + struct xfs_getfsmap_info {
21851 + struct xfs_fsmap_head *head;
21852 +- xfs_fsmap_format_t formatter; /* formatting fn */
21853 +- void *format_arg; /* format buffer */
21854 ++ struct fsmap *fsmap_recs; /* mapping records */
21855 + struct xfs_buf *agf_bp; /* AGF, for refcount queries */
21856 + xfs_daddr_t next_daddr; /* next daddr we expect */
21857 + u64 missing_owner; /* owner of holes */
21858 +@@ -224,6 +223,20 @@ xfs_getfsmap_is_shared(
21859 + return 0;
21860 + }
21861 +
21862 ++static inline void
21863 ++xfs_getfsmap_format(
21864 ++ struct xfs_mount *mp,
21865 ++ struct xfs_fsmap *xfm,
21866 ++ struct xfs_getfsmap_info *info)
21867 ++{
21868 ++ struct fsmap *rec;
21869 ++
21870 ++ trace_xfs_getfsmap_mapping(mp, xfm);
21871 ++
21872 ++ rec = &info->fsmap_recs[info->head->fmh_entries++];
21873 ++ xfs_fsmap_from_internal(rec, xfm);
21874 ++}
21875 ++
21876 + /*
21877 + * Format a reverse mapping for getfsmap, having translated rm_startblock
21878 + * into the appropriate daddr units.
21879 +@@ -256,6 +269,9 @@ xfs_getfsmap_helper(
21880 +
21881 + /* Are we just counting mappings? */
21882 + if (info->head->fmh_count == 0) {
21883 ++ if (info->head->fmh_entries == UINT_MAX)
21884 ++ return -ECANCELED;
21885 ++
21886 + if (rec_daddr > info->next_daddr)
21887 + info->head->fmh_entries++;
21888 +
21889 +@@ -285,10 +301,7 @@ xfs_getfsmap_helper(
21890 + fmr.fmr_offset = 0;
21891 + fmr.fmr_length = rec_daddr - info->next_daddr;
21892 + fmr.fmr_flags = FMR_OF_SPECIAL_OWNER;
21893 +- error = info->formatter(&fmr, info->format_arg);
21894 +- if (error)
21895 +- return error;
21896 +- info->head->fmh_entries++;
21897 ++ xfs_getfsmap_format(mp, &fmr, info);
21898 + }
21899 +
21900 + if (info->last)
21901 +@@ -320,11 +333,8 @@ xfs_getfsmap_helper(
21902 + if (shared)
21903 + fmr.fmr_flags |= FMR_OF_SHARED;
21904 + }
21905 +- error = info->formatter(&fmr, info->format_arg);
21906 +- if (error)
21907 +- return error;
21908 +- info->head->fmh_entries++;
21909 +
21910 ++ xfs_getfsmap_format(mp, &fmr, info);
21911 + out:
21912 + rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
21913 + if (info->next_daddr < rec_daddr)
21914 +@@ -792,11 +802,11 @@ xfs_getfsmap_check_keys(
21915 + #endif /* CONFIG_XFS_RT */
21916 +
21917 + /*
21918 +- * Get filesystem's extents as described in head, and format for
21919 +- * output. Calls formatter to fill the user's buffer until all
21920 +- * extents are mapped, until the passed-in head->fmh_count slots have
21921 +- * been filled, or until the formatter short-circuits the loop, if it
21922 +- * is tracking filled-in extents on its own.
21923 ++ * Get filesystem's extents as described in head, and format for output. Fills
21924 ++ * in the supplied records array until there are no more reverse mappings to
21925 ++ * return or head.fmh_entries == head.fmh_count. In the second case, this
21926 ++ * function returns -ECANCELED to indicate that more records would have been
21927 ++ * returned.
21928 + *
21929 + * Key to Confusion
21930 + * ----------------
21931 +@@ -816,8 +826,7 @@ int
21932 + xfs_getfsmap(
21933 + struct xfs_mount *mp,
21934 + struct xfs_fsmap_head *head,
21935 +- xfs_fsmap_format_t formatter,
21936 +- void *arg)
21937 ++ struct fsmap *fsmap_recs)
21938 + {
21939 + struct xfs_trans *tp = NULL;
21940 + struct xfs_fsmap dkeys[2]; /* per-dev keys */
21941 +@@ -892,8 +901,7 @@ xfs_getfsmap(
21942 +
21943 + info.next_daddr = head->fmh_keys[0].fmr_physical +
21944 + head->fmh_keys[0].fmr_length;
21945 +- info.formatter = formatter;
21946 +- info.format_arg = arg;
21947 ++ info.fsmap_recs = fsmap_recs;
21948 + info.head = head;
21949 +
21950 + /*
21951 +diff --git a/fs/xfs/xfs_fsmap.h b/fs/xfs/xfs_fsmap.h
21952 +index c6c57739b8626..a0775788e7b13 100644
21953 +--- a/fs/xfs/xfs_fsmap.h
21954 ++++ b/fs/xfs/xfs_fsmap.h
21955 +@@ -27,13 +27,9 @@ struct xfs_fsmap_head {
21956 + struct xfs_fsmap fmh_keys[2]; /* low and high keys */
21957 + };
21958 +
21959 +-void xfs_fsmap_from_internal(struct fsmap *dest, struct xfs_fsmap *src);
21960 + void xfs_fsmap_to_internal(struct xfs_fsmap *dest, struct fsmap *src);
21961 +
21962 +-/* fsmap to userspace formatter - copy to user & advance pointer */
21963 +-typedef int (*xfs_fsmap_format_t)(struct xfs_fsmap *, void *);
21964 +-
21965 + int xfs_getfsmap(struct xfs_mount *mp, struct xfs_fsmap_head *head,
21966 +- xfs_fsmap_format_t formatter, void *arg);
21967 ++ struct fsmap *out_recs);
21968 +
21969 + #endif /* __XFS_FSMAP_H__ */
21970 +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
21971 +index 6f22a66777cd0..b0882f8a787f1 100644
21972 +--- a/fs/xfs/xfs_ioctl.c
21973 ++++ b/fs/xfs/xfs_ioctl.c
21974 +@@ -1715,39 +1715,17 @@ out_free_buf:
21975 + return error;
21976 + }
21977 +
21978 +-struct getfsmap_info {
21979 +- struct xfs_mount *mp;
21980 +- struct fsmap_head __user *data;
21981 +- unsigned int idx;
21982 +- __u32 last_flags;
21983 +-};
21984 +-
21985 +-STATIC int
21986 +-xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
21987 +-{
21988 +- struct getfsmap_info *info = priv;
21989 +- struct fsmap fm;
21990 +-
21991 +- trace_xfs_getfsmap_mapping(info->mp, xfm);
21992 +-
21993 +- info->last_flags = xfm->fmr_flags;
21994 +- xfs_fsmap_from_internal(&fm, xfm);
21995 +- if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
21996 +- sizeof(struct fsmap)))
21997 +- return -EFAULT;
21998 +-
21999 +- return 0;
22000 +-}
22001 +-
22002 + STATIC int
22003 + xfs_ioc_getfsmap(
22004 + struct xfs_inode *ip,
22005 + struct fsmap_head __user *arg)
22006 + {
22007 +- struct getfsmap_info info = { NULL };
22008 + struct xfs_fsmap_head xhead = {0};
22009 + struct fsmap_head head;
22010 +- bool aborted = false;
22011 ++ struct fsmap *recs;
22012 ++ unsigned int count;
22013 ++ __u32 last_flags = 0;
22014 ++ bool done = false;
22015 + int error;
22016 +
22017 + if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
22018 +@@ -1759,38 +1737,112 @@ xfs_ioc_getfsmap(
22019 + sizeof(head.fmh_keys[1].fmr_reserved)))
22020 + return -EINVAL;
22021 +
22022 ++ /*
22023 ++ * Use an internal memory buffer so that we don't have to copy fsmap
22024 ++ * data to userspace while holding locks. Start by trying to allocate
22025 ++ * up to 128k for the buffer, but fall back to a single page if needed.
22026 ++ */
22027 ++ count = min_t(unsigned int, head.fmh_count,
22028 ++ 131072 / sizeof(struct fsmap));
22029 ++ recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
22030 ++ if (!recs) {
22031 ++ count = min_t(unsigned int, head.fmh_count,
22032 ++ PAGE_SIZE / sizeof(struct fsmap));
22033 ++ recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
22034 ++ if (!recs)
22035 ++ return -ENOMEM;
22036 ++ }
22037 ++
22038 + xhead.fmh_iflags = head.fmh_iflags;
22039 +- xhead.fmh_count = head.fmh_count;
22040 + xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
22041 + xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
22042 +
22043 + trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
22044 + trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
22045 +
22046 +- info.mp = ip->i_mount;
22047 +- info.data = arg;
22048 +- error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
22049 +- if (error == -ECANCELED) {
22050 +- error = 0;
22051 +- aborted = true;
22052 +- } else if (error)
22053 +- return error;
22054 ++ head.fmh_entries = 0;
22055 ++ do {
22056 ++ struct fsmap __user *user_recs;
22057 ++ struct fsmap *last_rec;
22058 ++
22059 ++ user_recs = &arg->fmh_recs[head.fmh_entries];
22060 ++ xhead.fmh_entries = 0;
22061 ++ xhead.fmh_count = min_t(unsigned int, count,
22062 ++ head.fmh_count - head.fmh_entries);
22063 ++
22064 ++ /* Run query, record how many entries we got. */
22065 ++ error = xfs_getfsmap(ip->i_mount, &xhead, recs);
22066 ++ switch (error) {
22067 ++ case 0:
22068 ++ /*
22069 ++ * There are no more records in the result set. Copy
22070 ++ * whatever we got to userspace and break out.
22071 ++ */
22072 ++ done = true;
22073 ++ break;
22074 ++ case -ECANCELED:
22075 ++ /*
22076 ++ * The internal memory buffer is full. Copy whatever
22077 ++ * records we got to userspace and go again if we have
22078 ++ * not yet filled the userspace buffer.
22079 ++ */
22080 ++ error = 0;
22081 ++ break;
22082 ++ default:
22083 ++ goto out_free;
22084 ++ }
22085 ++ head.fmh_entries += xhead.fmh_entries;
22086 ++ head.fmh_oflags = xhead.fmh_oflags;
22087 +
22088 +- /* If we didn't abort, set the "last" flag in the last fmx */
22089 +- if (!aborted && info.idx) {
22090 +- info.last_flags |= FMR_OF_LAST;
22091 +- if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
22092 +- &info.last_flags, sizeof(info.last_flags)))
22093 +- return -EFAULT;
22094 ++ /*
22095 ++ * If the caller wanted a record count or there aren't any
22096 ++ * new records to return, we're done.
22097 ++ */
22098 ++ if (head.fmh_count == 0 || xhead.fmh_entries == 0)
22099 ++ break;
22100 ++
22101 ++ /* Copy all the records we got out to userspace. */
22102 ++ if (copy_to_user(user_recs, recs,
22103 ++ xhead.fmh_entries * sizeof(struct fsmap))) {
22104 ++ error = -EFAULT;
22105 ++ goto out_free;
22106 ++ }
22107 ++
22108 ++ /* Remember the last record flags we copied to userspace. */
22109 ++ last_rec = &recs[xhead.fmh_entries - 1];
22110 ++ last_flags = last_rec->fmr_flags;
22111 ++
22112 ++ /* Set up the low key for the next iteration. */
22113 ++ xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
22114 ++ trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
22115 ++ } while (!done && head.fmh_entries < head.fmh_count);
22116 ++
22117 ++ /*
22118 ++ * If there are no more records in the query result set and we're not
22119 ++ * in counting mode, mark the last record returned with the LAST flag.
22120 ++ */
22121 ++ if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
22122 ++ struct fsmap __user *user_rec;
22123 ++
22124 ++ last_flags |= FMR_OF_LAST;
22125 ++ user_rec = &arg->fmh_recs[head.fmh_entries - 1];
22126 ++
22127 ++ if (copy_to_user(&user_rec->fmr_flags, &last_flags,
22128 ++ sizeof(last_flags))) {
22129 ++ error = -EFAULT;
22130 ++ goto out_free;
22131 ++ }
22132 + }
22133 +
22134 + /* copy back header */
22135 +- head.fmh_entries = xhead.fmh_entries;
22136 +- head.fmh_oflags = xhead.fmh_oflags;
22137 +- if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
22138 +- return -EFAULT;
22139 ++ if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
22140 ++ error = -EFAULT;
22141 ++ goto out_free;
22142 ++ }
22143 +
22144 +- return 0;
22145 ++out_free:
22146 ++ kmem_free(recs);
22147 ++ return error;
22148 + }
22149 +
22150 + STATIC int
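With the rewritten ioctl above, records reach userspace in bounded chunks, so a caller can iterate with a small fixed buffer until a record carries FMR_OF_LAST. A hedged sketch of that loop, assuming the uapi <linux/fsmap.h> helpers fsmap_sizeof() and fsmap_advance() (error handling trimmed; may require privilege):

	#include <fcntl.h>
	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/fsmap.h>

	#define NR_RECS 64

	int main(int argc, char **argv)
	{
		struct fsmap_head *head = calloc(1, fsmap_sizeof(NR_RECS));
		int fd = open(argc > 1 ? argv[1] : "/", O_RDONLY);
		unsigned int i;

		if (!head || fd < 0)
			return 1;
		/* low key stays zero; high key spans the whole device */
		head->fmh_keys[1].fmr_device = UINT_MAX;
		head->fmh_keys[1].fmr_flags = UINT_MAX;
		head->fmh_keys[1].fmr_physical = ULLONG_MAX;
		head->fmh_keys[1].fmr_owner = ULLONG_MAX;
		head->fmh_keys[1].fmr_offset = ULLONG_MAX;

		for (;;) {
			head->fmh_count = NR_RECS;
			if (ioctl(fd, FS_IOC_GETFSMAP, head) < 0)
				return 1;
			if (!head->fmh_entries)
				break;
			for (i = 0; i < head->fmh_entries; i++) {
				struct fsmap *r = &head->fmh_recs[i];

				printf("dev %u phys %llu len %llu\n", r->fmr_device,
				       (unsigned long long)r->fmr_physical,
				       (unsigned long long)r->fmr_length);
				if (r->fmr_flags & FMR_OF_LAST)
					return 0;
			}
			fsmap_advance(head);	/* next low key = last record seen */
		}
		return 0;
	}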
22151 +diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
22152 +index 6209e7b6b895b..86994d7f7cba3 100644
22153 +--- a/fs/xfs/xfs_rtalloc.c
22154 ++++ b/fs/xfs/xfs_rtalloc.c
22155 +@@ -247,6 +247,9 @@ xfs_rtallocate_extent_block(
22156 + end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
22157 + i <= end;
22158 + i++) {
22159 ++ /* Make sure we don't scan off the end of the rt volume. */
22160 ++ maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i;
22161 ++
22162 + /*
22163 + * See if there's a free extent of maxlen starting at i.
22164 + * If it's not so then next will contain the first non-free.
22165 +@@ -442,6 +445,14 @@ xfs_rtallocate_extent_near(
22166 + */
22167 + if (bno >= mp->m_sb.sb_rextents)
22168 + bno = mp->m_sb.sb_rextents - 1;
22169 ++
22170 ++ /* Make sure we don't run off the end of the rt volume. */
22171 ++ maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno;
22172 ++ if (maxlen < minlen) {
22173 ++ *rtblock = NULLRTBLOCK;
22174 ++ return 0;
22175 ++ }
22176 ++
22177 + /*
22178 + * Try the exact allocation first.
22179 + */
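Both hunks add the same guard: clamp the candidate length so the scanned range cannot run past sb_rextents. The idiom in isolation, as a hedged standalone sketch (assumes start <= total, which the callers guarantee):

	#include <stdio.h>

	/* Clamp a requested length so [start, start + len) stays within a
	 * device of 'total' units; mirrors min(total, start + len) - start. */
	static unsigned long long clamp_len(unsigned long long total,
					    unsigned long long start,
					    unsigned long long len)
	{
		return (start + len > total ? total : start + len) - start;
	}

	int main(void)
	{
		/* 1000-extent volume, 64 extents requested at offset 990 -> 10 */
		printf("%llu\n", clamp_len(1000, 990, 64));
		return 0;
	}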
22180 +diff --git a/include/dt-bindings/mux/mux-j721e-wiz.h b/include/dt-bindings/mux/mux-j721e-wiz.h
22181 +deleted file mode 100644
22182 +index fd1c4ea9fc7f0..0000000000000
22183 +--- a/include/dt-bindings/mux/mux-j721e-wiz.h
22184 ++++ /dev/null
22185 +@@ -1,53 +0,0 @@
22186 +-/* SPDX-License-Identifier: GPL-2.0 */
22187 +-/*
22188 +- * This header provides constants for J721E WIZ.
22189 +- */
22190 +-
22191 +-#ifndef _DT_BINDINGS_J721E_WIZ
22192 +-#define _DT_BINDINGS_J721E_WIZ
22193 +-
22194 +-#define SERDES0_LANE0_QSGMII_LANE1 0x0
22195 +-#define SERDES0_LANE0_PCIE0_LANE0 0x1
22196 +-#define SERDES0_LANE0_USB3_0_SWAP 0x2
22197 +-
22198 +-#define SERDES0_LANE1_QSGMII_LANE2 0x0
22199 +-#define SERDES0_LANE1_PCIE0_LANE1 0x1
22200 +-#define SERDES0_LANE1_USB3_0 0x2
22201 +-
22202 +-#define SERDES1_LANE0_QSGMII_LANE3 0x0
22203 +-#define SERDES1_LANE0_PCIE1_LANE0 0x1
22204 +-#define SERDES1_LANE0_USB3_1_SWAP 0x2
22205 +-#define SERDES1_LANE0_SGMII_LANE0 0x3
22206 +-
22207 +-#define SERDES1_LANE1_QSGMII_LANE4 0x0
22208 +-#define SERDES1_LANE1_PCIE1_LANE1 0x1
22209 +-#define SERDES1_LANE1_USB3_1 0x2
22210 +-#define SERDES1_LANE1_SGMII_LANE1 0x3
22211 +-
22212 +-#define SERDES2_LANE0_PCIE2_LANE0 0x1
22213 +-#define SERDES2_LANE0_SGMII_LANE0 0x3
22214 +-#define SERDES2_LANE0_USB3_1_SWAP 0x2
22215 +-
22216 +-#define SERDES2_LANE1_PCIE2_LANE1 0x1
22217 +-#define SERDES2_LANE1_USB3_1 0x2
22218 +-#define SERDES2_LANE1_SGMII_LANE1 0x3
22219 +-
22220 +-#define SERDES3_LANE0_PCIE3_LANE0 0x1
22221 +-#define SERDES3_LANE0_USB3_0_SWAP 0x2
22222 +-
22223 +-#define SERDES3_LANE1_PCIE3_LANE1 0x1
22224 +-#define SERDES3_LANE1_USB3_0 0x2
22225 +-
22226 +-#define SERDES4_LANE0_EDP_LANE0 0x0
22227 +-#define SERDES4_LANE0_QSGMII_LANE5 0x2
22228 +-
22229 +-#define SERDES4_LANE1_EDP_LANE1 0x0
22230 +-#define SERDES4_LANE1_QSGMII_LANE6 0x2
22231 +-
22232 +-#define SERDES4_LANE2_EDP_LANE2 0x0
22233 +-#define SERDES4_LANE2_QSGMII_LANE7 0x2
22234 +-
22235 +-#define SERDES4_LANE3_EDP_LANE3 0x0
22236 +-#define SERDES4_LANE3_QSGMII_LANE8 0x2
22237 +-
22238 +-#endif /* _DT_BINDINGS_J721E_WIZ */
22239 +diff --git a/include/dt-bindings/mux/ti-serdes.h b/include/dt-bindings/mux/ti-serdes.h
22240 +new file mode 100644
22241 +index 0000000000000..146d0685a9251
22242 +--- /dev/null
22243 ++++ b/include/dt-bindings/mux/ti-serdes.h
22244 +@@ -0,0 +1,71 @@
22245 ++/* SPDX-License-Identifier: GPL-2.0 */
22246 ++/*
22247 ++ * This header provides constants for SERDES MUX for TI SoCs
22248 ++ */
22249 ++
22250 ++#ifndef _DT_BINDINGS_MUX_TI_SERDES
22251 ++#define _DT_BINDINGS_MUX_TI_SERDES
22252 ++
22253 ++/* J721E */
22254 ++
22255 ++#define J721E_SERDES0_LANE0_QSGMII_LANE1 0x0
22256 ++#define J721E_SERDES0_LANE0_PCIE0_LANE0 0x1
22257 ++#define J721E_SERDES0_LANE0_USB3_0_SWAP 0x2
22258 ++#define J721E_SERDES0_LANE0_IP4_UNUSED 0x3
22259 ++
22260 ++#define J721E_SERDES0_LANE1_QSGMII_LANE2 0x0
22261 ++#define J721E_SERDES0_LANE1_PCIE0_LANE1 0x1
22262 ++#define J721E_SERDES0_LANE1_USB3_0 0x2
22263 ++#define J721E_SERDES0_LANE1_IP4_UNUSED 0x3
22264 ++
22265 ++#define J721E_SERDES1_LANE0_QSGMII_LANE3 0x0
22266 ++#define J721E_SERDES1_LANE0_PCIE1_LANE0 0x1
22267 ++#define J721E_SERDES1_LANE0_USB3_1_SWAP 0x2
22268 ++#define J721E_SERDES1_LANE0_SGMII_LANE0 0x3
22269 ++
22270 ++#define J721E_SERDES1_LANE1_QSGMII_LANE4 0x0
22271 ++#define J721E_SERDES1_LANE1_PCIE1_LANE1 0x1
22272 ++#define J721E_SERDES1_LANE1_USB3_1 0x2
22273 ++#define J721E_SERDES1_LANE1_SGMII_LANE1 0x3
22274 ++
22275 ++#define J721E_SERDES2_LANE0_IP1_UNUSED 0x0
22276 ++#define J721E_SERDES2_LANE0_PCIE2_LANE0 0x1
22277 ++#define J721E_SERDES2_LANE0_USB3_1_SWAP 0x2
22278 ++#define J721E_SERDES2_LANE0_SGMII_LANE0 0x3
22279 ++
22280 ++#define J721E_SERDES2_LANE1_IP1_UNUSED 0x0
22281 ++#define J721E_SERDES2_LANE1_PCIE2_LANE1 0x1
22282 ++#define J721E_SERDES2_LANE1_USB3_1 0x2
22283 ++#define J721E_SERDES2_LANE1_SGMII_LANE1 0x3
22284 ++
22285 ++#define J721E_SERDES3_LANE0_IP1_UNUSED 0x0
22286 ++#define J721E_SERDES3_LANE0_PCIE3_LANE0 0x1
22287 ++#define J721E_SERDES3_LANE0_USB3_0_SWAP 0x2
22288 ++#define J721E_SERDES3_LANE0_IP4_UNUSED 0x3
22289 ++
22290 ++#define J721E_SERDES3_LANE1_IP1_UNUSED 0x0
22291 ++#define J721E_SERDES3_LANE1_PCIE3_LANE1 0x1
22292 ++#define J721E_SERDES3_LANE1_USB3_0 0x2
22293 ++#define J721E_SERDES3_LANE1_IP4_UNUSED 0x3
22294 ++
22295 ++#define J721E_SERDES4_LANE0_EDP_LANE0 0x0
22296 ++#define J721E_SERDES4_LANE0_IP2_UNUSED 0x1
22297 ++#define J721E_SERDES4_LANE0_QSGMII_LANE5 0x2
22298 ++#define J721E_SERDES4_LANE0_IP4_UNUSED 0x3
22299 ++
22300 ++#define J721E_SERDES4_LANE1_EDP_LANE1 0x0
22301 ++#define J721E_SERDES4_LANE1_IP2_UNUSED 0x1
22302 ++#define J721E_SERDES4_LANE1_QSGMII_LANE6 0x2
22303 ++#define J721E_SERDES4_LANE1_IP4_UNUSED 0x3
22304 ++
22305 ++#define J721E_SERDES4_LANE2_EDP_LANE2 0x0
22306 ++#define J721E_SERDES4_LANE2_IP2_UNUSED 0x1
22307 ++#define J721E_SERDES4_LANE2_QSGMII_LANE7 0x2
22308 ++#define J721E_SERDES4_LANE2_IP4_UNUSED 0x3
22309 ++
22310 ++#define J721E_SERDES4_LANE3_EDP_LANE3 0x0
22311 ++#define J721E_SERDES4_LANE3_IP2_UNUSED 0x1
22312 ++#define J721E_SERDES4_LANE3_QSGMII_LANE8 0x2
22313 ++#define J721E_SERDES4_LANE3_IP4_UNUSED 0x3
22314 ++
22315 ++#endif /* _DT_BINDINGS_MUX_TI_SERDES */
22316 +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
22317 +index 53c7bd568c5d4..5026b75db9725 100644
22318 +--- a/include/linux/bpf_verifier.h
22319 ++++ b/include/linux/bpf_verifier.h
22320 +@@ -358,6 +358,7 @@ struct bpf_subprog_info {
22321 + u32 start; /* insn idx of function entry point */
22322 + u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
22323 + u16 stack_depth; /* max. stack depth used by this function */
22324 ++ bool has_tail_call;
22325 + };
22326 +
22327 + /* single container for all structs
22328 +diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
22329 +index 6e87225600ae3..064870844f06c 100644
22330 +--- a/include/linux/dma-direct.h
22331 ++++ b/include/linux/dma-direct.h
22332 +@@ -62,9 +62,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
22333 + {
22334 + dma_addr_t end = addr + size - 1;
22335 +
22336 +- if (!dev->dma_mask)
22337 +- return false;
22338 +-
22339 + if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
22340 + min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
22341 + return false;
22342 +diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
22343 +index 6a584b3e5c74f..1130f271de669 100644
22344 +--- a/include/linux/lockdep.h
22345 ++++ b/include/linux/lockdep.h
22346 +@@ -512,19 +512,19 @@ static inline void print_irqtrace_events(struct task_struct *curr)
22347 + #define lock_map_release(l) lock_release(l, _THIS_IP_)
22348 +
22349 + #ifdef CONFIG_PROVE_LOCKING
22350 +-# define might_lock(lock) \
22351 ++# define might_lock(lock) \
22352 + do { \
22353 + typecheck(struct lockdep_map *, &(lock)->dep_map); \
22354 + lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
22355 + lock_release(&(lock)->dep_map, _THIS_IP_); \
22356 + } while (0)
22357 +-# define might_lock_read(lock) \
22358 ++# define might_lock_read(lock) \
22359 + do { \
22360 + typecheck(struct lockdep_map *, &(lock)->dep_map); \
22361 + lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
22362 + lock_release(&(lock)->dep_map, _THIS_IP_); \
22363 + } while (0)
22364 +-# define might_lock_nested(lock, subclass) \
22365 ++# define might_lock_nested(lock, subclass) \
22366 + do { \
22367 + typecheck(struct lockdep_map *, &(lock)->dep_map); \
22368 + lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \
22369 +@@ -534,44 +534,39 @@ do { \
22370 +
22371 + DECLARE_PER_CPU(int, hardirqs_enabled);
22372 + DECLARE_PER_CPU(int, hardirq_context);
22373 ++DECLARE_PER_CPU(unsigned int, lockdep_recursion);
22374 +
22375 +-/*
22376 +- * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
22377 +- * per-cpu variables. This is required because this_cpu_read() will potentially
22378 +- * call into preempt/irq-disable and that obviously isn't right. This is also
22379 +- * correct because when IRQs are enabled, it doesn't matter if we accidentally
22380 +- * read the value from our previous CPU.
22381 +- */
22382 ++#define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion))
22383 +
22384 + #define lockdep_assert_irqs_enabled() \
22385 + do { \
22386 +- WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled)); \
22387 ++ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
22388 + } while (0)
22389 +
22390 + #define lockdep_assert_irqs_disabled() \
22391 + do { \
22392 +- WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled)); \
22393 ++ WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
22394 + } while (0)
22395 +
22396 + #define lockdep_assert_in_irq() \
22397 + do { \
22398 +- WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context)); \
22399 ++ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
22400 + } while (0)
22401 +
22402 + #define lockdep_assert_preemption_enabled() \
22403 + do { \
22404 + WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
22405 +- debug_locks && \
22406 ++ __lockdep_enabled && \
22407 + (preempt_count() != 0 || \
22408 +- !raw_cpu_read(hardirqs_enabled))); \
22409 ++ !this_cpu_read(hardirqs_enabled))); \
22410 + } while (0)
22411 +
22412 + #define lockdep_assert_preemption_disabled() \
22413 + do { \
22414 + WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
22415 +- debug_locks && \
22416 ++ __lockdep_enabled && \
22417 + (preempt_count() == 0 && \
22418 +- raw_cpu_read(hardirqs_enabled))); \
22419 ++ this_cpu_read(hardirqs_enabled))); \
22420 + } while (0)
22421 +
22422 + #else
22423 +diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
22424 +index bb35b449f5330..9a1fd49df17f6 100644
22425 +--- a/include/linux/lockdep_types.h
22426 ++++ b/include/linux/lockdep_types.h
22427 +@@ -35,8 +35,12 @@ enum lockdep_wait_type {
22428 + /*
22429 + * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
22430 + * the total number of states... :-(
22431 ++ *
22432 ++ * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h; for each
22433 ++ * of those we generate 4 states. Additionally we report on USED and USED_READ.
22434 + */
22435 +-#define XXX_LOCK_USAGE_STATES (1+2*4)
22436 ++#define XXX_LOCK_USAGE_STATES 2
22437 ++#define LOCK_TRACE_STATES (XXX_LOCK_USAGE_STATES*4 + 2)
22438 +
22439 + /*
22440 + * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
22441 +@@ -106,7 +110,7 @@ struct lock_class {
22442 + * IRQ/softirq usage tracking bits:
22443 + */
22444 + unsigned long usage_mask;
22445 +- const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES];
22446 ++ const struct lock_trace *usage_traces[LOCK_TRACE_STATES];
22447 +
22448 + /*
22449 + * Generation counter, when doing certain classes of graph walking,
22450 +diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
22451 +index 05eea1aef5aa0..ea35157974187 100644
22452 +--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
22453 ++++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
22454 +@@ -28,8 +28,7 @@
22455 + * bit 16-27: update value
22456 + * bit 31: 1 - update, 0 - no update
22457 + */
22458 +-#define CMDQ_WFE_OPTION (CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \
22459 +- CMDQ_WFE_WAIT_VALUE)
22460 ++#define CMDQ_WFE_OPTION (CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE)
22461 +
22462 + /** cmdq event maximum */
22463 + #define CMDQ_MAX_EVENT 0x3ff
22464 +diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h
22465 +new file mode 100644
22466 +index 0000000000000..f5ba0fbff72fe
22467 +--- /dev/null
22468 ++++ b/include/linux/nfs_ssc.h
22469 +@@ -0,0 +1,67 @@
22470 ++/* SPDX-License-Identifier: GPL-2.0 */
22471 ++/*
22472 ++ * include/linux/nfs_ssc.h
22473 ++ *
22474 ++ * Author: Dai Ngo <dai.ngo@××××××.com>
22475 ++ *
22476 ++ * Copyright (c) 2020, Oracle and/or its affiliates.
22477 ++ */
22478 ++
22479 ++#include <linux/nfs_fs.h>
22480 ++
22481 ++extern struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl;
22482 ++
22483 ++/*
22484 ++ * NFS_V4
22485 ++ */
22486 ++struct nfs4_ssc_client_ops {
22487 ++ struct file *(*sco_open)(struct vfsmount *ss_mnt,
22488 ++ struct nfs_fh *src_fh, nfs4_stateid *stateid);
22489 ++ void (*sco_close)(struct file *filep);
22490 ++};
22491 ++
22492 ++/*
22493 ++ * NFS_FS
22494 ++ */
22495 ++struct nfs_ssc_client_ops {
22496 ++ void (*sco_sb_deactive)(struct super_block *sb);
22497 ++};
22498 ++
22499 ++struct nfs_ssc_client_ops_tbl {
22500 ++ const struct nfs4_ssc_client_ops *ssc_nfs4_ops;
22501 ++ const struct nfs_ssc_client_ops *ssc_nfs_ops;
22502 ++};
22503 ++
22504 ++extern void nfs42_ssc_register_ops(void);
22505 ++extern void nfs42_ssc_unregister_ops(void);
22506 ++
22507 ++extern void nfs42_ssc_register(const struct nfs4_ssc_client_ops *ops);
22508 ++extern void nfs42_ssc_unregister(const struct nfs4_ssc_client_ops *ops);
22509 ++
22510 ++#ifdef CONFIG_NFSD_V4_2_INTER_SSC
22511 ++static inline struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
22512 ++ struct nfs_fh *src_fh, nfs4_stateid *stateid)
22513 ++{
22514 ++ if (nfs_ssc_client_tbl.ssc_nfs4_ops)
22515 ++ return (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_open)(ss_mnt, src_fh, stateid);
22516 ++ return ERR_PTR(-EIO);
22517 ++}
22518 ++
22519 ++static inline void nfs42_ssc_close(struct file *filep)
22520 ++{
22521 ++ if (nfs_ssc_client_tbl.ssc_nfs4_ops)
22522 ++ (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep);
22523 ++}
22524 ++#endif
22525 ++
22526 ++/*
22527 ++ * NFS_FS
22528 ++ */
22529 ++extern void nfs_ssc_register(const struct nfs_ssc_client_ops *ops);
22530 ++extern void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops);
22531 ++
22532 ++static inline void nfs_do_sb_deactive(struct super_block *sb)
22533 ++{
22534 ++ if (nfs_ssc_client_tbl.ssc_nfs_ops)
22535 ++ (*nfs_ssc_client_tbl.ssc_nfs_ops->sco_sb_deactive)(sb);
22536 ++}
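The header above decouples provider and consumer through a global ops table with NULL-checked dispatch, so calls degrade to no-ops while the provider module is unloaded. A minimal sketch of that registration pattern with hypothetical names (not the NFS API):

	#include <stddef.h>
	#include <stdio.h>

	struct provider_ops {
		void (*deactivate)(const char *what);
	};

	static struct {
		const struct provider_ops *ops;
	} client_tbl;

	static void provider_deactivate(const char *what)
	{
		printf("deactivating %s\n", what);
	}

	static const struct provider_ops my_ops = {
		.deactivate = provider_deactivate,
	};

	static void provider_register(void)   { client_tbl.ops = &my_ops; }
	static void provider_unregister(void) { client_tbl.ops = NULL; }

	static void do_deactivate(const char *what)
	{
		if (client_tbl.ops)				/* no-op if unregistered */
			client_tbl.ops->deactivate(what);
	}

	int main(void)
	{
		do_deactivate("sb0");	/* silently skipped: not registered */
		provider_register();
		do_deactivate("sb0");	/* dispatched through the table */
		provider_unregister();
		return 0;
	}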
22537 +diff --git a/include/linux/notifier.h b/include/linux/notifier.h
22538 +index 018947611483e..2fb373a5c1ede 100644
22539 +--- a/include/linux/notifier.h
22540 ++++ b/include/linux/notifier.h
22541 +@@ -161,20 +161,19 @@ extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
22542 +
22543 + extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
22544 + unsigned long val, void *v);
22545 +-extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
22546 +- unsigned long val, void *v, int nr_to_call, int *nr_calls);
22547 + extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
22548 + unsigned long val, void *v);
22549 +-extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
22550 +- unsigned long val, void *v, int nr_to_call, int *nr_calls);
22551 + extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
22552 + unsigned long val, void *v);
22553 +-extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
22554 +- unsigned long val, void *v, int nr_to_call, int *nr_calls);
22555 + extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
22556 + unsigned long val, void *v);
22557 +-extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
22558 +- unsigned long val, void *v, int nr_to_call, int *nr_calls);
22559 ++
22560 ++extern int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh,
22561 ++ unsigned long val_up, unsigned long val_down, void *v);
22562 ++extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
22563 ++ unsigned long val_up, unsigned long val_down, void *v);
22564 ++extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
22565 ++ unsigned long val_up, unsigned long val_down, void *v);
22566 +
22567 + #define NOTIFY_DONE 0x0000 /* Don't care */
22568 + #define NOTIFY_OK 0x0001 /* Suits me */
22569 +diff --git a/include/linux/oom.h b/include/linux/oom.h
22570 +index f022f581ac29d..2db9a14325112 100644
22571 +--- a/include/linux/oom.h
22572 ++++ b/include/linux/oom.h
22573 +@@ -55,6 +55,7 @@ struct oom_control {
22574 + };
22575 +
22576 + extern struct mutex oom_lock;
22577 ++extern struct mutex oom_adj_mutex;
22578 +
22579 + static inline void set_current_oom_origin(void)
22580 + {
22581 +diff --git a/include/linux/overflow.h b/include/linux/overflow.h
22582 +index 93fcef105061b..ff3c48f0abc5b 100644
22583 +--- a/include/linux/overflow.h
22584 ++++ b/include/linux/overflow.h
22585 +@@ -3,6 +3,7 @@
22586 + #define __LINUX_OVERFLOW_H
22587 +
22588 + #include <linux/compiler.h>
22589 ++#include <linux/limits.h>
22590 +
22591 + /*
22592 + * In the fallback code below, we need to compute the minimum and
22593 +diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
22594 +index 8679ccd722e89..3468794f83d23 100644
22595 +--- a/include/linux/page_owner.h
22596 ++++ b/include/linux/page_owner.h
22597 +@@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops;
22598 + extern void __reset_page_owner(struct page *page, unsigned int order);
22599 + extern void __set_page_owner(struct page *page,
22600 + unsigned int order, gfp_t gfp_mask);
22601 +-extern void __split_page_owner(struct page *page, unsigned int order);
22602 ++extern void __split_page_owner(struct page *page, unsigned int nr);
22603 + extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
22604 + extern void __set_page_owner_migrate_reason(struct page *page, int reason);
22605 + extern void __dump_page_owner(struct page *page);
22606 +@@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page,
22607 + __set_page_owner(page, order, gfp_mask);
22608 + }
22609 +
22610 +-static inline void split_page_owner(struct page *page, unsigned int order)
22611 ++static inline void split_page_owner(struct page *page, unsigned int nr)
22612 + {
22613 + if (static_branch_unlikely(&page_owner_inited))
22614 +- __split_page_owner(page, order);
22615 ++ __split_page_owner(page, nr);
22616 + }
22617 + static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
22618 + {
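A hedged note on the signature change above: callers used to hand __split_page_owner() the page order and now pass the page count directly, the two being related by nr = 1 << order:

	#include <stdio.h>

	int main(void)
	{
		unsigned int order;

		/* order-to-page-count mapping that callers now compute themselves */
		for (order = 0; order <= 4; order++)
			printf("order %u -> nr %u\n", order, 1u << order);
		return 0;
	}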
22619 +diff --git a/include/linux/pci.h b/include/linux/pci.h
22620 +index 835530605c0d7..3ff723124ca7f 100644
22621 +--- a/include/linux/pci.h
22622 ++++ b/include/linux/pci.h
22623 +@@ -445,6 +445,7 @@ struct pci_dev {
22624 + unsigned int is_probed:1; /* Device probing in progress */
22625 + unsigned int link_active_reporting:1;/* Device capable of reporting link active */
22626 + unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
22627 ++ unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
22628 + pci_dev_flags_t dev_flags;
22629 + atomic_t enable_cnt; /* pci_enable_device has been called */
22630 +
22631 +diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
22632 +index fbbeb2f6189b8..b34a094b2258d 100644
22633 +--- a/include/linux/platform_data/dma-dw.h
22634 ++++ b/include/linux/platform_data/dma-dw.h
22635 +@@ -26,6 +26,7 @@ struct device;
22636 + * @dst_id: dst request line
22637 + * @m_master: memory master for transfers on allocated channel
22638 + * @p_master: peripheral master for transfers on allocated channel
22639 ++ * @channels: mask of the channels permitted for allocation (zero value means any)
22640 + * @hs_polarity: set active-low polarity of handshake interface
22641 + */
22642 + struct dw_dma_slave {
22643 +@@ -34,6 +35,7 @@ struct dw_dma_slave {
22644 + u8 dst_id;
22645 + u8 m_master;
22646 + u8 p_master;
22647 ++ u8 channels;
22648 + bool hs_polarity;
22649 + };
22650 +
22651 +diff --git a/include/linux/prandom.h b/include/linux/prandom.h
22652 +index aa16e6468f91e..cc1e71334e53c 100644
22653 +--- a/include/linux/prandom.h
22654 ++++ b/include/linux/prandom.h
22655 +@@ -16,12 +16,44 @@ void prandom_bytes(void *buf, size_t nbytes);
22656 + void prandom_seed(u32 seed);
22657 + void prandom_reseed_late(void);
22658 +
22659 ++#if BITS_PER_LONG == 64
22660 ++/*
22661 ++ * The core SipHash round function. Each line can be executed in
22662 ++ * parallel given enough CPU resources.
22663 ++ */
22664 ++#define PRND_SIPROUND(v0, v1, v2, v3) ( \
22665 ++ v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
22666 ++ v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
22667 ++ v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
22668 ++ v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
22669 ++)
22670 ++
22671 ++#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
22672 ++#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
22673 ++
22674 ++#elif BITS_PER_LONG == 32
22675 ++/*
22676 ++ * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash.
22677 ++ * This is weaker, but 32-bit machines are not used for high-traffic
22678 ++ * applications, so there is less output for an attacker to analyze.
22679 ++ */
22680 ++#define PRND_SIPROUND(v0, v1, v2, v3) ( \
22681 ++ v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \
22682 ++ v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
22683 ++ v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \
22684 ++ v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \
22685 ++)
22686 ++#define PRND_K0 0x6c796765
22687 ++#define PRND_K1 0x74656462
22688 ++
22689 ++#else
22690 ++#error Unsupported BITS_PER_LONG
22691 ++#endif
22692 ++
22693 + struct rnd_state {
22694 + __u32 s1, s2, s3, s4;
22695 + };
22696 +
22697 +-DECLARE_PER_CPU(struct rnd_state, net_rand_state);
22698 +-
22699 + u32 prandom_u32_state(struct rnd_state *state);
22700 + void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
22701 + void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
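PRND_SIPROUND is the standard SipHash ARX round (HSipHash on 32-bit). A standalone demonstration of the 64-bit variant, with rol64() reimplemented locally since this sketch runs outside the kernel:

	#include <stdint.h>
	#include <stdio.h>

	static inline uint64_t rol64(uint64_t w, unsigned int s)
	{
		return (w << s) | (w >> (64 - s));
	}

	/* Same round as the patch adds to prandom.h: four 64-bit lanes
	 * mixed by add-rotate-xor operations. */
	#define PRND_SIPROUND(v0, v1, v2, v3) ( \
		v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
		v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
		v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
		v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
	)

	int main(void)
	{
		/* lanes seeded with the classic SipHash constants */
		uint64_t v0 = 0x736f6d6570736575ULL, v1 = 0x646f72616e646f6dULL;
		uint64_t v2 = 0x6c7967656e657261ULL, v3 = 0x7465646279746573ULL;

		PRND_SIPROUND(v0, v1, v2, v3);
		PRND_SIPROUND(v0, v1, v2, v3);
		printf("%016llx\n", (unsigned long long)(v0 ^ v1 ^ v2 ^ v3));
		return 0;
	}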
22702 +diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
22703 +index ecdc6542070f1..dfd82eab29025 100644
22704 +--- a/include/linux/sched/coredump.h
22705 ++++ b/include/linux/sched/coredump.h
22706 +@@ -72,6 +72,7 @@ static inline int get_dumpable(struct mm_struct *mm)
22707 + #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
22708 + #define MMF_OOM_VICTIM 25 /* mm is the oom victim */
22709 + #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
22710 ++#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
22711 + #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
22712 +
22713 + #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
22714 +diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
22715 +index 962d9768945f0..7b99e3dba2065 100644
22716 +--- a/include/linux/seqlock.h
22717 ++++ b/include/linux/seqlock.h
22718 +@@ -154,6 +154,19 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
22719 + * @lock: Pointer to the associated LOCKTYPE
22720 + */
22721 +
22722 ++#define seqcount_LOCKNAME_init(s, _lock, lockname) \
22723 ++ do { \
22724 ++ seqcount_##lockname##_t *____s = (s); \
22725 ++ seqcount_init(&____s->seqcount); \
22726 ++ __SEQ_LOCK(____s->lock = (_lock)); \
22727 ++ } while (0)
22728 ++
22729 ++#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
22730 ++#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
22731 ++#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock);
22732 ++#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex);
22733 ++#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex);
22734 ++
22735 + /*
22736 + * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
22737 + * @locktype: actual typename
22738 +@@ -167,13 +180,6 @@ typedef struct seqcount_##lockname { \
22739 + __SEQ_LOCK(locktype *lock); \
22740 + } seqcount_##lockname##_t; \
22741 + \
22742 +-static __always_inline void \
22743 +-seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
22744 +-{ \
22745 +- seqcount_init(&s->seqcount); \
22746 +- __SEQ_LOCK(s->lock = lock); \
22747 +-} \
22748 +- \
22749 + static __always_inline seqcount_t * \
22750 + __seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
22751 + { \
22752 +@@ -228,13 +234,12 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)
22753 + __SEQ_LOCK(.lock = (assoc_lock)) \
22754 + }
22755 +
22756 +-#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
22757 + #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
22758 ++#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
22759 + #define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
22760 + #define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
22761 + #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
22762 +
22763 +-
22764 + #define __seqprop_case(s, lockname, prop) \
22765 + seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
22766 +
22767 +diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
22768 +index 2249ecaf77e42..76a3075077533 100644
22769 +--- a/include/linux/soc/mediatek/mtk-cmdq.h
22770 ++++ b/include/linux/soc/mediatek/mtk-cmdq.h
22771 +@@ -105,11 +105,12 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
22772 + /**
22773 + * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet
22774 + * @pkt: the CMDQ packet
22775 +- * @event: the desired event type to "wait and CLEAR"
22776 ++ * @event: the desired event type to wait for
22777 ++ * @clear: whether to clear the event after it arrives
22778 + *
22779 + * Return: 0 for success; else the error code is returned
22780 + */
22781 +-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
22782 ++int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear);
22783 +
22784 + /**
22785 + * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet
22786 +diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
22787 +index 0d3920896d502..716db4a0fed89 100644
22788 +--- a/include/net/netfilter/nf_log.h
22789 ++++ b/include/net/netfilter/nf_log.h
22790 +@@ -108,6 +108,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
22791 + unsigned int logflags);
22792 + void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
22793 + struct sock *sk);
22794 ++void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb);
22795 + void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
22796 + unsigned int hooknum, const struct sk_buff *skb,
22797 + const struct net_device *in,
22798 +diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
22799 +index e1057b255f69a..879fe8cff5819 100644
22800 +--- a/include/net/tc_act/tc_tunnel_key.h
22801 ++++ b/include/net/tc_act/tc_tunnel_key.h
22802 +@@ -56,7 +56,10 @@ static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a)
22803 + {
22804 + #ifdef CONFIG_NET_CLS_ACT
22805 + struct tcf_tunnel_key *t = to_tunnel_key(a);
22806 +- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
22807 ++ struct tcf_tunnel_key_params *params;
22808 ++
22809 ++ params = rcu_dereference_protected(t->params,
22810 ++ lockdep_is_held(&a->tcfa_lock));
22811 +
22812 + return &params->tcft_enc_metadata->u.tun_info;
22813 + #else
22814 +diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
22815 +index 71f573a418bf0..07a764eb692ee 100644
22816 +--- a/include/rdma/ib_umem.h
22817 ++++ b/include/rdma/ib_umem.h
22818 +@@ -68,10 +68,11 @@ static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offs
22819 + size_t length) {
22820 + return -EINVAL;
22821 + }
22822 +-static inline int ib_umem_find_best_pgsz(struct ib_umem *umem,
22823 +- unsigned long pgsz_bitmap,
22824 +- unsigned long virt) {
22825 +- return -EINVAL;
22826 ++static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
22827 ++ unsigned long pgsz_bitmap,
22828 ++ unsigned long virt)
22829 ++{
22830 ++ return 0;
22831 + }
22832 +
22833 + #endif /* CONFIG_INFINIBAND_USER_MEM */
22834 +diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
22835 +index c0b2fa7e9b959..5b4f0efc4241f 100644
22836 +--- a/include/rdma/ib_verbs.h
22837 ++++ b/include/rdma/ib_verbs.h
22838 +@@ -2439,7 +2439,7 @@ struct ib_device_ops {
22839 + int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
22840 + struct ib_udata *udata);
22841 + int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
22842 +- void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
22843 ++ int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
22844 + int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
22845 + struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
22846 + struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
22847 +@@ -2468,7 +2468,7 @@ struct ib_device_ops {
22848 + int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
22849 + int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
22850 + int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
22851 +- void (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
22852 ++ int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
22853 + struct ib_flow *(*create_flow)(struct ib_qp *qp,
22854 + struct ib_flow_attr *flow_attr,
22855 + int domain, struct ib_udata *udata);
22856 +@@ -2496,7 +2496,7 @@ struct ib_device_ops {
22857 + struct ib_wq *(*create_wq)(struct ib_pd *pd,
22858 + struct ib_wq_init_attr *init_attr,
22859 + struct ib_udata *udata);
22860 +- void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
22861 ++ int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
22862 + int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
22863 + u32 wq_attr_mask, struct ib_udata *udata);
22864 + struct ib_rwq_ind_table *(*create_rwq_ind_table)(
22865 +@@ -3817,46 +3817,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
22866 + return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
22867 + }
22868 +
22869 +-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
22870 +- int nr_cqe, int comp_vector,
22871 +- enum ib_poll_context poll_ctx,
22872 +- const char *caller, struct ib_udata *udata);
22873 +-
22874 +-/**
22875 +- * ib_alloc_cq_user: Allocate kernel/user CQ
22876 +- * @dev: The IB device
22877 +- * @private: Private data attached to the CQE
22878 +- * @nr_cqe: Number of CQEs in the CQ
22879 +- * @comp_vector: Completion vector used for the IRQs
22880 +- * @poll_ctx: Context used for polling the CQ
22881 +- * @udata: Valid user data or NULL for kernel objects
22882 +- */
22883 +-static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
22884 +- void *private, int nr_cqe,
22885 +- int comp_vector,
22886 +- enum ib_poll_context poll_ctx,
22887 +- struct ib_udata *udata)
22888 +-{
22889 +- return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
22890 +- KBUILD_MODNAME, udata);
22891 +-}
22892 +-
22893 +-/**
22894 +- * ib_alloc_cq: Allocate kernel CQ
22895 +- * @dev: The IB device
22896 +- * @private: Private data attached to the CQE
22897 +- * @nr_cqe: Number of CQEs in the CQ
22898 +- * @comp_vector: Completion vector used for the IRQs
22899 +- * @poll_ctx: Context used for polling the CQ
22900 +- *
22901 +- * NOTE: for user cq use ib_alloc_cq_user with valid udata!
22902 +- */
22903 ++struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
22904 ++ int comp_vector, enum ib_poll_context poll_ctx,
22905 ++ const char *caller);
22906 + static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
22907 + int nr_cqe, int comp_vector,
22908 + enum ib_poll_context poll_ctx)
22909 + {
22910 +- return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
22911 +- NULL);
22912 ++ return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
22913 ++ KBUILD_MODNAME);
22914 + }
22915 +
22916 + struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
22917 +@@ -3878,26 +3847,7 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
22918 + KBUILD_MODNAME);
22919 + }
22920 +
22921 +-/**
22922 +- * ib_free_cq_user - Free kernel/user CQ
22923 +- * @cq: The CQ to free
22924 +- * @udata: Valid user data or NULL for kernel objects
22925 +- *
22926 +- * NOTE: This function shouldn't be called on shared CQs.
22927 +- */
22928 +-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
22929 +-
22930 +-/**
22931 +- * ib_free_cq - Free kernel CQ
22932 +- * @cq: The CQ to free
22933 +- *
22934 +- * NOTE: for user cq use ib_free_cq_user with valid udata!
22935 +- */
22936 +-static inline void ib_free_cq(struct ib_cq *cq)
22937 +-{
22938 +- ib_free_cq_user(cq, NULL);
22939 +-}
22940 +-
22941 ++void ib_free_cq(struct ib_cq *cq);
22942 + int ib_process_cq_direct(struct ib_cq *cq, int budget);
22943 +
22944 + /**
22945 +@@ -3955,7 +3905,9 @@ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
22946 + */
22947 + static inline void ib_destroy_cq(struct ib_cq *cq)
22948 + {
22949 +- ib_destroy_cq_user(cq, NULL);
22950 ++ int ret = ib_destroy_cq_user(cq, NULL);
22951 ++
22952 ++ WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
22953 + }
22954 +
22955 + /**
22956 +@@ -4379,7 +4331,7 @@ struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
22957 +
22958 + struct ib_wq *ib_create_wq(struct ib_pd *pd,
22959 + struct ib_wq_init_attr *init_attr);
22960 +-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
22961 ++int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
22962 + int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
22963 + u32 wq_attr_mask);
22964 + int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
22965 +diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
22966 +index 731ac09ed2313..5b567b43e1b16 100644
22967 +--- a/include/scsi/scsi_common.h
22968 ++++ b/include/scsi/scsi_common.h
22969 +@@ -25,6 +25,13 @@ scsi_command_size(const unsigned char *cmnd)
22970 + scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
22971 + }
22972 +
22973 ++static inline unsigned char
22974 ++scsi_command_control(const unsigned char *cmnd)
22975 ++{
22976 ++ return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
22977 ++ cmnd[1] : cmnd[COMMAND_SIZE(cmnd[0]) - 1];
22978 ++}
22979 ++
22980 + /* Returns a human-readable name for the device */
22981 + extern const char *scsi_device_type(unsigned type);
22982 +
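The new scsi_command_control() helper reads the CONTROL byte from a CDB: byte 1 for variable-length commands, the last byte for fixed-length ones. A rough userspace illustration, with COMMAND_SIZE() approximated by the usual SCSI group-code length table (the kernel's definitions live in scsi/scsi_proto.h):

	#include <stdio.h>

	#define VARIABLE_LENGTH_CMD 0x7f

	/* crude stand-in for the kernel's COMMAND_SIZE() group-code table */
	static int command_size(unsigned char opcode)
	{
		static const int group_len[8] = { 6, 10, 10, 12, 16, 12, 10, 10 };

		return group_len[(opcode >> 5) & 7];
	}

	static unsigned char command_control(const unsigned char *cdb)
	{
		return cdb[0] == VARIABLE_LENGTH_CMD ?
			cdb[1] : cdb[command_size(cdb[0]) - 1];
	}

	int main(void)
	{
		/* READ(10): 10-byte CDB, CONTROL byte last (NACA bit set here) */
		unsigned char read10[10] = { 0x28, [9] = 0x04 };

		printf("control byte: 0x%02x\n", command_control(read10));
		return 0;
	}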
22983 +diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
22984 +index 0fea49bfc5e86..73827b7d17e00 100644
22985 +--- a/include/sound/hda_codec.h
22986 ++++ b/include/sound/hda_codec.h
22987 +@@ -253,6 +253,7 @@ struct hda_codec {
22988 + unsigned int force_pin_prefix:1; /* Add location prefix */
22989 + unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
22990 + unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
22991 ++ unsigned int forced_resume:1; /* forced resume for jack */
22992 + unsigned int mst_no_extra_pcms:1; /* no backup PCMs for DP-MST */
22993 +
22994 + #ifdef CONFIG_PM
22995 +diff --git a/include/trace/events/target.h b/include/trace/events/target.h
22996 +index 77408edd29d2a..67fad2677ed55 100644
22997 +--- a/include/trace/events/target.h
22998 ++++ b/include/trace/events/target.h
22999 +@@ -141,6 +141,7 @@ TRACE_EVENT(target_sequencer_start,
23000 + __field( unsigned int, opcode )
23001 + __field( unsigned int, data_length )
23002 + __field( unsigned int, task_attribute )
23003 ++ __field( unsigned char, control )
23004 + __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
23005 + __string( initiator, cmd->se_sess->se_node_acl->initiatorname )
23006 + ),
23007 +@@ -151,6 +152,7 @@ TRACE_EVENT(target_sequencer_start,
23008 + __entry->opcode = cmd->t_task_cdb[0];
23009 + __entry->data_length = cmd->data_length;
23010 + __entry->task_attribute = cmd->sam_task_attr;
23011 ++ __entry->control = scsi_command_control(cmd->t_task_cdb);
23012 + memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
23013 + __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
23014 + ),
23015 +@@ -160,9 +162,7 @@ TRACE_EVENT(target_sequencer_start,
23016 + __entry->tag, show_opcode_name(__entry->opcode),
23017 + __entry->data_length, __print_hex(__entry->cdb, 16),
23018 + show_task_attribute_name(__entry->task_attribute),
23019 +- scsi_command_size(__entry->cdb) <= 16 ?
23020 +- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
23021 +- __entry->cdb[1]
23022 ++ __entry->control
23023 + )
23024 + );
23025 +
23026 +@@ -178,6 +178,7 @@ TRACE_EVENT(target_cmd_complete,
23027 + __field( unsigned int, opcode )
23028 + __field( unsigned int, data_length )
23029 + __field( unsigned int, task_attribute )
23030 ++ __field( unsigned char, control )
23031 + __field( unsigned char, scsi_status )
23032 + __field( unsigned char, sense_length )
23033 + __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
23034 +@@ -191,6 +192,7 @@ TRACE_EVENT(target_cmd_complete,
23035 + __entry->opcode = cmd->t_task_cdb[0];
23036 + __entry->data_length = cmd->data_length;
23037 + __entry->task_attribute = cmd->sam_task_attr;
23038 ++ __entry->control = scsi_command_control(cmd->t_task_cdb);
23039 + __entry->scsi_status = cmd->scsi_status;
23040 + __entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ?
23041 + min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0;
23042 +@@ -208,9 +210,7 @@ TRACE_EVENT(target_cmd_complete,
23043 + show_opcode_name(__entry->opcode),
23044 + __entry->data_length, __print_hex(__entry->cdb, 16),
23045 + show_task_attribute_name(__entry->task_attribute),
23046 +- scsi_command_size(__entry->cdb) <= 16 ?
23047 +- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
23048 +- __entry->cdb[1]
23049 ++ __entry->control
23050 + )
23051 + );
23052 +
23053 +diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
23054 +index f9701410d3b52..57a222014cd20 100644
23055 +--- a/include/uapi/linux/pci_regs.h
23056 ++++ b/include/uapi/linux/pci_regs.h
23057 +@@ -76,6 +76,7 @@
23058 + #define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
23059 + #define PCI_LATENCY_TIMER 0x0d /* 8 bits */
23060 + #define PCI_HEADER_TYPE 0x0e /* 8 bits */
23061 ++#define PCI_HEADER_TYPE_MASK 0x7f
23062 + #define PCI_HEADER_TYPE_NORMAL 0
23063 + #define PCI_HEADER_TYPE_BRIDGE 1
23064 + #define PCI_HEADER_TYPE_CARDBUS 2
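PCI_HEADER_TYPE_MASK exists because bit 7 of the header-type register is the multi-function flag; the type code must be masked before comparing against the NORMAL/BRIDGE/CARDBUS values. A small illustration:

	#include <stdio.h>

	#define PCI_HEADER_TYPE_MASK   0x7f
	#define PCI_HEADER_TYPE_NORMAL 0

	int main(void)
	{
		unsigned char reg = 0x80;	/* multi-function, type 0 */

		printf("type=%d multifunction=%d\n",
		       reg & PCI_HEADER_TYPE_MASK, !!(reg & 0x80));
		return (reg & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_NORMAL ? 0 : 1;
	}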
23065 +diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
23066 +index 077e7ee69e3d8..b95d3c485d27e 100644
23067 +--- a/include/uapi/linux/perf_event.h
23068 ++++ b/include/uapi/linux/perf_event.h
23069 +@@ -1196,7 +1196,7 @@ union perf_mem_data_src {
23070 +
23071 + #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
23072 + /* 1 free */
23073 +-#define PERF_MEM_SNOOPX_SHIFT 37
23074 ++#define PERF_MEM_SNOOPX_SHIFT 38
23075 +
23076 + /* locked instruction */
23077 + #define PERF_MEM_LOCK_NA 0x01 /* not available */
23078 +diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
23079 +index b367430e611c7..3d897de890612 100644
23080 +--- a/kernel/bpf/percpu_freelist.c
23081 ++++ b/kernel/bpf/percpu_freelist.c
23082 +@@ -17,6 +17,8 @@ int pcpu_freelist_init(struct pcpu_freelist *s)
23083 + raw_spin_lock_init(&head->lock);
23084 + head->first = NULL;
23085 + }
23086 ++ raw_spin_lock_init(&s->extralist.lock);
23087 ++ s->extralist.first = NULL;
23088 + return 0;
23089 + }
23090 +
23091 +@@ -40,12 +42,50 @@ static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
23092 + raw_spin_unlock(&head->lock);
23093 + }
23094 +
23095 ++static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
23096 ++ struct pcpu_freelist_node *node)
23097 ++{
23098 ++ if (!raw_spin_trylock(&s->extralist.lock))
23099 ++ return false;
23100 ++
23101 ++ pcpu_freelist_push_node(&s->extralist, node);
23102 ++ raw_spin_unlock(&s->extralist.lock);
23103 ++ return true;
23104 ++}
23105 ++
23106 ++static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
23107 ++ struct pcpu_freelist_node *node)
23108 ++{
23109 ++ int cpu, orig_cpu;
23110 ++
23111 ++ orig_cpu = cpu = raw_smp_processor_id();
23112 ++ while (1) {
23113 ++ struct pcpu_freelist_head *head;
23114 ++
23115 ++ head = per_cpu_ptr(s->freelist, cpu);
23116 ++ if (raw_spin_trylock(&head->lock)) {
23117 ++ pcpu_freelist_push_node(head, node);
23118 ++ raw_spin_unlock(&head->lock);
23119 ++ return;
23120 ++ }
23121 ++ cpu = cpumask_next(cpu, cpu_possible_mask);
23122 ++ if (cpu >= nr_cpu_ids)
23123 ++ cpu = 0;
23124 ++
23125 ++ /* cannot lock any per cpu lock, try extralist */
23126 ++ if (cpu == orig_cpu &&
23127 ++ pcpu_freelist_try_push_extra(s, node))
23128 ++ return;
23129 ++ }
23130 ++}
23131 ++
23132 + void __pcpu_freelist_push(struct pcpu_freelist *s,
23133 + struct pcpu_freelist_node *node)
23134 + {
23135 +- struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
23136 +-
23137 +- ___pcpu_freelist_push(head, node);
23138 ++ if (in_nmi())
23139 ++ ___pcpu_freelist_push_nmi(s, node);
23140 ++ else
23141 ++ ___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
23142 + }
23143 +
23144 + void pcpu_freelist_push(struct pcpu_freelist *s,
23145 +@@ -81,7 +121,7 @@ again:
23146 + }
23147 + }
23148 +
23149 +-struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
23150 ++static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
23151 + {
23152 + struct pcpu_freelist_head *head;
23153 + struct pcpu_freelist_node *node;
23154 +@@ -102,8 +142,59 @@ struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
23155 + if (cpu >= nr_cpu_ids)
23156 + cpu = 0;
23157 + if (cpu == orig_cpu)
23158 +- return NULL;
23159 ++ break;
23160 ++ }
23161 ++
23162 ++ /* per cpu lists are all empty, try extralist */
23163 ++ raw_spin_lock(&s->extralist.lock);
23164 ++ node = s->extralist.first;
23165 ++ if (node)
23166 ++ s->extralist.first = node->next;
23167 ++ raw_spin_unlock(&s->extralist.lock);
23168 ++ return node;
23169 ++}
23170 ++
23171 ++static struct pcpu_freelist_node *
23172 ++___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
23173 ++{
23174 ++ struct pcpu_freelist_head *head;
23175 ++ struct pcpu_freelist_node *node;
23176 ++ int orig_cpu, cpu;
23177 ++
23178 ++ orig_cpu = cpu = raw_smp_processor_id();
23179 ++ while (1) {
23180 ++ head = per_cpu_ptr(s->freelist, cpu);
23181 ++ if (raw_spin_trylock(&head->lock)) {
23182 ++ node = head->first;
23183 ++ if (node) {
23184 ++ head->first = node->next;
23185 ++ raw_spin_unlock(&head->lock);
23186 ++ return node;
23187 ++ }
23188 ++ raw_spin_unlock(&head->lock);
23189 ++ }
23190 ++ cpu = cpumask_next(cpu, cpu_possible_mask);
23191 ++ if (cpu >= nr_cpu_ids)
23192 ++ cpu = 0;
23193 ++ if (cpu == orig_cpu)
23194 ++ break;
23195 + }
23196 ++
23197 ++ /* cannot pop from per cpu lists, try extralist */
23198 ++ if (!raw_spin_trylock(&s->extralist.lock))
23199 ++ return NULL;
23200 ++ node = s->extralist.first;
23201 ++ if (node)
23202 ++ s->extralist.first = node->next;
23203 ++ raw_spin_unlock(&s->extralist.lock);
23204 ++ return node;
23205 ++}
23206 ++
23207 ++struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
23208 ++{
23209 ++ if (in_nmi())
23210 ++ return ___pcpu_freelist_pop_nmi(s);
23211 ++ return ___pcpu_freelist_pop(s);
23212 + }
23213 +
23214 + struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
23215 +diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
23216 +index fbf8a8a289791..3c76553cfe571 100644
23217 +--- a/kernel/bpf/percpu_freelist.h
23218 ++++ b/kernel/bpf/percpu_freelist.h
23219 +@@ -13,6 +13,7 @@ struct pcpu_freelist_head {
23220 +
23221 + struct pcpu_freelist {
23222 + struct pcpu_freelist_head __percpu *freelist;
23223 ++ struct pcpu_freelist_head extralist;
23224 + };
23225 +
23226 + struct pcpu_freelist_node {
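The extralist gives lock-averse contexts a final landing spot: the NMI push path tries every per-CPU bucket with trylock before touching it. A userspace sketch of the same shape, with pthread mutexes standing in for raw spinlocks; note the kernel keeps retrying trylock on the extra list, while this sketch blocks for brevity:

	#include <pthread.h>
	#include <stddef.h>

	#define NR_BUCKETS 4

	struct node { struct node *next; };

	struct bucket {
		pthread_mutex_t lock;
		struct node *first;
	};

	static struct bucket buckets[NR_BUCKETS] = {
		[0] = { PTHREAD_MUTEX_INITIALIZER, NULL },
		[1] = { PTHREAD_MUTEX_INITIALIZER, NULL },
		[2] = { PTHREAD_MUTEX_INITIALIZER, NULL },
		[3] = { PTHREAD_MUTEX_INITIALIZER, NULL },
	};
	static struct bucket extralist = { PTHREAD_MUTEX_INITIALIZER, NULL };

	static void push_locked(struct bucket *b, struct node *n)
	{
		n->next = b->first;
		b->first = n;
	}

	/* Try every bucket without blocking; fall back to the shared list. */
	static void freelist_push(struct node *n, unsigned int start)
	{
		unsigned int i;

		for (i = 0; i < NR_BUCKETS; i++) {
			struct bucket *b = &buckets[(start + i) % NR_BUCKETS];

			if (pthread_mutex_trylock(&b->lock) == 0) {
				push_locked(b, n);
				pthread_mutex_unlock(&b->lock);
				return;
			}
		}
		pthread_mutex_lock(&extralist.lock);
		push_locked(&extralist, n);
		pthread_mutex_unlock(&extralist.lock);
	}

	int main(void)
	{
		struct node n = { NULL };

		freelist_push(&n, 0);
		return 0;
	}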
23227 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
23228 +index fba52d9ec8fc4..43cd175c66a55 100644
23229 +--- a/kernel/bpf/verifier.c
23230 ++++ b/kernel/bpf/verifier.c
23231 +@@ -1489,6 +1489,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
23232 + for (i = 0; i < insn_cnt; i++) {
23233 + u8 code = insn[i].code;
23234 +
23235 ++ if (code == (BPF_JMP | BPF_CALL) &&
23236 ++ insn[i].imm == BPF_FUNC_tail_call &&
23237 ++ insn[i].src_reg != BPF_PSEUDO_CALL)
23238 ++ subprog[cur_subprog].has_tail_call = true;
23239 + if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
23240 + goto next;
23241 + if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
23242 +@@ -2974,6 +2978,31 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
23243 + int ret_prog[MAX_CALL_FRAMES];
23244 +
23245 + process_func:
23246 ++ /* protect against potential stack overflow that might happen when
23247 ++ * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
23248 ++ * depth in such cases down to 256 so that the worst-case scenario
23249 ++ * would result in 8k stack size (32 which is tailcall limit * 256 =
23250 ++ * 8k).
23251 ++ *
23252 ++ * To get an idea of what might happen, see this example:
23253 ++ * func1 -> sub rsp, 128
23254 ++ * subfunc1 -> sub rsp, 256
23255 ++ * tailcall1 -> add rsp, 256
23256 ++ * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
23257 ++ * subfunc2 -> sub rsp, 64
23258 ++ * subfunc22 -> sub rsp, 128
23259 ++ * tailcall2 -> add rsp, 128
23260 ++ * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
23261 ++ *
23262 ++ * tailcall will unwind the current stack frame but it will not get rid
23263 ++ * of caller's stack as shown on the example above.
23264 ++ */
23265 ++ if (idx && subprog[idx].has_tail_call && depth >= 256) {
23266 ++ verbose(env,
23267 ++ "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
23268 ++ depth);
23269 ++ return -EACCES;
23270 ++ }
23271 + /* round up to 32-bytes, since this is granularity
23272 + * of interpreter stack size
23273 + */
23274 +@@ -4885,24 +4914,19 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
23275 + regs[BPF_REG_0].id = ++env->id_gen;
23276 + } else {
23277 + regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
23278 +- regs[BPF_REG_0].id = ++env->id_gen;
23279 + }
23280 + } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
23281 + mark_reg_known_zero(env, regs, BPF_REG_0);
23282 + regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
23283 +- regs[BPF_REG_0].id = ++env->id_gen;
23284 + } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
23285 + mark_reg_known_zero(env, regs, BPF_REG_0);
23286 + regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
23287 +- regs[BPF_REG_0].id = ++env->id_gen;
23288 + } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
23289 + mark_reg_known_zero(env, regs, BPF_REG_0);
23290 + regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
23291 +- regs[BPF_REG_0].id = ++env->id_gen;
23292 + } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) {
23293 + mark_reg_known_zero(env, regs, BPF_REG_0);
23294 + regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
23295 +- regs[BPF_REG_0].id = ++env->id_gen;
23296 + regs[BPF_REG_0].mem_size = meta.mem_size;
23297 + } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) {
23298 + int ret_btf_id;
23299 +@@ -4922,6 +4946,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
23300 + return -EINVAL;
23301 + }
23302 +
23303 ++ if (reg_type_may_be_null(regs[BPF_REG_0].type))
23304 ++ regs[BPF_REG_0].id = ++env->id_gen;
23305 ++
23306 + if (is_ptr_cast_function(func_id)) {
23307 + /* For release_reference() */
23308 + regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
23309 +@@ -6847,7 +6874,8 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
23310 + struct bpf_reg_state *reg, u32 id,
23311 + bool is_null)
23312 + {
23313 +- if (reg_type_may_be_null(reg->type) && reg->id == id) {
23314 ++ if (reg_type_may_be_null(reg->type) && reg->id == id &&
23315 ++ !WARN_ON_ONCE(!reg->id)) {
23316 + /* Old offset (both fixed and variable parts) should
23317 + * have been known-zero, because we don't allow pointer
23318 + * arithmetic on pointers that might be NULL.
23319 +@@ -11046,6 +11074,11 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
23320 + }
23321 +
23322 + if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
23323 ++ if (tgt_prog) {
23324 ++ verbose(env, "can't modify return codes of BPF programs\n");
23325 ++ ret = -EINVAL;
23326 ++ goto out;
23327 ++ }
23328 + ret = check_attach_modify_return(prog, addr);
23329 + if (ret)
23330 + verbose(env, "%s() is not modifiable\n",
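
Three verifier fixes land in the hunks above: subprograms that perform tail calls are flagged, their callers' stack depth is capped at 256 bytes, and the reg->id used for NULL tracking is now assigned in one place for every *_OR_NULL return type (with a WARN if mark_ptr_or_null_reg() ever sees id 0). The 256-byte cap follows directly from the tail-call counter; a quick back-of-the-envelope check, as a hypothetical stand-alone program with the constants from the comment above:

    #include <stdio.h>

    int main(void)
    {
            const int max_tail_calls = 32;    /* MAX_TAIL_CALL_CNT */
            const int max_caller_stack = 256; /* new per-frame cap */

            /* worst case: every tail call re-enters with the full
             * caller stack still in place */
            printf("worst-case stack: %d bytes\n",
                   max_tail_calls * max_caller_stack);   /* 8192 */
            return 0;
    }
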
23331 +diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
23332 +index 44a259338e33d..f7e1d0eccdbc6 100644
23333 +--- a/kernel/cpu_pm.c
23334 ++++ b/kernel/cpu_pm.c
23335 +@@ -15,18 +15,28 @@
23336 +
23337 + static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
23338 +
23339 +-static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
23340 ++static int cpu_pm_notify(enum cpu_pm_event event)
23341 + {
23342 + int ret;
23343 +
23344 + /*
23345 +- * __atomic_notifier_call_chain has a RCU read critical section, which
23346 ++ * atomic_notifier_call_chain has a RCU read critical section, which
23347 + * could be dysfunctional in cpu idle. Copy RCU_NONIDLE code to let
23348 + * RCU know this.
23349 + */
23350 + rcu_irq_enter_irqson();
23351 +- ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
23352 +- nr_to_call, nr_calls);
23353 ++ ret = atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL);
23354 ++ rcu_irq_exit_irqson();
23355 ++
23356 ++ return notifier_to_errno(ret);
23357 ++}
23358 ++
23359 ++static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
23360 ++{
23361 ++ int ret;
23362 ++
23363 ++ rcu_irq_enter_irqson();
23364 ++ ret = atomic_notifier_call_chain_robust(&cpu_pm_notifier_chain, event_up, event_down, NULL);
23365 + rcu_irq_exit_irqson();
23366 +
23367 + return notifier_to_errno(ret);
23368 +@@ -80,18 +90,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
23369 + */
23370 + int cpu_pm_enter(void)
23371 + {
23372 +- int nr_calls = 0;
23373 +- int ret = 0;
23374 +-
23375 +- ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
23376 +- if (ret)
23377 +- /*
23378 +- * Inform listeners (nr_calls - 1) about failure of CPU PM
23379 +- * PM entry who are notified earlier to prepare for it.
23380 +- */
23381 +- cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
23382 +-
23383 +- return ret;
23384 ++ return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED);
23385 + }
23386 + EXPORT_SYMBOL_GPL(cpu_pm_enter);
23387 +
23388 +@@ -109,7 +108,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_enter);
23389 + */
23390 + int cpu_pm_exit(void)
23391 + {
23392 +- return cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
23393 ++ return cpu_pm_notify(CPU_PM_EXIT);
23394 + }
23395 + EXPORT_SYMBOL_GPL(cpu_pm_exit);
23396 +
23397 +@@ -131,18 +130,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
23398 + */
23399 + int cpu_cluster_pm_enter(void)
23400 + {
23401 +- int nr_calls = 0;
23402 +- int ret = 0;
23403 +-
23404 +- ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
23405 +- if (ret)
23406 +- /*
23407 +- * Inform listeners (nr_calls - 1) about failure of CPU cluster
23408 +- * PM entry who are notified earlier to prepare for it.
23409 +- */
23410 +- cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
23411 +-
23412 +- return ret;
23413 ++ return cpu_pm_notify_robust(CPU_CLUSTER_PM_ENTER, CPU_CLUSTER_PM_ENTER_FAILED);
23414 + }
23415 + EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
23416 +
23417 +@@ -163,7 +151,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
23418 + */
23419 + int cpu_cluster_pm_exit(void)
23420 + {
23421 +- return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
23422 ++ return cpu_pm_notify(CPU_CLUSTER_PM_EXIT);
23423 + }
23424 + EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
23425 +
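
With the robust chain, a failing CPU_PM_ENTER callback no longer needs the fragile nr_calls bookkeeping: the core re-walks exactly the callbacks that already ran and hands them CPU_PM_ENTER_FAILED. A hedged driver-side sketch, where save_my_context() and restore_my_context() are hypothetical helpers:

    #include <linux/cpu_pm.h>
    #include <linux/notifier.h>

    static int save_my_context(void);      /* hypothetical */
    static void restore_my_context(void);  /* hypothetical */

    static int my_cpu_pm_cb(struct notifier_block *nb,
                            unsigned long action, void *data)
    {
            switch (action) {
            case CPU_PM_ENTER:
                    /* returning NOTIFY_BAD here now guarantees that every
                     * callback notified before us sees CPU_PM_ENTER_FAILED */
                    return save_my_context() ? NOTIFY_BAD : NOTIFY_OK;
            case CPU_PM_ENTER_FAILED:
            case CPU_PM_EXIT:
                    restore_my_context();
                    return NOTIFY_OK;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block my_cpu_pm_nb = {
            .notifier_call = my_cpu_pm_cb,
    };
    /* registered once with cpu_pm_register_notifier(&my_cpu_pm_nb) */
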
23426 +diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
23427 +index 9d847ab851dbe..e240c97086e20 100644
23428 +--- a/kernel/debug/kdb/kdb_io.c
23429 ++++ b/kernel/debug/kdb/kdb_io.c
23430 +@@ -706,12 +706,16 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
23431 + size_avail = sizeof(kdb_buffer) - len;
23432 + goto kdb_print_out;
23433 + }
23434 +- if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
23435 ++ if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) {
23436 + /*
23437 + * This was a interactive search (using '/' at more
23438 +- * prompt) and it has completed. Clear the flag.
23439 ++ * prompt) and it has completed. Replace the \0 with
23440 ++ * its original value to ensure multi-line strings
23441 ++ * are handled properly, and return to normal mode.
23442 + */
23443 ++ *cphold = replaced_byte;
23444 + kdb_grepping_flag = 0;
23445 ++ }
23446 + /*
23447 + * at this point the string is a full line and
23448 + * should be printed, up to the null.
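
The kdb fix is the classic "temporarily NUL-terminate" idiom: the grep code writes a '\0' into the buffer to bound a match, and the bug was that the search-complete path never wrote the saved byte back, truncating multi-line output. A stand-alone illustration of the restored idiom:

    #include <stdio.h>
    #include <string.h>

    static void print_first_line(char *buf)
    {
            char *cp = strchr(buf, '\n');
            char saved;

            if (!cp) {
                    fputs(buf, stdout);
                    return;
            }
            saved = *cp;
            *cp = '\0';
            puts(buf);          /* print up to the newline only */
            *cp = saved;        /* the restore vkdb_printf() was missing */
    }
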
23449 +diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
23450 +index 0d129421e75fc..7133d5c6e1a6d 100644
23451 +--- a/kernel/dma/mapping.c
23452 ++++ b/kernel/dma/mapping.c
23453 +@@ -144,6 +144,10 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
23454 + dma_addr_t addr;
23455 +
23456 + BUG_ON(!valid_dma_direction(dir));
23457 ++
23458 ++ if (WARN_ON_ONCE(!dev->dma_mask))
23459 ++ return DMA_MAPPING_ERROR;
23460 ++
23461 + if (dma_map_direct(dev, ops))
23462 + addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
23463 + else
23464 +@@ -179,6 +183,10 @@ int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
23465 + int ents;
23466 +
23467 + BUG_ON(!valid_dma_direction(dir));
23468 ++
23469 ++ if (WARN_ON_ONCE(!dev->dma_mask))
23470 ++ return 0;
23471 ++
23472 + if (dma_map_direct(dev, ops))
23473 + ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
23474 + else
23475 +@@ -213,6 +221,9 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
23476 +
23477 + BUG_ON(!valid_dma_direction(dir));
23478 +
23479 ++ if (WARN_ON_ONCE(!dev->dma_mask))
23480 ++ return DMA_MAPPING_ERROR;
23481 ++
23482 + /* Don't allow RAM to be mapped */
23483 + if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
23484 + return DMA_MAPPING_ERROR;
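
All three mapping entry points now refuse to touch a device whose dma_mask was never set, returning DMA_MAPPING_ERROR (or zero mapped entries for scatterlists) with a one-time warning instead of crashing later in the dma-direct path. Driver code that already checks for mapping errors is unaffected, as in this hedged usage sketch, where dev and page come from the surrounding driver:

    #include <linux/dma-mapping.h>

    static int my_map_one(struct device *dev, struct page *page)
    {
            dma_addr_t addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
                                                 DMA_TO_DEVICE, 0);

            if (dma_mapping_error(dev, addr))
                    return -EIO;    /* now also taken when dev->dma_mask
                                     * is unset, instead of an oops later */
            /* ... hand addr to the hardware ... */
            dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_TO_DEVICE, 0);
            return 0;
    }
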
23485 +diff --git a/kernel/events/core.c b/kernel/events/core.c
23486 +index e8bf92202542b..6a1ae6a62d489 100644
23487 +--- a/kernel/events/core.c
23488 ++++ b/kernel/events/core.c
23489 +@@ -5869,11 +5869,11 @@ static void perf_pmu_output_stop(struct perf_event *event);
23490 + static void perf_mmap_close(struct vm_area_struct *vma)
23491 + {
23492 + struct perf_event *event = vma->vm_file->private_data;
23493 +-
23494 + struct perf_buffer *rb = ring_buffer_get(event);
23495 + struct user_struct *mmap_user = rb->mmap_user;
23496 + int mmap_locked = rb->mmap_locked;
23497 + unsigned long size = perf_data_size(rb);
23498 ++ bool detach_rest = false;
23499 +
23500 + if (event->pmu->event_unmapped)
23501 + event->pmu->event_unmapped(event, vma->vm_mm);
23502 +@@ -5904,7 +5904,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
23503 + mutex_unlock(&event->mmap_mutex);
23504 + }
23505 +
23506 +- atomic_dec(&rb->mmap_count);
23507 ++ if (atomic_dec_and_test(&rb->mmap_count))
23508 ++ detach_rest = true;
23509 +
23510 + if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
23511 + goto out_put;
23512 +@@ -5913,7 +5914,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
23513 + mutex_unlock(&event->mmap_mutex);
23514 +
23515 + /* If there's still other mmap()s of this buffer, we're done. */
23516 +- if (atomic_read(&rb->mmap_count))
23517 ++ if (!detach_rest)
23518 + goto out_put;
23519 +
23520 + /*
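
The perf change closes a race in perf_mmap_close(): the old code decremented rb->mmap_count and later re-read it, so two racing closers could each observe a non-zero count and both skip the final detach. Folding the test into the decrement makes it one atomic step; a user-space analogue with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Analogue of atomic_dec_and_test(): the decrement and the zero test
     * are a single read-modify-write, so exactly one thread sees "last". */
    static bool put_ref(atomic_int *refcount)
    {
            return atomic_fetch_sub(refcount, 1) == 1;
    }

    /* usage: if (put_ref(&obj_refs)) tear_down(obj); */
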
23521 +diff --git a/kernel/fork.c b/kernel/fork.c
23522 +index da8d360fb0326..a9ce750578cae 100644
23523 +--- a/kernel/fork.c
23524 ++++ b/kernel/fork.c
23525 +@@ -1810,6 +1810,25 @@ static __always_inline void delayed_free_task(struct task_struct *tsk)
23526 + free_task(tsk);
23527 + }
23528 +
23529 ++static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
23530 ++{
23531 ++ /* Skip if kernel thread */
23532 ++ if (!tsk->mm)
23533 ++ return;
23534 ++
23535 ++ /* Skip if spawning a thread or using vfork */
23536 ++ if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
23537 ++ return;
23538 ++
23539 ++ /* We need to synchronize with __set_oom_adj */
23540 ++ mutex_lock(&oom_adj_mutex);
23541 ++ set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
23542 ++ /* Update the values in case they were changed after copy_signal */
23543 ++ tsk->signal->oom_score_adj = current->signal->oom_score_adj;
23544 ++ tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
23545 ++ mutex_unlock(&oom_adj_mutex);
23546 ++}
23547 ++
23548 + /*
23549 + * This creates a new process as a copy of the old one,
23550 + * but does not actually start it yet.
23551 +@@ -2282,6 +2301,8 @@ static __latent_entropy struct task_struct *copy_process(
23552 + trace_task_newtask(p, clone_flags);
23553 + uprobe_copy_process(p, clone_flags);
23554 +
23555 ++ copy_oom_score_adj(clone_flags, p);
23556 ++
23557 + return p;
23558 +
23559 + bad_fork_cancel_cgroup:
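
copy_oom_score_adj() only matters for clones that share an mm without being threads or vfork children, i.e. multiprocess users of CLONE_VM, and it mirrors a corresponding __set_oom_adj() change that walks such processes. The flag predicate can be checked in isolation; a small user-space demo:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    static int shares_mm_as_separate_process(unsigned long flags)
    {
            return (flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK))
                    == CLONE_VM;
    }

    int main(void)
    {
            printf("%d\n", shares_mm_as_separate_process(CLONE_VM));            /* 1 */
            printf("%d\n", shares_mm_as_separate_process(CLONE_VM |
                                                         CLONE_THREAD));        /* 0 */
            printf("%d\n", shares_mm_as_separate_process(CLONE_VM |
                                                         CLONE_VFORK));         /* 0 */
            return 0;
    }
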
23560 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
23561 +index 2facbbd146ec2..85d15f0362dc5 100644
23562 +--- a/kernel/locking/lockdep.c
23563 ++++ b/kernel/locking/lockdep.c
23564 +@@ -76,6 +76,23 @@ module_param(lock_stat, int, 0644);
23565 + #define lock_stat 0
23566 + #endif
23567 +
23568 ++DEFINE_PER_CPU(unsigned int, lockdep_recursion);
23569 ++EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
23570 ++
23571 ++static inline bool lockdep_enabled(void)
23572 ++{
23573 ++ if (!debug_locks)
23574 ++ return false;
23575 ++
23576 ++ if (raw_cpu_read(lockdep_recursion))
23577 ++ return false;
23578 ++
23579 ++ if (current->lockdep_recursion)
23580 ++ return false;
23581 ++
23582 ++ return true;
23583 ++}
23584 ++
23585 + /*
23586 + * lockdep_lock: protects the lockdep graph, the hashes and the
23587 + * class/list/hash allocators.
23588 +@@ -93,7 +110,7 @@ static inline void lockdep_lock(void)
23589 +
23590 + arch_spin_lock(&__lock);
23591 + __owner = current;
23592 +- current->lockdep_recursion++;
23593 ++ __this_cpu_inc(lockdep_recursion);
23594 + }
23595 +
23596 + static inline void lockdep_unlock(void)
23597 +@@ -101,7 +118,7 @@ static inline void lockdep_unlock(void)
23598 + if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
23599 + return;
23600 +
23601 +- current->lockdep_recursion--;
23602 ++ __this_cpu_dec(lockdep_recursion);
23603 + __owner = NULL;
23604 + arch_spin_unlock(&__lock);
23605 + }
23606 +@@ -393,10 +410,15 @@ void lockdep_init_task(struct task_struct *task)
23607 + task->lockdep_recursion = 0;
23608 + }
23609 +
23610 ++static __always_inline void lockdep_recursion_inc(void)
23611 ++{
23612 ++ __this_cpu_inc(lockdep_recursion);
23613 ++}
23614 ++
23615 + static __always_inline void lockdep_recursion_finish(void)
23616 + {
23617 +- if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
23618 +- current->lockdep_recursion = 0;
23619 ++ if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
23620 ++ __this_cpu_write(lockdep_recursion, 0);
23621 + }
23622 +
23623 + void lockdep_set_selftest_task(struct task_struct *task)
23624 +@@ -585,6 +607,8 @@ static const char *usage_str[] =
23625 + #include "lockdep_states.h"
23626 + #undef LOCKDEP_STATE
23627 + [LOCK_USED] = "INITIAL USE",
23628 ++ [LOCK_USED_READ] = "INITIAL READ USE",
23629 ++ /* abused as string storage for verify_lock_unused() */
23630 + [LOCK_USAGE_STATES] = "IN-NMI",
23631 + };
23632 + #endif
23633 +@@ -1939,7 +1963,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
23634 + #endif
23635 + printk(KERN_CONT " {\n");
23636 +
23637 +- for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
23638 ++ for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
23639 + if (class->usage_mask & (1 << bit)) {
23640 + int len = depth;
23641 +
23642 +@@ -3657,7 +3681,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
23643 + if (unlikely(in_nmi()))
23644 + return;
23645 +
23646 +- if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
23647 ++ if (unlikely(__this_cpu_read(lockdep_recursion)))
23648 + return;
23649 +
23650 + if (unlikely(lockdep_hardirqs_enabled())) {
23651 +@@ -3693,7 +3717,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
23652 +
23653 + current->hardirq_chain_key = current->curr_chain_key;
23654 +
23655 +- current->lockdep_recursion++;
23656 ++ lockdep_recursion_inc();
23657 + __trace_hardirqs_on_caller();
23658 + lockdep_recursion_finish();
23659 + }
23660 +@@ -3726,7 +3750,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
23661 + goto skip_checks;
23662 + }
23663 +
23664 +- if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
23665 ++ if (unlikely(__this_cpu_read(lockdep_recursion)))
23666 + return;
23667 +
23668 + if (lockdep_hardirqs_enabled()) {
23669 +@@ -3779,7 +3803,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
23670 + if (in_nmi()) {
23671 + if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
23672 + return;
23673 +- } else if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
23674 ++ } else if (__this_cpu_read(lockdep_recursion))
23675 + return;
23676 +
23677 + /*
23678 +@@ -3812,7 +3836,7 @@ void lockdep_softirqs_on(unsigned long ip)
23679 + {
23680 + struct irqtrace_events *trace = &current->irqtrace;
23681 +
23682 +- if (unlikely(!debug_locks || current->lockdep_recursion))
23683 ++ if (unlikely(!lockdep_enabled()))
23684 + return;
23685 +
23686 + /*
23687 +@@ -3827,7 +3851,7 @@ void lockdep_softirqs_on(unsigned long ip)
23688 + return;
23689 + }
23690 +
23691 +- current->lockdep_recursion++;
23692 ++ lockdep_recursion_inc();
23693 + /*
23694 + * We'll do an OFF -> ON transition:
23695 + */
23696 +@@ -3850,7 +3874,7 @@ void lockdep_softirqs_on(unsigned long ip)
23697 + */
23698 + void lockdep_softirqs_off(unsigned long ip)
23699 + {
23700 +- if (unlikely(!debug_locks || current->lockdep_recursion))
23701 ++ if (unlikely(!lockdep_enabled()))
23702 + return;
23703 +
23704 + /*
23705 +@@ -3969,7 +3993,7 @@ static int separate_irq_context(struct task_struct *curr,
23706 + static int mark_lock(struct task_struct *curr, struct held_lock *this,
23707 + enum lock_usage_bit new_bit)
23708 + {
23709 +- unsigned int old_mask, new_mask, ret = 1;
23710 ++ unsigned int new_mask, ret = 1;
23711 +
23712 + if (new_bit >= LOCK_USAGE_STATES) {
23713 + DEBUG_LOCKS_WARN_ON(1);
23714 +@@ -3996,30 +4020,26 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
23715 + if (unlikely(hlock_class(this)->usage_mask & new_mask))
23716 + goto unlock;
23717 +
23718 +- old_mask = hlock_class(this)->usage_mask;
23719 + hlock_class(this)->usage_mask |= new_mask;
23720 +
23721 +- /*
23722 +- * Save one usage_traces[] entry and map both LOCK_USED and
23723 +- * LOCK_USED_READ onto the same entry.
23724 +- */
23725 +- if (new_bit == LOCK_USED || new_bit == LOCK_USED_READ) {
23726 +- if (old_mask & (LOCKF_USED | LOCKF_USED_READ))
23727 +- goto unlock;
23728 +- new_bit = LOCK_USED;
23729 ++ if (new_bit < LOCK_TRACE_STATES) {
23730 ++ if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
23731 ++ return 0;
23732 + }
23733 +
23734 +- if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
23735 +- return 0;
23736 +-
23737 + switch (new_bit) {
23738 ++ case 0 ... LOCK_USED-1:
23739 ++ ret = mark_lock_irq(curr, this, new_bit);
23740 ++ if (!ret)
23741 ++ return 0;
23742 ++ break;
23743 ++
23744 + case LOCK_USED:
23745 + debug_atomic_dec(nr_unused_locks);
23746 + break;
23747 ++
23748 + default:
23749 +- ret = mark_lock_irq(curr, this, new_bit);
23750 +- if (!ret)
23751 +- return 0;
23752 ++ break;
23753 + }
23754 +
23755 + unlock:
23756 +@@ -4235,11 +4255,11 @@ void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
23757 + if (subclass) {
23758 + unsigned long flags;
23759 +
23760 +- if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
23761 ++ if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
23762 + return;
23763 +
23764 + raw_local_irq_save(flags);
23765 +- current->lockdep_recursion++;
23766 ++ lockdep_recursion_inc();
23767 + register_lock_class(lock, subclass, 1);
23768 + lockdep_recursion_finish();
23769 + raw_local_irq_restore(flags);
23770 +@@ -4922,11 +4942,11 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
23771 + {
23772 + unsigned long flags;
23773 +
23774 +- if (unlikely(current->lockdep_recursion))
23775 ++ if (unlikely(!lockdep_enabled()))
23776 + return;
23777 +
23778 + raw_local_irq_save(flags);
23779 +- current->lockdep_recursion++;
23780 ++ lockdep_recursion_inc();
23781 + check_flags(flags);
23782 + if (__lock_set_class(lock, name, key, subclass, ip))
23783 + check_chain_key(current);
23784 +@@ -4939,11 +4959,11 @@ void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
23785 + {
23786 + unsigned long flags;
23787 +
23788 +- if (unlikely(current->lockdep_recursion))
23789 ++ if (unlikely(!lockdep_enabled()))
23790 + return;
23791 +
23792 + raw_local_irq_save(flags);
23793 +- current->lockdep_recursion++;
23794 ++ lockdep_recursion_inc();
23795 + check_flags(flags);
23796 + if (__lock_downgrade(lock, ip))
23797 + check_chain_key(current);
23798 +@@ -4981,7 +5001,7 @@ static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock
23799 +
23800 + static bool lockdep_nmi(void)
23801 + {
23802 +- if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
23803 ++ if (raw_cpu_read(lockdep_recursion))
23804 + return false;
23805 +
23806 + if (!in_nmi())
23807 +@@ -5002,7 +5022,10 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
23808 +
23809 + trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
23810 +
23811 +- if (unlikely(current->lockdep_recursion)) {
23812 ++ if (!debug_locks)
23813 ++ return;
23814 ++
23815 ++ if (unlikely(!lockdep_enabled())) {
23816 + /* XXX allow trylock from NMI ?!? */
23817 + if (lockdep_nmi() && !trylock) {
23818 + struct held_lock hlock;
23819 +@@ -5025,7 +5048,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
23820 + raw_local_irq_save(flags);
23821 + check_flags(flags);
23822 +
23823 +- current->lockdep_recursion++;
23824 ++ lockdep_recursion_inc();
23825 + __lock_acquire(lock, subclass, trylock, read, check,
23826 + irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
23827 + lockdep_recursion_finish();
23828 +@@ -5039,13 +5062,13 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)
23829 +
23830 + trace_lock_release(lock, ip);
23831 +
23832 +- if (unlikely(current->lockdep_recursion))
23833 ++ if (unlikely(!lockdep_enabled()))
23834 + return;
23835 +
23836 + raw_local_irq_save(flags);
23837 + check_flags(flags);
23838 +
23839 +- current->lockdep_recursion++;
23840 ++ lockdep_recursion_inc();
23841 + if (__lock_release(lock, ip))
23842 + check_chain_key(current);
23843 + lockdep_recursion_finish();
23844 +@@ -5058,13 +5081,13 @@ noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
23845 + unsigned long flags;
23846 + int ret = 0;
23847 +
23848 +- if (unlikely(current->lockdep_recursion))
23849 ++ if (unlikely(!lockdep_enabled()))
23850 + return 1; /* avoid false negative lockdep_assert_held() */
23851 +
23852 + raw_local_irq_save(flags);
23853 + check_flags(flags);
23854 +
23855 +- current->lockdep_recursion++;
23856 ++ lockdep_recursion_inc();
23857 + ret = __lock_is_held(lock, read);
23858 + lockdep_recursion_finish();
23859 + raw_local_irq_restore(flags);
23860 +@@ -5079,13 +5102,13 @@ struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
23861 + struct pin_cookie cookie = NIL_COOKIE;
23862 + unsigned long flags;
23863 +
23864 +- if (unlikely(current->lockdep_recursion))
23865 ++ if (unlikely(!lockdep_enabled()))
23866 + return cookie;
23867 +
23868 + raw_local_irq_save(flags);
23869 + check_flags(flags);
23870 +
23871 +- current->lockdep_recursion++;
23872 ++ lockdep_recursion_inc();
23873 + cookie = __lock_pin_lock(lock);
23874 + lockdep_recursion_finish();
23875 + raw_local_irq_restore(flags);
23876 +@@ -5098,13 +5121,13 @@ void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
23877 + {
23878 + unsigned long flags;
23879 +
23880 +- if (unlikely(current->lockdep_recursion))
23881 ++ if (unlikely(!lockdep_enabled()))
23882 + return;
23883 +
23884 + raw_local_irq_save(flags);
23885 + check_flags(flags);
23886 +
23887 +- current->lockdep_recursion++;
23888 ++ lockdep_recursion_inc();
23889 + __lock_repin_lock(lock, cookie);
23890 + lockdep_recursion_finish();
23891 + raw_local_irq_restore(flags);
23892 +@@ -5115,13 +5138,13 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
23893 + {
23894 + unsigned long flags;
23895 +
23896 +- if (unlikely(current->lockdep_recursion))
23897 ++ if (unlikely(!lockdep_enabled()))
23898 + return;
23899 +
23900 + raw_local_irq_save(flags);
23901 + check_flags(flags);
23902 +
23903 +- current->lockdep_recursion++;
23904 ++ lockdep_recursion_inc();
23905 + __lock_unpin_lock(lock, cookie);
23906 + lockdep_recursion_finish();
23907 + raw_local_irq_restore(flags);
23908 +@@ -5251,15 +5274,12 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
23909 +
23910 + trace_lock_acquired(lock, ip);
23911 +
23912 +- if (unlikely(!lock_stat || !debug_locks))
23913 +- return;
23914 +-
23915 +- if (unlikely(current->lockdep_recursion))
23916 ++ if (unlikely(!lock_stat || !lockdep_enabled()))
23917 + return;
23918 +
23919 + raw_local_irq_save(flags);
23920 + check_flags(flags);
23921 +- current->lockdep_recursion++;
23922 ++ lockdep_recursion_inc();
23923 + __lock_contended(lock, ip);
23924 + lockdep_recursion_finish();
23925 + raw_local_irq_restore(flags);
23926 +@@ -5272,15 +5292,12 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
23927 +
23928 + trace_lock_contended(lock, ip);
23929 +
23930 +- if (unlikely(!lock_stat || !debug_locks))
23931 +- return;
23932 +-
23933 +- if (unlikely(current->lockdep_recursion))
23934 ++ if (unlikely(!lock_stat || !lockdep_enabled()))
23935 + return;
23936 +
23937 + raw_local_irq_save(flags);
23938 + check_flags(flags);
23939 +- current->lockdep_recursion++;
23940 ++ lockdep_recursion_inc();
23941 + __lock_acquired(lock, ip);
23942 + lockdep_recursion_finish();
23943 + raw_local_irq_restore(flags);
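
The lockdep rework moves the recursion counter from task_struct to a per-CPU variable so that NMIs and the internal graph lock are covered by the same guard, and every entry point now funnels through a single lockdep_enabled() predicate instead of scattered current->lockdep_recursion checks. Reduced to its essentials, with a per-thread variable standing in for the per-CPU one (a hedged sketch, not the kernel code):

    static int debug_locks = 1;
    static __thread unsigned int lockdep_recursion;

    static inline int lockdep_enabled(void)
    {
            return debug_locks && !lockdep_recursion;
    }

    static void lock_op(void)
    {
            if (!lockdep_enabled())
                    return;             /* one predicate for all paths */
            lockdep_recursion++;
            /* ... graph work that may itself take locks ... */
            lockdep_recursion--;
    }
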
23944 +diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
23945 +index b0be1560ed17a..de49f9e1c11ba 100644
23946 +--- a/kernel/locking/lockdep_internals.h
23947 ++++ b/kernel/locking/lockdep_internals.h
23948 +@@ -20,9 +20,12 @@ enum lock_usage_bit {
23949 + #undef LOCKDEP_STATE
23950 + LOCK_USED,
23951 + LOCK_USED_READ,
23952 +- LOCK_USAGE_STATES
23953 ++ LOCK_USAGE_STATES,
23954 + };
23955 +
23956 ++/* states after LOCK_USED_READ are not traced and printed */
23957 ++static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);
23958 ++
23959 + #define LOCK_USAGE_READ_MASK 1
23960 + #define LOCK_USAGE_DIR_MASK 2
23961 + #define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
23962 +@@ -121,7 +124,7 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
23963 + extern struct list_head all_lock_classes;
23964 + extern struct lock_chain lock_chains[];
23965 +
23966 +-#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
23967 ++#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)
23968 +
23969 + extern void get_usage_chars(struct lock_class *class,
23970 + char usage[LOCK_USAGE_CHARS]);
23971 +diff --git a/kernel/module.c b/kernel/module.c
23972 +index 1c5cff34d9f28..8486123ffd7af 100644
23973 +--- a/kernel/module.c
23974 ++++ b/kernel/module.c
23975 +@@ -91,8 +91,9 @@ EXPORT_SYMBOL_GPL(module_mutex);
23976 + static LIST_HEAD(modules);
23977 +
23978 + /* Work queue for freeing init sections in success case */
23979 +-static struct work_struct init_free_wq;
23980 +-static struct llist_head init_free_list;
23981 ++static void do_free_init(struct work_struct *w);
23982 ++static DECLARE_WORK(init_free_wq, do_free_init);
23983 ++static LLIST_HEAD(init_free_list);
23984 +
23985 + #ifdef CONFIG_MODULES_TREE_LOOKUP
23986 +
23987 +@@ -3579,14 +3580,6 @@ static void do_free_init(struct work_struct *w)
23988 + }
23989 + }
23990 +
23991 +-static int __init modules_wq_init(void)
23992 +-{
23993 +- INIT_WORK(&init_free_wq, do_free_init);
23994 +- init_llist_head(&init_free_list);
23995 +- return 0;
23996 +-}
23997 +-module_init(modules_wq_init);
23998 +-
23999 + /*
24000 + * This is where the real work happens.
24001 + *
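
Switching to DECLARE_WORK()/LLIST_HEAD() initializes the work item and list at compile time, so the modules_wq_init() initcall disappears along with the window in which a very early module load could have used them uninitialized. The general pattern, as a hedged sketch where my_handler and my_work are illustrative names:

    #include <linux/workqueue.h>

    static void my_handler(struct work_struct *w)
    {
            /* ... deferred work ... */
    }
    static DECLARE_WORK(my_work, my_handler);

    void some_early_path(void)
    {
            schedule_work(&my_work);   /* safe with no INIT_WORK() call,
                                        * even before initcalls have run */
    }
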
24002 +diff --git a/kernel/notifier.c b/kernel/notifier.c
24003 +index 84c987dfbe036..1b019cbca594a 100644
24004 +--- a/kernel/notifier.c
24005 ++++ b/kernel/notifier.c
24006 +@@ -94,6 +94,34 @@ static int notifier_call_chain(struct notifier_block **nl,
24007 + }
24008 + NOKPROBE_SYMBOL(notifier_call_chain);
24009 +
24010 ++/**
24011 ++ * notifier_call_chain_robust - Inform the registered notifiers about an event
24012 ++ * and rollback on error.
24013 ++ * @nl: Pointer to head of the blocking notifier chain
24014 ++ * @val_up: Value passed unmodified to the notifier function
24015 ++ * @val_down: Value passed unmodified to the notifier function when recovering
24016 ++ * from an error on @val_up
24017 ++ * @v: Pointer passed unmodified to the notifier function
24018 ++ *
24019 ++ * NOTE: It is important the @nl chain doesn't change between the two
24020 ++ * invocations of notifier_call_chain() such that we visit the
24021 ++ * exact same notifier callbacks; this rules out any RCU usage.
24022 ++ *
24023 ++ * Returns: the return value of the @val_up call.
24024 ++ */
24025 ++static int notifier_call_chain_robust(struct notifier_block **nl,
24026 ++ unsigned long val_up, unsigned long val_down,
24027 ++ void *v)
24028 ++{
24029 ++ int ret, nr = 0;
24030 ++
24031 ++ ret = notifier_call_chain(nl, val_up, v, -1, &nr);
24032 ++ if (ret & NOTIFY_STOP_MASK)
24033 ++ notifier_call_chain(nl, val_down, v, nr-1, NULL);
24034 ++
24035 ++ return ret;
24036 ++}
24037 ++
24038 + /*
24039 + * Atomic notifier chain routines. Registration and unregistration
24040 + * use a spinlock, and call_chain is synchronized by RCU (no locks).
24041 +@@ -144,13 +172,30 @@ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
24042 + }
24043 + EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
24044 +
24045 ++int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh,
24046 ++ unsigned long val_up, unsigned long val_down, void *v)
24047 ++{
24048 ++ unsigned long flags;
24049 ++ int ret;
24050 ++
24051 ++ /*
24052 ++ * Mustn't use RCU, because then the notifier list can
24053 ++ * change between the up and down traversal.
24054 ++ */
24055 ++ spin_lock_irqsave(&nh->lock, flags);
24056 ++ ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v);
24057 ++ spin_unlock_irqrestore(&nh->lock, flags);
24058 ++
24059 ++ return ret;
24060 ++}
24061 ++EXPORT_SYMBOL_GPL(atomic_notifier_call_chain_robust);
24062 ++NOKPROBE_SYMBOL(atomic_notifier_call_chain_robust);
24063 ++
24064 + /**
24065 +- * __atomic_notifier_call_chain - Call functions in an atomic notifier chain
24066 ++ * atomic_notifier_call_chain - Call functions in an atomic notifier chain
24067 + * @nh: Pointer to head of the atomic notifier chain
24068 + * @val: Value passed unmodified to notifier function
24069 + * @v: Pointer passed unmodified to notifier function
24070 +- * @nr_to_call: See the comment for notifier_call_chain.
24071 +- * @nr_calls: See the comment for notifier_call_chain.
24072 + *
24073 + * Calls each function in a notifier chain in turn. The functions
24074 + * run in an atomic context, so they must not block.
24075 +@@ -163,24 +208,16 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
24076 + * Otherwise the return value is the return value
24077 + * of the last notifier function called.
24078 + */
24079 +-int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
24080 +- unsigned long val, void *v,
24081 +- int nr_to_call, int *nr_calls)
24082 ++int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
24083 ++ unsigned long val, void *v)
24084 + {
24085 + int ret;
24086 +
24087 + rcu_read_lock();
24088 +- ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
24089 ++ ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
24090 + rcu_read_unlock();
24091 +- return ret;
24092 +-}
24093 +-EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);
24094 +-NOKPROBE_SYMBOL(__atomic_notifier_call_chain);
24095 +
24096 +-int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
24097 +- unsigned long val, void *v)
24098 +-{
24099 +- return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
24100 ++ return ret;
24101 + }
24102 + EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
24103 + NOKPROBE_SYMBOL(atomic_notifier_call_chain);
24104 +@@ -250,13 +287,30 @@ int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
24105 + }
24106 + EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
24107 +
24108 ++int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
24109 ++ unsigned long val_up, unsigned long val_down, void *v)
24110 ++{
24111 ++ int ret = NOTIFY_DONE;
24112 ++
24113 ++ /*
24114 ++ * We check the head outside the lock, but if this access is
24115 ++ * racy then it does not matter what the result of the test
24116 ++ * is, we re-check the list after having taken the lock anyway:
24117 ++ */
24118 ++ if (rcu_access_pointer(nh->head)) {
24119 ++ down_read(&nh->rwsem);
24120 ++ ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v);
24121 ++ up_read(&nh->rwsem);
24122 ++ }
24123 ++ return ret;
24124 ++}
24125 ++EXPORT_SYMBOL_GPL(blocking_notifier_call_chain_robust);
24126 ++
24127 + /**
24128 +- * __blocking_notifier_call_chain - Call functions in a blocking notifier chain
24129 ++ * blocking_notifier_call_chain - Call functions in a blocking notifier chain
24130 + * @nh: Pointer to head of the blocking notifier chain
24131 + * @val: Value passed unmodified to notifier function
24132 + * @v: Pointer passed unmodified to notifier function
24133 +- * @nr_to_call: See comment for notifier_call_chain.
24134 +- * @nr_calls: See comment for notifier_call_chain.
24135 + *
24136 + * Calls each function in a notifier chain in turn. The functions
24137 + * run in a process context, so they are allowed to block.
24138 +@@ -268,9 +322,8 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
24139 + * Otherwise the return value is the return value
24140 + * of the last notifier function called.
24141 + */
24142 +-int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
24143 +- unsigned long val, void *v,
24144 +- int nr_to_call, int *nr_calls)
24145 ++int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
24146 ++ unsigned long val, void *v)
24147 + {
24148 + int ret = NOTIFY_DONE;
24149 +
24150 +@@ -281,19 +334,11 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
24151 + */
24152 + if (rcu_access_pointer(nh->head)) {
24153 + down_read(&nh->rwsem);
24154 +- ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
24155 +- nr_calls);
24156 ++ ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
24157 + up_read(&nh->rwsem);
24158 + }
24159 + return ret;
24160 + }
24161 +-EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);
24162 +-
24163 +-int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
24164 +- unsigned long val, void *v)
24165 +-{
24166 +- return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
24167 +-}
24168 + EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
24169 +
24170 + /*
24171 +@@ -335,13 +380,18 @@ int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
24172 + }
24173 + EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
24174 +
24175 ++int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
24176 ++ unsigned long val_up, unsigned long val_down, void *v)
24177 ++{
24178 ++ return notifier_call_chain_robust(&nh->head, val_up, val_down, v);
24179 ++}
24180 ++EXPORT_SYMBOL_GPL(raw_notifier_call_chain_robust);
24181 ++
24182 + /**
24183 +- * __raw_notifier_call_chain - Call functions in a raw notifier chain
24184 ++ * raw_notifier_call_chain - Call functions in a raw notifier chain
24185 + * @nh: Pointer to head of the raw notifier chain
24186 + * @val: Value passed unmodified to notifier function
24187 + * @v: Pointer passed unmodified to notifier function
24188 +- * @nr_to_call: See comment for notifier_call_chain.
24189 +- * @nr_calls: See comment for notifier_call_chain
24190 + *
24191 + * Calls each function in a notifier chain in turn. The functions
24192 + * run in an undefined context.
24193 +@@ -354,18 +404,10 @@ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
24194 + * Otherwise the return value is the return value
24195 + * of the last notifier function called.
24196 + */
24197 +-int __raw_notifier_call_chain(struct raw_notifier_head *nh,
24198 +- unsigned long val, void *v,
24199 +- int nr_to_call, int *nr_calls)
24200 +-{
24201 +- return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
24202 +-}
24203 +-EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);
24204 +-
24205 + int raw_notifier_call_chain(struct raw_notifier_head *nh,
24206 + unsigned long val, void *v)
24207 + {
24208 +- return __raw_notifier_call_chain(nh, val, v, -1, NULL);
24209 ++ return notifier_call_chain(&nh->head, val, v, -1, NULL);
24210 + }
24211 + EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
24212 +
24213 +@@ -437,12 +479,10 @@ int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
24214 + EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
24215 +
24216 + /**
24217 +- * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain
24218 ++ * srcu_notifier_call_chain - Call functions in an SRCU notifier chain
24219 + * @nh: Pointer to head of the SRCU notifier chain
24220 + * @val: Value passed unmodified to notifier function
24221 + * @v: Pointer passed unmodified to notifier function
24222 +- * @nr_to_call: See comment for notifier_call_chain.
24223 +- * @nr_calls: See comment for notifier_call_chain
24224 + *
24225 + * Calls each function in a notifier chain in turn. The functions
24226 + * run in a process context, so they are allowed to block.
24227 +@@ -454,25 +494,17 @@ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
24228 + * Otherwise the return value is the return value
24229 + * of the last notifier function called.
24230 + */
24231 +-int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
24232 +- unsigned long val, void *v,
24233 +- int nr_to_call, int *nr_calls)
24234 ++int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
24235 ++ unsigned long val, void *v)
24236 + {
24237 + int ret;
24238 + int idx;
24239 +
24240 + idx = srcu_read_lock(&nh->srcu);
24241 +- ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
24242 ++ ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
24243 + srcu_read_unlock(&nh->srcu, idx);
24244 + return ret;
24245 + }
24246 +-EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);
24247 +-
24248 +-int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
24249 +- unsigned long val, void *v)
24250 +-{
24251 +- return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
24252 +-}
24253 + EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
24254 +
24255 + /**
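
notifier_call_chain_robust() is the primitive all the *_call_chain_robust() wrappers above build on: it replaces the old exported nr_to_call/nr_calls bookkeeping with a walk-up/rollback-down pair, which is also why the chain must be held stable (spinlock, rwsem, or caller-provided exclusion as in the raw variant) rather than traversed under RCU. The core control flow, reduced to a plain array of callbacks standing in for the notifier list (illustration only):

    static int call_chain_robust(int (**cbs)(unsigned long), int n,
                                 unsigned long val_up, unsigned long val_down)
    {
            int i, ret = 0;

            for (i = 0; i < n; i++) {
                    ret = cbs[i](val_up);
                    if (ret) {              /* NOTIFY_STOP_MASK analogue */
                            while (i--)     /* roll back only the ones that ran */
                                    cbs[i](val_down);
                            break;
                    }
            }
            return ret;
    }
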
24256 +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
24257 +index e7aa57fb2fdc3..2fc7d509a34fc 100644
24258 +--- a/kernel/power/hibernate.c
24259 ++++ b/kernel/power/hibernate.c
24260 +@@ -706,8 +706,8 @@ static int load_image_and_restore(void)
24261 + */
24262 + int hibernate(void)
24263 + {
24264 +- int error, nr_calls = 0;
24265 + bool snapshot_test = false;
24266 ++ int error;
24267 +
24268 + if (!hibernation_available()) {
24269 + pm_pr_dbg("Hibernation not available.\n");
24270 +@@ -723,11 +723,9 @@ int hibernate(void)
24271 +
24272 + pr_info("hibernation entry\n");
24273 + pm_prepare_console();
24274 +- error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
24275 +- if (error) {
24276 +- nr_calls--;
24277 +- goto Exit;
24278 +- }
24279 ++ error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
24280 ++ if (error)
24281 ++ goto Restore;
24282 +
24283 + ksys_sync_helper();
24284 +
24285 +@@ -785,7 +783,8 @@ int hibernate(void)
24286 + /* Don't bother checking whether freezer_test_done is true */
24287 + freezer_test_done = false;
24288 + Exit:
24289 +- __pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
24290 ++ pm_notifier_call_chain(PM_POST_HIBERNATION);
24291 ++ Restore:
24292 + pm_restore_console();
24293 + hibernate_release();
24294 + Unlock:
24295 +@@ -804,7 +803,7 @@ int hibernate(void)
24296 + */
24297 + int hibernate_quiet_exec(int (*func)(void *data), void *data)
24298 + {
24299 +- int error, nr_calls = 0;
24300 ++ int error;
24301 +
24302 + lock_system_sleep();
24303 +
24304 +@@ -815,11 +814,9 @@ int hibernate_quiet_exec(int (*func)(void *data), void *data)
24305 +
24306 + pm_prepare_console();
24307 +
24308 +- error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
24309 +- if (error) {
24310 +- nr_calls--;
24311 +- goto exit;
24312 +- }
24313 ++ error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
24314 ++ if (error)
24315 ++ goto restore;
24316 +
24317 + error = freeze_processes();
24318 + if (error)
24319 +@@ -880,8 +877,9 @@ thaw:
24320 + thaw_processes();
24321 +
24322 + exit:
24323 +- __pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
24324 ++ pm_notifier_call_chain(PM_POST_HIBERNATION);
24325 +
24326 ++restore:
24327 + pm_restore_console();
24328 +
24329 + hibernate_release();
24330 +@@ -910,7 +908,7 @@ EXPORT_SYMBOL_GPL(hibernate_quiet_exec);
24331 + */
24332 + static int software_resume(void)
24333 + {
24334 +- int error, nr_calls = 0;
24335 ++ int error;
24336 +
24337 + /*
24338 + * If the user said "noresume".. bail out early.
24339 +@@ -948,17 +946,6 @@ static int software_resume(void)
24340 +
24341 + /* Check if the device is there */
24342 + swsusp_resume_device = name_to_dev_t(resume_file);
24343 +-
24344 +- /*
24345 +- * name_to_dev_t is ineffective to verify parition if resume_file is in
24346 +- * integer format. (e.g. major:minor)
24347 +- */
24348 +- if (isdigit(resume_file[0]) && resume_wait) {
24349 +- int partno;
24350 +- while (!get_gendisk(swsusp_resume_device, &partno))
24351 +- msleep(10);
24352 +- }
24353 +-
24354 + if (!swsusp_resume_device) {
24355 + /*
24356 + * Some device discovery might still be in progress; we need
24357 +@@ -997,11 +984,9 @@ static int software_resume(void)
24358 +
24359 + pr_info("resume from hibernation\n");
24360 + pm_prepare_console();
24361 +- error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
24362 +- if (error) {
24363 +- nr_calls--;
24364 +- goto Close_Finish;
24365 +- }
24366 ++ error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
24367 ++ if (error)
24368 ++ goto Restore;
24369 +
24370 + pm_pr_dbg("Preparing processes for hibernation restore.\n");
24371 + error = freeze_processes();
24372 +@@ -1017,7 +1002,8 @@ static int software_resume(void)
24373 + error = load_image_and_restore();
24374 + thaw_processes();
24375 + Finish:
24376 +- __pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
24377 ++ pm_notifier_call_chain(PM_POST_RESTORE);
24378 ++ Restore:
24379 + pm_restore_console();
24380 + pr_info("resume failed (%d)\n", error);
24381 + hibernate_release();
24382 +diff --git a/kernel/power/main.c b/kernel/power/main.c
24383 +index 40f86ec4ab30d..0aefd6f57e0ac 100644
24384 +--- a/kernel/power/main.c
24385 ++++ b/kernel/power/main.c
24386 +@@ -80,18 +80,18 @@ int unregister_pm_notifier(struct notifier_block *nb)
24387 + }
24388 + EXPORT_SYMBOL_GPL(unregister_pm_notifier);
24389 +
24390 +-int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls)
24391 ++int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down)
24392 + {
24393 + int ret;
24394 +
24395 +- ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL,
24396 +- nr_to_call, nr_calls);
24397 ++ ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL);
24398 +
24399 + return notifier_to_errno(ret);
24400 + }
24401 ++
24402 + int pm_notifier_call_chain(unsigned long val)
24403 + {
24404 +- return __pm_notifier_call_chain(val, -1, NULL);
24405 ++ return blocking_notifier_call_chain(&pm_chain_head, val, NULL);
24406 + }
24407 +
24408 + /* If set, devices may be suspended and resumed asynchronously. */
24409 +diff --git a/kernel/power/power.h b/kernel/power/power.h
24410 +index 32fc89ac96c30..24f12d534515f 100644
24411 +--- a/kernel/power/power.h
24412 ++++ b/kernel/power/power.h
24413 +@@ -210,8 +210,7 @@ static inline void suspend_test_finish(const char *label) {}
24414 +
24415 + #ifdef CONFIG_PM_SLEEP
24416 + /* kernel/power/main.c */
24417 +-extern int __pm_notifier_call_chain(unsigned long val, int nr_to_call,
24418 +- int *nr_calls);
24419 ++extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down);
24420 + extern int pm_notifier_call_chain(unsigned long val);
24421 + #endif
24422 +
24423 +diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
24424 +index 8b1bb5ee7e5d6..32391acc806bf 100644
24425 +--- a/kernel/power/suspend.c
24426 ++++ b/kernel/power/suspend.c
24427 +@@ -342,18 +342,16 @@ static int suspend_test(int level)
24428 + */
24429 + static int suspend_prepare(suspend_state_t state)
24430 + {
24431 +- int error, nr_calls = 0;
24432 ++ int error;
24433 +
24434 + if (!sleep_state_supported(state))
24435 + return -EPERM;
24436 +
24437 + pm_prepare_console();
24438 +
24439 +- error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls);
24440 +- if (error) {
24441 +- nr_calls--;
24442 +- goto Finish;
24443 +- }
24444 ++ error = pm_notifier_call_chain_robust(PM_SUSPEND_PREPARE, PM_POST_SUSPEND);
24445 ++ if (error)
24446 ++ goto Restore;
24447 +
24448 + trace_suspend_resume(TPS("freeze_processes"), 0, true);
24449 + error = suspend_freeze_processes();
24450 +@@ -363,8 +361,8 @@ static int suspend_prepare(suspend_state_t state)
24451 +
24452 + suspend_stats.failed_freeze++;
24453 + dpm_save_failed_step(SUSPEND_FREEZE);
24454 +- Finish:
24455 +- __pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL);
24456 ++ pm_notifier_call_chain(PM_POST_SUSPEND);
24457 ++ Restore:
24458 + pm_restore_console();
24459 + return error;
24460 + }
24461 +diff --git a/kernel/power/user.c b/kernel/power/user.c
24462 +index d5eedc2baa2a1..047f598f89a5c 100644
24463 +--- a/kernel/power/user.c
24464 ++++ b/kernel/power/user.c
24465 +@@ -46,7 +46,7 @@ int is_hibernate_resume_dev(const struct inode *bd_inode)
24466 + static int snapshot_open(struct inode *inode, struct file *filp)
24467 + {
24468 + struct snapshot_data *data;
24469 +- int error, nr_calls = 0;
24470 ++ int error;
24471 +
24472 + if (!hibernation_available())
24473 + return -EPERM;
24474 +@@ -73,9 +73,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
24475 + swap_type_of(swsusp_resume_device, 0, NULL) : -1;
24476 + data->mode = O_RDONLY;
24477 + data->free_bitmaps = false;
24478 +- error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
24479 +- if (error)
24480 +- __pm_notifier_call_chain(PM_POST_HIBERNATION, --nr_calls, NULL);
24481 ++ error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
24482 + } else {
24483 + /*
24484 + * Resuming. We may need to wait for the image device to
24485 +@@ -85,15 +83,11 @@ static int snapshot_open(struct inode *inode, struct file *filp)
24486 +
24487 + data->swap = -1;
24488 + data->mode = O_WRONLY;
24489 +- error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
24490 ++ error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
24491 + if (!error) {
24492 + error = create_basic_memory_bitmaps();
24493 + data->free_bitmaps = !error;
24494 +- } else
24495 +- nr_calls--;
24496 +-
24497 +- if (error)
24498 +- __pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
24499 ++ }
24500 + }
24501 + if (error)
24502 + hibernate_release();
24503 +diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
24504 +index f453bf8d2f1ef..49202099692be 100644
24505 +--- a/kernel/rcu/rcutorture.c
24506 ++++ b/kernel/rcu/rcutorture.c
24507 +@@ -2160,9 +2160,20 @@ static int __init rcu_torture_fwd_prog_init(void)
24508 + return -ENOMEM;
24509 + spin_lock_init(&rfp->rcu_fwd_lock);
24510 + rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
24511 ++ rcu_fwds = rfp;
24512 + return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
24513 + }
24514 +
24515 ++static void rcu_torture_fwd_prog_cleanup(void)
24516 ++{
24517 ++ struct rcu_fwd *rfp;
24518 ++
24519 ++ torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
24520 ++ rfp = rcu_fwds;
24521 ++ rcu_fwds = NULL;
24522 ++ kfree(rfp);
24523 ++}
24524 ++
24525 + /* Callback function for RCU barrier testing. */
24526 + static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
24527 + {
24528 +@@ -2460,7 +2471,7 @@ rcu_torture_cleanup(void)
24529 + show_rcu_gp_kthreads();
24530 + rcu_torture_read_exit_cleanup();
24531 + rcu_torture_barrier_cleanup();
24532 +- torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
24533 ++ rcu_torture_fwd_prog_cleanup();
24534 + torture_stop_kthread(rcu_torture_stall, stall_task);
24535 + torture_stop_kthread(rcu_torture_writer, writer_task);
24536 +
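
The rcutorture change pairs the allocation with a proper cleanup routine, and ordering is the whole point: rcu_fwds is published before the kthread that dereferences it starts, and on teardown the kthread is stopped before the pointer is cleared and freed. A comment-annotated outline of the calls from the hunks above:

    /* setup */
    rcu_fwds = rfp;                                 /* publish first    */
    torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);

    /* teardown: rcu_torture_fwd_prog_cleanup() */
    torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
    rfp = rcu_fwds;                                 /* no readers remain */
    rcu_fwds = NULL;                                /* unpublish         */
    kfree(rfp);                                     /* only then free    */
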
24537 +diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
24538 +index d9291f883b542..952595c678b37 100644
24539 +--- a/kernel/rcu/refscale.c
24540 ++++ b/kernel/rcu/refscale.c
24541 +@@ -546,9 +546,11 @@ static int main_func(void *arg)
24542 + // Print the average of all experiments
24543 + SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");
24544 +
24545 +- buf[0] = 0;
24546 +- strcat(buf, "\n");
24547 +- strcat(buf, "Runs\tTime(ns)\n");
24548 ++ if (!errexit) {
24549 ++ buf[0] = 0;
24550 ++ strcat(buf, "\n");
24551 ++ strcat(buf, "Runs\tTime(ns)\n");
24552 ++ }
24553 +
24554 + for (exp = 0; exp < nruns; exp++) {
24555 + u64 avg;
24556 +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
24557 +index f78ee759af9cb..388a2ad292bf4 100644
24558 +--- a/kernel/rcu/tree.c
24559 ++++ b/kernel/rcu/tree.c
24560 +@@ -1898,7 +1898,7 @@ static void rcu_gp_fqs_loop(void)
24561 + break;
24562 + /* If time for quiescent-state forcing, do it. */
24563 + if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
24564 +- (gf & RCU_GP_FLAG_FQS)) {
24565 ++ (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
24566 + trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
24567 + TPS("fqsstart"));
24568 + rcu_gp_fqs(first_gp_fqs);
24569 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
24570 +index 2d95dc3f46444..b1e0da56abcac 100644
24571 +--- a/kernel/sched/core.c
24572 ++++ b/kernel/sched/core.c
24573 +@@ -43,7 +43,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
24574 +
24575 + DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
24576 +
24577 +-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
24578 ++#ifdef CONFIG_SCHED_DEBUG
24579 + /*
24580 + * Debugging: various feature bits
24581 + *
24582 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
24583 +index 1a68a0536adda..48a6d442b4443 100644
24584 +--- a/kernel/sched/fair.c
24585 ++++ b/kernel/sched/fair.c
24586 +@@ -1548,7 +1548,7 @@ struct task_numa_env {
24587 +
24588 + static unsigned long cpu_load(struct rq *rq);
24589 + static unsigned long cpu_util(int cpu);
24590 +-static inline long adjust_numa_imbalance(int imbalance, int src_nr_running);
24591 ++static inline long adjust_numa_imbalance(int imbalance, int nr_running);
24592 +
24593 + static inline enum
24594 + numa_type numa_classify(unsigned int imbalance_pct,
24595 +@@ -1925,7 +1925,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
24596 + src_running = env->src_stats.nr_running - 1;
24597 + dst_running = env->dst_stats.nr_running + 1;
24598 + imbalance = max(0, dst_running - src_running);
24599 +- imbalance = adjust_numa_imbalance(imbalance, src_running);
24600 ++ imbalance = adjust_numa_imbalance(imbalance, dst_running);
24601 +
24602 + /* Use idle CPU if there is no imbalance */
24603 + if (!imbalance) {
24604 +@@ -6075,7 +6075,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
24605 + /*
24606 + * Scan the local SMT mask for idle CPUs.
24607 + */
24608 +-static int select_idle_smt(struct task_struct *p, int target)
24609 ++static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
24610 + {
24611 + int cpu;
24612 +
24613 +@@ -6083,7 +6083,8 @@ static int select_idle_smt(struct task_struct *p, int target)
24614 + return -1;
24615 +
24616 + for_each_cpu(cpu, cpu_smt_mask(target)) {
24617 +- if (!cpumask_test_cpu(cpu, p->cpus_ptr))
24618 ++ if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
24619 ++ !cpumask_test_cpu(cpu, sched_domain_span(sd)))
24620 + continue;
24621 + if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
24622 + return cpu;
24623 +@@ -6099,7 +6100,7 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s
24624 + return -1;
24625 + }
24626 +
24627 +-static inline int select_idle_smt(struct task_struct *p, int target)
24628 ++static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
24629 + {
24630 + return -1;
24631 + }
24632 +@@ -6274,7 +6275,7 @@ symmetric:
24633 + if ((unsigned)i < nr_cpumask_bits)
24634 + return i;
24635 +
24636 +- i = select_idle_smt(p, target);
24637 ++ i = select_idle_smt(p, sd, target);
24638 + if ((unsigned)i < nr_cpumask_bits)
24639 + return i;
24640 +
24641 +@@ -6594,7 +6595,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
24642 +
24643 + util = cpu_util_next(cpu, p, cpu);
24644 + cpu_cap = capacity_of(cpu);
24645 +- spare_cap = cpu_cap - util;
24646 ++ spare_cap = cpu_cap;
24647 ++ lsub_positive(&spare_cap, util);
24648 +
24649 + /*
24650 + * Skip CPUs that cannot satisfy the capacity request.
24651 +@@ -8957,7 +8959,7 @@ next_group:
24652 + }
24653 + }
24654 +
24655 +-static inline long adjust_numa_imbalance(int imbalance, int src_nr_running)
24656 ++static inline long adjust_numa_imbalance(int imbalance, int nr_running)
24657 + {
24658 + unsigned int imbalance_min;
24659 +
24660 +@@ -8966,7 +8968,7 @@ static inline long adjust_numa_imbalance(int imbalance, int src_nr_running)
24661 + * tasks that remain local when the source domain is almost idle.
24662 + */
24663 + imbalance_min = 2;
24664 +- if (src_nr_running <= imbalance_min)
24665 ++ if (nr_running <= imbalance_min)
24666 + return 0;
24667 +
24668 + return imbalance;
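
Among the fair-scheduler fixes, the spare-capacity one is subtle: util and cpu_cap are unsigned, so the old "cpu_cap - util" wraps to a huge value whenever estimated utilization exceeds capacity, making an overloaded CPU look attractive to find_energy_efficient_cpu(). lsub_positive() clamps the subtraction at zero; a stand-alone demonstration using a simplified re-implementation of the macro:

    #include <stdio.h>

    /* simplified re-implementation of the kernel's lsub_positive() */
    #define lsub_positive(_ptr, _val) do {                          \
            unsigned long *__p = (_ptr), __v = (_val);              \
            *__p -= (*__p > __v) ? __v : *__p;                      \
    } while (0)

    int main(void)
    {
            unsigned long cap = 512, util = 800, spare = cap;

            lsub_positive(&spare, util);
            printf("naive: %lu  clamped: %lu\n", cap - util, spare);
            /* on 64-bit, naive wraps to 18446744073709551328;
             * clamped is 0 */
            return 0;
    }
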
24669 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
24670 +index 28709f6b0975c..8d1ca65db3b0d 100644
24671 +--- a/kernel/sched/sched.h
24672 ++++ b/kernel/sched/sched.h
24673 +@@ -1629,7 +1629,7 @@ enum {
24674 +
24675 + #undef SCHED_FEAT
24676 +
24677 +-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
24678 ++#ifdef CONFIG_SCHED_DEBUG
24679 +
24680 + /*
24681 + * To support run-time toggling of sched features, all the translation units
24682 +@@ -1637,6 +1637,7 @@ enum {
24683 + */
24684 + extern const_debug unsigned int sysctl_sched_features;
24685 +
24686 ++#ifdef CONFIG_JUMP_LABEL
24687 + #define SCHED_FEAT(name, enabled) \
24688 + static __always_inline bool static_branch_##name(struct static_key *key) \
24689 + { \
24690 +@@ -1649,7 +1650,13 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
24691 + extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
24692 + #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
24693 +
24694 +-#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
24695 ++#else /* !CONFIG_JUMP_LABEL */
24696 ++
24697 ++#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
24698 ++
24699 ++#endif /* CONFIG_JUMP_LABEL */
24700 ++
24701 ++#else /* !SCHED_DEBUG */
24702 +
24703 + /*
24704 + * Each translation unit has its own copy of sysctl_sched_features to allow
24705 +@@ -1665,7 +1672,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
24706 +
24707 + #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
24708 +
24709 +-#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
24710 ++#endif /* SCHED_DEBUG */
24711 +
24712 + extern struct static_key_false sched_numa_balancing;
24713 + extern struct static_key_false sched_schedstats;
24714 +diff --git a/kernel/time/timer.c b/kernel/time/timer.c
24715 +index a50364df10543..401fcb9d73886 100644
24716 +--- a/kernel/time/timer.c
24717 ++++ b/kernel/time/timer.c
24718 +@@ -1715,13 +1715,6 @@ void update_process_times(int user_tick)
24719 + scheduler_tick();
24720 + if (IS_ENABLED(CONFIG_POSIX_TIMERS))
24721 + run_posix_cpu_timers();
24722 +-
24723 +- /* The current CPU might make use of net randoms without receiving IRQs
24724 +- * to renew them often enough. Let's update the net_rand_state from a
24725 +- * non-constant value that's not affine to the number of calls to make
24726 +- * sure it's updated when there's some activity (we don't care in idle).
24727 +- */
24728 +- this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
24729 + }
24730 +
24731 + /**
24732 +diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
24733 +index c6cca0d1d5840..c8892156db341 100644
24734 +--- a/kernel/trace/trace_events_synth.c
24735 ++++ b/kernel/trace/trace_events_synth.c
24736 +@@ -132,7 +132,7 @@ static int synth_field_string_size(char *type)
24737 + start += sizeof("char[") - 1;
24738 +
24739 + end = strchr(type, ']');
24740 +- if (!end || end < start)
24741 ++ if (!end || end < start || type + strlen(type) > end + 1)
24742 + return -EINVAL;
24743 +
24744 + len = end - start;
24745 +@@ -465,6 +465,7 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
24746 + struct synth_field *field;
24747 + const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
24748 + int len, ret = 0;
24749 ++ ssize_t size;
24750 +
24751 + if (field_type[0] == ';')
24752 + field_type++;
24753 +@@ -501,8 +502,14 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
24754 + if (field_type[0] == ';')
24755 + field_type++;
24756 + len = strlen(field_type) + 1;
24757 +- if (array)
24758 +- len += strlen(array);
24759 ++
24760 ++ if (array) {
24761 ++ int l = strlen(array);
24762 ++
24763 ++ if (l && array[l - 1] == ';')
24764 ++ l--;
24765 ++ len += l;
24766 ++ }
24767 + if (prefix)
24768 + len += strlen(prefix);
24769 +
24770 +@@ -520,11 +527,12 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
24771 + field->type[len - 1] = '\0';
24772 + }
24773 +
24774 +- field->size = synth_field_size(field->type);
24775 +- if (!field->size) {
24776 ++ size = synth_field_size(field->type);
24777 ++ if (size <= 0) {
24778 + ret = -EINVAL;
24779 + goto free;
24780 + }
24781 ++ field->size = size;
24782 +
24783 + if (synth_field_is_string(field->type))
24784 + field->is_string = true;
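
Both synthetic-event fixes are string-parsing hardening: the size parser now rejects trailing bytes after the ']' (previously "char[16]garbage" parsed), a trailing ';' on an array spec no longer inflates the computed type length, and a zero or negative size is caught by going through a signed ssize_t. The new bounds test in isolation (simplified, without the prefix handling):

    #include <stdio.h>
    #include <string.h>

    /* accept "char[N]" only when ']' is the last byte of the spec */
    static int size_spec_ok(const char *type)
    {
            const char *end = strchr(type, ']');

            return end && !(type + strlen(type) > end + 1);
    }

    int main(void)
    {
            printf("%d\n", size_spec_ok("char[16]"));      /* 1 */
            printf("%d\n", size_spec_ok("char[16]junk"));  /* 0: rejected now */
            return 0;
    }
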
24785 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
24786 +index 0c781f912f9f0..491789a793ae5 100644
24787 +--- a/lib/Kconfig.debug
24788 ++++ b/lib/Kconfig.debug
24789 +@@ -2367,6 +2367,15 @@ config TEST_HMM
24790 +
24791 + If unsure, say N.
24792 +
24793 ++config TEST_FREE_PAGES
24794 ++ tristate "Test freeing pages"
24795 ++ help
24796 ++ Test that a memory leak does not occur due to a race between
24797 ++ freeing a block of pages and a speculative page reference.
24798 ++ Loading this module is safe if your kernel has the bug fixed.
24799 ++ If the bug is not fixed, it will leak gigabytes of memory and
24800 ++ probably OOM your system.
24801 ++
24802 + config TEST_FPU
24803 + tristate "Test floating point operations in kernel space"
24804 + depends on X86 && !KCOV_INSTRUMENT_ALL
24805 +diff --git a/lib/Makefile b/lib/Makefile
24806 +index a4a4c6864f518..071b687b7363f 100644
24807 +--- a/lib/Makefile
24808 ++++ b/lib/Makefile
24809 +@@ -99,6 +99,7 @@ obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
24810 + obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
24811 + obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
24812 + obj-$(CONFIG_TEST_HMM) += test_hmm.o
24813 ++obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
24814 +
24815 + #
24816 + # CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
24817 +diff --git a/lib/crc32.c b/lib/crc32.c
24818 +index 35a03d03f9738..2a68dfd3b96c8 100644
24819 +--- a/lib/crc32.c
24820 ++++ b/lib/crc32.c
24821 +@@ -331,7 +331,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
24822 + return crc;
24823 + }
24824 +
24825 +-#if CRC_LE_BITS == 1
24826 ++#if CRC_BE_BITS == 1
24827 + u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
24828 + {
24829 + return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
24830 +diff --git a/lib/idr.c b/lib/idr.c
24831 +index c2cf2c52bbde5..4d2eef0259d2c 100644
24832 +--- a/lib/idr.c
24833 ++++ b/lib/idr.c
24834 +@@ -470,6 +470,7 @@ alloc:
24835 + goto retry;
24836 + nospc:
24837 + xas_unlock_irqrestore(&xas, flags);
24838 ++ kfree(alloc);
24839 + return -ENOSPC;
24840 + }
24841 + EXPORT_SYMBOL(ida_alloc_range);
24842 +diff --git a/lib/random32.c b/lib/random32.c
24843 +index dfb9981ab7989..be9f242a42075 100644
24844 +--- a/lib/random32.c
24845 ++++ b/lib/random32.c
24846 +@@ -41,16 +41,6 @@
24847 + #include <asm/unaligned.h>
24848 + #include <trace/events/random.h>
24849 +
24850 +-#ifdef CONFIG_RANDOM32_SELFTEST
24851 +-static void __init prandom_state_selftest(void);
24852 +-#else
24853 +-static inline void prandom_state_selftest(void)
24854 +-{
24855 +-}
24856 +-#endif
24857 +-
24858 +-DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
24859 +-
24860 + /**
24861 + * prandom_u32_state - seeded pseudo-random number generator.
24862 + * @state: pointer to state structure holding seeded state.
24863 +@@ -70,26 +60,6 @@ u32 prandom_u32_state(struct rnd_state *state)
24864 + }
24865 + EXPORT_SYMBOL(prandom_u32_state);
24866 +
24867 +-/**
24868 +- * prandom_u32 - pseudo random number generator
24869 +- *
24870 +- * A 32 bit pseudo-random number is generated using a fast
24871 +- * algorithm suitable for simulation. This algorithm is NOT
24872 +- * considered safe for cryptographic use.
24873 +- */
24874 +-u32 prandom_u32(void)
24875 +-{
24876 +- struct rnd_state *state = &get_cpu_var(net_rand_state);
24877 +- u32 res;
24878 +-
24879 +- res = prandom_u32_state(state);
24880 +- trace_prandom_u32(res);
24881 +- put_cpu_var(net_rand_state);
24882 +-
24883 +- return res;
24884 +-}
24885 +-EXPORT_SYMBOL(prandom_u32);
24886 +-
24887 + /**
24888 + * prandom_bytes_state - get the requested number of pseudo-random bytes
24889 + *
24890 +@@ -121,20 +91,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
24891 + }
24892 + EXPORT_SYMBOL(prandom_bytes_state);
24893 +
24894 +-/**
24895 +- * prandom_bytes - get the requested number of pseudo-random bytes
24896 +- * @buf: where to copy the pseudo-random bytes to
24897 +- * @bytes: the requested number of bytes
24898 +- */
24899 +-void prandom_bytes(void *buf, size_t bytes)
24900 +-{
24901 +- struct rnd_state *state = &get_cpu_var(net_rand_state);
24902 +-
24903 +- prandom_bytes_state(state, buf, bytes);
24904 +- put_cpu_var(net_rand_state);
24905 +-}
24906 +-EXPORT_SYMBOL(prandom_bytes);
24907 +-
24908 + static void prandom_warmup(struct rnd_state *state)
24909 + {
24910 + /* Calling RNG ten times to satisfy recurrence condition */
24911 +@@ -150,96 +106,6 @@ static void prandom_warmup(struct rnd_state *state)
24912 + prandom_u32_state(state);
24913 + }
24914 +
24915 +-static u32 __extract_hwseed(void)
24916 +-{
24917 +- unsigned int val = 0;
24918 +-
24919 +- (void)(arch_get_random_seed_int(&val) ||
24920 +- arch_get_random_int(&val));
24921 +-
24922 +- return val;
24923 +-}
24924 +-
24925 +-static void prandom_seed_early(struct rnd_state *state, u32 seed,
24926 +- bool mix_with_hwseed)
24927 +-{
24928 +-#define LCG(x) ((x) * 69069U) /* super-duper LCG */
24929 +-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
24930 +- state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
24931 +- state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
24932 +- state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
24933 +- state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
24934 +-}
24935 +-
24936 +-/**
24937 +- * prandom_seed - add entropy to pseudo random number generator
24938 +- * @entropy: entropy value
24939 +- *
24940 +- * Add some additional entropy to the prandom pool.
24941 +- */
24942 +-void prandom_seed(u32 entropy)
24943 +-{
24944 +- int i;
24945 +- /*
24946 +- * No locking on the CPUs, but then somewhat random results are, well,
24947 +- * expected.
24948 +- */
24949 +- for_each_possible_cpu(i) {
24950 +- struct rnd_state *state = &per_cpu(net_rand_state, i);
24951 +-
24952 +- state->s1 = __seed(state->s1 ^ entropy, 2U);
24953 +- prandom_warmup(state);
24954 +- }
24955 +-}
24956 +-EXPORT_SYMBOL(prandom_seed);
24957 +-
24958 +-/*
24959 +- * Generate some initially weak seeding values to allow
24960 +- * to start the prandom_u32() engine.
24961 +- */
24962 +-static int __init prandom_init(void)
24963 +-{
24964 +- int i;
24965 +-
24966 +- prandom_state_selftest();
24967 +-
24968 +- for_each_possible_cpu(i) {
24969 +- struct rnd_state *state = &per_cpu(net_rand_state, i);
24970 +- u32 weak_seed = (i + jiffies) ^ random_get_entropy();
24971 +-
24972 +- prandom_seed_early(state, weak_seed, true);
24973 +- prandom_warmup(state);
24974 +- }
24975 +-
24976 +- return 0;
24977 +-}
24978 +-core_initcall(prandom_init);
24979 +-
24980 +-static void __prandom_timer(struct timer_list *unused);
24981 +-
24982 +-static DEFINE_TIMER(seed_timer, __prandom_timer);
24983 +-
24984 +-static void __prandom_timer(struct timer_list *unused)
24985 +-{
24986 +- u32 entropy;
24987 +- unsigned long expires;
24988 +-
24989 +- get_random_bytes(&entropy, sizeof(entropy));
24990 +- prandom_seed(entropy);
24991 +-
24992 +- /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
24993 +- expires = 40 + prandom_u32_max(40);
24994 +- seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
24995 +-
24996 +- add_timer(&seed_timer);
24997 +-}
24998 +-
24999 +-static void __init __prandom_start_seed_timer(void)
25000 +-{
25001 +- seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
25002 +- add_timer(&seed_timer);
25003 +-}
25004 +-
25005 + void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
25006 + {
25007 + int i;
25008 +@@ -259,51 +125,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
25009 + }
25010 + EXPORT_SYMBOL(prandom_seed_full_state);
25011 +
25012 +-/*
25013 +- * Generate better values after random number generator
25014 +- * is fully initialized.
25015 +- */
25016 +-static void __prandom_reseed(bool late)
25017 +-{
25018 +- unsigned long flags;
25019 +- static bool latch = false;
25020 +- static DEFINE_SPINLOCK(lock);
25021 +-
25022 +- /* Asking for random bytes might result in bytes getting
25023 +- * moved into the nonblocking pool and thus marking it
25024 +- * as initialized. In this case we would double back into
25025 +- * this function and attempt to do a late reseed.
25026 +- * Ignore the pointless attempt to reseed again if we're
25027 +- * already waiting for bytes when the nonblocking pool
25028 +- * got initialized.
25029 +- */
25030 +-
25031 +- /* only allow initial seeding (late == false) once */
25032 +- if (!spin_trylock_irqsave(&lock, flags))
25033 +- return;
25034 +-
25035 +- if (latch && !late)
25036 +- goto out;
25037 +-
25038 +- latch = true;
25039 +- prandom_seed_full_state(&net_rand_state);
25040 +-out:
25041 +- spin_unlock_irqrestore(&lock, flags);
25042 +-}
25043 +-
25044 +-void prandom_reseed_late(void)
25045 +-{
25046 +- __prandom_reseed(true);
25047 +-}
25048 +-
25049 +-static int __init prandom_reseed(void)
25050 +-{
25051 +- __prandom_reseed(false);
25052 +- __prandom_start_seed_timer();
25053 +- return 0;
25054 +-}
25055 +-late_initcall(prandom_reseed);
25056 +-
25057 + #ifdef CONFIG_RANDOM32_SELFTEST
25058 + static struct prandom_test1 {
25059 + u32 seed;
25060 +@@ -423,7 +244,28 @@ static struct prandom_test2 {
25061 + { 407983964U, 921U, 728767059U },
25062 + };
25063 +
25064 +-static void __init prandom_state_selftest(void)
25065 ++static u32 __extract_hwseed(void)
25066 ++{
25067 ++ unsigned int val = 0;
25068 ++
25069 ++ (void)(arch_get_random_seed_int(&val) ||
25070 ++ arch_get_random_int(&val));
25071 ++
25072 ++ return val;
25073 ++}
25074 ++
25075 ++static void prandom_seed_early(struct rnd_state *state, u32 seed,
25076 ++ bool mix_with_hwseed)
25077 ++{
25078 ++#define LCG(x) ((x) * 69069U) /* super-duper LCG */
25079 ++#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
25080 ++ state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
25081 ++ state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
25082 ++ state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
25083 ++ state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
25084 ++}
25085 ++
25086 ++static int __init prandom_state_selftest(void)
25087 + {
25088 + int i, j, errors = 0, runs = 0;
25089 + bool error = false;
25090 +@@ -463,5 +305,267 @@ static void __init prandom_state_selftest(void)
25091 + pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
25092 + else
25093 + pr_info("prandom: %d self tests passed\n", runs);
25094 ++ return 0;
25095 + }
25096 ++core_initcall(prandom_state_selftest);
25097 + #endif
25098 ++
25099 ++/*
25100 ++ * The prandom_u32() implementation is now completely separate from the
25101 ++ * prandom_state() functions, which are retained (for now) for compatibility.
25102 ++ *
25103 ++ * Because of (ab)use in the networking code for choosing random TCP/UDP port
25104 ++ * numbers, which open DoS possibilities if guessable, we want something
25105 ++ * stronger than a standard PRNG. But the performance requirements of
25106 ++ * the network code do not allow robust crypto for this application.
25107 ++ *
25108 ++ * So this is a homebrew Junior Spaceman implementation, based on the
25109 ++ * lowest-latency trustworthy crypto primitive available, SipHash.
25110 ++ * (The authors of SipHash have not been consulted about this abuse of
25111 ++ * their work.)
25112 ++ *
25113 ++ * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
25114 ++ * one word of output. This abbreviated version uses 2 rounds per word
25115 ++ * of output.
25116 ++ */
25117 ++
25118 ++struct siprand_state {
25119 ++ unsigned long v0;
25120 ++ unsigned long v1;
25121 ++ unsigned long v2;
25122 ++ unsigned long v3;
25123 ++};
25124 ++
25125 ++static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
25126 ++
25127 ++/*
25128 ++ * This is the core CPRNG function. As "pseudorandom", this is not used
25129 ++ * for truly valuable things, just intended to be a PITA to guess.
25130 ++ * For maximum speed, we do just two SipHash rounds per word. This is
25131 ++ * the same rate as 4 rounds per 64 bits that SipHash normally uses,
25132 ++ * so hopefully it's reasonably secure.
25133 ++ *
25134 ++ * There are two changes from the official SipHash finalization:
25135 ++ * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
25136 ++ * they are there only to make the output rounds distinct from the input
25137 ++ * rounds, and this application has no input rounds.
25138 ++ * - Rather than returning v0^v1^v2^v3, return v1+v3.
25139 ++ * If you look at the SipHash round, the last operation on v3 is
25140 ++ * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
25141 ++ * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but
25142 ++ * it still cancels out half of the bits in v2 for no benefit.)
25143 ++ * Second, since the last combining operation was xor, continue the
25144 ++ * pattern of alternating xor/add for a tiny bit of extra non-linearity.
25145 ++ */
25146 ++static inline u32 siprand_u32(struct siprand_state *s)
25147 ++{
25148 ++ unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
25149 ++
25150 ++ PRND_SIPROUND(v0, v1, v2, v3);
25151 ++ PRND_SIPROUND(v0, v1, v2, v3);
25152 ++ s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
25153 ++ return v1 + v3;
25154 ++}
25155 ++
25156 ++
25157 ++/**
25158 ++ * prandom_u32 - pseudo random number generator
25159 ++ *
25160 ++ * A 32 bit pseudo-random number is generated using a fast
25161 ++ * algorithm suitable for simulation. This algorithm is NOT
25162 ++ * considered safe for cryptographic use.
25163 ++ */
25164 ++u32 prandom_u32(void)
25165 ++{
25166 ++ struct siprand_state *state = get_cpu_ptr(&net_rand_state);
25167 ++ u32 res = siprand_u32(state);
25168 ++
25169 ++ trace_prandom_u32(res);
25170 ++ put_cpu_ptr(&net_rand_state);
25171 ++ return res;
25172 ++}
25173 ++EXPORT_SYMBOL(prandom_u32);
25174 ++
25175 ++/**
25176 ++ * prandom_bytes - get the requested number of pseudo-random bytes
25177 ++ * @buf: where to copy the pseudo-random bytes to
25178 ++ * @bytes: the requested number of bytes
25179 ++ */
25180 ++void prandom_bytes(void *buf, size_t bytes)
25181 ++{
25182 ++ struct siprand_state *state = get_cpu_ptr(&net_rand_state);
25183 ++ u8 *ptr = buf;
25184 ++
25185 ++ while (bytes >= sizeof(u32)) {
25186 ++ put_unaligned(siprand_u32(state), (u32 *)ptr);
25187 ++ ptr += sizeof(u32);
25188 ++ bytes -= sizeof(u32);
25189 ++ }
25190 ++
25191 ++ if (bytes > 0) {
25192 ++ u32 rem = siprand_u32(state);
25193 ++
25194 ++ do {
25195 ++ *ptr++ = (u8)rem;
25196 ++ rem >>= BITS_PER_BYTE;
25197 ++ } while (--bytes > 0);
25198 ++ }
25199 ++ put_cpu_ptr(&net_rand_state);
25200 ++}
25201 ++EXPORT_SYMBOL(prandom_bytes);
25202 ++
25203 ++/**
25204 ++ * prandom_seed - add entropy to pseudo random number generator
25205 ++ * @entropy: entropy value
25206 ++ *
25207 ++ * Add some additional seed material to the prandom pool.
25208 ++ * The "entropy" is actually our IP address (the only caller is
25209 ++ * the network code), not for unpredictability, but to ensure that
25210 ++ * different machines are initialized differently.
25211 ++ */
25212 ++void prandom_seed(u32 entropy)
25213 ++{
25214 ++ int i;
25215 ++
25216 ++ add_device_randomness(&entropy, sizeof(entropy));
25217 ++
25218 ++ for_each_possible_cpu(i) {
25219 ++ struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
25220 ++ unsigned long v0 = state->v0, v1 = state->v1;
25221 ++ unsigned long v2 = state->v2, v3 = state->v3;
25222 ++
25223 ++ do {
25224 ++ v3 ^= entropy;
25225 ++ PRND_SIPROUND(v0, v1, v2, v3);
25226 ++ PRND_SIPROUND(v0, v1, v2, v3);
25227 ++ v0 ^= entropy;
25228 ++ } while (unlikely(!v0 || !v1 || !v2 || !v3));
25229 ++
25230 ++ WRITE_ONCE(state->v0, v0);
25231 ++ WRITE_ONCE(state->v1, v1);
25232 ++ WRITE_ONCE(state->v2, v2);
25233 ++ WRITE_ONCE(state->v3, v3);
25234 ++ }
25235 ++}
25236 ++EXPORT_SYMBOL(prandom_seed);
25237 ++
25238 ++/*
25239 ++ * Generate some initially weak seeding values to allow
25240 ++ * the prandom_u32() engine to be started.
25241 ++ */
25242 ++static int __init prandom_init_early(void)
25243 ++{
25244 ++ int i;
25245 ++ unsigned long v0, v1, v2, v3;
25246 ++
25247 ++ if (!arch_get_random_long(&v0))
25248 ++ v0 = jiffies;
25249 ++ if (!arch_get_random_long(&v1))
25250 ++ v1 = random_get_entropy();
25251 ++ v2 = v0 ^ PRND_K0;
25252 ++ v3 = v1 ^ PRND_K1;
25253 ++
25254 ++ for_each_possible_cpu(i) {
25255 ++ struct siprand_state *state;
25256 ++
25257 ++ v3 ^= i;
25258 ++ PRND_SIPROUND(v0, v1, v2, v3);
25259 ++ PRND_SIPROUND(v0, v1, v2, v3);
25260 ++ v0 ^= i;
25261 ++
25262 ++ state = per_cpu_ptr(&net_rand_state, i);
25263 ++ state->v0 = v0; state->v1 = v1;
25264 ++ state->v2 = v2; state->v3 = v3;
25265 ++ }
25266 ++
25267 ++ return 0;
25268 ++}
25269 ++core_initcall(prandom_init_early);
25270 ++
25271 ++
25272 ++/* Stronger reseeding when available, and periodically thereafter. */
25273 ++static void prandom_reseed(struct timer_list *unused);
25274 ++
25275 ++static DEFINE_TIMER(seed_timer, prandom_reseed);
25276 ++
25277 ++static void prandom_reseed(struct timer_list *unused)
25278 ++{
25279 ++ unsigned long expires;
25280 ++ int i;
25281 ++
25282 ++ /*
25283 ++ * Reinitialize each CPU's PRNG with 128 bits of key.
25284 ++ * No locking on the CPUs, but then somewhat random results are,
25285 ++ * well, expected.
25286 ++ */
25287 ++ for_each_possible_cpu(i) {
25288 ++ struct siprand_state *state;
25289 ++ unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
25290 ++ unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
25291 ++#if BITS_PER_LONG == 32
25292 ++ int j;
25293 ++
25294 ++ /*
25295 ++ * On 32-bit machines, hash in two extra words to
25296 ++ * approximate 128-bit key length. Not that the hash
25297 ++ * has that much security, but this prevents a trivial
25298 ++ * 64-bit brute force.
25299 ++ */
25300 ++ for (j = 0; j < 2; j++) {
25301 ++ unsigned long m = get_random_long();
25302 ++
25303 ++ v3 ^= m;
25304 ++ PRND_SIPROUND(v0, v1, v2, v3);
25305 ++ PRND_SIPROUND(v0, v1, v2, v3);
25306 ++ v0 ^= m;
25307 ++ }
25308 ++#endif
25309 ++ /*
25310 ++ * Probably impossible in practice, but there is a
25311 ++ * theoretical risk that a race between this reseeding
25312 ++ * and the target CPU writing its state back could
25313 ++ * create the all-zero SipHash fixed point.
25314 ++ *
25315 ++ * To ensure that never happens, ensure the state
25316 ++ * we write contains no zero words.
25317 ++ */
25318 ++ state = per_cpu_ptr(&net_rand_state, i);
25319 ++ WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
25320 ++ WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
25321 ++ WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
25322 ++ WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
25323 ++ }
25324 ++
25325 ++ /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
25326 ++ expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
25327 ++ mod_timer(&seed_timer, expires);
25328 ++}
25329 ++
25330 ++/*
25331 ++ * The random ready callback can be called from almost any interrupt.
25332 ++ * To avoid worrying about whether it's safe to delay that interrupt
25333 ++ * long enough to seed all CPUs, just schedule an immediate timer event.
25334 ++ */
25335 ++static void prandom_timer_start(struct random_ready_callback *unused)
25336 ++{
25337 ++ mod_timer(&seed_timer, jiffies);
25338 ++}
25339 ++
25340 ++/*
25341 ++ * Start periodic full reseeding as soon as strong
25342 ++ * random numbers are available.
25343 ++ */
25344 ++static int __init prandom_init_late(void)
25345 ++{
25346 ++ static struct random_ready_callback random_ready = {
25347 ++ .func = prandom_timer_start
25348 ++ };
25349 ++ int ret = add_random_ready_callback(&random_ready);
25350 ++
25351 ++ if (ret == -EALREADY) {
25352 ++ prandom_timer_start(&random_ready);
25353 ++ ret = 0;
25354 ++ }
25355 ++ return ret;
25356 ++}
25357 ++late_initcall(prandom_init_late);
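[Annotation: for readers unfamiliar with SipHash, here is a rough user-space sketch of the "two rounds per word of output" generator the comment block above describes. SIPROUND follows the published SipHash round function (assumed here to match the kernel's PRND_SIPROUND on 64-bit), and the seed is an arbitrary nonzero constant rather than the kernel's entropy-derived one:

#include <stdint.h>
#include <stdio.h>

#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))

#define SIPROUND(v0, v1, v2, v3) do { \
	v0 += v1; v1 = ROTL(v1, 13); v1 ^= v0; v0 = ROTL(v0, 32); \
	v2 += v3; v3 = ROTL(v3, 16); v3 ^= v2; \
	v0 += v3; v3 = ROTL(v3, 21); v3 ^= v0; \
	v2 += v1; v1 = ROTL(v1, 17); v1 ^= v2; v2 = ROTL(v2, 32); \
} while (0)

struct sipstate { uint64_t v0, v1, v2, v3; };

static uint32_t siprand32(struct sipstate *s)
{
	uint64_t v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;

	SIPROUND(v0, v1, v2, v3);	/* two rounds per 32-bit word, */
	SIPROUND(v0, v1, v2, v3);	/* as the comment above explains */
	s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
	return (uint32_t)(v1 + v3);	/* v1 + v3, not v0^v1^v2^v3 */
}

int main(void)
{
	struct sipstate s = { 1, 2, 3, 4 };	/* arbitrary nonzero seed */
	int i;

	for (i = 0; i < 4; i++)
		printf("%08x\n", (unsigned)siprand32(&s));
	return 0;
}
]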
25358 +diff --git a/lib/test_free_pages.c b/lib/test_free_pages.c
25359 +new file mode 100644
25360 +index 0000000000000..074e76bd76b2b
25361 +--- /dev/null
25362 ++++ b/lib/test_free_pages.c
25363 +@@ -0,0 +1,42 @@
25364 ++// SPDX-License-Identifier: GPL-2.0+
25365 ++/*
25366 ++ * test_free_pages.c: Check that free_pages() doesn't leak memory
25367 ++ * Copyright (c) 2020 Oracle
25368 ++ * Author: Matthew Wilcox <willy@×××××××××.org>
25369 ++ */
25370 ++
25371 ++#include <linux/gfp.h>
25372 ++#include <linux/mm.h>
25373 ++#include <linux/module.h>
25374 ++
25375 ++static void test_free_pages(gfp_t gfp)
25376 ++{
25377 ++ unsigned int i;
25378 ++
25379 ++ for (i = 0; i < 1000 * 1000; i++) {
25380 ++ unsigned long addr = __get_free_pages(gfp, 3);
25381 ++ struct page *page = virt_to_page(addr);
25382 ++
25383 ++ /* Simulate page cache getting a speculative reference */
25384 ++ get_page(page);
25385 ++ free_pages(addr, 3);
25386 ++ put_page(page);
25387 ++ }
25388 ++}
25389 ++
25390 ++static int m_in(void)
25391 ++{
25392 ++ test_free_pages(GFP_KERNEL);
25393 ++ test_free_pages(GFP_KERNEL | __GFP_COMP);
25394 ++
25395 ++ return 0;
25396 ++}
25397 ++
25398 ++static void m_ex(void)
25399 ++{
25400 ++}
25401 ++
25402 ++module_init(m_in);
25403 ++module_exit(m_ex);
25404 ++MODULE_AUTHOR("Matthew Wilcox <willy@×××××××××.org>");
25405 ++MODULE_LICENSE("GPL");
25406 +diff --git a/mm/filemap.c b/mm/filemap.c
25407 +index 99c49eeae71b8..f6d36ccc23515 100644
25408 +--- a/mm/filemap.c
25409 ++++ b/mm/filemap.c
25410 +@@ -827,10 +827,10 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
25411 + }
25412 + EXPORT_SYMBOL_GPL(replace_page_cache_page);
25413 +
25414 +-static int __add_to_page_cache_locked(struct page *page,
25415 +- struct address_space *mapping,
25416 +- pgoff_t offset, gfp_t gfp_mask,
25417 +- void **shadowp)
25418 ++noinline int __add_to_page_cache_locked(struct page *page,
25419 ++ struct address_space *mapping,
25420 ++ pgoff_t offset, gfp_t gfp_mask,
25421 ++ void **shadowp)
25422 + {
25423 + XA_STATE(xas, &mapping->i_pages, offset);
25424 + int huge = PageHuge(page);
25425 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
25426 +index da397779a6d43..18a6f8c8b2844 100644
25427 +--- a/mm/huge_memory.c
25428 ++++ b/mm/huge_memory.c
25429 +@@ -2335,13 +2335,13 @@ static void unmap_page(struct page *page)
25430 + VM_BUG_ON_PAGE(!unmap_success, page);
25431 + }
25432 +
25433 +-static void remap_page(struct page *page)
25434 ++static void remap_page(struct page *page, unsigned int nr)
25435 + {
25436 + int i;
25437 + if (PageTransHuge(page)) {
25438 + remove_migration_ptes(page, page, true);
25439 + } else {
25440 +- for (i = 0; i < HPAGE_PMD_NR; i++)
25441 ++ for (i = 0; i < nr; i++)
25442 + remove_migration_ptes(page + i, page + i, true);
25443 + }
25444 + }
25445 +@@ -2416,6 +2416,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
25446 + struct lruvec *lruvec;
25447 + struct address_space *swap_cache = NULL;
25448 + unsigned long offset = 0;
25449 ++ unsigned int nr = thp_nr_pages(head);
25450 + int i;
25451 +
25452 + lruvec = mem_cgroup_page_lruvec(head, pgdat);
25453 +@@ -2431,7 +2432,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
25454 + xa_lock(&swap_cache->i_pages);
25455 + }
25456 +
25457 +- for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
25458 ++ for (i = nr - 1; i >= 1; i--) {
25459 + __split_huge_page_tail(head, i, lruvec, list);
25460 + /* Some pages can be beyond i_size: drop them from page cache */
25461 + if (head[i].index >= end) {
25462 +@@ -2451,7 +2452,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
25463 +
25464 + ClearPageCompound(head);
25465 +
25466 +- split_page_owner(head, HPAGE_PMD_ORDER);
25467 ++ split_page_owner(head, nr);
25468 +
25469 + /* See comment in __split_huge_page_tail() */
25470 + if (PageAnon(head)) {
25471 +@@ -2470,9 +2471,15 @@ static void __split_huge_page(struct page *page, struct list_head *list,
25472 +
25473 + spin_unlock_irqrestore(&pgdat->lru_lock, flags);
25474 +
25475 +- remap_page(head);
25476 ++ remap_page(head, nr);
25477 +
25478 +- for (i = 0; i < HPAGE_PMD_NR; i++) {
25479 ++ if (PageSwapCache(head)) {
25480 ++ swp_entry_t entry = { .val = page_private(head) };
25481 ++
25482 ++ split_swap_cluster(entry);
25483 ++ }
25484 ++
25485 ++ for (i = 0; i < nr; i++) {
25486 + struct page *subpage = head + i;
25487 + if (subpage == page)
25488 + continue;
25489 +@@ -2706,12 +2713,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
25490 + }
25491 +
25492 + __split_huge_page(page, list, end, flags);
25493 +- if (PageSwapCache(head)) {
25494 +- swp_entry_t entry = { .val = page_private(head) };
25495 +-
25496 +- ret = split_swap_cluster(entry);
25497 +- } else
25498 +- ret = 0;
25499 ++ ret = 0;
25500 + } else {
25501 + if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
25502 + pr_alert("total_mapcount: %u, page_count(): %u\n",
25503 +@@ -2725,7 +2727,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
25504 + fail: if (mapping)
25505 + xa_unlock(&mapping->i_pages);
25506 + spin_unlock_irqrestore(&pgdata->lru_lock, flags);
25507 +- remap_page(head);
25508 ++ remap_page(head, thp_nr_pages(head));
25509 + ret = -EBUSY;
25510 + }
25511 +
25512 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
25513 +index 6877c765b8d03..9eefdb9cc2303 100644
25514 +--- a/mm/memcontrol.c
25515 ++++ b/mm/memcontrol.c
25516 +@@ -2887,6 +2887,17 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p)
25517 +
25518 + page = virt_to_head_page(p);
25519 +
25520 ++ /*
25521 ++ * If page->mem_cgroup is set, it's either a simple mem_cgroup pointer
25522 ++ * or a pointer to obj_cgroup vector. In the latter case the lowest
25523 ++ * bit of the pointer is set.
25524 ++ * The page->mem_cgroup pointer can be asynchronously changed
25525 ++ * from NULL to (obj_cgroup_vec | 0x1UL), but can't be changed
25526 ++ * from a valid memcg pointer to objcg vector or back.
25527 ++ */
25528 ++ if (!page->mem_cgroup)
25529 ++ return NULL;
25530 ++
25531 + /*
25532 + * Slab objects are accounted individually, not per-page.
25533 + * Memcg membership data for each individual object is saved in
25534 +@@ -5500,7 +5511,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
25535 + struct page *page = NULL;
25536 + swp_entry_t ent = pte_to_swp_entry(ptent);
25537 +
25538 +- if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
25539 ++ if (!(mc.flags & MOVE_ANON))
25540 + return NULL;
25541 +
25542 + /*
25543 +@@ -5519,6 +5530,9 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
25544 + return page;
25545 + }
25546 +
25547 ++ if (non_swap_entry(ent))
25548 ++ return NULL;
25549 ++
25550 + /*
25551 + * Because lookup_swap_cache() updates some statistics counter,
25552 + * we call find_get_page() with swapper_space directly.
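[Annotation: the mem_cgroup_from_obj() comment in the hunk above relies on low-bit pointer tagging: the tagged structures are word-aligned, so bit 0 is always clear in a real pointer and can be borrowed to distinguish a plain memcg pointer from an obj_cgroup vector. A stand-alone sketch of the idea, with hypothetical types rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

struct objcg_vec { void *slots[4]; };

static int is_objcg_vec(const void *p)
{
	return (uintptr_t)p & 1;	/* low bit set => obj_cgroup vector */
}

static struct objcg_vec *untag(void *p)
{
	return (struct objcg_vec *)((uintptr_t)p & ~(uintptr_t)1);
}

int main(void)
{
	static struct objcg_vec v;	/* word-aligned, so bit 0 is free */
	void *tagged = (void *)((uintptr_t)&v | 1);

	printf("is_vec=%d untagged_ok=%d\n",
	       is_objcg_vec(tagged), untag(tagged) == &v);
	return 0;
}
]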
25553 +diff --git a/mm/mmap.c b/mm/mmap.c
25554 +index bdd19f5b994e0..7a8987aa69962 100644
25555 +--- a/mm/mmap.c
25556 ++++ b/mm/mmap.c
25557 +@@ -3227,7 +3227,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
25558 + * By setting it to reflect the virtual start address of the
25559 + * vma, merges and splits can happen in a seamless way, just
25560 + * using the existing file pgoff checks and manipulations.
25561 +- * Similarly in do_mmap and in do_brk.
25562 ++ * Similarly in do_mmap and in do_brk_flags.
25563 + */
25564 + if (vma_is_anonymous(vma)) {
25565 + BUG_ON(vma->anon_vma);
25566 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
25567 +index e90f25d6385d7..8b84661a64109 100644
25568 +--- a/mm/oom_kill.c
25569 ++++ b/mm/oom_kill.c
25570 +@@ -64,6 +64,8 @@ int sysctl_oom_dump_tasks = 1;
25571 + * and mark_oom_victim
25572 + */
25573 + DEFINE_MUTEX(oom_lock);
25574 ++/* Serializes oom_score_adj and oom_score_adj_min updates */
25575 ++DEFINE_MUTEX(oom_adj_mutex);
25576 +
25577 + static inline bool is_memcg_oom(struct oom_control *oc)
25578 + {
25579 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
25580 +index 780c8f023b282..3fb35fe6a9e44 100644
25581 +--- a/mm/page_alloc.c
25582 ++++ b/mm/page_alloc.c
25583 +@@ -3209,7 +3209,7 @@ void split_page(struct page *page, unsigned int order)
25584 +
25585 + for (i = 1; i < (1 << order); i++)
25586 + set_page_refcounted(page + i);
25587 +- split_page_owner(page, order);
25588 ++ split_page_owner(page, 1 << order);
25589 + }
25590 + EXPORT_SYMBOL_GPL(split_page);
25591 +
25592 +@@ -3496,7 +3496,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
25593 +
25594 + #endif /* CONFIG_FAIL_PAGE_ALLOC */
25595 +
25596 +-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
25597 ++noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
25598 + {
25599 + return __should_fail_alloc_page(gfp_mask, order);
25600 + }
25601 +@@ -4961,6 +4961,9 @@ void __free_pages(struct page *page, unsigned int order)
25602 + {
25603 + if (put_page_testzero(page))
25604 + free_the_page(page, order);
25605 ++ else if (!PageHead(page))
25606 ++ while (order-- > 0)
25607 ++ free_the_page(page + (1 << order), order);
25608 + }
25609 + EXPORT_SYMBOL(__free_pages);
25610 +
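[Annotation: the __free_pages() change above handles a non-compound, higher-order block whose head page is still pinned by a speculative reference: everything except page 0 is handed back as successively smaller power-of-two blocks, and page 0 itself follows when the last reference is put. A user-space sketch (hypothetical names) of the split the new loop performs for order 3:

#include <stdio.h>

/* Mirrors "while (order-- > 0) free_the_page(page + (1 << order), order)":
 * with the head page still referenced, the remaining 2^order - 1 pages
 * are returned as successively smaller blocks. */
static void show_split(unsigned int order)
{
	while (order-- > 0)
		printf("free pages %u..%u (order %u)\n",
		       1u << order, (2u << order) - 1, order);
}

int main(void)
{
	show_split(3);	/* prints 4..7, 2..3, 1..1; page 0 freed later */
	return 0;
}
]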
25611 +diff --git a/mm/page_owner.c b/mm/page_owner.c
25612 +index 3604615094235..4ca3051a10358 100644
25613 +--- a/mm/page_owner.c
25614 ++++ b/mm/page_owner.c
25615 +@@ -204,7 +204,7 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
25616 + page_owner->last_migrate_reason = reason;
25617 + }
25618 +
25619 +-void __split_page_owner(struct page *page, unsigned int order)
25620 ++void __split_page_owner(struct page *page, unsigned int nr)
25621 + {
25622 + int i;
25623 + struct page_ext *page_ext = lookup_page_ext(page);
25624 +@@ -213,7 +213,7 @@ void __split_page_owner(struct page *page, unsigned int order)
25625 + if (unlikely(!page_ext))
25626 + return;
25627 +
25628 +- for (i = 0; i < (1 << order); i++) {
25629 ++ for (i = 0; i < nr; i++) {
25630 + page_owner = get_page_owner(page_ext);
25631 + page_owner->order = 0;
25632 + page_ext = page_ext_next(page_ext);
25633 +diff --git a/mm/swapfile.c b/mm/swapfile.c
25634 +index debc94155f74d..b877c1504e00b 100644
25635 +--- a/mm/swapfile.c
25636 ++++ b/mm/swapfile.c
25637 +@@ -3343,7 +3343,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
25638 + error = inode_drain_writes(inode);
25639 + if (error) {
25640 + inode->i_flags &= ~S_SWAPFILE;
25641 +- goto bad_swap_unlock_inode;
25642 ++ goto free_swap_address_space;
25643 + }
25644 +
25645 + mutex_lock(&swapon_mutex);
25646 +@@ -3368,6 +3368,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
25647 +
25648 + error = 0;
25649 + goto out;
25650 ++free_swap_address_space:
25651 ++ exit_swap_address_space(p->type);
25652 + bad_swap_unlock_inode:
25653 + inode_unlock(inode);
25654 + bad_swap:
25655 +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
25656 +index 68bfe57b66250..be9cdf5dabe5d 100644
25657 +--- a/net/bluetooth/hci_core.c
25658 ++++ b/net/bluetooth/hci_core.c
25659 +@@ -3442,6 +3442,16 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
25660 + }
25661 + }
25662 +
25663 ++static void hci_suspend_clear_tasks(struct hci_dev *hdev)
25664 ++{
25665 ++ int i;
25666 ++
25667 ++ for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
25668 ++ clear_bit(i, hdev->suspend_tasks);
25669 ++
25670 ++ wake_up(&hdev->suspend_wait_q);
25671 ++}
25672 ++
25673 + static int hci_suspend_wait_event(struct hci_dev *hdev)
25674 + {
25675 + #define WAKE_COND \
25676 +@@ -3784,6 +3794,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
25677 +
25678 + cancel_work_sync(&hdev->power_on);
25679 +
25680 ++ hci_suspend_clear_tasks(hdev);
25681 + unregister_pm_notifier(&hdev->suspend_notifier);
25682 + cancel_work_sync(&hdev->suspend_prepare);
25683 +
25684 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
25685 +index 4b7fc430793cf..7cf42b9d3dfc8 100644
25686 +--- a/net/bluetooth/hci_event.c
25687 ++++ b/net/bluetooth/hci_event.c
25688 +@@ -2569,7 +2569,6 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
25689 + static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
25690 + {
25691 + struct hci_ev_conn_complete *ev = (void *) skb->data;
25692 +- struct inquiry_entry *ie;
25693 + struct hci_conn *conn;
25694 +
25695 + BT_DBG("%s", hdev->name);
25696 +@@ -2578,13 +2577,19 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
25697 +
25698 + conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
25699 + if (!conn) {
25700 +- /* Connection may not exist if auto-connected. Check the inquiry
25701 +- * cache to see if we've already discovered this bdaddr before.
25702 +- * If found and link is an ACL type, create a connection class
25703 ++ /* Connection may not exist if auto-connected. Check the bredr
25704 ++ * allowlist to see if this device is allowed to auto connect.
25705 ++ * If link is an ACL type, create a connection class
25706 + * automatically.
25707 ++ *
25708 ++ * Auto-connect will only occur if the event filter is
25709 ++ * programmed with a given address. Right now, event filter is
25710 ++ * only used during suspend.
25711 + */
25712 +- ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
25713 +- if (ie && ev->link_type == ACL_LINK) {
25714 ++ if (ev->link_type == ACL_LINK &&
25715 ++ hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
25716 ++ &ev->bdaddr,
25717 ++ BDADDR_BREDR)) {
25718 + conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
25719 + HCI_ROLE_SLAVE);
25720 + if (!conn) {
25721 +diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
25722 +index 79b4c01c515b9..f1b1edd0b6974 100644
25723 +--- a/net/bluetooth/l2cap_sock.c
25724 ++++ b/net/bluetooth/l2cap_sock.c
25725 +@@ -1521,8 +1521,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
25726 +
25727 + parent = bt_sk(sk)->parent;
25728 +
25729 +- sock_set_flag(sk, SOCK_ZAPPED);
25730 +-
25731 + switch (chan->state) {
25732 + case BT_OPEN:
25733 + case BT_BOUND:
25734 +@@ -1549,8 +1547,11 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
25735 +
25736 + break;
25737 + }
25738 +-
25739 + release_sock(sk);
25740 ++
25741 ++ /* Only zap after cleanup to avoid a use-after-free race */
25742 ++ sock_set_flag(sk, SOCK_ZAPPED);
25743 ++
25744 + }
25745 +
25746 + static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
25747 +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
25748 +index 5758ccb524ef7..12a7cc9840b4d 100644
25749 +--- a/net/bluetooth/mgmt.c
25750 ++++ b/net/bluetooth/mgmt.c
25751 +@@ -4162,7 +4162,7 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
25752 + {
25753 + struct adv_monitor *monitor = NULL;
25754 + struct mgmt_rp_read_adv_monitor_features *rp = NULL;
25755 +- int handle;
25756 ++ int handle, err;
25757 + size_t rp_size = 0;
25758 + __u32 supported = 0;
25759 + __u16 num_handles = 0;
25760 +@@ -4197,9 +4197,13 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
25761 + if (num_handles)
25762 + memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
25763 +
25764 +- return mgmt_cmd_complete(sk, hdev->id,
25765 +- MGMT_OP_READ_ADV_MONITOR_FEATURES,
25766 +- MGMT_STATUS_SUCCESS, rp, rp_size);
25767 ++ err = mgmt_cmd_complete(sk, hdev->id,
25768 ++ MGMT_OP_READ_ADV_MONITOR_FEATURES,
25769 ++ MGMT_STATUS_SUCCESS, rp, rp_size);
25770 ++
25771 ++ kfree(rp);
25772 ++
25773 ++ return err;
25774 + }
25775 +
25776 + static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
25777 +diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
25778 +index 12a4f4d936810..3fda71a8579d1 100644
25779 +--- a/net/bridge/netfilter/ebt_dnat.c
25780 ++++ b/net/bridge/netfilter/ebt_dnat.c
25781 +@@ -21,7 +21,7 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
25782 + {
25783 + const struct ebt_nat_info *info = par->targinfo;
25784 +
25785 +- if (skb_ensure_writable(skb, ETH_ALEN))
25786 ++ if (skb_ensure_writable(skb, 0))
25787 + return EBT_DROP;
25788 +
25789 + ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
25790 +diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
25791 +index 0cad62a4052b9..307790562b492 100644
25792 +--- a/net/bridge/netfilter/ebt_redirect.c
25793 ++++ b/net/bridge/netfilter/ebt_redirect.c
25794 +@@ -21,7 +21,7 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
25795 + {
25796 + const struct ebt_redirect_info *info = par->targinfo;
25797 +
25798 +- if (skb_ensure_writable(skb, ETH_ALEN))
25799 ++ if (skb_ensure_writable(skb, 0))
25800 + return EBT_DROP;
25801 +
25802 + if (xt_hooknum(par) != NF_BR_BROUTING)
25803 +diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
25804 +index 27443bf229a3b..7dfbcdfc30e5d 100644
25805 +--- a/net/bridge/netfilter/ebt_snat.c
25806 ++++ b/net/bridge/netfilter/ebt_snat.c
25807 +@@ -22,7 +22,7 @@ ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
25808 + {
25809 + const struct ebt_nat_info *info = par->targinfo;
25810 +
25811 +- if (skb_ensure_writable(skb, ETH_ALEN * 2))
25812 ++ if (skb_ensure_writable(skb, 0))
25813 + return EBT_DROP;
25814 +
25815 + ether_addr_copy(eth_hdr(skb)->h_source, info->mac);
25816 +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
25817 +index 0cec4152f9797..e09d087ba2409 100644
25818 +--- a/net/can/j1939/transport.c
25819 ++++ b/net/can/j1939/transport.c
25820 +@@ -580,6 +580,7 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
25821 + skb->dev = priv->ndev;
25822 + can_skb_reserve(skb);
25823 + can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
25824 ++ can_skb_prv(skb)->skbcnt = 0;
25825 + /* reserve CAN header */
25826 + skb_reserve(skb, offsetof(struct can_frame, data));
25827 +
25828 +@@ -1487,6 +1488,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
25829 + skb->dev = priv->ndev;
25830 + can_skb_reserve(skb);
25831 + can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
25832 ++ can_skb_prv(skb)->skbcnt = 0;
25833 + skcb = j1939_skb_to_cb(skb);
25834 + memcpy(skcb, rel_skcb, sizeof(*skcb));
25835 +
25836 +diff --git a/net/core/filter.c b/net/core/filter.c
25837 +index b5f3faac5e3b6..150650c53829e 100644
25838 +--- a/net/core/filter.c
25839 ++++ b/net/core/filter.c
25840 +@@ -4354,7 +4354,8 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
25841 + cmpxchg(&sk->sk_pacing_status,
25842 + SK_PACING_NONE,
25843 + SK_PACING_NEEDED);
25844 +- sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
25845 ++ sk->sk_max_pacing_rate = (val == ~0U) ?
25846 ++ ~0UL : (unsigned int)val;
25847 + sk->sk_pacing_rate = min(sk->sk_pacing_rate,
25848 + sk->sk_max_pacing_rate);
25849 + break;
25850 +diff --git a/net/core/skmsg.c b/net/core/skmsg.c
25851 +index 649583158983a..30ddca6db6c6b 100644
25852 +--- a/net/core/skmsg.c
25853 ++++ b/net/core/skmsg.c
25854 +@@ -662,15 +662,16 @@ static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
25855 + {
25856 + int ret;
25857 +
25858 ++ /* strparser clones the skb before handing it to an upper layer,
25859 ++ * meaning we have the same data, but sk is NULL. We do want an
25860 ++ * sk pointer though when we run the BPF program. So we set it
25861 ++ * here and then NULL it to ensure we don't trigger a BUG_ON()
25862 ++ * in skb/sk operations later if kfree_skb is called with a
25863 ++ * valid skb->sk pointer and no destructor assigned.
25864 ++ */
25865 + skb->sk = psock->sk;
25866 + bpf_compute_data_end_sk_skb(skb);
25867 + ret = bpf_prog_run_pin_on_cpu(prog, skb);
25868 +- /* strparser clones the skb before handing it to a upper layer,
25869 +- * meaning skb_orphan has been called. We NULL sk on the way out
25870 +- * to ensure we don't trigger a BUG_ON() in skb/sk operations
25871 +- * later and because we are not charging the memory of this skb
25872 +- * to any socket yet.
25873 +- */
25874 + skb->sk = NULL;
25875 + return ret;
25876 + }
25877 +@@ -794,7 +795,6 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
25878 + }
25879 + prog = READ_ONCE(psock->progs.skb_verdict);
25880 + if (likely(prog)) {
25881 +- skb_orphan(skb);
25882 + tcp_skb_bpf_redirect_clear(skb);
25883 + ret = sk_psock_bpf_run(psock, prog, skb);
25884 + ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
25885 +diff --git a/net/core/sock.c b/net/core/sock.c
25886 +index 6c5c6b18eff4c..669f686ace801 100644
25887 +--- a/net/core/sock.c
25888 ++++ b/net/core/sock.c
25889 +@@ -769,7 +769,6 @@ static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
25890 + } else {
25891 + sock_reset_flag(sk, SOCK_RCVTSTAMP);
25892 + sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
25893 +- sock_reset_flag(sk, SOCK_TSTAMP_NEW);
25894 + }
25895 + }
25896 +
25897 +@@ -1007,8 +1006,6 @@ set_sndbuf:
25898 + __sock_set_timestamps(sk, valbool, true, true);
25899 + break;
25900 + case SO_TIMESTAMPING_NEW:
25901 +- sock_set_flag(sk, SOCK_TSTAMP_NEW);
25902 +- fallthrough;
25903 + case SO_TIMESTAMPING_OLD:
25904 + if (val & ~SOF_TIMESTAMPING_MASK) {
25905 + ret = -EINVAL;
25906 +@@ -1037,16 +1034,14 @@ set_sndbuf:
25907 + }
25908 +
25909 + sk->sk_tsflags = val;
25910 ++ sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
25911 ++
25912 + if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
25913 + sock_enable_timestamp(sk,
25914 + SOCK_TIMESTAMPING_RX_SOFTWARE);
25915 +- else {
25916 +- if (optname == SO_TIMESTAMPING_NEW)
25917 +- sock_reset_flag(sk, SOCK_TSTAMP_NEW);
25918 +-
25919 ++ else
25920 + sock_disable_timestamp(sk,
25921 + (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
25922 +- }
25923 + break;
25924 +
25925 + case SO_RCVLOWAT:
25926 +@@ -1181,7 +1176,7 @@ set_sndbuf:
25927 +
25928 + case SO_MAX_PACING_RATE:
25929 + {
25930 +- unsigned long ulval = (val == ~0U) ? ~0UL : val;
25931 ++ unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
25932 +
25933 + if (sizeof(ulval) != sizeof(val) &&
25934 + optlen >= sizeof(ulval) &&
25935 +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
25936 +index cf36f955bfe62..650f0391e22a1 100644
25937 +--- a/net/ipv4/icmp.c
25938 ++++ b/net/ipv4/icmp.c
25939 +@@ -239,7 +239,7 @@ static struct {
25940 + /**
25941 + * icmp_global_allow - Are we allowed to send one more ICMP message ?
25942 + *
25943 +- * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
25944 ++ * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
25945 + * Returns false if we reached the limit and can not send another packet.
25946 + * Note: called with BH disabled
25947 + */
25948 +@@ -267,7 +267,10 @@ bool icmp_global_allow(void)
25949 + }
25950 + credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
25951 + if (credit) {
25952 +- credit--;
25953 ++ /* We want to use a credit of one on average, but need to randomize
25954 ++ * it for security reasons.
25955 ++ */
25956 ++ credit = max_t(int, credit - prandom_u32_max(3), 0);
25957 + rc = true;
25958 + }
25959 + WRITE_ONCE(icmp_global.credit, credit);
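[Annotation: prandom_u32_max(3), used in the icmp.c hunk above, maps a 32-bit random word onto [0, 3) with a multiply-and-shift rather than a modulo. The three outcomes are near-equally likely, so the expected credit spent per allowed message is (0+1+2)/3 = 1, preserving the average rate while making the exact budget unguessable. A user-space sketch of the mapping, with rand() merely standing in for prandom_u32():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Same shape as the kernel's prandom_u32_max(): scale a 32-bit
 * random value r down to [0, ep_ro) without division. */
static uint32_t u32_max(uint32_t r, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)r * ep_ro) >> 32);
}

int main(void)
{
	unsigned long hist[3] = { 0 }, i;

	srand(1);
	for (i = 0; i < 3000000; i++) {
		uint32_t r = ((uint32_t)rand() << 16) ^ (uint32_t)rand();

		hist[u32_max(r, 3)]++;
	}
	/* Each bucket gets roughly a third of the draws. */
	printf("0:%lu 1:%lu 2:%lu\n", hist[0], hist[1], hist[2]);
	return 0;
}
]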
25960 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
25961 +index 4e31f23e4117e..e70291748889b 100644
25962 +--- a/net/ipv4/ip_gre.c
25963 ++++ b/net/ipv4/ip_gre.c
25964 +@@ -625,9 +625,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
25965 + }
25966 +
25967 + if (dev->header_ops) {
25968 +- /* Need space for new headers */
25969 +- if (skb_cow_head(skb, dev->needed_headroom -
25970 +- (tunnel->hlen + sizeof(struct iphdr))))
25971 ++ if (skb_cow_head(skb, 0))
25972 + goto free_skb;
25973 +
25974 + tnl_params = (const struct iphdr *)skb->data;
25975 +@@ -748,7 +746,11 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
25976 + len = tunnel->tun_hlen - len;
25977 + tunnel->hlen = tunnel->hlen + len;
25978 +
25979 +- dev->needed_headroom = dev->needed_headroom + len;
25980 ++ if (dev->header_ops)
25981 ++ dev->hard_header_len += len;
25982 ++ else
25983 ++ dev->needed_headroom += len;
25984 ++
25985 + if (set_mtu)
25986 + dev->mtu = max_t(int, dev->mtu - len, 68);
25987 +
25988 +@@ -944,6 +946,7 @@ static void __gre_tunnel_init(struct net_device *dev)
25989 + tunnel->parms.iph.protocol = IPPROTO_GRE;
25990 +
25991 + tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
25992 ++ dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
25993 +
25994 + dev->features |= GRE_FEATURES;
25995 + dev->hw_features |= GRE_FEATURES;
25996 +@@ -987,10 +990,14 @@ static int ipgre_tunnel_init(struct net_device *dev)
25997 + return -EINVAL;
25998 + dev->flags = IFF_BROADCAST;
25999 + dev->header_ops = &ipgre_header_ops;
26000 ++ dev->hard_header_len = tunnel->hlen + sizeof(*iph);
26001 ++ dev->needed_headroom = 0;
26002 + }
26003 + #endif
26004 + } else if (!tunnel->collect_md) {
26005 + dev->header_ops = &ipgre_header_ops;
26006 ++ dev->hard_header_len = tunnel->hlen + sizeof(*iph);
26007 ++ dev->needed_headroom = 0;
26008 + }
26009 +
26010 + return ip_tunnel_init(dev);
26011 +diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
26012 +index 7a83f881efa9e..136030ad2e546 100644
26013 +--- a/net/ipv4/netfilter/nf_log_arp.c
26014 ++++ b/net/ipv4/netfilter/nf_log_arp.c
26015 +@@ -43,16 +43,31 @@ static void dump_arp_packet(struct nf_log_buf *m,
26016 + const struct nf_loginfo *info,
26017 + const struct sk_buff *skb, unsigned int nhoff)
26018 + {
26019 +- const struct arphdr *ah;
26020 +- struct arphdr _arph;
26021 + const struct arppayload *ap;
26022 + struct arppayload _arpp;
26023 ++ const struct arphdr *ah;
26024 ++ unsigned int logflags;
26025 ++ struct arphdr _arph;
26026 +
26027 + ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
26028 + if (ah == NULL) {
26029 + nf_log_buf_add(m, "TRUNCATED");
26030 + return;
26031 + }
26032 ++
26033 ++ if (info->type == NF_LOG_TYPE_LOG)
26034 ++ logflags = info->u.log.logflags;
26035 ++ else
26036 ++ logflags = NF_LOG_DEFAULT_MASK;
26037 ++
26038 ++ if (logflags & NF_LOG_MACDECODE) {
26039 ++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
26040 ++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
26041 ++ nf_log_dump_vlan(m, skb);
26042 ++ nf_log_buf_add(m, "MACPROTO=%04x ",
26043 ++ ntohs(eth_hdr(skb)->h_proto));
26044 ++ }
26045 ++
26046 + nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d",
26047 + ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));
26048 +
26049 +diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
26050 +index 0c72156130b68..d07583fac8f8c 100644
26051 +--- a/net/ipv4/netfilter/nf_log_ipv4.c
26052 ++++ b/net/ipv4/netfilter/nf_log_ipv4.c
26053 +@@ -284,8 +284,10 @@ static void dump_ipv4_mac_header(struct nf_log_buf *m,
26054 +
26055 + switch (dev->type) {
26056 + case ARPHRD_ETHER:
26057 +- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
26058 +- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
26059 ++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
26060 ++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
26061 ++ nf_log_dump_vlan(m, skb);
26062 ++ nf_log_buf_add(m, "MACPROTO=%04x ",
26063 + ntohs(eth_hdr(skb)->h_proto));
26064 + return;
26065 + default:
26066 +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
26067 +index 134e923822750..355c4499fa1b5 100644
26068 +--- a/net/ipv4/nexthop.c
26069 ++++ b/net/ipv4/nexthop.c
26070 +@@ -842,7 +842,7 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
26071 + remove_nh_grp_entry(net, nhge, nlinfo);
26072 +
26073 + /* make sure all see the newly published array before releasing rtnl */
26074 +- synchronize_rcu();
26075 ++ synchronize_net();
26076 + }
26077 +
26078 + static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
26079 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
26080 +index 58642b29a499d..9bd30fd4de4b4 100644
26081 +--- a/net/ipv4/route.c
26082 ++++ b/net/ipv4/route.c
26083 +@@ -2769,10 +2769,12 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
26084 + if (IS_ERR(rt))
26085 + return rt;
26086 +
26087 +- if (flp4->flowi4_proto)
26088 ++ if (flp4->flowi4_proto) {
26089 ++ flp4->flowi4_oif = rt->dst.dev->ifindex;
26090 + rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
26091 + flowi4_to_flowi(flp4),
26092 + sk, 0);
26093 ++ }
26094 +
26095 + return rt;
26096 + }
26097 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
26098 +index b1ce2054291d4..75be97f6a7da1 100644
26099 +--- a/net/ipv4/tcp_input.c
26100 ++++ b/net/ipv4/tcp_input.c
26101 +@@ -5766,6 +5766,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
26102 + tcp_data_snd_check(sk);
26103 + if (!inet_csk_ack_scheduled(sk))
26104 + goto no_ack;
26105 ++ } else {
26106 ++ tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
26107 + }
26108 +
26109 + __tcp_ack_snd_check(sk, 0);
26110 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
26111 +index 4a664ad4f4d4b..f88693929e8d0 100644
26112 +--- a/net/ipv6/ip6_fib.c
26113 ++++ b/net/ipv6/ip6_fib.c
26114 +@@ -2618,8 +2618,10 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
26115 + iter->skip = *pos;
26116 +
26117 + if (iter->tbl) {
26118 ++ loff_t p = 0;
26119 ++
26120 + ipv6_route_seq_setup_walk(iter, net);
26121 +- return ipv6_route_seq_next(seq, NULL, pos);
26122 ++ return ipv6_route_seq_next(seq, NULL, &p);
26123 + } else {
26124 + return NULL;
26125 + }
26126 +diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
26127 +index da64550a57075..8210ff34ed9b7 100644
26128 +--- a/net/ipv6/netfilter/nf_log_ipv6.c
26129 ++++ b/net/ipv6/netfilter/nf_log_ipv6.c
26130 +@@ -297,9 +297,11 @@ static void dump_ipv6_mac_header(struct nf_log_buf *m,
26131 +
26132 + switch (dev->type) {
26133 + case ARPHRD_ETHER:
26134 +- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
26135 +- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
26136 +- ntohs(eth_hdr(skb)->h_proto));
26137 ++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
26138 ++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
26139 ++ nf_log_dump_vlan(m, skb);
26140 ++ nf_log_buf_add(m, "MACPROTO=%04x ",
26141 ++ ntohs(eth_hdr(skb)->h_proto));
26142 + return;
26143 + default:
26144 + break;
26145 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
26146 +index 87fddd84c621e..82d516d117385 100644
26147 +--- a/net/mac80211/cfg.c
26148 ++++ b/net/mac80211/cfg.c
26149 +@@ -709,7 +709,8 @@ void sta_set_rate_info_tx(struct sta_info *sta,
26150 + u16 brate;
26151 +
26152 + sband = ieee80211_get_sband(sta->sdata);
26153 +- if (sband) {
26154 ++ WARN_ON_ONCE(sband && !sband->bitrates);
26155 ++ if (sband && sband->bitrates) {
26156 + brate = sband->bitrates[rate->idx].bitrate;
26157 + rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
26158 + }
26159 +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
26160 +index f2840d1d95cfb..fb4f2b9b294f0 100644
26161 +--- a/net/mac80211/sta_info.c
26162 ++++ b/net/mac80211/sta_info.c
26163 +@@ -2122,6 +2122,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
26164 + int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);
26165 +
26166 + sband = local->hw.wiphy->bands[band];
26167 ++
26168 ++ if (WARN_ON_ONCE(!sband->bitrates))
26169 ++ break;
26170 ++
26171 + brate = sband->bitrates[rate_idx].bitrate;
26172 + if (rinfo->bw == RATE_INFO_BW_5)
26173 + shift = 2;
26174 +diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig
26175 +index 698bc35251609..abb0a992d4a08 100644
26176 +--- a/net/mptcp/Kconfig
26177 ++++ b/net/mptcp/Kconfig
26178 +@@ -22,11 +22,8 @@ config MPTCP_IPV6
26179 + select IPV6
26180 + default y
26181 +
26182 +-endif
26183 +-
26184 + config MPTCP_KUNIT_TESTS
26185 + tristate "This builds the MPTCP KUnit tests" if !KUNIT_ALL_TESTS
26186 +- select MPTCP
26187 + depends on KUNIT
26188 + default KUNIT_ALL_TESTS
26189 + help
26190 +@@ -39,3 +36,4 @@ config MPTCP_KUNIT_TESTS
26191 +
26192 + If unsure, say N.
26193 +
26194 ++endif
26195 +diff --git a/net/mptcp/options.c b/net/mptcp/options.c
26196 +index 888bbbbb3e8a4..3127527fc7ac0 100644
26197 +--- a/net/mptcp/options.c
26198 ++++ b/net/mptcp/options.c
26199 +@@ -296,6 +296,7 @@ void mptcp_get_options(const struct sk_buff *skb,
26200 + mp_opt->mp_capable = 0;
26201 + mp_opt->mp_join = 0;
26202 + mp_opt->add_addr = 0;
26203 ++ mp_opt->ahmac = 0;
26204 + mp_opt->rm_addr = 0;
26205 + mp_opt->dss = 0;
26206 +
26207 +@@ -516,7 +517,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
26208 + return ret;
26209 + }
26210 +
26211 +- if (subflow->use_64bit_ack) {
26212 ++ if (READ_ONCE(msk->use_64bit_ack)) {
26213 + ack_size = TCPOLEN_MPTCP_DSS_ACK64;
26214 + opts->ext_copy.data_ack = READ_ONCE(msk->ack_seq);
26215 + opts->ext_copy.ack64 = 1;
26216 +@@ -626,6 +627,12 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
26217 + if (unlikely(mptcp_check_fallback(sk)))
26218 + return false;
26219 +
26220 ++ /* prevent adding of any MPTCP related options on reset packet
26221 ++ * until we support MP_TCPRST/MP_FASTCLOSE
26222 ++ */
26223 ++ if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
26224 ++ return false;
26225 ++
26226 + if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts))
26227 + ret = true;
26228 + else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining,
26229 +@@ -676,7 +683,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
26230 + return false;
26231 + }
26232 +
26233 +-static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
26234 ++static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
26235 + struct mptcp_subflow_context *subflow,
26236 + struct sk_buff *skb,
26237 + struct mptcp_options_received *mp_opt)
26238 +@@ -693,15 +700,20 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
26239 + TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
26240 + subflow->mp_join && mp_opt->mp_join &&
26241 + READ_ONCE(msk->pm.server_side))
26242 +- tcp_send_ack(sk);
26243 ++ tcp_send_ack(ssk);
26244 + goto fully_established;
26245 + }
26246 +
26247 +- /* we should process OoO packets before the first subflow is fully
26248 +- * established, but not expected for MP_JOIN subflows
26249 ++ /* we must process OoO packets before the first subflow is fully
26250 ++ * established. OoO packets are instead a protocol violation
26251 ++ * for MP_JOIN subflows as the peer must not send any data
26252 ++ * before receiving the fourth ack - cfr. RFC 8684 section 3.2.
26253 + */
26254 +- if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1)
26255 ++ if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {
26256 ++ if (subflow->mp_join)
26257 ++ goto reset;
26258 + return subflow->mp_capable;
26259 ++ }
26260 +
26261 + if (mp_opt->dss && mp_opt->use_ack) {
26262 + /* subflows are fully established as soon as we get any
26263 +@@ -713,9 +725,12 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
26264 + }
26265 +
26266 + /* If the first established packet does not contain MP_CAPABLE + data
26267 +- * then fallback to TCP
26268 ++ * then fallback to TCP. Fallback scenarios requires a reset for
26269 ++ * MP_JOIN subflows.
26270 + */
26271 + if (!mp_opt->mp_capable) {
26272 ++ if (subflow->mp_join)
26273 ++ goto reset;
26274 + subflow->mp_capable = 0;
26275 + pr_fallback(msk);
26276 + __mptcp_do_fallback(msk);
26277 +@@ -732,12 +747,16 @@ fully_established:
26278 +
26279 + subflow->pm_notified = 1;
26280 + if (subflow->mp_join) {
26281 +- clear_3rdack_retransmission(sk);
26282 ++ clear_3rdack_retransmission(ssk);
26283 + mptcp_pm_subflow_established(msk, subflow);
26284 + } else {
26285 + mptcp_pm_fully_established(msk);
26286 + }
26287 + return true;
26288 ++
26289 ++reset:
26290 ++ mptcp_subflow_reset(ssk);
26291 ++ return false;
26292 + }
26293 +
26294 + static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
26295 +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
26296 +index 5d747c6a610e8..b295eb6e9580b 100644
26297 +--- a/net/mptcp/protocol.c
26298 ++++ b/net/mptcp/protocol.c
26299 +@@ -1383,6 +1383,20 @@ static void pm_work(struct mptcp_sock *msk)
26300 + spin_unlock_bh(&msk->pm.lock);
26301 + }
26302 +
26303 ++static void __mptcp_close_subflow(struct mptcp_sock *msk)
26304 ++{
26305 ++ struct mptcp_subflow_context *subflow, *tmp;
26306 ++
26307 ++ list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
26308 ++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
26309 ++
26310 ++ if (inet_sk_state_load(ssk) != TCP_CLOSE)
26311 ++ continue;
26312 ++
26313 ++ __mptcp_close_ssk((struct sock *)msk, ssk, subflow, 0);
26314 ++ }
26315 ++}
26316 ++
26317 + static void mptcp_worker(struct work_struct *work)
26318 + {
26319 + struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
26320 +@@ -1400,6 +1414,9 @@ static void mptcp_worker(struct work_struct *work)
26321 + mptcp_clean_una(sk);
26322 + mptcp_check_data_fin_ack(sk);
26323 + __mptcp_flush_join_list(msk);
26324 ++ if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
26325 ++ __mptcp_close_subflow(msk);
26326 ++
26327 + __mptcp_move_skbs(msk);
26328 +
26329 + if (msk->pm.status)
26330 +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
26331 +index 20f04ac85409e..9724636426905 100644
26332 +--- a/net/mptcp/protocol.h
26333 ++++ b/net/mptcp/protocol.h
26334 +@@ -90,6 +90,7 @@
26335 + #define MPTCP_WORK_RTX 2
26336 + #define MPTCP_WORK_EOF 3
26337 + #define MPTCP_FALLBACK_DONE 4
26338 ++#define MPTCP_WORK_CLOSE_SUBFLOW 5
26339 +
26340 + struct mptcp_options_received {
26341 + u64 sndr_key;
26342 +@@ -202,6 +203,7 @@ struct mptcp_sock {
26343 + bool fully_established;
26344 + bool rcv_data_fin;
26345 + bool snd_data_fin_enable;
26346 ++ bool use_64bit_ack; /* Set when we received a 64-bit DSN */
26347 + spinlock_t join_list_lock;
26348 + struct work_struct work;
26349 + struct list_head conn_list;
26350 +@@ -294,7 +296,6 @@ struct mptcp_subflow_context {
26351 + backup : 1,
26352 + data_avail : 1,
26353 + rx_eof : 1,
26354 +- use_64bit_ack : 1, /* Set when we received a 64-bit DSN */
26355 + can_ack : 1; /* only after processing the remote a key */
26356 + u32 remote_nonce;
26357 + u64 thmac;
26358 +@@ -348,6 +349,7 @@ void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
26359 + struct mptcp_options_received *mp_opt);
26360 + bool mptcp_subflow_data_available(struct sock *sk);
26361 + void __init mptcp_subflow_init(void);
26362 ++void mptcp_subflow_reset(struct sock *ssk);
26363 +
26364 + /* called with sk socket lock held */
26365 + int __mptcp_subflow_connect(struct sock *sk, int ifindex,
26366 +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
26367 +index 6f035af1c9d25..559f5bbd96229 100644
26368 +--- a/net/mptcp/subflow.c
26369 ++++ b/net/mptcp/subflow.c
26370 +@@ -270,6 +270,19 @@ static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
26371 + return thmac == subflow->thmac;
26372 + }
26373 +
26374 ++void mptcp_subflow_reset(struct sock *ssk)
26375 ++{
26376 ++ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
26377 ++ struct sock *sk = subflow->conn;
26378 ++
26379 ++ tcp_set_state(ssk, TCP_CLOSE);
26380 ++ tcp_send_active_reset(ssk, GFP_ATOMIC);
26381 ++ tcp_done(ssk);
26382 ++ if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
26383 ++ schedule_work(&mptcp_sk(sk)->work))
26384 ++ sock_hold(sk);
26385 ++}
26386 ++
26387 + static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
26388 + {
26389 + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
26390 +@@ -342,8 +355,7 @@ fallback:
26391 + return;
26392 +
26393 + do_reset:
26394 +- tcp_send_active_reset(sk, GFP_ATOMIC);
26395 +- tcp_done(sk);
26396 ++ mptcp_subflow_reset(sk);
26397 + }
26398 +
26399 + struct request_sock_ops mptcp_subflow_request_sock_ops;
26400 +@@ -769,12 +781,11 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
26401 + if (!mpext->dsn64) {
26402 + map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
26403 + mpext->data_seq);
26404 +- subflow->use_64bit_ack = 0;
26405 + pr_debug("expanded seq=%llu", subflow->map_seq);
26406 + } else {
26407 + map_seq = mpext->data_seq;
26408 +- subflow->use_64bit_ack = 1;
26409 + }
26410 ++ WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
26411 +
26412 + if (subflow->map_valid) {
26413 + /* Allow replacing only with an identical map */
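Editor's note: mptcp_subflow_reset() above sends the RST itself but defers the socket teardown to the MPTCP worker, and the test_and_set_bit() ensures the work is scheduled at most once while MPTCP_WORK_CLOSE_SUBFLOW is pending. Here is a compilable userspace analog of that schedule-only-on-the-0-to-1-edge pattern using C11 atomics; all names are invented for illustration.

	#include <stdatomic.h>
	#include <stdio.h>

	#define WORK_CLOSE_SUBFLOW (1u << 5)

	static atomic_uint flags;

	static void schedule_worker(void)
	{
		/* kernel: schedule_work(&msk->work) plus sock_hold(sk) */
		puts("worker scheduled");
	}

	/* Request a deferred close; schedules the worker on 0->1 only. */
	static void request_close_subflow(void)
	{
		unsigned int old = atomic_fetch_or(&flags, WORK_CLOSE_SUBFLOW);

		if (!(old & WORK_CLOSE_SUBFLOW))
			schedule_worker();
	}

	static void worker(void)
	{
		unsigned int old = atomic_fetch_and(&flags, ~WORK_CLOSE_SUBFLOW);

		if (old & WORK_CLOSE_SUBFLOW)
			puts("closing subflows that reached TCP_CLOSE");
	}

	int main(void)
	{
		request_close_subflow();
		request_close_subflow(); /* flag already set: no re-schedule */
		worker();
		return 0;
	}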
26414 +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
26415 +index 678c5b14841c1..8dbfd84322a88 100644
26416 +--- a/net/netfilter/ipvs/ip_vs_ctl.c
26417 ++++ b/net/netfilter/ipvs/ip_vs_ctl.c
26418 +@@ -2508,6 +2508,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len)
26419 + /* Set timeout values for (tcp tcpfin udp) */
26420 + ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg);
26421 + goto out_unlock;
26422 ++ } else if (!len) {
26423 ++ /* No more commands with len == 0 below */
26424 ++ ret = -EINVAL;
26425 ++ goto out_unlock;
26426 + }
26427 +
26428 + usvc_compat = (struct ip_vs_service_user *)arg;
26429 +@@ -2584,9 +2588,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len)
26430 + break;
26431 + case IP_VS_SO_SET_DELDEST:
26432 + ret = ip_vs_del_dest(svc, &udest);
26433 +- break;
26434 +- default:
26435 +- ret = -EINVAL;
26436 + }
26437 +
26438 + out_unlock:
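Editor's note: the ip_vs_ctl.c hunk rejects a zero-length payload immediately after the one command that legitimately carries none, which is what lets the later switch drop its default error branch. A hedged sketch of that validate-early, dispatch-late shape, with hypothetical command names:

	#include <errno.h>
	#include <stddef.h>

	enum cmd { CMD_SET_TIMEOUT, CMD_ADD_DEST, CMD_DEL_DEST };

	/* Hypothetical dispatcher mirroring do_ip_vs_set_ctl()'s flow. */
	static int dispatch(enum cmd cmd, const void *arg, size_t len)
	{
		if (cmd == CMD_SET_TIMEOUT)
			return 0;        /* only command valid with len == 0 */
		if (!len)
			return -EINVAL;  /* everything below needs a payload */

		(void)arg;
		switch (cmd) {
		case CMD_ADD_DEST:
			/* ... consume the payload ... */
			break;
		case CMD_DEL_DEST:
			break;
		default:                 /* unreachable after the guard above */
			break;
		}
		return 0;
	}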
26439 +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
26440 +index b00866d777fe0..d2e5a8f644b80 100644
26441 +--- a/net/netfilter/ipvs/ip_vs_xmit.c
26442 ++++ b/net/netfilter/ipvs/ip_vs_xmit.c
26443 +@@ -609,6 +609,8 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
26444 + if (ret == NF_ACCEPT) {
26445 + nf_reset_ct(skb);
26446 + skb_forward_csum(skb);
26447 ++ if (skb->dev)
26448 ++ skb->tstamp = 0;
26449 + }
26450 + return ret;
26451 + }
26452 +@@ -649,6 +651,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
26453 +
26454 + if (!local) {
26455 + skb_forward_csum(skb);
26456 ++ if (skb->dev)
26457 ++ skb->tstamp = 0;
26458 + NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
26459 + NULL, skb_dst(skb)->dev, dst_output);
26460 + } else
26461 +@@ -669,6 +673,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
26462 + if (!local) {
26463 + ip_vs_drop_early_demux_sk(skb);
26464 + skb_forward_csum(skb);
26465 ++ if (skb->dev)
26466 ++ skb->tstamp = 0;
26467 + NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
26468 + NULL, skb_dst(skb)->dev, dst_output);
26469 + } else
26470 +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
26471 +index e8c86ee4c1c48..c8fb2187ad4b2 100644
26472 +--- a/net/netfilter/nf_conntrack_proto_tcp.c
26473 ++++ b/net/netfilter/nf_conntrack_proto_tcp.c
26474 +@@ -541,13 +541,20 @@ static bool tcp_in_window(const struct nf_conn *ct,
26475 + swin = win << sender->td_scale;
26476 + sender->td_maxwin = (swin == 0 ? 1 : swin);
26477 + sender->td_maxend = end + sender->td_maxwin;
26478 +- /*
26479 +- * We haven't seen traffic in the other direction yet
26480 +- * but we have to tweak window tracking to pass III
26481 +- * and IV until that happens.
26482 +- */
26483 +- if (receiver->td_maxwin == 0)
26484 ++ if (receiver->td_maxwin == 0) {
26485 ++ /* We haven't seen traffic in the other
26486 ++ * direction yet but we have to tweak window
26487 ++ * tracking to pass III and IV until that
26488 ++ * happens.
26489 ++ */
26490 + receiver->td_end = receiver->td_maxend = sack;
26491 ++ } else if (sack == receiver->td_end + 1) {
26492 ++ /* Likely a reply to a keepalive.
26493 ++ * Needed for III.
26494 ++ */
26495 ++ receiver->td_end++;
26496 ++ }
26497 ++
26498 + }
26499 + } else if (((state->state == TCP_CONNTRACK_SYN_SENT
26500 + && dir == IP_CT_DIR_ORIGINAL)
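Editor's note: the conntrack change above treats a SACK exactly one byte past the recorded end as the likely reply to a TCP keepalive and nudges td_end forward so check III still passes. A tiny self-contained model of that adjustment, reduced to plain integers:

	#include <assert.h>
	#include <stdint.h>

	struct peer_state {
		uint32_t td_end;    /* highest sequence seen from this peer */
		uint32_t td_maxend; /* highest permissible ack */
		uint32_t td_maxwin; /* largest window seen */
	};

	/* Receiver-side fixup: peer unseen so far vs. keepalive reply. */
	static void fixup_receiver(struct peer_state *recv, uint32_t sack)
	{
		if (recv->td_maxwin == 0)
			recv->td_end = recv->td_maxend = sack; /* no traffic yet */
		else if (sack == recv->td_end + 1)
			recv->td_end++;                        /* keepalive reply */
	}

	int main(void)
	{
		struct peer_state r = { .td_end = 1000, .td_maxwin = 512 };

		fixup_receiver(&r, 1001);
		assert(r.td_end == 1001);
		return 0;
	}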
26501 +diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
26502 +index 2b01a151eaa80..a579e59ee5c5e 100644
26503 +--- a/net/netfilter/nf_dup_netdev.c
26504 ++++ b/net/netfilter/nf_dup_netdev.c
26505 +@@ -19,6 +19,7 @@ static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
26506 + skb_push(skb, skb->mac_len);
26507 +
26508 + skb->dev = dev;
26509 ++ skb->tstamp = 0;
26510 + dev_queue_xmit(skb);
26511 + }
26512 +
26513 +diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
26514 +index ae5628ddbe6d7..fd7c5f0f5c25b 100644
26515 +--- a/net/netfilter/nf_log_common.c
26516 ++++ b/net/netfilter/nf_log_common.c
26517 +@@ -171,6 +171,18 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
26518 + }
26519 + EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
26520 +
26521 ++void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb)
26522 ++{
26523 ++ u16 vid;
26524 ++
26525 ++ if (!skb_vlan_tag_present(skb))
26526 ++ return;
26527 ++
26528 ++ vid = skb_vlan_tag_get(skb);
26529 ++ nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid);
26530 ++}
26531 ++EXPORT_SYMBOL_GPL(nf_log_dump_vlan);
26532 ++
26533 + /* bridge and netdev logging families share this code. */
26534 + void nf_log_l2packet(struct net *net, u_int8_t pf,
26535 + __be16 protocol,
26536 +diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
26537 +index 3087e23297dbf..b77985986b24e 100644
26538 +--- a/net/netfilter/nft_fwd_netdev.c
26539 ++++ b/net/netfilter/nft_fwd_netdev.c
26540 +@@ -138,6 +138,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
26541 + return;
26542 +
26543 + skb->dev = dev;
26544 ++ skb->tstamp = 0;
26545 + neigh_xmit(neigh_table, dev, addr, skb);
26546 + out:
26547 + regs->verdict.code = verdict;
26548 +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
26549 +index e894254c17d43..8709f3d4e7c4b 100644
26550 +--- a/net/nfc/netlink.c
26551 ++++ b/net/nfc/netlink.c
26552 +@@ -1217,7 +1217,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
26553 + u32 idx;
26554 + char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
26555 +
26556 +- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
26557 ++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME])
26558 + return -EINVAL;
26559 +
26560 + idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
26561 +diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
26562 +index e2235849a57e9..7d50c45fea376 100644
26563 +--- a/net/openvswitch/flow_table.c
26564 ++++ b/net/openvswitch/flow_table.c
26565 +@@ -172,7 +172,7 @@ static struct table_instance *table_instance_alloc(int new_size)
26566 +
26567 + static void __mask_array_destroy(struct mask_array *ma)
26568 + {
26569 +- free_percpu(ma->masks_usage_cntr);
26570 ++ free_percpu(ma->masks_usage_stats);
26571 + kfree(ma);
26572 + }
26573 +
26574 +@@ -196,15 +196,15 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)
26575 + ma->masks_usage_zero_cntr[i] = 0;
26576 +
26577 + for_each_possible_cpu(cpu) {
26578 +- u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
26579 +- cpu);
26580 ++ struct mask_array_stats *stats;
26581 + unsigned int start;
26582 + u64 counter;
26583 +
26584 ++ stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
26585 + do {
26586 +- start = u64_stats_fetch_begin_irq(&ma->syncp);
26587 +- counter = usage_counters[i];
26588 +- } while (u64_stats_fetch_retry_irq(&ma->syncp, start));
26589 ++ start = u64_stats_fetch_begin_irq(&stats->syncp);
26590 ++ counter = stats->usage_cntrs[i];
26591 ++ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
26592 +
26593 + ma->masks_usage_zero_cntr[i] += counter;
26594 + }
26595 +@@ -227,9 +227,10 @@ static struct mask_array *tbl_mask_array_alloc(int size)
26596 + sizeof(struct sw_flow_mask *) *
26597 + size);
26598 +
26599 +- new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
26600 +- __alignof__(u64));
26601 +- if (!new->masks_usage_cntr) {
26602 ++ new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
26603 ++ sizeof(u64) * size,
26604 ++ __alignof__(u64));
26605 ++ if (!new->masks_usage_stats) {
26606 + kfree(new);
26607 + return NULL;
26608 + }
26609 +@@ -723,6 +724,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
26610 +
26611 + /* Flow lookup does full lookup on flow table. It starts with
26612 + * mask from index passed in *index.
26613 ++ * This function MUST be called with BH disabled due to the use
26614 ++ * of CPU specific variables.
26615 + */
26616 + static struct sw_flow *flow_lookup(struct flow_table *tbl,
26617 + struct table_instance *ti,
26618 +@@ -732,7 +735,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
26619 + u32 *n_cache_hit,
26620 + u32 *index)
26621 + {
26622 +- u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
26623 ++ struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
26624 + struct sw_flow *flow;
26625 + struct sw_flow_mask *mask;
26626 + int i;
26627 +@@ -742,9 +745,9 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
26628 + if (mask) {
26629 + flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
26630 + if (flow) {
26631 +- u64_stats_update_begin(&ma->syncp);
26632 +- usage_counters[*index]++;
26633 +- u64_stats_update_end(&ma->syncp);
26634 ++ u64_stats_update_begin(&stats->syncp);
26635 ++ stats->usage_cntrs[*index]++;
26636 ++ u64_stats_update_end(&stats->syncp);
26637 + (*n_cache_hit)++;
26638 + return flow;
26639 + }
26640 +@@ -763,9 +766,9 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
26641 + flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
26642 + if (flow) { /* Found */
26643 + *index = i;
26644 +- u64_stats_update_begin(&ma->syncp);
26645 +- usage_counters[*index]++;
26646 +- u64_stats_update_end(&ma->syncp);
26647 ++ u64_stats_update_begin(&stats->syncp);
26648 ++ stats->usage_cntrs[*index]++;
26649 ++ u64_stats_update_end(&stats->syncp);
26650 + return flow;
26651 + }
26652 + }
26653 +@@ -851,9 +854,17 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
26654 + struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
26655 + u32 __always_unused n_mask_hit;
26656 + u32 __always_unused n_cache_hit;
26657 ++ struct sw_flow *flow;
26658 + u32 index = 0;
26659 +
26660 +- return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
26661 ++ /* This function gets called through the netlink interface and is
26662 ++ * therefore preemptible. However, flow_lookup() needs to be called
26663 ++ * with BH disabled due to CPU specific variables.
26664 ++ */
26665 ++ local_bh_disable();
26666 ++ flow = flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
26667 ++ local_bh_enable();
26668 ++ return flow;
26669 + }
26670 +
26671 + struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
26672 +@@ -1109,7 +1120,6 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
26673 +
26674 + for (i = 0; i < ma->max; i++) {
26675 + struct sw_flow_mask *mask;
26676 +- unsigned int start;
26677 + int cpu;
26678 +
26679 + mask = rcu_dereference_ovsl(ma->masks[i]);
26680 +@@ -1120,14 +1130,16 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
26681 + masks_and_count[i].counter = 0;
26682 +
26683 + for_each_possible_cpu(cpu) {
26684 +- u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
26685 +- cpu);
26686 ++ struct mask_array_stats *stats;
26687 ++ unsigned int start;
26688 + u64 counter;
26689 +
26690 ++ stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
26691 + do {
26692 +- start = u64_stats_fetch_begin_irq(&ma->syncp);
26693 +- counter = usage_counters[i];
26694 +- } while (u64_stats_fetch_retry_irq(&ma->syncp, start));
26695 ++ start = u64_stats_fetch_begin_irq(&stats->syncp);
26696 ++ counter = stats->usage_cntrs[i];
26697 ++ } while (u64_stats_fetch_retry_irq(&stats->syncp,
26698 ++ start));
26699 +
26700 + masks_and_count[i].counter += counter;
26701 + }
26702 +diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
26703 +index 6e7d4ac593531..43144396e192c 100644
26704 +--- a/net/openvswitch/flow_table.h
26705 ++++ b/net/openvswitch/flow_table.h
26706 +@@ -38,12 +38,16 @@ struct mask_count {
26707 + u64 counter;
26708 + };
26709 +
26710 ++struct mask_array_stats {
26711 ++ struct u64_stats_sync syncp;
26712 ++ u64 usage_cntrs[];
26713 ++};
26714 ++
26715 + struct mask_array {
26716 + struct rcu_head rcu;
26717 + int count, max;
26718 +- u64 __percpu *masks_usage_cntr;
26719 ++ struct mask_array_stats __percpu *masks_usage_stats;
26720 + u64 *masks_usage_zero_cntr;
26721 +- struct u64_stats_sync syncp;
26722 + struct sw_flow_mask __rcu *masks[];
26723 + };
26724 +
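Editor's note: the openvswitch hunks move the u64_stats_sync into the new per-CPU struct mask_array_stats, so readers retry against the sequence counter of the CPU whose statistics they are sampling instead of one shared counter. Below is a single-threaded userspace analog of that writer/reader protocol, modelled as a seqcount; sizes and names are illustrative.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NUM_MASKS 4

	/* Per-CPU analog of struct mask_array_stats. */
	struct mask_stats {
		atomic_uint seq;                /* u64_stats_sync stand-in */
		uint64_t usage_cntrs[NUM_MASKS];
	};

	static void stats_inc(struct mask_stats *s, int i)
	{
		atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
		s->usage_cntrs[i]++;
		atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
	}

	static uint64_t stats_read(struct mask_stats *s, int i)
	{
		unsigned int start;
		uint64_t v;

		do {
			start = atomic_load_explicit(&s->seq,
						     memory_order_acquire);
			v = s->usage_cntrs[i];
		} while ((start & 1) || /* writer was mid-update: retry */
			 atomic_load_explicit(&s->seq,
					      memory_order_acquire) != start);
		return v;
	}

	int main(void)
	{
		struct mask_stats cpu0 = { 0 };

		stats_inc(&cpu0, 2);
		printf("mask 2 hits: %llu\n",
		       (unsigned long long)stats_read(&cpu0, 2));
		return 0;
	}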
26725 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
26726 +index a780afdf570d2..0bac241a41235 100644
26727 +--- a/net/sched/act_ct.c
26728 ++++ b/net/sched/act_ct.c
26729 +@@ -156,11 +156,11 @@ tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
26730 + __be16 target_dst = target.dst.u.udp.port;
26731 +
26732 + if (target_src != tuple->src.u.udp.port)
26733 +- tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
26734 ++ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
26735 + offsetof(struct udphdr, source),
26736 + 0xFFFF, be16_to_cpu(target_src));
26737 + if (target_dst != tuple->dst.u.udp.port)
26738 +- tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
26739 ++ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
26740 + offsetof(struct udphdr, dest),
26741 + 0xFFFF, be16_to_cpu(target_dst));
26742 + }
26743 +diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
26744 +index a229751ee8c46..85c0d0d5b9da5 100644
26745 +--- a/net/sched/act_tunnel_key.c
26746 ++++ b/net/sched/act_tunnel_key.c
26747 +@@ -459,7 +459,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
26748 +
26749 + metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
26750 + 0, flags,
26751 +- key_id, 0);
26752 ++ key_id, opts_len);
26753 + } else {
26754 + NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
26755 + ret = -EINVAL;
26756 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
26757 +index 41a55c6cbeb8f..faeabff283a2b 100644
26758 +--- a/net/sched/cls_api.c
26759 ++++ b/net/sched/cls_api.c
26760 +@@ -3712,7 +3712,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
26761 + entry->gate.num_entries = tcf_gate_num_entries(act);
26762 + err = tcf_gate_get_entries(entry, act);
26763 + if (err)
26764 +- goto err_out;
26765 ++ goto err_out_locked;
26766 + } else {
26767 + err = -EOPNOTSUPP;
26768 + goto err_out_locked;
26769 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
26770 +index a406627b1d552..7c0e4fac9748d 100644
26771 +--- a/net/smc/smc_core.c
26772 ++++ b/net/smc/smc_core.c
26773 +@@ -1597,7 +1597,7 @@ out:
26774 + return rc;
26775 + }
26776 +
26777 +-#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
26778 ++#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
26779 +
26780 + static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
26781 + bool is_dmb, int bufsize)
26782 +diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
26783 +index 3ea33466ebe98..da9332de26c5d 100644
26784 +--- a/net/smc/smc_llc.c
26785 ++++ b/net/smc/smc_llc.c
26786 +@@ -233,8 +233,6 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
26787 + default:
26788 + flow->type = SMC_LLC_FLOW_NONE;
26789 + }
26790 +- if (qentry == lgr->delayed_event)
26791 +- lgr->delayed_event = NULL;
26792 + smc_llc_flow_qentry_set(flow, qentry);
26793 + spin_unlock_bh(&lgr->llc_flow_lock);
26794 + return true;
26795 +@@ -1603,13 +1601,12 @@ static void smc_llc_event_work(struct work_struct *work)
26796 + struct smc_llc_qentry *qentry;
26797 +
26798 + if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
26799 +- if (smc_link_usable(lgr->delayed_event->link)) {
26800 +- smc_llc_event_handler(lgr->delayed_event);
26801 +- } else {
26802 +- qentry = lgr->delayed_event;
26803 +- lgr->delayed_event = NULL;
26804 ++ qentry = lgr->delayed_event;
26805 ++ lgr->delayed_event = NULL;
26806 ++ if (smc_link_usable(qentry->link))
26807 ++ smc_llc_event_handler(qentry);
26808 ++ else
26809 + kfree(qentry);
26810 +- }
26811 + }
26812 +
26813 + again:
26814 +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
26815 +index 258b04372f854..bd4678db9d76b 100644
26816 +--- a/net/sunrpc/auth_gss/svcauth_gss.c
26817 ++++ b/net/sunrpc/auth_gss/svcauth_gss.c
26818 +@@ -1147,9 +1147,9 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
26819 + struct gssp_in_token *in_token)
26820 + {
26821 + struct kvec *argv = &rqstp->rq_arg.head[0];
26822 +- unsigned int page_base, length;
26823 +- int pages, i, res;
26824 +- size_t inlen;
26825 ++ unsigned int length, pgto_offs, pgfrom_offs;
26826 ++ int pages, i, res, pgto, pgfrom;
26827 ++ size_t inlen, to_offs, from_offs;
26828 +
26829 + res = gss_read_common_verf(gc, argv, authp, in_handle);
26830 + if (res)
26831 +@@ -1177,17 +1177,24 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
26832 + memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
26833 + inlen -= length;
26834 +
26835 +- i = 1;
26836 +- page_base = rqstp->rq_arg.page_base;
26837 ++ to_offs = length;
26838 ++ from_offs = rqstp->rq_arg.page_base;
26839 + while (inlen) {
26840 +- length = min_t(unsigned int, inlen, PAGE_SIZE);
26841 +- memcpy(page_address(in_token->pages[i]),
26842 +- page_address(rqstp->rq_arg.pages[i]) + page_base,
26843 ++ pgto = to_offs >> PAGE_SHIFT;
26844 ++ pgfrom = from_offs >> PAGE_SHIFT;
26845 ++ pgto_offs = to_offs & ~PAGE_MASK;
26846 ++ pgfrom_offs = from_offs & ~PAGE_MASK;
26847 ++
26848 ++ length = min_t(unsigned int, inlen,
26849 ++ min_t(unsigned int, PAGE_SIZE - pgto_offs,
26850 ++ PAGE_SIZE - pgfrom_offs));
26851 ++ memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
26852 ++ page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
26853 + length);
26854 +
26855 ++ to_offs += length;
26856 ++ from_offs += length;
26857 + inlen -= length;
26858 +- page_base = 0;
26859 +- i++;
26860 + }
26861 + return 0;
26862 + }
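Editor's note: the gss_read_proxy_verf() fix replaces a copy loop that implicitly assumed both sides were page aligned with explicit page/offset arithmetic on source and destination. A compilable sketch of the same math over arrays of page-sized buffers follows; PAGE_SIZE, the array layout, and the helper name are illustrative, so treat this as a model of the fix rather than the kernel routine.

	#include <stdint.h>
	#include <string.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1u << PAGE_SHIFT)
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	/* Copy len bytes between page arrays; neither offset need be aligned. */
	static void copy_pages(uint8_t **to, size_t to_offs,
			       uint8_t **from, size_t from_offs, size_t len)
	{
		while (len) {
			size_t pgto = to_offs >> PAGE_SHIFT;
			size_t pgfrom = from_offs >> PAGE_SHIFT;
			size_t pgto_offs = to_offs & ~PAGE_MASK;
			size_t pgfrom_offs = from_offs & ~PAGE_MASK;
			size_t chunk = len;

			/* never cross a page boundary on either side */
			if (chunk > PAGE_SIZE - pgto_offs)
				chunk = PAGE_SIZE - pgto_offs;
			if (chunk > PAGE_SIZE - pgfrom_offs)
				chunk = PAGE_SIZE - pgfrom_offs;

			memcpy(to[pgto] + pgto_offs,
			       from[pgfrom] + pgfrom_offs, chunk);
			to_offs += chunk;
			from_offs += chunk;
			len -= chunk;
		}
	}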
26863 +diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
26864 +index 7b94d971feb3b..c3d588b149aaa 100644
26865 +--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
26866 ++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
26867 +@@ -638,10 +638,11 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
26868 + while (remaining) {
26869 + len = min_t(u32, PAGE_SIZE - pageoff, remaining);
26870 +
26871 +- memcpy(dst, page_address(*ppages), len);
26872 ++ memcpy(dst, page_address(*ppages) + pageoff, len);
26873 + remaining -= len;
26874 + dst += len;
26875 + pageoff = 0;
26876 ++ ppages++;
26877 + }
26878 + }
26879 +
26880 +diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
26881 +index 940d176e0e872..d4beca895992d 100644
26882 +--- a/net/tipc/bcast.c
26883 ++++ b/net/tipc/bcast.c
26884 +@@ -108,6 +108,8 @@ static void tipc_bcbase_select_primary(struct net *net)
26885 + {
26886 + struct tipc_bc_base *bb = tipc_bc_base(net);
26887 + int all_dests = tipc_link_bc_peers(bb->link);
26888 ++ int max_win = tipc_link_max_win(bb->link);
26889 ++ int min_win = tipc_link_min_win(bb->link);
26890 + int i, mtu, prim;
26891 +
26892 + bb->primary_bearer = INVALID_BEARER_ID;
26893 +@@ -121,8 +123,12 @@ static void tipc_bcbase_select_primary(struct net *net)
26894 + continue;
26895 +
26896 + mtu = tipc_bearer_mtu(net, i);
26897 +- if (mtu < tipc_link_mtu(bb->link))
26898 ++ if (mtu < tipc_link_mtu(bb->link)) {
26899 + tipc_link_set_mtu(bb->link, mtu);
26900 ++ tipc_link_set_queue_limits(bb->link,
26901 ++ min_win,
26902 ++ max_win);
26903 ++ }
26904 + bb->bcast_support &= tipc_bearer_bcast_support(net, i);
26905 + if (bb->dests[i] < all_dests)
26906 + continue;
26907 +@@ -585,7 +591,7 @@ static int tipc_bc_link_set_queue_limits(struct net *net, u32 max_win)
26908 + if (max_win > TIPC_MAX_LINK_WIN)
26909 + return -EINVAL;
26910 + tipc_bcast_lock(net);
26911 +- tipc_link_set_queue_limits(l, BCLINK_WIN_MIN, max_win);
26912 ++ tipc_link_set_queue_limits(l, tipc_link_min_win(l), max_win);
26913 + tipc_bcast_unlock(net);
26914 + return 0;
26915 + }
26916 +diff --git a/net/tipc/msg.c b/net/tipc/msg.c
26917 +index 52e93ba4d8e2c..6812244018714 100644
26918 +--- a/net/tipc/msg.c
26919 ++++ b/net/tipc/msg.c
26920 +@@ -150,7 +150,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
26921 + if (fragid == FIRST_FRAGMENT) {
26922 + if (unlikely(head))
26923 + goto err;
26924 +- frag = skb_unshare(frag, GFP_ATOMIC);
26925 ++ if (skb_cloned(frag))
26926 ++ frag = skb_copy(frag, GFP_ATOMIC);
26927 + if (unlikely(!frag))
26928 + goto err;
26929 + head = *headbuf = frag;
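Editor's note: the tipc_buf_append() change copies the first fragment only when it is actually cloned, sidestepping skb_unshare(), which consumes the buffer even when the copy fails. A small refcounted-buffer model of that copy-when-shared rule; everything here is an illustrative stand-in for the sk_buff handling.

	#include <stdlib.h>
	#include <string.h>

	/* Minimal refcounted buffer standing in for an sk_buff. */
	struct buf {
		int refs;
		size_t len;
		unsigned char *data;
	};

	static struct buf *buf_copy(const struct buf *b)
	{
		struct buf *n = malloc(sizeof(*n));

		if (!n)
			return NULL;
		n->refs = 1;
		n->len = b->len;
		n->data = malloc(b->len);
		if (!n->data) {
			free(n);
			return NULL;
		}
		memcpy(n->data, b->data, b->len);
		return n;
	}

	/*
	 * Make 'b' safe to modify: copy only when it is shared. On failure
	 * the caller's original reference stays intact, unlike skb_unshare().
	 */
	static struct buf *buf_make_writable(struct buf *b)
	{
		return b->refs > 1 ? buf_copy(b) : b;
	}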
26930 +diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
26931 +index 2f9c148f17e27..fe4edce459ad4 100644
26932 +--- a/net/tipc/name_distr.c
26933 ++++ b/net/tipc/name_distr.c
26934 +@@ -327,8 +327,13 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
26935 + struct tipc_msg *hdr;
26936 + u16 seqno;
26937 +
26938 ++ spin_lock_bh(&namedq->lock);
26939 + skb_queue_walk_safe(namedq, skb, tmp) {
26940 +- skb_linearize(skb);
26941 ++ if (unlikely(skb_linearize(skb))) {
26942 ++ __skb_unlink(skb, namedq);
26943 ++ kfree_skb(skb);
26944 ++ continue;
26945 ++ }
26946 + hdr = buf_msg(skb);
26947 + seqno = msg_named_seqno(hdr);
26948 + if (msg_is_last_bulk(hdr)) {
26949 +@@ -338,12 +343,14 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
26950 +
26951 + if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
26952 + __skb_unlink(skb, namedq);
26953 ++ spin_unlock_bh(&namedq->lock);
26954 + return skb;
26955 + }
26956 +
26957 + if (*open && (*rcv_nxt == seqno)) {
26958 + (*rcv_nxt)++;
26959 + __skb_unlink(skb, namedq);
26960 ++ spin_unlock_bh(&namedq->lock);
26961 + return skb;
26962 + }
26963 +
26964 +@@ -353,6 +360,7 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
26965 + continue;
26966 + }
26967 + }
26968 ++ spin_unlock_bh(&namedq->lock);
26969 + return NULL;
26970 + }
26971 +
26972 +diff --git a/net/tipc/node.c b/net/tipc/node.c
26973 +index 4edcee3088da1..e4cf515e323f3 100644
26974 +--- a/net/tipc/node.c
26975 ++++ b/net/tipc/node.c
26976 +@@ -1485,7 +1485,7 @@ static void node_lost_contact(struct tipc_node *n,
26977 +
26978 + /* Clean up broadcast state */
26979 + tipc_bcast_remove_peer(n->net, n->bc_entry.link);
26980 +- __skb_queue_purge(&n->bc_entry.namedq);
26981 ++ skb_queue_purge(&n->bc_entry.namedq);
26982 +
26983 + /* Abort any ongoing link failover */
26984 + for (i = 0; i < MAX_BEARERS; i++) {
26985 +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
26986 +index b74e2741f74f4..cec86229a6a02 100644
26987 +--- a/net/tls/tls_device.c
26988 ++++ b/net/tls/tls_device.c
26989 +@@ -418,14 +418,14 @@ static int tls_push_data(struct sock *sk,
26990 + struct tls_context *tls_ctx = tls_get_ctx(sk);
26991 + struct tls_prot_info *prot = &tls_ctx->prot_info;
26992 + struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
26993 +- int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
26994 + struct tls_record_info *record = ctx->open_record;
26995 + int tls_push_record_flags;
26996 + struct page_frag *pfrag;
26997 + size_t orig_size = size;
26998 + u32 max_open_record_len;
26999 +- int copy, rc = 0;
27000 ++ bool more = false;
27001 + bool done = false;
27002 ++ int copy, rc = 0;
27003 + long timeo;
27004 +
27005 + if (flags &
27006 +@@ -492,9 +492,8 @@ handle_error:
27007 + if (!size) {
27008 + last_record:
27009 + tls_push_record_flags = flags;
27010 +- if (more) {
27011 +- tls_ctx->pending_open_record_frags =
27012 +- !!record->num_frags;
27013 ++ if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
27014 ++ more = true;
27015 + break;
27016 + }
27017 +
27018 +@@ -526,6 +525,8 @@ last_record:
27019 + }
27020 + } while (!done);
27021 +
27022 ++ tls_ctx->pending_open_record_frags = more;
27023 ++
27024 + if (orig_size - size > 0)
27025 + rc = orig_size - size;
27026 +
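Editor's note: the tls_device change stops caching the MSG_MORE test before the loop; it re-evaluates the flags at the moment the data runs out and records pending_open_record_frags once, after the loop, from that result. A simplified sketch of the corrected loop shape (constants and names are invented for illustration):

	#include <stdbool.h>
	#include <stddef.h>

	#define MSG_MORE_FLAG 0x1

	struct ctx {
		bool pending_open_record; /* open record still carries frags */
	};

	/* Push 'size' bytes; keep the record open if more data is promised. */
	static size_t push_data(struct ctx *ctx, size_t size, int flags)
	{
		size_t pushed = 0;
		bool more = false;

		while (size) {
			size_t chunk = size > 4096 ? 4096 : size;

			pushed += chunk;
			size -= chunk;

			if (!size && (flags & MSG_MORE_FLAG)) {
				more = true; /* leave the record open */
				break;
			}
			/* otherwise: close and transmit the record here */
		}

		ctx->pending_open_record = more; /* once, after the loop */
		return pushed;
	}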
27027 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
27028 +index 7fd45f6ddb058..e14307f2bddcc 100644
27029 +--- a/net/wireless/nl80211.c
27030 ++++ b/net/wireless/nl80211.c
27031 +@@ -2355,7 +2355,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
27032 + * case we'll continue with more data in the next round,
27033 + * but break unconditionally so unsplit data stops here.
27034 + */
27035 +- state->split_start++;
27036 ++ if (state->split)
27037 ++ state->split_start++;
27038 ++ else
27039 ++ state->split_start = 0;
27040 + break;
27041 + case 9:
27042 + if (rdev->wiphy.extended_capabilities &&
27043 +@@ -4683,16 +4686,14 @@ static int nl80211_parse_he_obss_pd(struct nlattr *attrs,
27044 + if (err)
27045 + return err;
27046 +
27047 +- if (!tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] ||
27048 +- !tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
27049 +- return -EINVAL;
27050 +-
27051 +- he_obss_pd->min_offset =
27052 +- nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
27053 +- he_obss_pd->max_offset =
27054 +- nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
27055 ++ if (tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET])
27056 ++ he_obss_pd->min_offset =
27057 ++ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
27058 ++ if (tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
27059 ++ he_obss_pd->max_offset =
27060 ++ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
27061 +
27062 +- if (he_obss_pd->min_offset >= he_obss_pd->max_offset)
27063 ++ if (he_obss_pd->min_offset > he_obss_pd->max_offset)
27064 + return -EINVAL;
27065 +
27066 + he_obss_pd->enable = true;
27067 +diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
27068 +index 19c679456a0e2..c821e98671393 100644
27069 +--- a/samples/bpf/xdpsock_user.c
27070 ++++ b/samples/bpf/xdpsock_user.c
27071 +@@ -1004,7 +1004,7 @@ static void rx_drop_all(void)
27072 + }
27073 + }
27074 +
27075 +-static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb, int batch_size)
27076 ++static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
27077 + {
27078 + u32 idx;
27079 + unsigned int i;
27080 +@@ -1017,14 +1017,14 @@ static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb, int batch_size)
27081 + for (i = 0; i < batch_size; i++) {
27082 + struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
27083 + idx + i);
27084 +- tx_desc->addr = (frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
27085 ++ tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
27086 + tx_desc->len = PKT_SIZE;
27087 + }
27088 +
27089 + xsk_ring_prod__submit(&xsk->tx, batch_size);
27090 + xsk->outstanding_tx += batch_size;
27091 +- frame_nb += batch_size;
27092 +- frame_nb %= NUM_FRAMES;
27093 ++ *frame_nb += batch_size;
27094 ++ *frame_nb %= NUM_FRAMES;
27095 + complete_tx_only(xsk, batch_size);
27096 + }
27097 +
27098 +@@ -1080,7 +1080,7 @@ static void tx_only_all(void)
27099 + }
27100 +
27101 + for (i = 0; i < num_socks; i++)
27102 +- tx_only(xsks[i], frame_nb[i], batch_size);
27103 ++ tx_only(xsks[i], &frame_nb[i], batch_size);
27104 +
27105 + pkt_cnt += batch_size;
27106 +
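Editor's note: the xdpsock fix passes frame_nb by pointer so the advancing frame counter survives across tx_only() calls; with pass-by-value, each call updated a private copy and kept transmitting the same frames. A minimal demonstration of the difference:

	#include <stdio.h>

	#define NUM_FRAMES 8

	/* Buggy shape: the caller's counter never advances. */
	static void tx_by_value(unsigned int frame_nb, int batch)
	{
		frame_nb = (frame_nb + batch) % NUM_FRAMES; /* lost on return */
	}

	/* Fixed shape, matching the hunk above. */
	static void tx_by_pointer(unsigned int *frame_nb, int batch)
	{
		*frame_nb = (*frame_nb + batch) % NUM_FRAMES;
	}

	int main(void)
	{
		unsigned int nb = 0;

		tx_by_value(nb, 4);
		printf("by value:   %u\n", nb); /* still 0 */
		tx_by_pointer(&nb, 4);
		printf("by pointer: %u\n", nb); /* now 4 */
		return 0;
	}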
27107 +diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c
27108 +index a11bf6c5b53b4..cd3f16a6f5caf 100644
27109 +--- a/samples/mic/mpssd/mpssd.c
27110 ++++ b/samples/mic/mpssd/mpssd.c
27111 +@@ -403,9 +403,9 @@ mic_virtio_copy(struct mic_info *mic, int fd,
27112 +
27113 + static inline unsigned _vring_size(unsigned int num, unsigned long align)
27114 + {
27115 +- return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
27116 ++ return _ALIGN_UP(((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
27117 + + align - 1) & ~(align - 1))
27118 +- + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
27119 ++ + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num, 4);
27120 + }
27121 +
27122 + /*
27123 +diff --git a/scripts/package/builddeb b/scripts/package/builddeb
27124 +index 6df3c9f8b2da6..8277144298a00 100755
27125 +--- a/scripts/package/builddeb
27126 ++++ b/scripts/package/builddeb
27127 +@@ -202,8 +202,10 @@ EOF
27128 + done
27129 +
27130 + if [ "$ARCH" != "um" ]; then
27131 +- deploy_kernel_headers debian/linux-headers
27132 +- create_package linux-headers-$version debian/linux-headers
27133 ++ if is_enabled CONFIG_MODULES; then
27134 ++ deploy_kernel_headers debian/linux-headers
27135 ++ create_package linux-headers-$version debian/linux-headers
27136 ++ fi
27137 +
27138 + deploy_libc_headers debian/linux-libc-dev
27139 + create_package linux-libc-dev debian/linux-libc-dev
27140 +diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
27141 +index 48fbd3d0284a8..ccd46aad1dff6 100755
27142 +--- a/scripts/package/mkdebian
27143 ++++ b/scripts/package/mkdebian
27144 +@@ -183,13 +183,6 @@ Description: Linux kernel, version $version
27145 + This package contains the Linux kernel, modules and corresponding other
27146 + files, version: $version.
27147 +
27148 +-Package: $kernel_headers_packagename
27149 +-Architecture: $debarch
27150 +-Description: Linux kernel headers for $version on $debarch
27151 +- This package provides kernel header files for $version on $debarch
27152 +- .
27153 +- This is useful for people who need to build external modules
27154 +-
27155 + Package: linux-libc-dev
27156 + Section: devel
27157 + Provides: linux-kernel-headers
27158 +@@ -200,6 +193,18 @@ Description: Linux support headers for userspace development
27159 + Multi-Arch: same
27160 + EOF
27161 +
27162 ++if is_enabled CONFIG_MODULES; then
27163 ++cat <<EOF >> debian/control
27164 ++
27165 ++Package: $kernel_headers_packagename
27166 ++Architecture: $debarch
27167 ++Description: Linux kernel headers for $version on $debarch
27168 ++ This package provides kernel header files for $version on $debarch
27169 ++ .
27170 ++ This is useful for people who need to build external modules
27171 ++EOF
27172 ++fi
27173 ++
27174 + if is_enabled CONFIG_DEBUG_INFO; then
27175 + cat <<EOF >> debian/control
27176 +
27177 +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
27178 +index 011c3c76af865..21989fa0c1074 100644
27179 +--- a/security/integrity/ima/ima_crypto.c
27180 ++++ b/security/integrity/ima/ima_crypto.c
27181 +@@ -829,6 +829,8 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
27182 + /* now accumulate with current aggregate */
27183 + rc = crypto_shash_update(shash, d.digest,
27184 + crypto_shash_digestsize(tfm));
27185 ++ if (rc != 0)
27186 ++ return rc;
27187 + }
27188 + /*
27189 + * Extend cumulative digest over TPM registers 8-9, which contain
27190 +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
27191 +index 8a91711ca79b2..4c86cd4eece0c 100644
27192 +--- a/security/integrity/ima/ima_main.c
27193 ++++ b/security/integrity/ima/ima_main.c
27194 +@@ -531,6 +531,16 @@ int ima_file_hash(struct file *file, char *buf, size_t buf_size)
27195 + return -EOPNOTSUPP;
27196 +
27197 + mutex_lock(&iint->mutex);
27198 ++
27199 ++ /*
27200 ++ * ima_file_hash can be called when ima_collect_measurement has not
27201 ++ * yet been called, so we might not always have a hash.
27202 ++ */
27203 ++ if (!iint->ima_hash) {
27204 ++ mutex_unlock(&iint->mutex);
27205 ++ return -EOPNOTSUPP;
27206 ++ }
27207 ++
27208 + if (buf) {
27209 + size_t copied_size;
27210 +
27211 +diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
27212 +index b4de33074b37d..4a7a4b6bf79b2 100644
27213 +--- a/security/integrity/ima/ima_policy.c
27214 ++++ b/security/integrity/ima/ima_policy.c
27215 +@@ -59,6 +59,11 @@ enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB };
27216 +
27217 + enum policy_rule_list { IMA_DEFAULT_POLICY = 1, IMA_CUSTOM_POLICY };
27218 +
27219 ++struct ima_rule_opt_list {
27220 ++ size_t count;
27221 ++ char *items[];
27222 ++};
27223 ++
27224 + struct ima_rule_entry {
27225 + struct list_head list;
27226 + int action;
27227 +@@ -78,7 +83,7 @@ struct ima_rule_entry {
27228 + int type; /* audit type */
27229 + } lsm[MAX_LSM_RULES];
27230 + char *fsname;
27231 +- char *keyrings; /* Measure keys added to these keyrings */
27232 ++ struct ima_rule_opt_list *keyrings; /* Measure keys added to these keyrings */
27233 + struct ima_template_desc *template;
27234 + };
27235 +
27236 +@@ -206,10 +211,6 @@ static LIST_HEAD(ima_policy_rules);
27237 + static LIST_HEAD(ima_temp_rules);
27238 + static struct list_head *ima_rules = &ima_default_rules;
27239 +
27240 +-/* Pre-allocated buffer used for matching keyrings. */
27241 +-static char *ima_keyrings;
27242 +-static size_t ima_keyrings_len;
27243 +-
27244 + static int ima_policy __initdata;
27245 +
27246 + static int __init default_measure_policy_setup(char *str)
27247 +@@ -253,6 +254,72 @@ static int __init default_appraise_policy_setup(char *str)
27248 + }
27249 + __setup("ima_appraise_tcb", default_appraise_policy_setup);
27250 +
27251 ++static struct ima_rule_opt_list *ima_alloc_rule_opt_list(const substring_t *src)
27252 ++{
27253 ++ struct ima_rule_opt_list *opt_list;
27254 ++ size_t count = 0;
27255 ++ char *src_copy;
27256 ++ char *cur, *next;
27257 ++ size_t i;
27258 ++
27259 ++ src_copy = match_strdup(src);
27260 ++ if (!src_copy)
27261 ++ return ERR_PTR(-ENOMEM);
27262 ++
27263 ++ next = src_copy;
27264 ++ while ((cur = strsep(&next, "|"))) {
27265 ++ /* Don't accept an empty list item */
27266 ++ if (!(*cur)) {
27267 ++ kfree(src_copy);
27268 ++ return ERR_PTR(-EINVAL);
27269 ++ }
27270 ++ count++;
27271 ++ }
27272 ++
27273 ++ /* Don't accept an empty list */
27274 ++ if (!count) {
27275 ++ kfree(src_copy);
27276 ++ return ERR_PTR(-EINVAL);
27277 ++ }
27278 ++
27279 ++ opt_list = kzalloc(struct_size(opt_list, items, count), GFP_KERNEL);
27280 ++ if (!opt_list) {
27281 ++ kfree(src_copy);
27282 ++ return ERR_PTR(-ENOMEM);
27283 ++ }
27284 ++
27285 ++ /*
27286 ++ * strsep() has already replaced all instances of '|' with '\0',
27287 ++ * leaving a byte sequence of NUL-terminated strings. Reference each
27288 ++ * string with the array of items.
27289 ++ *
27290 ++ * IMPORTANT: Ownership of the allocated buffer is transferred from
27291 ++ * src_copy to the first element in the items array. To free the
27292 ++ * buffer, kfree() must only be called on the first element of the
27293 ++ * array.
27294 ++ */
27295 ++ for (i = 0, cur = src_copy; i < count; i++) {
27296 ++ opt_list->items[i] = cur;
27297 ++ cur = strchr(cur, '\0') + 1;
27298 ++ }
27299 ++ opt_list->count = count;
27300 ++
27301 ++ return opt_list;
27302 ++}
27303 ++
27304 ++static void ima_free_rule_opt_list(struct ima_rule_opt_list *opt_list)
27305 ++{
27306 ++ if (!opt_list)
27307 ++ return;
27308 ++
27309 ++ if (opt_list->count) {
27310 ++ kfree(opt_list->items[0]);
27311 ++ opt_list->count = 0;
27312 ++ }
27313 ++
27314 ++ kfree(opt_list);
27315 ++}
27316 ++
27317 + static void ima_lsm_free_rule(struct ima_rule_entry *entry)
27318 + {
27319 + int i;
27320 +@@ -274,7 +341,7 @@ static void ima_free_rule(struct ima_rule_entry *entry)
27321 + * the defined_templates list and cannot be freed here
27322 + */
27323 + kfree(entry->fsname);
27324 +- kfree(entry->keyrings);
27325 ++ ima_free_rule_opt_list(entry->keyrings);
27326 + ima_lsm_free_rule(entry);
27327 + kfree(entry);
27328 + }
27329 +@@ -394,8 +461,8 @@ int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
27330 + static bool ima_match_keyring(struct ima_rule_entry *rule,
27331 + const char *keyring, const struct cred *cred)
27332 + {
27333 +- char *next_keyring, *keyrings_ptr;
27334 + bool matched = false;
27335 ++ size_t i;
27336 +
27337 + if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
27338 + return false;
27339 +@@ -406,15 +473,8 @@ static bool ima_match_keyring(struct ima_rule_entry *rule,
27340 + if (!keyring)
27341 + return false;
27342 +
27343 +- strcpy(ima_keyrings, rule->keyrings);
27344 +-
27345 +- /*
27346 +- * "keyrings=" is specified in the policy in the format below:
27347 +- * keyrings=.builtin_trusted_keys|.ima|.evm
27348 +- */
27349 +- keyrings_ptr = ima_keyrings;
27350 +- while ((next_keyring = strsep(&keyrings_ptr, "|")) != NULL) {
27351 +- if (!strcmp(next_keyring, keyring)) {
27352 ++ for (i = 0; i < rule->keyrings->count; i++) {
27353 ++ if (!strcmp(rule->keyrings->items[i], keyring)) {
27354 + matched = true;
27355 + break;
27356 + }
27357 +@@ -1065,7 +1125,6 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
27358 + bool uid_token;
27359 + struct ima_template_desc *template_desc;
27360 + int result = 0;
27361 +- size_t keyrings_len;
27362 +
27363 + ab = integrity_audit_log_start(audit_context(), GFP_KERNEL,
27364 + AUDIT_INTEGRITY_POLICY_RULE);
27365 +@@ -1174,7 +1233,8 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
27366 + entry->func = POLICY_CHECK;
27367 + else if (strcmp(args[0].from, "KEXEC_CMDLINE") == 0)
27368 + entry->func = KEXEC_CMDLINE;
27369 +- else if (strcmp(args[0].from, "KEY_CHECK") == 0)
27370 ++ else if (IS_ENABLED(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) &&
27371 ++ strcmp(args[0].from, "KEY_CHECK") == 0)
27372 + entry->func = KEY_CHECK;
27373 + else
27374 + result = -EINVAL;
27375 +@@ -1231,37 +1291,19 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
27376 + case Opt_keyrings:
27377 + ima_log_string(ab, "keyrings", args[0].from);
27378 +
27379 +- keyrings_len = strlen(args[0].from) + 1;
27380 +-
27381 +- if ((entry->keyrings) ||
27382 +- (keyrings_len < 2)) {
27383 ++ if (!IS_ENABLED(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) ||
27384 ++ entry->keyrings) {
27385 + result = -EINVAL;
27386 + break;
27387 + }
27388 +
27389 +- if (keyrings_len > ima_keyrings_len) {
27390 +- char *tmpbuf;
27391 +-
27392 +- tmpbuf = krealloc(ima_keyrings, keyrings_len,
27393 +- GFP_KERNEL);
27394 +- if (!tmpbuf) {
27395 +- result = -ENOMEM;
27396 +- break;
27397 +- }
27398 +-
27399 +- ima_keyrings = tmpbuf;
27400 +- ima_keyrings_len = keyrings_len;
27401 +- }
27402 +-
27403 +- entry->keyrings = kstrdup(args[0].from, GFP_KERNEL);
27404 +- if (!entry->keyrings) {
27405 +- kfree(ima_keyrings);
27406 +- ima_keyrings = NULL;
27407 +- ima_keyrings_len = 0;
27408 +- result = -ENOMEM;
27409 ++ entry->keyrings = ima_alloc_rule_opt_list(args);
27410 ++ if (IS_ERR(entry->keyrings)) {
27411 ++ result = PTR_ERR(entry->keyrings);
27412 ++ entry->keyrings = NULL;
27413 + break;
27414 + }
27415 +- result = 0;
27416 ++
27417 + entry->flags |= IMA_KEYRINGS;
27418 + break;
27419 + case Opt_fsuuid:
27420 +@@ -1574,6 +1616,15 @@ static void policy_func_show(struct seq_file *m, enum ima_hooks func)
27421 + seq_printf(m, "func=%d ", func);
27422 + }
27423 +
27424 ++static void ima_show_rule_opt_list(struct seq_file *m,
27425 ++ const struct ima_rule_opt_list *opt_list)
27426 ++{
27427 ++ size_t i;
27428 ++
27429 ++ for (i = 0; i < opt_list->count; i++)
27430 ++ seq_printf(m, "%s%s", i ? "|" : "", opt_list->items[i]);
27431 ++}
27432 ++
27433 + int ima_policy_show(struct seq_file *m, void *v)
27434 + {
27435 + struct ima_rule_entry *entry = v;
27436 +@@ -1630,9 +1681,8 @@ int ima_policy_show(struct seq_file *m, void *v)
27437 + }
27438 +
27439 + if (entry->flags & IMA_KEYRINGS) {
27440 +- if (entry->keyrings != NULL)
27441 +- snprintf(tbuf, sizeof(tbuf), "%s", entry->keyrings);
27442 +- seq_printf(m, pt(Opt_keyrings), tbuf);
27443 ++ seq_puts(m, "keyrings=");
27444 ++ ima_show_rule_opt_list(m, entry->keyrings);
27445 + seq_puts(m, " ");
27446 + }
27447 +
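Editor's note: the IMA rewrite parses "keyrings=a|b|c" once, at rule-parse time, into an ima_rule_opt_list whose items all point into a single strsep()-split buffer, which is why only items[0] is ever freed. A compilable userspace version of that parse and its ownership rule; the function names are hypothetical and strsep()/strdup() are assumed available (glibc with _GNU_SOURCE).

	#define _GNU_SOURCE
	#include <stdlib.h>
	#include <string.h>

	struct opt_list {
		size_t count;
		char *items[];
	};

	/* Split 'src' on '|' into one buffer plus an index array. */
	static struct opt_list *opt_list_parse(const char *src)
	{
		char *copy = strdup(src), *cur, *next;
		struct opt_list *ol;
		size_t count = 0, i;

		if (!copy)
			return NULL;
		for (next = copy; (cur = strsep(&next, "|")) != NULL; ) {
			if (!*cur) {            /* reject empty list items */
				free(copy);
				return NULL;
			}
			count++;
		}

		ol = malloc(sizeof(*ol) + count * sizeof(char *));
		if (!ol) {
			free(copy);
			return NULL;
		}
		/* strsep() left NUL-terminated strings back to back */
		for (i = 0, cur = copy; i < count; i++) {
			ol->items[i] = cur;
			cur = strchr(cur, '\0') + 1;
		}
		ol->count = count;
		return ol;                      /* items[0] owns the buffer */
	}

	static void opt_list_free(struct opt_list *ol)
	{
		if (!ol)
			return;
		if (ol->count)
			free(ol->items[0]);     /* frees the split buffer */
		free(ol);
	}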
27448 +diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
27449 +index c8b9c0b315d8f..250a92b187265 100644
27450 +--- a/sound/core/seq/oss/seq_oss.c
27451 ++++ b/sound/core/seq/oss/seq_oss.c
27452 +@@ -174,9 +174,12 @@ odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
27453 + if (snd_BUG_ON(!dp))
27454 + return -ENXIO;
27455 +
27456 +- mutex_lock(&register_mutex);
27457 ++ if (cmd != SNDCTL_SEQ_SYNC &&
27458 ++ mutex_lock_interruptible(&register_mutex))
27459 ++ return -ERESTARTSYS;
27460 + rc = snd_seq_oss_ioctl(dp, cmd, arg);
27461 +- mutex_unlock(&register_mutex);
27462 ++ if (cmd != SNDCTL_SEQ_SYNC)
27463 ++ mutex_unlock(&register_mutex);
27464 + return rc;
27465 + }
27466 +
27467 +diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c
27468 +index 45b740f44c459..c362eb38ab906 100644
27469 +--- a/sound/firewire/bebob/bebob_hwdep.c
27470 ++++ b/sound/firewire/bebob/bebob_hwdep.c
27471 +@@ -36,12 +36,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
27472 + }
27473 +
27474 + memset(&event, 0, sizeof(event));
27475 ++ count = min_t(long, count, sizeof(event.lock_status));
27476 + if (bebob->dev_lock_changed) {
27477 + event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
27478 + event.lock_status.status = (bebob->dev_lock_count > 0);
27479 + bebob->dev_lock_changed = false;
27480 +-
27481 +- count = min_t(long, count, sizeof(event.lock_status));
27482 + }
27483 +
27484 + spin_unlock_irq(&bebob->lock);
27485 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
27486 +index 36a9dbc33aa01..476a8b871daa1 100644
27487 +--- a/sound/pci/hda/hda_intel.c
27488 ++++ b/sound/pci/hda/hda_intel.c
27489 +@@ -1001,12 +1001,14 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
27490 + azx_init_pci(chip);
27491 + hda_intel_init_chip(chip, true);
27492 +
27493 +- if (status && from_rt) {
27494 +- list_for_each_codec(codec, &chip->bus)
27495 +- if (!codec->relaxed_resume &&
27496 +- (status & (1 << codec->addr)))
27497 +- schedule_delayed_work(&codec->jackpoll_work,
27498 +- codec->jackpoll_interval);
27499 ++ if (from_rt) {
27500 ++ list_for_each_codec(codec, &chip->bus) {
27501 ++ if (codec->relaxed_resume)
27502 ++ continue;
27503 ++
27504 ++ if (codec->forced_resume || (status & (1 << codec->addr)))
27505 ++ pm_request_resume(hda_codec_dev(codec));
27506 ++ }
27507 + }
27508 +
27509 + /* power down again for link-controlled chips */
27510 +diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
27511 +index 02cc682caa55a..588059428d8f5 100644
27512 +--- a/sound/pci/hda/hda_jack.c
27513 ++++ b/sound/pci/hda/hda_jack.c
27514 +@@ -275,6 +275,23 @@ int snd_hda_jack_detect_state_mst(struct hda_codec *codec,
27515 + }
27516 + EXPORT_SYMBOL_GPL(snd_hda_jack_detect_state_mst);
27517 +
27518 ++static struct hda_jack_callback *
27519 ++find_callback_from_list(struct hda_jack_tbl *jack,
27520 ++ hda_jack_callback_fn func)
27521 ++{
27522 ++ struct hda_jack_callback *cb;
27523 ++
27524 ++ if (!func)
27525 ++ return NULL;
27526 ++
27527 ++ for (cb = jack->callback; cb; cb = cb->next) {
27528 ++ if (cb->func == func)
27529 ++ return cb;
27530 ++ }
27531 ++
27532 ++ return NULL;
27533 ++}
27534 ++
27535 + /**
27536 + * snd_hda_jack_detect_enable_mst - enable the jack-detection
27537 + * @codec: the HDA codec
27538 +@@ -297,7 +314,10 @@ snd_hda_jack_detect_enable_callback_mst(struct hda_codec *codec, hda_nid_t nid,
27539 + jack = snd_hda_jack_tbl_new(codec, nid, dev_id);
27540 + if (!jack)
27541 + return ERR_PTR(-ENOMEM);
27542 +- if (func) {
27543 ++
27544 ++ callback = find_callback_from_list(jack, func);
27545 ++
27546 ++ if (func && !callback) {
27547 + callback = kzalloc(sizeof(*callback), GFP_KERNEL);
27548 + if (!callback)
27549 + return ERR_PTR(-ENOMEM);
27550 +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
27551 +index b7dbf2e7f77af..a3eecdf9185e8 100644
27552 +--- a/sound/pci/hda/patch_ca0132.c
27553 ++++ b/sound/pci/hda/patch_ca0132.c
27554 +@@ -1065,6 +1065,7 @@ enum {
27555 + QUIRK_R3DI,
27556 + QUIRK_R3D,
27557 + QUIRK_AE5,
27558 ++ QUIRK_AE7,
27559 + };
27560 +
27561 + #ifdef CONFIG_PCI
27562 +@@ -1184,6 +1185,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
27563 + SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
27564 + SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
27565 + SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
27566 ++ SND_PCI_QUIRK(0x1102, 0x0081, "Sound Blaster AE-7", QUIRK_AE7),
27567 + {}
27568 + };
27569 +
27570 +@@ -4675,6 +4677,15 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
27571 + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
27572 + tmp = FLOAT_THREE;
27573 + break;
27574 ++ case QUIRK_AE7:
27575 ++ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
27576 ++ tmp = FLOAT_THREE;
27577 ++ chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
27578 ++ SR_96_000);
27579 ++ chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
27580 ++ SR_96_000);
27581 ++ dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
27582 ++ break;
27583 + default:
27584 + tmp = FLOAT_ONE;
27585 + break;
27586 +@@ -4720,6 +4731,14 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
27587 + case QUIRK_AE5:
27588 + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
27589 + break;
27590 ++ case QUIRK_AE7:
27591 ++ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x3f);
27592 ++ chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
27593 ++ SR_96_000);
27594 ++ chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
27595 ++ SR_96_000);
27596 ++ dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
27597 ++ break;
27598 + default:
27599 + break;
27600 + }
27601 +@@ -4729,7 +4748,10 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
27602 + if (ca0132_quirk(spec) == QUIRK_R3DI)
27603 + chipio_set_conn_rate(codec, 0x0F, SR_96_000);
27604 +
27605 +- tmp = FLOAT_ZERO;
27606 ++ if (ca0132_quirk(spec) == QUIRK_AE7)
27607 ++ tmp = FLOAT_THREE;
27608 ++ else
27609 ++ tmp = FLOAT_ZERO;
27610 + dspio_set_uint_param(codec, 0x80, 0x00, tmp);
27611 +
27612 + switch (ca0132_quirk(spec)) {
27613 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
27614 +index 4020500880905..56a8643adbdcd 100644
27615 +--- a/sound/pci/hda/patch_hdmi.c
27616 ++++ b/sound/pci/hda/patch_hdmi.c
27617 +@@ -2046,22 +2046,25 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
27618 + int pinctl;
27619 + int err = 0;
27620 +
27621 ++ mutex_lock(&spec->pcm_lock);
27622 + if (hinfo->nid) {
27623 + pcm_idx = hinfo_to_pcm_index(codec, hinfo);
27624 +- if (snd_BUG_ON(pcm_idx < 0))
27625 +- return -EINVAL;
27626 ++ if (snd_BUG_ON(pcm_idx < 0)) {
27627 ++ err = -EINVAL;
27628 ++ goto unlock;
27629 ++ }
27630 + cvt_idx = cvt_nid_to_cvt_index(codec, hinfo->nid);
27631 +- if (snd_BUG_ON(cvt_idx < 0))
27632 +- return -EINVAL;
27633 ++ if (snd_BUG_ON(cvt_idx < 0)) {
27634 ++ err = -EINVAL;
27635 ++ goto unlock;
27636 ++ }
27637 + per_cvt = get_cvt(spec, cvt_idx);
27638 +-
27639 + snd_BUG_ON(!per_cvt->assigned);
27640 + per_cvt->assigned = 0;
27641 + hinfo->nid = 0;
27642 +
27643 + azx_stream(get_azx_dev(substream))->stripe = 0;
27644 +
27645 +- mutex_lock(&spec->pcm_lock);
27646 + snd_hda_spdif_ctls_unassign(codec, pcm_idx);
27647 + clear_bit(pcm_idx, &spec->pcm_in_use);
27648 + pin_idx = hinfo_to_pin_index(codec, hinfo);
27649 +@@ -2091,10 +2094,11 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
27650 + per_pin->setup = false;
27651 + per_pin->channels = 0;
27652 + mutex_unlock(&per_pin->lock);
27653 +- unlock:
27654 +- mutex_unlock(&spec->pcm_lock);
27655 + }
27656 +
27657 ++unlock:
27658 ++ mutex_unlock(&spec->pcm_lock);
27659 ++
27660 + return err;
27661 + }
27662 +
27663 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
27664 +index d4f17b4658927..f2398721ac1ef 100644
27665 +--- a/sound/pci/hda/patch_realtek.c
27666 ++++ b/sound/pci/hda/patch_realtek.c
27667 +@@ -1150,6 +1150,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
27668 + codec->single_adc_amp = 1;
27669 + /* FIXME: do we need this for all Realtek codec models? */
27670 + codec->spdif_status_reset = 1;
27671 ++ codec->forced_resume = 1;
27672 + codec->patch_ops = alc_patch_ops;
27673 +
27674 + err = alc_codec_rename_from_preset(codec);
27675 +@@ -1929,6 +1930,8 @@ enum {
27676 + ALC1220_FIXUP_CLEVO_P950,
27677 + ALC1220_FIXUP_CLEVO_PB51ED,
27678 + ALC1220_FIXUP_CLEVO_PB51ED_PINS,
27679 ++ ALC887_FIXUP_ASUS_AUDIO,
27680 ++ ALC887_FIXUP_ASUS_HMIC,
27681 + };
27682 +
27683 + static void alc889_fixup_coef(struct hda_codec *codec,
27684 +@@ -2141,6 +2144,31 @@ static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
27685 + alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
27686 + }
27687 +
27688 ++static void alc887_asus_hp_automute_hook(struct hda_codec *codec,
27689 ++ struct hda_jack_callback *jack)
27690 ++{
27691 ++ struct alc_spec *spec = codec->spec;
27692 ++ unsigned int vref;
27693 ++
27694 ++ snd_hda_gen_hp_automute(codec, jack);
27695 ++
27696 ++ if (spec->gen.hp_jack_present)
27697 ++ vref = AC_PINCTL_VREF_80;
27698 ++ else
27699 ++ vref = AC_PINCTL_VREF_HIZ;
27700 ++ snd_hda_set_pin_ctl(codec, 0x19, PIN_HP | vref);
27701 ++}
27702 ++
27703 ++static void alc887_fixup_asus_jack(struct hda_codec *codec,
27704 ++ const struct hda_fixup *fix, int action)
27705 ++{
27706 ++ struct alc_spec *spec = codec->spec;
27707 ++ if (action != HDA_FIXUP_ACT_PROBE)
27708 ++ return;
27709 ++ snd_hda_set_pin_ctl_cache(codec, 0x1b, PIN_HP);
27710 ++ spec->gen.hp_automute_hook = alc887_asus_hp_automute_hook;
27711 ++}
27712 ++
27713 + static const struct hda_fixup alc882_fixups[] = {
27714 + [ALC882_FIXUP_ABIT_AW9D_MAX] = {
27715 + .type = HDA_FIXUP_PINS,
27716 +@@ -2398,6 +2426,20 @@ static const struct hda_fixup alc882_fixups[] = {
27717 + .chained = true,
27718 + .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
27719 + },
27720 ++ [ALC887_FIXUP_ASUS_AUDIO] = {
27721 ++ .type = HDA_FIXUP_PINS,
27722 ++ .v.pins = (const struct hda_pintbl[]) {
27723 ++ { 0x15, 0x02a14150 }, /* use as headset mic, without its own jack detect */
27724 ++ { 0x19, 0x22219420 },
27725 ++ {}
27726 ++ },
27727 ++ },
27728 ++ [ALC887_FIXUP_ASUS_HMIC] = {
27729 ++ .type = HDA_FIXUP_FUNC,
27730 ++ .v.func = alc887_fixup_asus_jack,
27731 ++ .chained = true,
27732 ++ .chain_id = ALC887_FIXUP_ASUS_AUDIO,
27733 ++ },
27734 + };
27735 +
27736 + static const struct snd_pci_quirk alc882_fixup_tbl[] = {
27737 +@@ -2431,6 +2473,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
27738 + SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
27739 + SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
27740 + SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
27741 ++ SND_PCI_QUIRK(0x1043, 0x2390, "Asus D700SA", ALC887_FIXUP_ASUS_HMIC),
27742 + SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
27743 + SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
27744 + SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
27745 +@@ -6233,6 +6276,7 @@ enum {
27746 + ALC269_FIXUP_LEMOTE_A190X,
27747 + ALC256_FIXUP_INTEL_NUC8_RUGGED,
27748 + ALC255_FIXUP_XIAOMI_HEADSET_MIC,
27749 ++ ALC274_FIXUP_HP_MIC,
27750 + };
27751 +
27752 + static const struct hda_fixup alc269_fixups[] = {
27753 +@@ -7612,6 +7656,14 @@ static const struct hda_fixup alc269_fixups[] = {
27754 + .chained = true,
27755 + .chain_id = ALC289_FIXUP_ASUS_GA401
27756 + },
27757 ++ [ALC274_FIXUP_HP_MIC] = {
27758 ++ .type = HDA_FIXUP_VERBS,
27759 ++ .v.verbs = (const struct hda_verb[]) {
27760 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
27761 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
27762 ++ { }
27763 ++ },
27764 ++ },
27765 + };
27766 +
27767 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
27768 +@@ -7763,6 +7815,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
27769 + SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
27770 + SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
27771 + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
27772 ++ SND_PCI_QUIRK(0x103c, 0x874e, "HP", ALC274_FIXUP_HP_MIC),
27773 ++ SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
27774 + SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
27775 + SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
27776 + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
27777 +@@ -8088,6 +8142,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
27778 + {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
27779 + {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
27780 + {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
27781 ++ {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
27782 + {}
27783 + };
27784 + #define ALC225_STANDARD_PINS \
27785 +@@ -9622,6 +9677,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
27786 + SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
27787 + SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
27788 + SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
27789 ++ SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
27790 + SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
27791 + SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
27792 + SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
27793 +diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
27794 +index 946a70210f492..601ea45d3ea66 100644
27795 +--- a/sound/soc/codecs/Kconfig
27796 ++++ b/sound/soc/codecs/Kconfig
27797 +@@ -540,6 +540,7 @@ config SND_SOC_CQ0093VC
27798 + config SND_SOC_CROS_EC_CODEC
27799 + tristate "codec driver for ChromeOS EC"
27800 + depends on CROS_EC
27801 ++ select CRYPTO
27802 + select CRYPTO_LIB_SHA256
27803 + help
27804 + If you say yes here you will get support for the
27805 +diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
27806 +index c098518343959..3226c6d4493eb 100644
27807 +--- a/sound/soc/codecs/tas2770.c
27808 ++++ b/sound/soc/codecs/tas2770.c
27809 +@@ -16,7 +16,6 @@
27810 + #include <linux/i2c.h>
27811 + #include <linux/gpio.h>
27812 + #include <linux/gpio/consumer.h>
27813 +-#include <linux/pm_runtime.h>
27814 + #include <linux/regulator/consumer.h>
27815 + #include <linux/firmware.h>
27816 + #include <linux/regmap.h>
27817 +@@ -57,7 +56,12 @@ static int tas2770_set_bias_level(struct snd_soc_component *component,
27818 + TAS2770_PWR_CTRL_MASK,
27819 + TAS2770_PWR_CTRL_ACTIVE);
27820 + break;
27821 +-
27822 ++ case SND_SOC_BIAS_STANDBY:
27823 ++ case SND_SOC_BIAS_PREPARE:
27824 ++ snd_soc_component_update_bits(component,
27825 ++ TAS2770_PWR_CTRL,
27826 ++ TAS2770_PWR_CTRL_MASK, TAS2770_PWR_CTRL_MUTE);
27827 ++ break;
27828 + case SND_SOC_BIAS_OFF:
27829 + snd_soc_component_update_bits(component,
27830 + TAS2770_PWR_CTRL,
27831 +@@ -135,23 +139,18 @@ static int tas2770_dac_event(struct snd_soc_dapm_widget *w,
27832 + TAS2770_PWR_CTRL,
27833 + TAS2770_PWR_CTRL_MASK,
27834 + TAS2770_PWR_CTRL_MUTE);
27835 +- if (ret)
27836 +- goto end;
27837 + break;
27838 + case SND_SOC_DAPM_PRE_PMD:
27839 + ret = snd_soc_component_update_bits(component,
27840 + TAS2770_PWR_CTRL,
27841 + TAS2770_PWR_CTRL_MASK,
27842 + TAS2770_PWR_CTRL_SHUTDOWN);
27843 +- if (ret)
27844 +- goto end;
27845 + break;
27846 + default:
27847 + dev_err(tas2770->dev, "Not supported event\n");
27848 + return -EINVAL;
27849 + }
27850 +
27851 +-end:
27852 + if (ret < 0)
27853 + return ret;
27854 +
27855 +@@ -243,6 +242,9 @@ static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
27856 + return -EINVAL;
27857 + }
27858 +
27859 ++ if (ret < 0)
27860 ++ return ret;
27861 ++
27862 + tas2770->channel_size = bitwidth;
27863 +
27864 + ret = snd_soc_component_update_bits(component,
27865 +@@ -251,16 +253,15 @@ static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
27866 + TAS2770_TDM_CFG_REG5_50_MASK,
27867 + TAS2770_TDM_CFG_REG5_VSNS_ENABLE |
27868 + tas2770->v_sense_slot);
27869 +- if (ret)
27870 +- goto end;
27871 ++ if (ret < 0)
27872 ++ return ret;
27873 ++
27874 + ret = snd_soc_component_update_bits(component,
27875 + TAS2770_TDM_CFG_REG6,
27876 + TAS2770_TDM_CFG_REG6_ISNS_MASK |
27877 + TAS2770_TDM_CFG_REG6_50_MASK,
27878 + TAS2770_TDM_CFG_REG6_ISNS_ENABLE |
27879 + tas2770->i_sense_slot);
27880 +-
27881 +-end:
27882 + if (ret < 0)
27883 + return ret;
27884 +
27885 +@@ -278,36 +279,35 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
27886 + TAS2770_TDM_CFG_REG0,
27887 + TAS2770_TDM_CFG_REG0_SMP_MASK,
27888 + TAS2770_TDM_CFG_REG0_SMP_48KHZ);
27889 +- if (ret)
27890 +- goto end;
27891 ++ if (ret < 0)
27892 ++ return ret;
27893 ++
27894 + ret = snd_soc_component_update_bits(component,
27895 + TAS2770_TDM_CFG_REG0,
27896 + TAS2770_TDM_CFG_REG0_31_MASK,
27897 + TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
27898 +- if (ret)
27899 +- goto end;
27900 + break;
27901 + case 44100:
27902 + ret = snd_soc_component_update_bits(component,
27903 + TAS2770_TDM_CFG_REG0,
27904 + TAS2770_TDM_CFG_REG0_SMP_MASK,
27905 + TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
27906 +- if (ret)
27907 +- goto end;
27908 ++ if (ret < 0)
27909 ++ return ret;
27910 ++
27911 + ret = snd_soc_component_update_bits(component,
27912 + TAS2770_TDM_CFG_REG0,
27913 + TAS2770_TDM_CFG_REG0_31_MASK,
27914 + TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
27915 +- if (ret)
27916 +- goto end;
27917 + break;
27918 + case 96000:
27919 + ret = snd_soc_component_update_bits(component,
27920 + TAS2770_TDM_CFG_REG0,
27921 + TAS2770_TDM_CFG_REG0_SMP_MASK,
27922 + TAS2770_TDM_CFG_REG0_SMP_48KHZ);
27923 +- if (ret)
27924 +- goto end;
27925 ++ if (ret < 0)
27926 ++ return ret;
27927 ++
27928 + ret = snd_soc_component_update_bits(component,
27929 + TAS2770_TDM_CFG_REG0,
27930 + TAS2770_TDM_CFG_REG0_31_MASK,
27931 +@@ -318,8 +318,9 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
27932 + TAS2770_TDM_CFG_REG0,
27933 + TAS2770_TDM_CFG_REG0_SMP_MASK,
27934 + TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
27935 +- if (ret)
27936 +- goto end;
27937 ++ if (ret < 0)
27938 ++ return ret;
27939 ++
27940 + ret = snd_soc_component_update_bits(component,
27941 + TAS2770_TDM_CFG_REG0,
27942 + TAS2770_TDM_CFG_REG0_31_MASK,
27943 +@@ -330,22 +331,22 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
27944 + TAS2770_TDM_CFG_REG0,
27945 + TAS2770_TDM_CFG_REG0_SMP_MASK,
27946 + TAS2770_TDM_CFG_REG0_SMP_48KHZ);
27947 +- if (ret)
27948 +- goto end;
27949 ++ if (ret < 0)
27950 ++ return ret;
27951 ++
27952 + ret = snd_soc_component_update_bits(component,
27953 + TAS2770_TDM_CFG_REG0,
27954 + TAS2770_TDM_CFG_REG0_31_MASK,
27955 + TAS2770_TDM_CFG_REG0_31_176_4_192KHZ);
27956 +- if (ret)
27957 +- goto end;
27958 + break;
27959 + case 17640:
27960 + ret = snd_soc_component_update_bits(component,
27961 + TAS2770_TDM_CFG_REG0,
27962 + TAS2770_TDM_CFG_REG0_SMP_MASK,
27963 + TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
27964 +- if (ret)
27965 +- goto end;
27966 ++ if (ret < 0)
27967 ++ return ret;
27968 ++
27969 + ret = snd_soc_component_update_bits(component,
27970 + TAS2770_TDM_CFG_REG0,
27971 + TAS2770_TDM_CFG_REG0_31_MASK,
27972 +@@ -355,7 +356,6 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
27973 + ret = -EINVAL;
27974 + }
27975 +
27976 +-end:
27977 + if (ret < 0)
27978 + return ret;
27979 +
27980 +@@ -575,6 +575,8 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
27981 +
27982 + tas2770->component = component;
27983 +
27984 ++ tas2770_reset(tas2770);
27985 ++
27986 + return 0;
27987 + }
27988 +
27989 +@@ -701,29 +703,28 @@ static int tas2770_parse_dt(struct device *dev, struct tas2770_priv *tas2770)
27990 + rc = fwnode_property_read_u32(dev->fwnode, "ti,asi-format",
27991 + &tas2770->asi_format);
27992 + if (rc) {
27993 +- dev_err(tas2770->dev, "Looking up %s property failed %d\n",
27994 +- "ti,asi-format", rc);
27995 +- goto end;
27996 ++ dev_info(tas2770->dev, "Property %s is missing, setting default slot\n",
27997 ++ "ti,asi-format");
27998 ++ tas2770->asi_format = 0;
27999 + }
28000 +
28001 + rc = fwnode_property_read_u32(dev->fwnode, "ti,imon-slot-no",
28002 + &tas2770->i_sense_slot);
28003 + if (rc) {
28004 +- dev_err(tas2770->dev, "Looking up %s property failed %d\n",
28005 +- "ti,imon-slot-no", rc);
28006 +- goto end;
28007 ++ dev_info(tas2770->dev, "Property %s is missing, setting default slot\n",
28008 ++ "ti,imon-slot-no");
28009 ++ tas2770->i_sense_slot = 0;
28010 + }
28011 +
28012 + rc = fwnode_property_read_u32(dev->fwnode, "ti,vmon-slot-no",
28013 + &tas2770->v_sense_slot);
28014 + if (rc) {
28015 +- dev_err(tas2770->dev, "Looking up %s property failed %d\n",
28016 +- "ti,vmon-slot-no", rc);
28017 +- goto end;
28018 ++ dev_info(tas2770->dev, "Property %s is missing, setting default slot\n",
28019 ++ "ti,vmon-slot-no");
28020 ++ tas2770->v_sense_slot = 2;
28021 + }
28022 +
28023 +-end:
28024 +- return rc;
28025 ++ return 0;
28026 + }
28027 +
28028 + static int tas2770_i2c_probe(struct i2c_client *client,
28029 +@@ -771,8 +772,6 @@ static int tas2770_i2c_probe(struct i2c_client *client,
28030 + tas2770->channel_size = 0;
28031 + tas2770->slot_width = 0;
28032 +
28033 +- tas2770_reset(tas2770);
28034 +-
28035 + result = tas2770_register_codec(tas2770);
28036 + if (result)
28037 + dev_err(tas2770->dev, "Register codec failed.\n");
28038 +@@ -781,13 +780,6 @@ end:
28039 + return result;
28040 + }
28041 +
28042 +-static int tas2770_i2c_remove(struct i2c_client *client)
28043 +-{
28044 +- pm_runtime_disable(&client->dev);
28045 +- return 0;
28046 +-}
28047 +-
28048 +-
28049 + static const struct i2c_device_id tas2770_i2c_id[] = {
28050 + { "tas2770", 0},
28051 + { }
28052 +@@ -808,7 +800,6 @@ static struct i2c_driver tas2770_i2c_driver = {
28053 + .of_match_table = of_match_ptr(tas2770_of_match),
28054 + },
28055 + .probe = tas2770_i2c_probe,
28056 +- .remove = tas2770_i2c_remove,
28057 + .id_table = tas2770_i2c_id,
28058 + };
28059 +
28060 +diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
28061 +index 8efe20605f9be..c7c782d279d0d 100644
28062 +--- a/sound/soc/codecs/tlv320adcx140.c
28063 ++++ b/sound/soc/codecs/tlv320adcx140.c
28064 +@@ -161,7 +161,7 @@ static const struct regmap_config adcx140_i2c_regmap = {
28065 + };
28066 +
28067 + /* Digital Volume control. From -100 to 27 dB in 0.5 dB steps */
28068 +-static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10000, 50, 0);
28069 ++static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10050, 50, 0);
28070 +
28071 + /* ADC gain. From 0 to 42 dB in 1 dB steps */
28072 + static DECLARE_TLV_DB_SCALE(adc_tlv, 0, 100, 0);
28073 +diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
28074 +index 467802875c133..2e2d8e463655a 100644
28075 +--- a/sound/soc/codecs/tlv320aic32x4.c
28076 ++++ b/sound/soc/codecs/tlv320aic32x4.c
28077 +@@ -665,7 +665,7 @@ static int aic32x4_set_processing_blocks(struct snd_soc_component *component,
28078 + }
28079 +
28080 + static int aic32x4_setup_clocks(struct snd_soc_component *component,
28081 +- unsigned int sample_rate)
28082 ++ unsigned int sample_rate, unsigned int channels)
28083 + {
28084 + u8 aosr;
28085 + u16 dosr;
28086 +@@ -753,7 +753,9 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
28087 + dosr);
28088 +
28089 + clk_set_rate(clocks[5].clk,
28090 +- sample_rate * 32);
28091 ++ sample_rate * 32 *
28092 ++ channels);
28093 ++
28094 + return 0;
28095 + }
28096 + }
28097 +@@ -775,7 +777,8 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
28098 + u8 iface1_reg = 0;
28099 + u8 dacsetup_reg = 0;
28100 +
28101 +- aic32x4_setup_clocks(component, params_rate(params));
28102 ++ aic32x4_setup_clocks(component, params_rate(params),
28103 ++ params_channels(params));
28104 +
28105 + switch (params_width(params)) {
28106 + case 16:
28107 +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
28108 +index 410cca57da52d..344bd2c33bea1 100644
28109 +--- a/sound/soc/codecs/wm_adsp.c
28110 ++++ b/sound/soc/codecs/wm_adsp.c
28111 +@@ -2049,6 +2049,7 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
28112 + {
28113 + struct wm_coeff_ctl *ctl;
28114 + struct snd_kcontrol *kcontrol;
28115 ++ char ctl_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
28116 + int ret;
28117 +
28118 + ctl = wm_adsp_get_ctl(dsp, name, type, alg);
28119 +@@ -2059,8 +2060,25 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
28120 + return -EINVAL;
28121 +
28122 + ret = wm_coeff_write_ctrl(ctl, buf, len);
28123 ++ if (ret)
28124 ++ return ret;
28125 ++
28126 ++ if (ctl->flags & WMFW_CTL_FLAG_SYS)
28127 ++ return 0;
28128 ++
28129 ++ if (dsp->component->name_prefix)
28130 ++ snprintf(ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s",
28131 ++ dsp->component->name_prefix, ctl->name);
28132 ++ else
28133 ++ snprintf(ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s",
28134 ++ ctl->name);
28135 ++
28136 ++ kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl_name);
28137 ++ if (!kcontrol) {
28138 ++ adsp_err(dsp, "Can't find kcontrol %s\n", ctl_name);
28139 ++ return -EINVAL;
28140 ++ }
28141 +
28142 +- kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl->name);
28143 + snd_ctl_notify(dsp->component->card->snd_card,
28144 + SNDRV_CTL_EVENT_MASK_VALUE, &kcontrol->id);
28145 +
28146 +diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
28147 +index cdff739924e2e..2ea354dd5434f 100644
28148 +--- a/sound/soc/fsl/fsl_sai.c
28149 ++++ b/sound/soc/fsl/fsl_sai.c
28150 +@@ -694,7 +694,7 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
28151 + return 0;
28152 + }
28153 +
28154 +-static struct snd_soc_dai_driver fsl_sai_dai = {
28155 ++static struct snd_soc_dai_driver fsl_sai_dai_template = {
28156 + .probe = fsl_sai_dai_probe,
28157 + .playback = {
28158 + .stream_name = "CPU-Playback",
28159 +@@ -966,12 +966,15 @@ static int fsl_sai_probe(struct platform_device *pdev)
28160 + return ret;
28161 + }
28162 +
28163 ++ memcpy(&sai->cpu_dai_drv, &fsl_sai_dai_template,
28164 ++ sizeof(fsl_sai_dai_template));
28165 ++
28166 + /* Sync Tx with Rx as default by following old DT binding */
28167 + sai->synchronous[RX] = true;
28168 + sai->synchronous[TX] = false;
28169 +- fsl_sai_dai.symmetric_rates = 1;
28170 +- fsl_sai_dai.symmetric_channels = 1;
28171 +- fsl_sai_dai.symmetric_samplebits = 1;
28172 ++ sai->cpu_dai_drv.symmetric_rates = 1;
28173 ++ sai->cpu_dai_drv.symmetric_channels = 1;
28174 ++ sai->cpu_dai_drv.symmetric_samplebits = 1;
28175 +
28176 + if (of_find_property(np, "fsl,sai-synchronous-rx", NULL) &&
28177 + of_find_property(np, "fsl,sai-asynchronous", NULL)) {
28178 +@@ -988,9 +991,9 @@ static int fsl_sai_probe(struct platform_device *pdev)
28179 + /* Discard all settings for asynchronous mode */
28180 + sai->synchronous[RX] = false;
28181 + sai->synchronous[TX] = false;
28182 +- fsl_sai_dai.symmetric_rates = 0;
28183 +- fsl_sai_dai.symmetric_channels = 0;
28184 +- fsl_sai_dai.symmetric_samplebits = 0;
28185 ++ sai->cpu_dai_drv.symmetric_rates = 0;
28186 ++ sai->cpu_dai_drv.symmetric_channels = 0;
28187 ++ sai->cpu_dai_drv.symmetric_samplebits = 0;
28188 + }
28189 +
28190 + if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) &&
28191 +@@ -1020,7 +1023,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
28192 + regcache_cache_only(sai->regmap, true);
28193 +
28194 + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
28195 +- &fsl_sai_dai, 1);
28196 ++ &sai->cpu_dai_drv, 1);
28197 + if (ret)
28198 + goto err_pm_disable;
28199 +
28200 +diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
28201 +index 6aba7d28f5f34..677ecfc1ec68f 100644
28202 +--- a/sound/soc/fsl/fsl_sai.h
28203 ++++ b/sound/soc/fsl/fsl_sai.h
28204 +@@ -180,6 +180,7 @@ struct fsl_sai {
28205 + unsigned int bclk_ratio;
28206 +
28207 + const struct fsl_sai_soc_data *soc_data;
28208 ++ struct snd_soc_dai_driver cpu_dai_drv;
28209 + struct snd_dmaengine_dai_dma_data dma_params_rx;
28210 + struct snd_dmaengine_dai_dma_data dma_params_tx;
28211 + };
28212 +diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c
28213 +index 15a27a2cd0cae..fad1eb6253d53 100644
28214 +--- a/sound/soc/fsl/imx-es8328.c
28215 ++++ b/sound/soc/fsl/imx-es8328.c
28216 +@@ -145,13 +145,13 @@ static int imx_es8328_probe(struct platform_device *pdev)
28217 + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
28218 + if (!data) {
28219 + ret = -ENOMEM;
28220 +- goto fail;
28221 ++ goto put_device;
28222 + }
28223 +
28224 + comp = devm_kzalloc(dev, 3 * sizeof(*comp), GFP_KERNEL);
28225 + if (!comp) {
28226 + ret = -ENOMEM;
28227 +- goto fail;
28228 ++ goto put_device;
28229 + }
28230 +
28231 + data->dev = dev;
28232 +@@ -182,12 +182,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
28233 + ret = snd_soc_of_parse_card_name(&data->card, "model");
28234 + if (ret) {
28235 + dev_err(dev, "Unable to parse card name\n");
28236 +- goto fail;
28237 ++ goto put_device;
28238 + }
28239 + ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
28240 + if (ret) {
28241 + dev_err(dev, "Unable to parse routing: %d\n", ret);
28242 +- goto fail;
28243 ++ goto put_device;
28244 + }
28245 + data->card.num_links = 1;
28246 + data->card.owner = THIS_MODULE;
28247 +@@ -196,10 +196,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
28248 + ret = snd_soc_register_card(&data->card);
28249 + if (ret) {
28250 + dev_err(dev, "Unable to register: %d\n", ret);
28251 +- goto fail;
28252 ++ goto put_device;
28253 + }
28254 +
28255 + platform_set_drvdata(pdev, data);
28256 ++put_device:
28257 ++ put_device(&ssi_pdev->dev);
28258 + fail:
28259 + of_node_put(ssi_np);
28260 + of_node_put(codec_np);
28261 +diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
28262 +index 0129d23694ed5..9a6f10ede427e 100644
28263 +--- a/sound/soc/intel/boards/sof_rt5682.c
28264 ++++ b/sound/soc/intel/boards/sof_rt5682.c
28265 +@@ -119,6 +119,19 @@ static const struct dmi_system_id sof_rt5682_quirk_table[] = {
28266 + .driver_data = (void *)(SOF_RT5682_MCLK_EN |
28267 + SOF_RT5682_SSP_CODEC(0)),
28268 + },
28269 ++ {
28270 ++ .callback = sof_rt5682_quirk_cb,
28271 ++ .matches = {
28272 ++ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Volteer"),
28273 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Terrador"),
28274 ++ },
28275 ++ .driver_data = (void *)(SOF_RT5682_MCLK_EN |
28276 ++ SOF_RT5682_SSP_CODEC(0) |
28277 ++ SOF_SPEAKER_AMP_PRESENT |
28278 ++ SOF_MAX98373_SPEAKER_AMP_PRESENT |
28279 ++ SOF_RT5682_SSP_AMP(2) |
28280 ++ SOF_RT5682_NUM_HDMIDEV(4)),
28281 ++ },
28282 + {}
28283 + };
28284 +
28285 +diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
28286 +index 06d0a4f80fc17..a6c690c5308d3 100644
28287 +--- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
28288 ++++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
28289 +@@ -673,7 +673,7 @@ static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev)
28290 + if (card == &mt8183_da7219_max98357_card) {
28291 + dai_link->be_hw_params_fixup =
28292 + mt8183_i2s_hw_params_fixup;
28293 +- dai_link->ops = &mt8183_mt6358_i2s_ops;
28294 ++ dai_link->ops = &mt8183_da7219_i2s_ops;
28295 + dai_link->cpus = i2s3_max98357a_cpus;
28296 + dai_link->num_cpus =
28297 + ARRAY_SIZE(i2s3_max98357a_cpus);
28298 +diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
28299 +index e00a4af29c13f..f25da84f175ac 100644
28300 +--- a/sound/soc/qcom/lpass-cpu.c
28301 ++++ b/sound/soc/qcom/lpass-cpu.c
28302 +@@ -209,21 +209,6 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
28303 + return 0;
28304 + }
28305 +
28306 +-static int lpass_cpu_daiops_hw_free(struct snd_pcm_substream *substream,
28307 +- struct snd_soc_dai *dai)
28308 +-{
28309 +- struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
28310 +- int ret;
28311 +-
28312 +- ret = regmap_write(drvdata->lpaif_map,
28313 +- LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id),
28314 +- 0);
28315 +- if (ret)
28316 +- dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
28317 +-
28318 +- return ret;
28319 +-}
28320 +-
28321 + static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
28322 + struct snd_soc_dai *dai)
28323 + {
28324 +@@ -304,7 +289,6 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
28325 + .startup = lpass_cpu_daiops_startup,
28326 + .shutdown = lpass_cpu_daiops_shutdown,
28327 + .hw_params = lpass_cpu_daiops_hw_params,
28328 +- .hw_free = lpass_cpu_daiops_hw_free,
28329 + .prepare = lpass_cpu_daiops_prepare,
28330 + .trigger = lpass_cpu_daiops_trigger,
28331 + };
28332 +diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
28333 +index 01179bc0e5e57..e62ac7e650785 100644
28334 +--- a/sound/soc/qcom/lpass-platform.c
28335 ++++ b/sound/soc/qcom/lpass-platform.c
28336 +@@ -61,7 +61,7 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component,
28337 + int ret, dma_ch, dir = substream->stream;
28338 + struct lpass_pcm_data *data;
28339 +
28340 +- data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
28341 ++ data = kzalloc(sizeof(*data), GFP_KERNEL);
28342 + if (!data)
28343 + return -ENOMEM;
28344 +
28345 +@@ -118,6 +118,7 @@ static int lpass_platform_pcmops_close(struct snd_soc_component *component,
28346 + if (v->free_dma_channel)
28347 + v->free_dma_channel(drvdata, data->dma_ch);
28348 +
28349 ++ kfree(data);
28350 + return 0;
28351 + }
28352 +
28353 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
28354 +index 5b60379237bff..d1e7dbb9fea36 100644
28355 +--- a/sound/soc/soc-topology.c
28356 ++++ b/sound/soc/soc-topology.c
28357 +@@ -592,6 +592,17 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
28358 + k->info = snd_soc_bytes_info_ext;
28359 + k->tlv.c = snd_soc_bytes_tlv_callback;
28360 +
28361 ++ /*
28362 ++ * When a topology-based implementation abuses the
28363 ++ * control interface and uses bytes_ext controls of
28364 ++ * more than 512 bytes, we need to disable the size
28365 ++ * checks, otherwise accesses to such controls will
28366 ++ * return an -EINVAL error and prevent the card from
28367 ++ * being configured.
28368 ++ */
28369 ++ if (IS_ENABLED(CONFIG_SND_CTL_VALIDATION) && sbe->max > 512)
28370 ++ k->access |= SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK;
28371 ++
28372 + ext_ops = tplg->bytes_ext_ops;
28373 + num_ops = tplg->bytes_ext_ops_count;
28374 + for (i = 0; i < num_ops; i++) {
28375 +diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
28376 +index 186eea105bb15..009938d45ddd9 100644
28377 +--- a/sound/soc/sof/control.c
28378 ++++ b/sound/soc/sof/control.c
28379 +@@ -298,6 +298,10 @@ int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
28380 + const struct snd_ctl_tlv __user *tlvd =
28381 + (const struct snd_ctl_tlv __user *)binary_data;
28382 +
28383 ++ /* make sure we have at least a header */
28384 ++ if (size < sizeof(struct snd_ctl_tlv))
28385 ++ return -EINVAL;
28386 ++
28387 + /*
28388 + * The beginning of bytes data contains a header from where
28389 + * the length (as bytes) is needed to know the correct copy
28390 +@@ -306,6 +310,13 @@ int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
28391 + if (copy_from_user(&header, tlvd, sizeof(const struct snd_ctl_tlv)))
28392 + return -EFAULT;
28393 +
28394 ++ /* make sure TLV info is consistent */
28395 ++ if (header.length + sizeof(struct snd_ctl_tlv) > size) {
28396 ++ dev_err_ratelimited(scomp->dev, "error: inconsistent TLV, data %d + header %zu > %d\n",
28397 ++ header.length, sizeof(struct snd_ctl_tlv), size);
28398 ++ return -EINVAL;
28399 ++ }
28400 ++
28401 + /* be->max is coming from topology */
28402 + if (header.length > be->max) {
28403 + dev_err_ratelimited(scomp->dev, "error: Bytes data size %d exceeds max %d.\n",
28404 +diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
28405 +index 63ca920c8e6e0..7152e6d1cf673 100644
28406 +--- a/sound/soc/sof/intel/hda.c
28407 ++++ b/sound/soc/sof/intel/hda.c
28408 +@@ -1179,7 +1179,13 @@ void hda_machine_select(struct snd_sof_dev *sdev)
28409 +
28410 + mach = snd_soc_acpi_find_machine(desc->machines);
28411 + if (mach) {
28412 +- sof_pdata->tplg_filename = mach->sof_tplg_filename;
28413 ++ /*
28414 ++ * If tplg file name is overridden, use it instead of
28415 ++ * the one set in mach table
28416 ++ */
28417 ++ if (!sof_pdata->tplg_filename)
28418 ++ sof_pdata->tplg_filename = mach->sof_tplg_filename;
28419 ++
28420 + sof_pdata->machine = mach;
28421 +
28422 + if (mach->link_mask) {
28423 +diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
28424 +index aa3532ba14349..f3a8140773db5 100644
28425 +--- a/sound/soc/sof/sof-pci-dev.c
28426 ++++ b/sound/soc/sof/sof-pci-dev.c
28427 +@@ -35,8 +35,28 @@ static int sof_pci_debug;
28428 + module_param_named(sof_pci_debug, sof_pci_debug, int, 0444);
28429 + MODULE_PARM_DESC(sof_pci_debug, "SOF PCI debug options (0x0 all off)");
28430 +
28431 ++static const char *sof_override_tplg_name;
28432 ++
28433 + #define SOF_PCI_DISABLE_PM_RUNTIME BIT(0)
28434 +
28435 ++static int sof_tplg_cb(const struct dmi_system_id *id)
28436 ++{
28437 ++ sof_override_tplg_name = id->driver_data;
28438 ++ return 1;
28439 ++}
28440 ++
28441 ++static const struct dmi_system_id sof_tplg_table[] = {
28442 ++ {
28443 ++ .callback = sof_tplg_cb,
28444 ++ .matches = {
28445 ++ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Volteer"),
28446 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Terrador"),
28447 ++ },
28448 ++ .driver_data = "sof-tgl-rt5682-ssp0-max98373-ssp2.tplg",
28449 ++ },
28450 ++ {}
28451 ++};
28452 ++
28453 + static const struct dmi_system_id community_key_platforms[] = {
28454 + {
28455 + .ident = "Up Squared",
28456 +@@ -347,6 +367,10 @@ static int sof_pci_probe(struct pci_dev *pci,
28457 + sof_pdata->tplg_filename_prefix =
28458 + sof_pdata->desc->default_tplg_path;
28459 +
28460 ++ dmi_check_system(sof_tplg_table);
28461 ++ if (sof_override_tplg_name)
28462 ++ sof_pdata->tplg_filename = sof_override_tplg_name;
28463 ++
28464 + #if IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
28465 + /* set callback to enable runtime_pm */
28466 + sof_pdata->sof_probe_complete = sof_pci_probe_complete;
28467 +diff --git a/sound/usb/format.c b/sound/usb/format.c
28468 +index 1b28d01d1f4cd..3bfead393aa34 100644
28469 +--- a/sound/usb/format.c
28470 ++++ b/sound/usb/format.c
28471 +@@ -406,6 +406,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
28472 + case USB_ID(0x0e41, 0x4242): /* Line6 Helix Rack */
28473 + case USB_ID(0x0e41, 0x4244): /* Line6 Helix LT */
28474 + case USB_ID(0x0e41, 0x4246): /* Line6 HX-Stomp */
28475 ++ case USB_ID(0x0e41, 0x4247): /* Line6 Pod Go */
28476 + case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
28477 + case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
28478 + case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
28479 +diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
28480 +index c1daf4d57518c..3b218fd068b0e 100644
28481 +--- a/tools/build/Makefile.feature
28482 ++++ b/tools/build/Makefile.feature
28483 +@@ -38,8 +38,6 @@ FEATURE_TESTS_BASIC := \
28484 + get_current_dir_name \
28485 + gettid \
28486 + glibc \
28487 +- gtk2 \
28488 +- gtk2-infobar \
28489 + libbfd \
28490 + libcap \
28491 + libelf \
28492 +@@ -81,6 +79,8 @@ FEATURE_TESTS_EXTRA := \
28493 + compile-32 \
28494 + compile-x32 \
28495 + cplus-demangle \
28496 ++ gtk2 \
28497 ++ gtk2-infobar \
28498 + hello \
28499 + libbabeltrace \
28500 + libbfd-liberty \
28501 +@@ -111,7 +111,6 @@ FEATURE_DISPLAY ?= \
28502 + dwarf \
28503 + dwarf_getlocations \
28504 + glibc \
28505 +- gtk2 \
28506 + libbfd \
28507 + libcap \
28508 + libelf \
28509 +diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
28510 +index d220fe9527470..8da2556cdbfac 100644
28511 +--- a/tools/build/feature/Makefile
28512 ++++ b/tools/build/feature/Makefile
28513 +@@ -90,7 +90,7 @@ __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(
28514 + ###############################
28515 +
28516 + $(OUTPUT)test-all.bin:
28517 +- $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma
28518 ++ $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd
28519 +
28520 + $(OUTPUT)test-hello.bin:
28521 + $(BUILD)
28522 +diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
28523 +index 5479e543b1947..d2623992ccd61 100644
28524 +--- a/tools/build/feature/test-all.c
28525 ++++ b/tools/build/feature/test-all.c
28526 +@@ -78,14 +78,6 @@
28527 + # include "test-libslang.c"
28528 + #undef main
28529 +
28530 +-#define main main_test_gtk2
28531 +-# include "test-gtk2.c"
28532 +-#undef main
28533 +-
28534 +-#define main main_test_gtk2_infobar
28535 +-# include "test-gtk2-infobar.c"
28536 +-#undef main
28537 +-
28538 + #define main main_test_libbfd
28539 + # include "test-libbfd.c"
28540 + #undef main
28541 +@@ -205,8 +197,6 @@ int main(int argc, char *argv[])
28542 + main_test_libelf_getshdrstrndx();
28543 + main_test_libunwind();
28544 + main_test_libslang();
28545 +- main_test_gtk2(argc, argv);
28546 +- main_test_gtk2_infobar(argc, argv);
28547 + main_test_libbfd();
28548 + main_test_backtrace();
28549 + main_test_libnuma();
28550 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
28551 +index e493d6048143f..edd6f7b7d9b82 100644
28552 +--- a/tools/lib/bpf/libbpf.c
28553 ++++ b/tools/lib/bpf/libbpf.c
28554 +@@ -3841,6 +3841,36 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
28555 + return 0;
28556 + }
28557 +
28558 ++static int init_map_slots(struct bpf_map *map)
28559 ++{
28560 ++ const struct bpf_map *targ_map;
28561 ++ unsigned int i;
28562 ++ int fd, err;
28563 ++
28564 ++ for (i = 0; i < map->init_slots_sz; i++) {
28565 ++ if (!map->init_slots[i])
28566 ++ continue;
28567 ++
28568 ++ targ_map = map->init_slots[i];
28569 ++ fd = bpf_map__fd(targ_map);
28570 ++ err = bpf_map_update_elem(map->fd, &i, &fd, 0);
28571 ++ if (err) {
28572 ++ err = -errno;
28573 ++ pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
28574 ++ map->name, i, targ_map->name,
28575 ++ fd, err);
28576 ++ return err;
28577 ++ }
28578 ++ pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
28579 ++ map->name, i, targ_map->name, fd);
28580 ++ }
28581 ++
28582 ++ zfree(&map->init_slots);
28583 ++ map->init_slots_sz = 0;
28584 ++
28585 ++ return 0;
28586 ++}
28587 ++
28588 + static int
28589 + bpf_object__create_maps(struct bpf_object *obj)
28590 + {
28591 +@@ -3883,28 +3913,11 @@ bpf_object__create_maps(struct bpf_object *obj)
28592 + }
28593 +
28594 + if (map->init_slots_sz) {
28595 +- for (j = 0; j < map->init_slots_sz; j++) {
28596 +- const struct bpf_map *targ_map;
28597 +- int fd;
28598 +-
28599 +- if (!map->init_slots[j])
28600 +- continue;
28601 +-
28602 +- targ_map = map->init_slots[j];
28603 +- fd = bpf_map__fd(targ_map);
28604 +- err = bpf_map_update_elem(map->fd, &j, &fd, 0);
28605 +- if (err) {
28606 +- err = -errno;
28607 +- pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
28608 +- map->name, j, targ_map->name,
28609 +- fd, err);
28610 +- goto err_out;
28611 +- }
28612 +- pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
28613 +- map->name, j, targ_map->name, fd);
28614 ++ err = init_map_slots(map);
28615 ++ if (err < 0) {
28616 ++ zclose(map->fd);
28617 ++ goto err_out;
28618 + }
28619 +- zfree(&map->init_slots);
28620 +- map->init_slots_sz = 0;
28621 + }
28622 +
28623 + if (map->pin_path && !map->pinned) {
28624 +@@ -5425,7 +5438,7 @@ retry_load:
28625 + free(log_buf);
28626 + goto retry_load;
28627 + }
28628 +- ret = -errno;
28629 ++ ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
28630 + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
28631 + pr_warn("load bpf program failed: %s\n", cp);
28632 + pr_perm_msg(ret);
28633 +diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
28634 +index 2208444ecb448..cfcdbd7be066e 100644
28635 +--- a/tools/lib/perf/evlist.c
28636 ++++ b/tools/lib/perf/evlist.c
28637 +@@ -45,6 +45,9 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
28638 + if (!evsel->own_cpus || evlist->has_user_cpus) {
28639 + perf_cpu_map__put(evsel->cpus);
28640 + evsel->cpus = perf_cpu_map__get(evlist->cpus);
28641 ++ } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {
28642 ++ perf_cpu_map__put(evsel->cpus);
28643 ++ evsel->cpus = perf_cpu_map__get(evlist->cpus);
28644 + } else if (evsel->cpus != evsel->own_cpus) {
28645 + perf_cpu_map__put(evsel->cpus);
28646 + evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
28647 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
28648 +index 190be4fa5c218..2d6690b308564 100644
28649 +--- a/tools/perf/Makefile.config
28650 ++++ b/tools/perf/Makefile.config
28651 +@@ -724,12 +724,14 @@ ifndef NO_SLANG
28652 + endif
28653 + endif
28654 +
28655 +-ifndef NO_GTK2
28656 ++ifdef GTK2
28657 + FLAGS_GTK2=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
28658 ++ $(call feature_check,gtk2)
28659 + ifneq ($(feature-gtk2), 1)
28660 + msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
28661 + NO_GTK2 := 1
28662 + else
28663 ++ $(call feature_check,gtk2-infobar)
28664 + ifeq ($(feature-gtk2-infobar), 1)
28665 + GTK_CFLAGS := -DHAVE_GTK_INFO_BAR_SUPPORT
28666 + endif
28667 +diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
28668 +index 6031167939ae6..515e6ed635f1a 100644
28669 +--- a/tools/perf/Makefile.perf
28670 ++++ b/tools/perf/Makefile.perf
28671 +@@ -48,7 +48,7 @@ include ../scripts/utilities.mak
28672 + #
28673 + # Define NO_SLANG if you do not want TUI support.
28674 + #
28675 +-# Define NO_GTK2 if you do not want GTK+ GUI support.
28676 ++# Define GTK2 if you want GTK+ GUI support.
28677 + #
28678 + # Define NO_DEMANGLE if you do not want C++ symbol demangling.
28679 + #
28680 +@@ -386,7 +386,7 @@ ifneq ($(OUTPUT),)
28681 + CFLAGS += -I$(OUTPUT)
28682 + endif
28683 +
28684 +-ifndef NO_GTK2
28685 ++ifdef GTK2
28686 + ALL_PROGRAMS += $(OUTPUT)libperf-gtk.so
28687 + GTK_IN := $(OUTPUT)gtk-in.o
28688 + endif
28689 +@@ -886,7 +886,7 @@ check: $(OUTPUT)common-cmds.h
28690 +
28691 + ### Installation rules
28692 +
28693 +-ifndef NO_GTK2
28694 ++ifdef GTK2
28695 + install-gtk: $(OUTPUT)libperf-gtk.so
28696 + $(call QUIET_INSTALL, 'GTK UI') \
28697 + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(libdir_SQ)'; \
28698 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
28699 +index fddc97cac9841..eef64b1411a4a 100644
28700 +--- a/tools/perf/builtin-stat.c
28701 ++++ b/tools/perf/builtin-stat.c
28702 +@@ -2063,8 +2063,10 @@ static void setup_system_wide(int forks)
28703 + struct evsel *counter;
28704 +
28705 + evlist__for_each_entry(evsel_list, counter) {
28706 +- if (!counter->core.system_wide)
28707 ++ if (!counter->core.system_wide &&
28708 ++ strcmp(counter->name, "duration_time")) {
28709 + return;
28710 ++ }
28711 + }
28712 +
28713 + if (evsel_list->core.nr_entries)
28714 +diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
28715 +index bea461b6f937b..44a75f234db17 100644
28716 +--- a/tools/perf/builtin-trace.c
28717 ++++ b/tools/perf/builtin-trace.c
28718 +@@ -1762,7 +1762,11 @@ static int trace__read_syscall_info(struct trace *trace, int id)
28719 + if (table == NULL)
28720 + return -ENOMEM;
28721 +
28722 +- memset(table + trace->sctbl->syscalls.max_id, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
28723 ++ // Need to memset from offset 0 over all (id + 1) entries if brand new
28724 ++ if (trace->syscalls.table == NULL)
28725 ++ memset(table, 0, (id + 1) * sizeof(*sc));
28726 ++ else
28727 ++ memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
28728 +
28729 + trace->syscalls.table = table;
28730 + trace->sctbl->syscalls.max_id = id;
28731 +diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
28732 +index 05cf2af9e2c27..d09ec2f030719 100644
28733 +--- a/tools/perf/builtin-version.c
28734 ++++ b/tools/perf/builtin-version.c
28735 +@@ -60,7 +60,6 @@ static void library_status(void)
28736 + STATUS(HAVE_DWARF_SUPPORT, dwarf);
28737 + STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations);
28738 + STATUS(HAVE_GLIBC_SUPPORT, glibc);
28739 +- STATUS(HAVE_GTK2_SUPPORT, gtk2);
28740 + #ifndef HAVE_SYSCALL_TABLE_SUPPORT
28741 + STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit);
28742 + #endif
28743 +diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
28744 +index 0af4e81c46e2b..3a0348caec7d6 100644
28745 +--- a/tools/perf/util/intel-pt.c
28746 ++++ b/tools/perf/util/intel-pt.c
28747 +@@ -1101,6 +1101,8 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
28748 +
28749 + if (queue->tid == -1 || pt->have_sched_switch) {
28750 + ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
28751 ++ if (ptq->tid == -1)
28752 ++ ptq->pid = -1;
28753 + thread__zput(ptq->thread);
28754 + }
28755 +
28756 +@@ -2603,10 +2605,8 @@ static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
28757 + tid = sample->tid;
28758 + }
28759 +
28760 +- if (tid == -1) {
28761 +- pr_err("context_switch event has no tid\n");
28762 +- return -EINVAL;
28763 +- }
28764 ++ if (tid == -1)
28765 ++ intel_pt_log("context_switch event has no tid\n");
28766 +
28767 + ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
28768 + if (ret <= 0)
28769 +diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
28770 +index ab5030fcfed4e..d948a7f910cfa 100644
28771 +--- a/tools/perf/util/metricgroup.c
28772 ++++ b/tools/perf/util/metricgroup.c
28773 +@@ -150,6 +150,18 @@ static void expr_ids__exit(struct expr_ids *ids)
28774 + free(ids->id[i].id);
28775 + }
28776 +
28777 ++static bool contains_event(struct evsel **metric_events, int num_events,
28778 ++ const char *event_name)
28779 ++{
28780 ++ int i;
28781 ++
28782 ++ for (i = 0; i < num_events; i++) {
28783 ++ if (!strcmp(metric_events[i]->name, event_name))
28784 ++ return true;
28785 ++ }
28786 ++ return false;
28787 ++}
28788 ++
28789 + /**
28790 + * Find a group of events in perf_evlist that correpond to those from a parsed
28791 + * metric expression. Note, as find_evsel_group is called in the same order as
28792 +@@ -180,7 +192,11 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
28793 + int i = 0, matched_events = 0, events_to_match;
28794 + const int idnum = (int)hashmap__size(&pctx->ids);
28795 +
28796 +- /* duration_time is grouped separately. */
28797 ++ /*
28798 ++ * duration_time is always grouped separately, when events are grouped
28799 ++ * (ie has_constraint is false) then ignore it in the matching loop and
28800 ++ * add it to metric_events at the end.
28801 ++ */
28802 + if (!has_constraint &&
28803 + hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr))
28804 + events_to_match = idnum - 1;
28805 +@@ -207,23 +223,20 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
28806 + sizeof(struct evsel *) * idnum);
28807 + current_leader = ev->leader;
28808 + }
28809 +- if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr)) {
28810 +- if (has_constraint) {
28811 +- /*
28812 +- * Events aren't grouped, ensure the same event
28813 +- * isn't matched from two groups.
28814 +- */
28815 +- for (i = 0; i < matched_events; i++) {
28816 +- if (!strcmp(ev->name,
28817 +- metric_events[i]->name)) {
28818 +- break;
28819 +- }
28820 +- }
28821 +- if (i != matched_events)
28822 +- continue;
28823 +- }
28824 ++ /*
28825 ++ * Check for duplicate events with the same name. For example,
28826 ++ * uncore_imc/cas_count_read/ will turn into 6 events per socket
28827 ++ * on skylakex. Only the first such event is placed in
28828 ++ * metric_events. If events aren't grouped then this also
28829 ++ * ensures that the same event in different sibling groups
28830 ++ * isn't added to metric_events twice.
28831 ++ */
28832 ++ if (contains_event(metric_events, matched_events, ev->name))
28833 ++ continue;
28834 ++ /* Does this event belong to the parse context? */
28835 ++ if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr))
28836 + metric_events[matched_events++] = ev;
28837 +- }
28838 ++
28839 + if (matched_events == events_to_match)
28840 + break;
28841 + }
28842 +@@ -239,7 +252,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
28843 + }
28844 +
28845 + if (matched_events != idnum) {
28846 +- /* Not whole match */
28847 ++ /* Not a whole match */
28848 + return NULL;
28849 + }
28850 +
28851 +@@ -247,8 +260,32 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
28852 +
28853 + for (i = 0; i < idnum; i++) {
28854 + ev = metric_events[i];
28855 +- ev->metric_leader = ev;
28856 ++ /* Don't free the used events. */
28857 + set_bit(ev->idx, evlist_used);
28858 ++ /*
28859 ++ * The metric leader points to the identically named event in
28860 ++ * metric_events.
28861 ++ */
28862 ++ ev->metric_leader = ev;
28863 ++ /*
28864 ++ * Mark two events with identical names in the same group (or
28865 ++ * globally) as being in use as uncore events may be duplicated
28866 ++ * for each pmu. Set the metric leader of such events to be the
28867 ++ * event that appears in metric_events.
28868 ++ */
28869 ++ evlist__for_each_entry_continue(perf_evlist, ev) {
28870 ++ /*
28871 ++ * If events are grouped then the search can terminate
28872 ++ * when the group is left.
28873 ++ */
28874 ++ if (!has_constraint &&
28875 ++ ev->leader != metric_events[i]->leader)
28876 ++ break;
28877 ++ if (!strcmp(metric_events[i]->name, ev->name)) {
28878 ++ set_bit(ev->idx, evlist_used);
28879 ++ ev->metric_leader = metric_events[i];
28880 ++ }
28881 ++ }
28882 + }
28883 +
28884 + return metric_events[0];
28885 +diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
28886 +index 46ff97e909c6f..1bc36a1db14f6 100755
28887 +--- a/tools/power/pm-graph/sleepgraph.py
28888 ++++ b/tools/power/pm-graph/sleepgraph.py
28889 +@@ -171,7 +171,7 @@ class SystemValues:
28890 + tracefuncs = {
28891 + 'sys_sync': {},
28892 + 'ksys_sync': {},
28893 +- '__pm_notifier_call_chain': {},
28894 ++ 'pm_notifier_call_chain_robust': {},
28895 + 'pm_prepare_console': {},
28896 + 'pm_notifier_call_chain': {},
28897 + 'freeze_processes': {},
28898 +diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
28899 +index 8995092d541ec..3b796dd5e5772 100644
28900 +--- a/tools/testing/radix-tree/idr-test.c
28901 ++++ b/tools/testing/radix-tree/idr-test.c
28902 +@@ -523,8 +523,27 @@ static void *ida_random_fn(void *arg)
28903 + return NULL;
28904 + }
28905 +
28906 ++static void *ida_leak_fn(void *arg)
28907 ++{
28908 ++ struct ida *ida = arg;
28909 ++ time_t s = time(NULL);
28910 ++ int i, ret;
28911 ++
28912 ++ rcu_register_thread();
28913 ++
28914 ++ do for (i = 0; i < 1000; i++) {
28915 ++ ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
28916 ++ if (ret >= 0)
28917 ++ ida_free(ida, 128);
28918 ++ } while (time(NULL) < s + 2);
28919 ++
28920 ++ rcu_unregister_thread();
28921 ++ return NULL;
28922 ++}
28923 ++
28924 + void ida_thread_tests(void)
28925 + {
28926 ++ DEFINE_IDA(ida);
28927 + pthread_t threads[20];
28928 + int i;
28929 +
28930 +@@ -536,6 +555,16 @@ void ida_thread_tests(void)
28931 +
28932 + while (i--)
28933 + pthread_join(threads[i], NULL);
28934 ++
28935 ++ for (i = 0; i < ARRAY_SIZE(threads); i++)
28936 ++ if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
28937 ++ perror("creating ida thread");
28938 ++ exit(1);
28939 ++ }
28940 ++
28941 ++ while (i--)
28942 ++ pthread_join(threads[i], NULL);
28943 ++ assert(ida_is_empty(&ida));
28944 + }
28945 +
28946 + void ida_tests(void)
28947 +diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
28948 +index 944ad4721c83c..da14eaac71d03 100644
28949 +--- a/tools/testing/selftests/bpf/bench.c
28950 ++++ b/tools/testing/selftests/bpf/bench.c
28951 +@@ -311,7 +311,6 @@ extern const struct bench bench_rename_kretprobe;
28952 + extern const struct bench bench_rename_rawtp;
28953 + extern const struct bench bench_rename_fentry;
28954 + extern const struct bench bench_rename_fexit;
28955 +-extern const struct bench bench_rename_fmodret;
28956 + extern const struct bench bench_trig_base;
28957 + extern const struct bench bench_trig_tp;
28958 + extern const struct bench bench_trig_rawtp;
28959 +@@ -332,7 +331,6 @@ static const struct bench *benchs[] = {
28960 + &bench_rename_rawtp,
28961 + &bench_rename_fentry,
28962 + &bench_rename_fexit,
28963 +- &bench_rename_fmodret,
28964 + &bench_trig_base,
28965 + &bench_trig_tp,
28966 + &bench_trig_rawtp,
28967 +@@ -462,4 +460,3 @@ int main(int argc, char **argv)
28968 +
28969 + return 0;
28970 + }
28971 +-
28972 +diff --git a/tools/testing/selftests/bpf/benchs/bench_rename.c b/tools/testing/selftests/bpf/benchs/bench_rename.c
28973 +index e74cff40f4fea..a967674098ada 100644
28974 +--- a/tools/testing/selftests/bpf/benchs/bench_rename.c
28975 ++++ b/tools/testing/selftests/bpf/benchs/bench_rename.c
28976 +@@ -106,12 +106,6 @@ static void setup_fexit()
28977 + attach_bpf(ctx.skel->progs.prog5);
28978 + }
28979 +
28980 +-static void setup_fmodret()
28981 +-{
28982 +- setup_ctx();
28983 +- attach_bpf(ctx.skel->progs.prog6);
28984 +-}
28985 +-
28986 + static void *consumer(void *input)
28987 + {
28988 + return NULL;
28989 +@@ -182,14 +176,3 @@ const struct bench bench_rename_fexit = {
28990 + .report_progress = hits_drops_report_progress,
28991 + .report_final = hits_drops_report_final,
28992 + };
28993 +-
28994 +-const struct bench bench_rename_fmodret = {
28995 +- .name = "rename-fmodret",
28996 +- .validate = validate,
28997 +- .setup = setup_fmodret,
28998 +- .producer_thread = producer,
28999 +- .consumer_thread = consumer,
29000 +- .measure = measure,
29001 +- .report_progress = hits_drops_report_progress,
29002 +- .report_final = hits_drops_report_final,
29003 +-};
29004 +diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
29005 +index 47fa04adc1471..21c2d265c3e8e 100644
29006 +--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c
29007 ++++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
29008 +@@ -265,7 +265,7 @@ void test_sk_assign(void)
29009 + TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false),
29010 + TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true),
29011 + };
29012 +- int server = -1;
29013 ++ __s64 server = -1;
29014 + int server_map;
29015 + int self_net;
29016 +
29017 +diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
29018 +index 5f54c6aec7f07..b25c9c45c1484 100644
29019 +--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
29020 ++++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
29021 +@@ -45,9 +45,9 @@ static int getsetsockopt(void)
29022 + goto err;
29023 + }
29024 +
29025 +- if (*(int *)big_buf != 0x08) {
29026 ++ if (*big_buf != 0x08) {
29027 + log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08",
29028 +- *(int *)big_buf);
29029 ++ (int)*big_buf);
29030 + goto err;
29031 + }
29032 +
29033 +diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
29034 +index 2702df2b23433..9966685866fdf 100644
29035 +--- a/tools/testing/selftests/bpf/prog_tests/test_overhead.c
29036 ++++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
29037 +@@ -61,10 +61,9 @@ void test_test_overhead(void)
29038 + const char *raw_tp_name = "raw_tp/task_rename";
29039 + const char *fentry_name = "fentry/__set_task_comm";
29040 + const char *fexit_name = "fexit/__set_task_comm";
29041 +- const char *fmodret_name = "fmod_ret/__set_task_comm";
29042 + const char *kprobe_func = "__set_task_comm";
29043 + struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog;
29044 +- struct bpf_program *fentry_prog, *fexit_prog, *fmodret_prog;
29045 ++ struct bpf_program *fentry_prog, *fexit_prog;
29046 + struct bpf_object *obj;
29047 + struct bpf_link *link;
29048 + int err, duration = 0;
29049 +@@ -97,11 +96,6 @@ void test_test_overhead(void)
29050 + if (CHECK(!fexit_prog, "find_probe",
29051 + "prog '%s' not found\n", fexit_name))
29052 + goto cleanup;
29053 +- fmodret_prog = bpf_object__find_program_by_title(obj, fmodret_name);
29054 +- if (CHECK(!fmodret_prog, "find_probe",
29055 +- "prog '%s' not found\n", fmodret_name))
29056 +- goto cleanup;
29057 +-
29058 + err = bpf_object__load(obj);
29059 + if (CHECK(err, "obj_load", "err %d\n", err))
29060 + goto cleanup;
29061 +@@ -148,12 +142,6 @@ void test_test_overhead(void)
29062 + test_run("fexit");
29063 + bpf_link__destroy(link);
29064 +
29065 +- /* attach fmod_ret */
29066 +- link = bpf_program__attach_trace(fmodret_prog);
29067 +- if (CHECK(IS_ERR(link), "attach fmod_ret", "err %ld\n", PTR_ERR(link)))
29068 +- goto cleanup;
29069 +- test_run("fmod_ret");
29070 +- bpf_link__destroy(link);
29071 + cleanup:
29072 + prctl(PR_SET_NAME, comm, 0L, 0L, 0L);
29073 + bpf_object__close(obj);
29074 +diff --git a/tools/testing/selftests/bpf/progs/test_overhead.c b/tools/testing/selftests/bpf/progs/test_overhead.c
29075 +index 42403d088abc9..abb7344b531f4 100644
29076 +--- a/tools/testing/selftests/bpf/progs/test_overhead.c
29077 ++++ b/tools/testing/selftests/bpf/progs/test_overhead.c
29078 +@@ -39,10 +39,4 @@ int BPF_PROG(prog5, struct task_struct *tsk, const char *buf, bool exec)
29079 + return 0;
29080 + }
29081 +
29082 +-SEC("fmod_ret/__set_task_comm")
29083 +-int BPF_PROG(prog6, struct task_struct *tsk, const char *buf, bool exec)
29084 +-{
29085 +- return !tsk;
29086 +-}
29087 +-
29088 + char _license[] SEC("license") = "GPL";
29089 +diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
29090 +index bbf8296f4d663..1032b292af5b7 100644
29091 +--- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c
29092 ++++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
29093 +@@ -19,6 +19,17 @@
29094 + #define IP6(aaaa, bbbb, cccc, dddd) \
29095 + { bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), bpf_htonl(dddd) }
29096 +
29097 ++/* Macros for least-significant byte and word accesses. */
29098 ++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
29099 ++#define LSE_INDEX(index, size) (index)
29100 ++#else
29101 ++#define LSE_INDEX(index, size) ((size) - (index) - 1)
29102 ++#endif
29103 ++#define LSB(value, index) \
29104 ++ (((__u8 *)&(value))[LSE_INDEX((index), sizeof(value))])
29105 ++#define LSW(value, index) \
29106 ++ (((__u16 *)&(value))[LSE_INDEX((index), sizeof(value) / 2)])
29107 ++
29108 + #define MAX_SOCKS 32
29109 +
29110 + struct {
29111 +@@ -369,171 +380,146 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
29112 + {
29113 + struct bpf_sock *sk;
29114 + int err, family;
29115 +- __u16 *half;
29116 +- __u8 *byte;
29117 + bool v4;
29118 +
29119 + v4 = (ctx->family == AF_INET);
29120 +
29121 + /* Narrow loads from family field */
29122 +- byte = (__u8 *)&ctx->family;
29123 +- half = (__u16 *)&ctx->family;
29124 +- if (byte[0] != (v4 ? AF_INET : AF_INET6) ||
29125 +- byte[1] != 0 || byte[2] != 0 || byte[3] != 0)
29126 ++ if (LSB(ctx->family, 0) != (v4 ? AF_INET : AF_INET6) ||
29127 ++ LSB(ctx->family, 1) != 0 || LSB(ctx->family, 2) != 0 || LSB(ctx->family, 3) != 0)
29128 + return SK_DROP;
29129 +- if (half[0] != (v4 ? AF_INET : AF_INET6))
29130 ++ if (LSW(ctx->family, 0) != (v4 ? AF_INET : AF_INET6))
29131 + return SK_DROP;
29132 +
29133 +- byte = (__u8 *)&ctx->protocol;
29134 +- if (byte[0] != IPPROTO_TCP ||
29135 +- byte[1] != 0 || byte[2] != 0 || byte[3] != 0)
29136 ++ /* Narrow loads from protocol field */
29137 ++ if (LSB(ctx->protocol, 0) != IPPROTO_TCP ||
29138 ++ LSB(ctx->protocol, 1) != 0 || LSB(ctx->protocol, 2) != 0 || LSB(ctx->protocol, 3) != 0)
29139 + return SK_DROP;
29140 +- half = (__u16 *)&ctx->protocol;
29141 +- if (half[0] != IPPROTO_TCP)
29142 ++ if (LSW(ctx->protocol, 0) != IPPROTO_TCP)
29143 + return SK_DROP;
29144 +
29145 + /* Narrow loads from remote_port field. Expect non-0 value. */
29146 +- byte = (__u8 *)&ctx->remote_port;
29147 +- if (byte[0] == 0 && byte[1] == 0 && byte[2] == 0 && byte[3] == 0)
29148 ++ if (LSB(ctx->remote_port, 0) == 0 && LSB(ctx->remote_port, 1) == 0 &&
29149 ++ LSB(ctx->remote_port, 2) == 0 && LSB(ctx->remote_port, 3) == 0)
29150 + return SK_DROP;
29151 +- half = (__u16 *)&ctx->remote_port;
29152 +- if (half[0] == 0)
29153 ++ if (LSW(ctx->remote_port, 0) == 0)
29154 + return SK_DROP;
29155 +
29156 + /* Narrow loads from local_port field. Expect DST_PORT. */
29157 +- byte = (__u8 *)&ctx->local_port;
29158 +- if (byte[0] != ((DST_PORT >> 0) & 0xff) ||
29159 +- byte[1] != ((DST_PORT >> 8) & 0xff) ||
29160 +- byte[2] != 0 || byte[3] != 0)
29161 ++ if (LSB(ctx->local_port, 0) != ((DST_PORT >> 0) & 0xff) ||
29162 ++ LSB(ctx->local_port, 1) != ((DST_PORT >> 8) & 0xff) ||
29163 ++ LSB(ctx->local_port, 2) != 0 || LSB(ctx->local_port, 3) != 0)
29164 + return SK_DROP;
29165 +- half = (__u16 *)&ctx->local_port;
29166 +- if (half[0] != DST_PORT)
29167 ++ if (LSW(ctx->local_port, 0) != DST_PORT)
29168 + return SK_DROP;
29169 +
29170 + /* Narrow loads from IPv4 fields */
29171 + if (v4) {
29172 + /* Expect non-0.0.0.0 in remote_ip4 */
29173 +- byte = (__u8 *)&ctx->remote_ip4;
29174 +- if (byte[0] == 0 && byte[1] == 0 &&
29175 +- byte[2] == 0 && byte[3] == 0)
29176 ++ if (LSB(ctx->remote_ip4, 0) == 0 && LSB(ctx->remote_ip4, 1) == 0 &&
29177 ++ LSB(ctx->remote_ip4, 2) == 0 && LSB(ctx->remote_ip4, 3) == 0)
29178 + return SK_DROP;
29179 +- half = (__u16 *)&ctx->remote_ip4;
29180 +- if (half[0] == 0 && half[1] == 0)
29181 ++ if (LSW(ctx->remote_ip4, 0) == 0 && LSW(ctx->remote_ip4, 1) == 0)
29182 + return SK_DROP;
29183 +
29184 + /* Expect DST_IP4 in local_ip4 */
29185 +- byte = (__u8 *)&ctx->local_ip4;
29186 +- if (byte[0] != ((DST_IP4 >> 0) & 0xff) ||
29187 +- byte[1] != ((DST_IP4 >> 8) & 0xff) ||
29188 +- byte[2] != ((DST_IP4 >> 16) & 0xff) ||
29189 +- byte[3] != ((DST_IP4 >> 24) & 0xff))
29190 ++ if (LSB(ctx->local_ip4, 0) != ((DST_IP4 >> 0) & 0xff) ||
29191 ++ LSB(ctx->local_ip4, 1) != ((DST_IP4 >> 8) & 0xff) ||
29192 ++ LSB(ctx->local_ip4, 2) != ((DST_IP4 >> 16) & 0xff) ||
29193 ++ LSB(ctx->local_ip4, 3) != ((DST_IP4 >> 24) & 0xff))
29194 + return SK_DROP;
29195 +- half = (__u16 *)&ctx->local_ip4;
29196 +- if (half[0] != ((DST_IP4 >> 0) & 0xffff) ||
29197 +- half[1] != ((DST_IP4 >> 16) & 0xffff))
29198 ++ if (LSW(ctx->local_ip4, 0) != ((DST_IP4 >> 0) & 0xffff) ||
29199 ++ LSW(ctx->local_ip4, 1) != ((DST_IP4 >> 16) & 0xffff))
29200 + return SK_DROP;
29201 + } else {
29202 + /* Expect 0.0.0.0 IPs when family != AF_INET */
29203 +- byte = (__u8 *)&ctx->remote_ip4;
29204 +- if (byte[0] != 0 || byte[1] != 0 &&
29205 +- byte[2] != 0 || byte[3] != 0)
29206 ++ if (LSB(ctx->remote_ip4, 0) != 0 || LSB(ctx->remote_ip4, 1) != 0 ||
29207 ++ LSB(ctx->remote_ip4, 2) != 0 || LSB(ctx->remote_ip4, 3) != 0)
29208 + return SK_DROP;
29209 +- half = (__u16 *)&ctx->remote_ip4;
29210 +- if (half[0] != 0 || half[1] != 0)
29211 ++ if (LSW(ctx->remote_ip4, 0) != 0 || LSW(ctx->remote_ip4, 1) != 0)
29212 + return SK_DROP;
29213 +
29214 +- byte = (__u8 *)&ctx->local_ip4;
29215 +- if (byte[0] != 0 || byte[1] != 0 &&
29216 +- byte[2] != 0 || byte[3] != 0)
29217 ++ if (LSB(ctx->local_ip4, 0) != 0 || LSB(ctx->local_ip4, 1) != 0 ||
29218 ++ LSB(ctx->local_ip4, 2) != 0 || LSB(ctx->local_ip4, 3) != 0)
29219 + return SK_DROP;
29220 +- half = (__u16 *)&ctx->local_ip4;
29221 +- if (half[0] != 0 || half[1] != 0)
29222 ++ if (LSW(ctx->local_ip4, 0) != 0 || LSW(ctx->local_ip4, 1) != 0)
29223 + return SK_DROP;
29224 + }
29225 +
29226 + /* Narrow loads from IPv6 fields */
29227 + if (!v4) {
29228 +- /* Expenct non-:: IP in remote_ip6 */
29229 +- byte = (__u8 *)&ctx->remote_ip6;
29230 +- if (byte[0] == 0 && byte[1] == 0 &&
29231 +- byte[2] == 0 && byte[3] == 0 &&
29232 +- byte[4] == 0 && byte[5] == 0 &&
29233 +- byte[6] == 0 && byte[7] == 0 &&
29234 +- byte[8] == 0 && byte[9] == 0 &&
29235 +- byte[10] == 0 && byte[11] == 0 &&
29236 +- byte[12] == 0 && byte[13] == 0 &&
29237 +- byte[14] == 0 && byte[15] == 0)
29238 ++ /* Expect non-:: IP in remote_ip6 */
29239 ++ if (LSB(ctx->remote_ip6[0], 0) == 0 && LSB(ctx->remote_ip6[0], 1) == 0 &&
29240 ++ LSB(ctx->remote_ip6[0], 2) == 0 && LSB(ctx->remote_ip6[0], 3) == 0 &&
29241 ++ LSB(ctx->remote_ip6[1], 0) == 0 && LSB(ctx->remote_ip6[1], 1) == 0 &&
29242 ++ LSB(ctx->remote_ip6[1], 2) == 0 && LSB(ctx->remote_ip6[1], 3) == 0 &&
29243 ++ LSB(ctx->remote_ip6[2], 0) == 0 && LSB(ctx->remote_ip6[2], 1) == 0 &&
29244 ++ LSB(ctx->remote_ip6[2], 2) == 0 && LSB(ctx->remote_ip6[2], 3) == 0 &&
29245 ++ LSB(ctx->remote_ip6[3], 0) == 0 && LSB(ctx->remote_ip6[3], 1) == 0 &&
29246 ++ LSB(ctx->remote_ip6[3], 2) == 0 && LSB(ctx->remote_ip6[3], 3) == 0)
29247 + return SK_DROP;
29248 +- half = (__u16 *)&ctx->remote_ip6;
29249 +- if (half[0] == 0 && half[1] == 0 &&
29250 +- half[2] == 0 && half[3] == 0 &&
29251 +- half[4] == 0 && half[5] == 0 &&
29252 +- half[6] == 0 && half[7] == 0)
29253 ++ if (LSW(ctx->remote_ip6[0], 0) == 0 && LSW(ctx->remote_ip6[0], 1) == 0 &&
29254 ++ LSW(ctx->remote_ip6[1], 0) == 0 && LSW(ctx->remote_ip6[1], 1) == 0 &&
29255 ++ LSW(ctx->remote_ip6[2], 0) == 0 && LSW(ctx->remote_ip6[2], 1) == 0 &&
29256 ++ LSW(ctx->remote_ip6[3], 0) == 0 && LSW(ctx->remote_ip6[3], 1) == 0)
29257 + return SK_DROP;
29258 +-
29259 + /* Expect DST_IP6 in local_ip6 */
29260 +- byte = (__u8 *)&ctx->local_ip6;
29261 +- if (byte[0] != ((DST_IP6[0] >> 0) & 0xff) ||
29262 +- byte[1] != ((DST_IP6[0] >> 8) & 0xff) ||
29263 +- byte[2] != ((DST_IP6[0] >> 16) & 0xff) ||
29264 +- byte[3] != ((DST_IP6[0] >> 24) & 0xff) ||
29265 +- byte[4] != ((DST_IP6[1] >> 0) & 0xff) ||
29266 +- byte[5] != ((DST_IP6[1] >> 8) & 0xff) ||
29267 +- byte[6] != ((DST_IP6[1] >> 16) & 0xff) ||
29268 +- byte[7] != ((DST_IP6[1] >> 24) & 0xff) ||
29269 +- byte[8] != ((DST_IP6[2] >> 0) & 0xff) ||
29270 +- byte[9] != ((DST_IP6[2] >> 8) & 0xff) ||
29271 +- byte[10] != ((DST_IP6[2] >> 16) & 0xff) ||
29272 +- byte[11] != ((DST_IP6[2] >> 24) & 0xff) ||
29273 +- byte[12] != ((DST_IP6[3] >> 0) & 0xff) ||
29274 +- byte[13] != ((DST_IP6[3] >> 8) & 0xff) ||
29275 +- byte[14] != ((DST_IP6[3] >> 16) & 0xff) ||
29276 +- byte[15] != ((DST_IP6[3] >> 24) & 0xff))
29277 ++ if (LSB(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xff) ||
29278 ++ LSB(ctx->local_ip6[0], 1) != ((DST_IP6[0] >> 8) & 0xff) ||
29279 ++ LSB(ctx->local_ip6[0], 2) != ((DST_IP6[0] >> 16) & 0xff) ||
29280 ++ LSB(ctx->local_ip6[0], 3) != ((DST_IP6[0] >> 24) & 0xff) ||
29281 ++ LSB(ctx->local_ip6[1], 0) != ((DST_IP6[1] >> 0) & 0xff) ||
29282 ++ LSB(ctx->local_ip6[1], 1) != ((DST_IP6[1] >> 8) & 0xff) ||
29283 ++ LSB(ctx->local_ip6[1], 2) != ((DST_IP6[1] >> 16) & 0xff) ||
29284 ++ LSB(ctx->local_ip6[1], 3) != ((DST_IP6[1] >> 24) & 0xff) ||
29285 ++ LSB(ctx->local_ip6[2], 0) != ((DST_IP6[2] >> 0) & 0xff) ||
29286 ++ LSB(ctx->local_ip6[2], 1) != ((DST_IP6[2] >> 8) & 0xff) ||
29287 ++ LSB(ctx->local_ip6[2], 2) != ((DST_IP6[2] >> 16) & 0xff) ||
29288 ++ LSB(ctx->local_ip6[2], 3) != ((DST_IP6[2] >> 24) & 0xff) ||
29289 ++ LSB(ctx->local_ip6[3], 0) != ((DST_IP6[3] >> 0) & 0xff) ||
29290 ++ LSB(ctx->local_ip6[3], 1) != ((DST_IP6[3] >> 8) & 0xff) ||
29291 ++ LSB(ctx->local_ip6[3], 2) != ((DST_IP6[3] >> 16) & 0xff) ||
29292 ++ LSB(ctx->local_ip6[3], 3) != ((DST_IP6[3] >> 24) & 0xff))
29293 + return SK_DROP;
29294 +- half = (__u16 *)&ctx->local_ip6;
29295 +- if (half[0] != ((DST_IP6[0] >> 0) & 0xffff) ||
29296 +- half[1] != ((DST_IP6[0] >> 16) & 0xffff) ||
29297 +- half[2] != ((DST_IP6[1] >> 0) & 0xffff) ||
29298 +- half[3] != ((DST_IP6[1] >> 16) & 0xffff) ||
29299 +- half[4] != ((DST_IP6[2] >> 0) & 0xffff) ||
29300 +- half[5] != ((DST_IP6[2] >> 16) & 0xffff) ||
29301 +- half[6] != ((DST_IP6[3] >> 0) & 0xffff) ||
29302 +- half[7] != ((DST_IP6[3] >> 16) & 0xffff))
29303 ++ if (LSW(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xffff) ||
29304 ++ LSW(ctx->local_ip6[0], 1) != ((DST_IP6[0] >> 16) & 0xffff) ||
29305 ++ LSW(ctx->local_ip6[1], 0) != ((DST_IP6[1] >> 0) & 0xffff) ||
29306 ++ LSW(ctx->local_ip6[1], 1) != ((DST_IP6[1] >> 16) & 0xffff) ||
29307 ++ LSW(ctx->local_ip6[2], 0) != ((DST_IP6[2] >> 0) & 0xffff) ||
29308 ++ LSW(ctx->local_ip6[2], 1) != ((DST_IP6[2] >> 16) & 0xffff) ||
29309 ++ LSW(ctx->local_ip6[3], 0) != ((DST_IP6[3] >> 0) & 0xffff) ||
29310 ++ LSW(ctx->local_ip6[3], 1) != ((DST_IP6[3] >> 16) & 0xffff))
29311 + return SK_DROP;
29312 + } else {
29313 + /* Expect :: IPs when family != AF_INET6 */
29314 +- byte = (__u8 *)&ctx->remote_ip6;
29315 +- if (byte[0] != 0 || byte[1] != 0 ||
29316 +- byte[2] != 0 || byte[3] != 0 ||
29317 +- byte[4] != 0 || byte[5] != 0 ||
29318 +- byte[6] != 0 || byte[7] != 0 ||
29319 +- byte[8] != 0 || byte[9] != 0 ||
29320 +- byte[10] != 0 || byte[11] != 0 ||
29321 +- byte[12] != 0 || byte[13] != 0 ||
29322 +- byte[14] != 0 || byte[15] != 0)
29323 ++ if (LSB(ctx->remote_ip6[0], 0) != 0 || LSB(ctx->remote_ip6[0], 1) != 0 ||
29324 ++ LSB(ctx->remote_ip6[0], 2) != 0 || LSB(ctx->remote_ip6[0], 3) != 0 ||
29325 ++ LSB(ctx->remote_ip6[1], 0) != 0 || LSB(ctx->remote_ip6[1], 1) != 0 ||
29326 ++ LSB(ctx->remote_ip6[1], 2) != 0 || LSB(ctx->remote_ip6[1], 3) != 0 ||
29327 ++ LSB(ctx->remote_ip6[2], 0) != 0 || LSB(ctx->remote_ip6[2], 1) != 0 ||
29328 ++ LSB(ctx->remote_ip6[2], 2) != 0 || LSB(ctx->remote_ip6[2], 3) != 0 ||
29329 ++ LSB(ctx->remote_ip6[3], 0) != 0 || LSB(ctx->remote_ip6[3], 1) != 0 ||
29330 ++ LSB(ctx->remote_ip6[3], 2) != 0 || LSB(ctx->remote_ip6[3], 3) != 0)
29331 + return SK_DROP;
29332 +- half = (__u16 *)&ctx->remote_ip6;
29333 +- if (half[0] != 0 || half[1] != 0 ||
29334 +- half[2] != 0 || half[3] != 0 ||
29335 +- half[4] != 0 || half[5] != 0 ||
29336 +- half[6] != 0 || half[7] != 0)
29337 ++ if (LSW(ctx->remote_ip6[0], 0) != 0 || LSW(ctx->remote_ip6[0], 1) != 0 ||
29338 ++ LSW(ctx->remote_ip6[1], 0) != 0 || LSW(ctx->remote_ip6[1], 1) != 0 ||
29339 ++ LSW(ctx->remote_ip6[2], 0) != 0 || LSW(ctx->remote_ip6[2], 1) != 0 ||
29340 ++ LSW(ctx->remote_ip6[3], 0) != 0 || LSW(ctx->remote_ip6[3], 1) != 0)
29341 + return SK_DROP;
29342 +
29343 +- byte = (__u8 *)&ctx->local_ip6;
29344 +- if (byte[0] != 0 || byte[1] != 0 ||
29345 +- byte[2] != 0 || byte[3] != 0 ||
29346 +- byte[4] != 0 || byte[5] != 0 ||
29347 +- byte[6] != 0 || byte[7] != 0 ||
29348 +- byte[8] != 0 || byte[9] != 0 ||
29349 +- byte[10] != 0 || byte[11] != 0 ||
29350 +- byte[12] != 0 || byte[13] != 0 ||
29351 +- byte[14] != 0 || byte[15] != 0)
29352 ++ if (LSB(ctx->local_ip6[0], 0) != 0 || LSB(ctx->local_ip6[0], 1) != 0 ||
29353 ++ LSB(ctx->local_ip6[0], 2) != 0 || LSB(ctx->local_ip6[0], 3) != 0 ||
29354 ++ LSB(ctx->local_ip6[1], 0) != 0 || LSB(ctx->local_ip6[1], 1) != 0 ||
29355 ++ LSB(ctx->local_ip6[1], 2) != 0 || LSB(ctx->local_ip6[1], 3) != 0 ||
29356 ++ LSB(ctx->local_ip6[2], 0) != 0 || LSB(ctx->local_ip6[2], 1) != 0 ||
29357 ++ LSB(ctx->local_ip6[2], 2) != 0 || LSB(ctx->local_ip6[2], 3) != 0 ||
29358 ++ LSB(ctx->local_ip6[3], 0) != 0 || LSB(ctx->local_ip6[3], 1) != 0 ||
29359 ++ LSB(ctx->local_ip6[3], 2) != 0 || LSB(ctx->local_ip6[3], 3) != 0)
29360 + return SK_DROP;
29361 +- half = (__u16 *)&ctx->local_ip6;
29362 +- if (half[0] != 0 || half[1] != 0 ||
29363 +- half[2] != 0 || half[3] != 0 ||
29364 +- half[4] != 0 || half[5] != 0 ||
29365 +- half[6] != 0 || half[7] != 0)
29366 ++ if (LSW(ctx->local_ip6[0], 0) != 0 || LSW(ctx->local_ip6[0], 1) != 0 ||
29367 ++ LSW(ctx->local_ip6[1], 0) != 0 || LSW(ctx->local_ip6[1], 1) != 0 ||
29368 ++ LSW(ctx->local_ip6[2], 0) != 0 || LSW(ctx->local_ip6[2], 1) != 0 ||
29369 ++ LSW(ctx->local_ip6[3], 0) != 0 || LSW(ctx->local_ip6[3], 1) != 0)
29370 + return SK_DROP;
29371 + }
29372 +
29373 +diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
29374 +index 458b0d69133e4..553a282d816ab 100644
29375 +--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
29376 ++++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
29377 +@@ -18,11 +18,11 @@
29378 + #define MAX_ULONG_STR_LEN 7
29379 + #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
29380 +
29381 ++const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
29382 + static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
29383 + {
29384 +- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
29385 + unsigned char i;
29386 +- char name[64];
29387 ++ char name[sizeof(tcp_mem_name)];
29388 + int ret;
29389 +
29390 + memset(name, 0, sizeof(name));
29391 +diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
29392 +index b2e6f9b0894d8..2b64bc563a12e 100644
29393 +--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
29394 ++++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
29395 +@@ -18,11 +18,11 @@
29396 + #define MAX_ULONG_STR_LEN 7
29397 + #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
29398 +
29399 ++const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
29400 + static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
29401 + {
29402 +- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
29403 + unsigned char i;
29404 +- char name[64];
29405 ++ char name[sizeof(tcp_mem_name)];
29406 + int ret;
29407 +
29408 + memset(name, 0, sizeof(name));
29409 +diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c
29410 +index 29fa09d6a6c6d..e9dfa0313d1bb 100644
29411 +--- a/tools/testing/selftests/bpf/progs/test_vmlinux.c
29412 ++++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c
29413 +@@ -19,12 +19,14 @@ SEC("tp/syscalls/sys_enter_nanosleep")
29414 + int handle__tp(struct trace_event_raw_sys_enter *args)
29415 + {
29416 + struct __kernel_timespec *ts;
29417 ++ long tv_nsec;
29418 +
29419 + if (args->id != __NR_nanosleep)
29420 + return 0;
29421 +
29422 + ts = (void *)args->args[0];
29423 +- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
29424 ++ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
29425 ++ tv_nsec != MY_TV_NSEC)
29426 + return 0;
29427 +
29428 + tp_called = true;
29429 +@@ -35,12 +37,14 @@ SEC("raw_tp/sys_enter")
29430 + int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
29431 + {
29432 + struct __kernel_timespec *ts;
29433 ++ long tv_nsec;
29434 +
29435 + if (id != __NR_nanosleep)
29436 + return 0;
29437 +
29438 + ts = (void *)PT_REGS_PARM1_CORE(regs);
29439 +- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
29440 ++ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
29441 ++ tv_nsec != MY_TV_NSEC)
29442 + return 0;
29443 +
29444 + raw_tp_called = true;
29445 +@@ -51,12 +55,14 @@ SEC("tp_btf/sys_enter")
29446 + int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
29447 + {
29448 + struct __kernel_timespec *ts;
29449 ++ long tv_nsec;
29450 +
29451 + if (id != __NR_nanosleep)
29452 + return 0;
29453 +
29454 + ts = (void *)PT_REGS_PARM1_CORE(regs);
29455 +- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
29456 ++ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
29457 ++ tv_nsec != MY_TV_NSEC)
29458 + return 0;
29459 +
29460 + tp_btf_called = true;
29461 +diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
29462 +index 7449a4b8f1f9a..9098f1e7433fd 100644
29463 +--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
29464 ++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
29465 +@@ -25,12 +25,12 @@ echo 'wakeup_latency u64 lat pid_t pid' >> synthetic_events
29466 + echo 'hist:keys=pid:ts1=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger
29467 + echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts1:onmatch(sched.sched_wakeup).wakeup_latency($wakeup_lat,next_pid) if next_comm=="ping"' > events/sched/sched_switch/trigger
29468 +
29469 +-echo 'waking+wakeup_latency u64 lat; pid_t pid' >> synthetic_events
29470 +-echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking+wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger
29471 +-echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking+wakeup_latency/trigger
29472 ++echo 'waking_plus_wakeup_latency u64 lat; pid_t pid' >> synthetic_events
29473 ++echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking_plus_wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger
29474 ++echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking_plus_wakeup_latency/trigger
29475 +
29476 + ping $LOCALHOST -c 3
29477 +-if ! grep -q "pid:" events/synthetic/waking+wakeup_latency/hist; then
29478 ++if ! grep -q "pid:" events/synthetic/waking_plus_wakeup_latency/hist; then
29479 + fail "Failed to create combined histogram"
29480 + fi
29481 +
29482 +diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
29483 +index 1aba83c87ad32..846c7ed71556f 100644
29484 +--- a/tools/testing/selftests/livepatch/functions.sh
29485 ++++ b/tools/testing/selftests/livepatch/functions.sh
29486 +@@ -278,7 +278,7 @@ function check_result {
29487 + # help differentiate repeated testing runs. Remove them with a
29488 + # post-comparison sed filter.
29489 +
29490 +- result=$(dmesg | comm -13 "$SAVED_DMESG" - | \
29491 ++ result=$(dmesg | comm --nocheck-order -13 "$SAVED_DMESG" - | \
29492 + grep -e 'livepatch:' -e 'test_klp' | \
29493 + grep -v '\(tainting\|taints\) kernel' | \
29494 + sed 's/^\[[ 0-9.]*\] //')
29495 +diff --git a/tools/testing/selftests/lkdtm/run.sh b/tools/testing/selftests/lkdtm/run.sh
29496 +index 8383eb89d88a9..bb7a1775307b8 100755
29497 +--- a/tools/testing/selftests/lkdtm/run.sh
29498 ++++ b/tools/testing/selftests/lkdtm/run.sh
29499 +@@ -82,7 +82,7 @@ dmesg > "$DMESG"
29500 + ($SHELL -c 'cat <(echo '"$test"') >'"$TRIGGER" 2>/dev/null) || true
29501 +
29502 + # Record and dump the results
29503 +-dmesg | diff --changed-group-format='%>' --unchanged-group-format='' "$DMESG" - > "$LOG" || true
29504 ++dmesg | comm --nocheck-order -13 "$DMESG" - > "$LOG" || true
29505 +
29506 + cat "$LOG"
29507 + # Check for expected output
29508 +diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
29509 +index 3b42c06b59858..c5e50ab2ced60 100644
29510 +--- a/tools/testing/selftests/net/config
29511 ++++ b/tools/testing/selftests/net/config
29512 +@@ -31,3 +31,4 @@ CONFIG_NET_SCH_ETF=m
29513 + CONFIG_NET_SCH_NETEM=y
29514 + CONFIG_TEST_BLACKHOLE_DEV=m
29515 + CONFIG_KALLSYMS=y
29516 ++CONFIG_NET_FOU=m
29517 +diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
29518 +index a0b5f57d6bd31..0727e2012b685 100755
29519 +--- a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
29520 ++++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
29521 +@@ -215,10 +215,16 @@ switch_create()
29522 +
29523 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
29524 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
29525 ++
29526 ++ sysctl_set net.ipv4.conf.all.rp_filter 0
29527 ++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
29528 ++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
29529 + }
29530 +
29531 + switch_destroy()
29532 + {
29533 ++ sysctl_restore net.ipv4.conf.all.rp_filter
29534 ++
29535 + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
29536 + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
29537 +
29538 +@@ -359,6 +365,10 @@ ns_switch_create()
29539 +
29540 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
29541 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
29542 ++
29543 ++ sysctl_set net.ipv4.conf.all.rp_filter 0
29544 ++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
29545 ++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
29546 + }
29547 + export -f ns_switch_create
29548 +
29549 +diff --git a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
29550 +index 1209031bc794d..5d97fa347d75a 100755
29551 +--- a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
29552 ++++ b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
29553 +@@ -237,10 +237,16 @@ switch_create()
29554 +
29555 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
29556 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
29557 ++
29558 ++ sysctl_set net.ipv4.conf.all.rp_filter 0
29559 ++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
29560 ++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
29561 + }
29562 +
29563 + switch_destroy()
29564 + {
29565 ++ sysctl_restore net.ipv4.conf.all.rp_filter
29566 ++
29567 + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
29568 + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
29569 +
29570 +@@ -402,6 +408,10 @@ ns_switch_create()
29571 +
29572 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
29573 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
29574 ++
29575 ++ sysctl_set net.ipv4.conf.all.rp_filter 0
29576 ++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
29577 ++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
29578 + }
29579 + export -f ns_switch_create
29580 +
29581 +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
29582 +index 57d75b7f62203..e9449430f98df 100755
29583 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
29584 ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
29585 +@@ -444,9 +444,9 @@ do_transfer()
29586 + duration=$(printf "(duration %05sms)" $duration)
29587 + if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
29588 + echo "$duration [ FAIL ] client exit code $retc, server $rets" 1>&2
29589 +- echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
29590 ++ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
29591 + ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
29592 +- echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
29593 ++ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
29594 + ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
29595 +
29596 + cat "$capout"
29597 +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
29598 +index f39c1129ce5f0..c2943e4dfcfe6 100755
29599 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
29600 ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
29601 +@@ -176,9 +176,9 @@ do_transfer()
29602 +
29603 + if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
29604 + echo " client exit code $retc, server $rets" 1>&2
29605 +- echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
29606 ++ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
29607 + ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
29608 +- echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
29609 ++ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
29610 + ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
29611 +
29612 + cat "$capout"
29613 +diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
29614 +index 8a2fe6d64bf24..c9ce3dfa42ee7 100755
29615 +--- a/tools/testing/selftests/net/rtnetlink.sh
29616 ++++ b/tools/testing/selftests/net/rtnetlink.sh
29617 +@@ -520,6 +520,11 @@ kci_test_encap_fou()
29618 + return $ksft_skip
29619 + fi
29620 +
29621 ++ if ! /sbin/modprobe -q -n fou; then
29622 ++ echo "SKIP: module fou is not found"
29623 ++ return $ksft_skip
29624 ++ fi
29625 ++ /sbin/modprobe -q fou
29626 + ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null
29627 + if [ $? -ne 0 ];then
29628 + echo "FAIL: can't add fou port 7777, skipping test"
29629 +diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
29630 +index 55ef15184057d..386bca731e581 100644
29631 +--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
29632 ++++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
29633 +@@ -64,6 +64,7 @@ int bufsize;
29634 + int debug;
29635 + int testing;
29636 + volatile int gotsig;
29637 ++bool prefixes_enabled;
29638 + char *cipath = "/dev/fb0";
29639 + long cioffset;
29640 +
29641 +@@ -77,7 +78,12 @@ void sighandler(int sig, siginfo_t *info, void *ctx)
29642 + }
29643 + gotsig = sig;
29644 + #ifdef __powerpc64__
29645 +- ucp->uc_mcontext.gp_regs[PT_NIP] += 4;
29646 ++ if (prefixes_enabled) {
29647 ++ u32 inst = *(u32 *)ucp->uc_mcontext.gp_regs[PT_NIP];
29648 ++ ucp->uc_mcontext.gp_regs[PT_NIP] += ((inst >> 26 == 1) ? 8 : 4);
29649 ++ } else {
29650 ++ ucp->uc_mcontext.gp_regs[PT_NIP] += 4;
29651 ++ }
29652 + #else
29653 + ucp->uc_mcontext.uc_regs->gregs[PT_NIP] += 4;
29654 + #endif
29655 +@@ -648,6 +654,8 @@ int main(int argc, char *argv[])
29656 + exit(1);
29657 + }
29658 +
29659 ++ prefixes_enabled = have_hwcap2(PPC_FEATURE2_ARCH_3_1);
29660 ++
29661 + rc |= test_harness(test_alignment_handler_vsx_206,
29662 + "test_alignment_handler_vsx_206");
29663 + rc |= test_harness(test_alignment_handler_vsx_207,
29664 +diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
29665 +index 8a8d0f456946c..0d783e1065c86 100755
29666 +--- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
29667 ++++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
29668 +@@ -1,17 +1,19 @@
29669 + #!/bin/sh
29670 + # SPDX-License-Identifier: GPL-2.0-only
29671 +
29672 ++KSELFTESTS_SKIP=4
29673 ++
29674 + . ./eeh-functions.sh
29675 +
29676 + if ! eeh_supported ; then
29677 + echo "EEH not supported on this system, skipping"
29678 +- exit 0;
29679 ++ exit $KSELFTESTS_SKIP;
29680 + fi
29681 +
29682 + if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \
29683 + [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then
29684 + echo "debugfs EEH testing files are missing. Is debugfs mounted?"
29685 +- exit 1;
29686 ++ exit $KSELFTESTS_SKIP;
29687 + fi
29688 +
29689 + pre_lspci=`mktemp`
29690 +@@ -84,4 +86,5 @@ echo "$failed devices failed to recover ($dev_count tested)"
29691 + lspci | diff -u $pre_lspci -
29692 + rm -f $pre_lspci
29693 +
29694 +-exit $failed
29695 ++test "$failed" == 0
29696 ++exit $?
29697 +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
29698 +index 7a6d40286a421..9a9eb02539fb4 100644
29699 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
29700 ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
29701 +@@ -1667,64 +1667,79 @@ TEST_F(TRACE_poke, getpid_runs_normally)
29702 + }
29703 +
29704 + #if defined(__x86_64__)
29705 +-# define ARCH_REGS struct user_regs_struct
29706 +-# define SYSCALL_NUM orig_rax
29707 +-# define SYSCALL_RET rax
29708 ++# define ARCH_REGS struct user_regs_struct
29709 ++# define SYSCALL_NUM(_regs) (_regs).orig_rax
29710 ++# define SYSCALL_RET(_regs) (_regs).rax
29711 + #elif defined(__i386__)
29712 +-# define ARCH_REGS struct user_regs_struct
29713 +-# define SYSCALL_NUM orig_eax
29714 +-# define SYSCALL_RET eax
29715 ++# define ARCH_REGS struct user_regs_struct
29716 ++# define SYSCALL_NUM(_regs) (_regs).orig_eax
29717 ++# define SYSCALL_RET(_regs) (_regs).eax
29718 + #elif defined(__arm__)
29719 +-# define ARCH_REGS struct pt_regs
29720 +-# define SYSCALL_NUM ARM_r7
29721 +-# define SYSCALL_RET ARM_r0
29722 ++# define ARCH_REGS struct pt_regs
29723 ++# define SYSCALL_NUM(_regs) (_regs).ARM_r7
29724 ++# define SYSCALL_RET(_regs) (_regs).ARM_r0
29725 + #elif defined(__aarch64__)
29726 +-# define ARCH_REGS struct user_pt_regs
29727 +-# define SYSCALL_NUM regs[8]
29728 +-# define SYSCALL_RET regs[0]
29729 ++# define ARCH_REGS struct user_pt_regs
29730 ++# define SYSCALL_NUM(_regs) (_regs).regs[8]
29731 ++# define SYSCALL_RET(_regs) (_regs).regs[0]
29732 + #elif defined(__riscv) && __riscv_xlen == 64
29733 +-# define ARCH_REGS struct user_regs_struct
29734 +-# define SYSCALL_NUM a7
29735 +-# define SYSCALL_RET a0
29736 ++# define ARCH_REGS struct user_regs_struct
29737 ++# define SYSCALL_NUM(_regs) (_regs).a7
29738 ++# define SYSCALL_RET(_regs) (_regs).a0
29739 + #elif defined(__csky__)
29740 +-# define ARCH_REGS struct pt_regs
29741 +-#if defined(__CSKYABIV2__)
29742 +-# define SYSCALL_NUM regs[3]
29743 +-#else
29744 +-# define SYSCALL_NUM regs[9]
29745 +-#endif
29746 +-# define SYSCALL_RET a0
29747 ++# define ARCH_REGS struct pt_regs
29748 ++# if defined(__CSKYABIV2__)
29749 ++# define SYSCALL_NUM(_regs) (_regs).regs[3]
29750 ++# else
29751 ++# define SYSCALL_NUM(_regs) (_regs).regs[9]
29752 ++# endif
29753 ++# define SYSCALL_RET(_regs) (_regs).a0
29754 + #elif defined(__hppa__)
29755 +-# define ARCH_REGS struct user_regs_struct
29756 +-# define SYSCALL_NUM gr[20]
29757 +-# define SYSCALL_RET gr[28]
29758 ++# define ARCH_REGS struct user_regs_struct
29759 ++# define SYSCALL_NUM(_regs) (_regs).gr[20]
29760 ++# define SYSCALL_RET(_regs) (_regs).gr[28]
29761 + #elif defined(__powerpc__)
29762 +-# define ARCH_REGS struct pt_regs
29763 +-# define SYSCALL_NUM gpr[0]
29764 +-# define SYSCALL_RET gpr[3]
29765 ++# define ARCH_REGS struct pt_regs
29766 ++# define SYSCALL_NUM(_regs) (_regs).gpr[0]
29767 ++# define SYSCALL_RET(_regs) (_regs).gpr[3]
29768 ++# define SYSCALL_RET_SET(_regs, _val) \
29769 ++ do { \
29770 ++ typeof(_val) _result = (_val); \
29771 ++ /* \
29772 ++ * A syscall error is signaled by CR0 SO bit \
29773 ++ * and the code is stored as a positive value. \
29774 ++ */ \
29775 ++ if (_result < 0) { \
29776 ++ SYSCALL_RET(_regs) = -_result; \
29777 ++ (_regs).ccr |= 0x10000000; \
29778 ++ } else { \
29779 ++ SYSCALL_RET(_regs) = _result; \
29780 ++ (_regs).ccr &= ~0x10000000; \
29781 ++ } \
29782 ++ } while (0)
29783 + #elif defined(__s390__)
29784 +-# define ARCH_REGS s390_regs
29785 +-# define SYSCALL_NUM gprs[2]
29786 +-# define SYSCALL_RET gprs[2]
29787 ++# define ARCH_REGS s390_regs
29788 ++# define SYSCALL_NUM(_regs) (_regs).gprs[2]
29789 ++# define SYSCALL_RET(_regs) (_regs).gprs[2]
29790 + # define SYSCALL_NUM_RET_SHARE_REG
29791 + #elif defined(__mips__)
29792 +-# define ARCH_REGS struct pt_regs
29793 +-# define SYSCALL_NUM regs[2]
29794 +-# define SYSCALL_SYSCALL_NUM regs[4]
29795 +-# define SYSCALL_RET regs[2]
29796 ++# define ARCH_REGS struct pt_regs
29797 ++# define SYSCALL_NUM(_regs) (_regs).regs[2]
29798 ++# define SYSCALL_SYSCALL_NUM regs[4]
29799 ++# define SYSCALL_RET(_regs) (_regs).regs[2]
29800 + # define SYSCALL_NUM_RET_SHARE_REG
29801 + #elif defined(__xtensa__)
29802 +-# define ARCH_REGS struct user_pt_regs
29803 +-# define SYSCALL_NUM syscall
29804 ++# define ARCH_REGS struct user_pt_regs
29805 ++# define SYSCALL_NUM(_regs) (_regs).syscall
29806 + /*
29807 + * On xtensa syscall return value is in the register
29808 + * a2 of the current window which is not fixed.
29809 + */
29810 +-#define SYSCALL_RET(reg) a[(reg).windowbase * 4 + 2]
29811 ++#define SYSCALL_RET(_regs) (_regs).a[(_regs).windowbase * 4 + 2]
29812 + #elif defined(__sh__)
29813 +-# define ARCH_REGS struct pt_regs
29814 +-# define SYSCALL_NUM gpr[3]
29815 +-# define SYSCALL_RET gpr[0]
29816 ++# define ARCH_REGS struct pt_regs
29817 ++# define SYSCALL_NUM(_regs) (_regs).gpr[3]
29818 ++# define SYSCALL_RET(_regs) (_regs).gpr[0]
29819 + #else
29820 + # error "Do not know how to find your architecture's registers and syscalls"
29821 + #endif
29822 +@@ -1773,10 +1788,10 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
29823 + #endif
29824 +
29825 + #if defined(__mips__)
29826 +- if (regs.SYSCALL_NUM == __NR_O32_Linux)
29827 ++ if (SYSCALL_NUM(regs) == __NR_O32_Linux)
29828 + return regs.SYSCALL_SYSCALL_NUM;
29829 + #endif
29830 +- return regs.SYSCALL_NUM;
29831 ++ return SYSCALL_NUM(regs);
29832 + }
29833 +
29834 + /* Architecture-specific syscall changing routine. */
29835 +@@ -1799,14 +1814,14 @@ void change_syscall(struct __test_metadata *_metadata,
29836 + defined(__s390__) || defined(__hppa__) || defined(__riscv) || \
29837 + defined(__xtensa__) || defined(__csky__) || defined(__sh__)
29838 + {
29839 +- regs.SYSCALL_NUM = syscall;
29840 ++ SYSCALL_NUM(regs) = syscall;
29841 + }
29842 + #elif defined(__mips__)
29843 + {
29844 +- if (regs.SYSCALL_NUM == __NR_O32_Linux)
29845 ++ if (SYSCALL_NUM(regs) == __NR_O32_Linux)
29846 + regs.SYSCALL_SYSCALL_NUM = syscall;
29847 + else
29848 +- regs.SYSCALL_NUM = syscall;
29849 ++ SYSCALL_NUM(regs) = syscall;
29850 + }
29851 +
29852 + #elif defined(__arm__)
29853 +@@ -1840,11 +1855,8 @@ void change_syscall(struct __test_metadata *_metadata,
29854 + if (syscall == -1)
29855 + #ifdef SYSCALL_NUM_RET_SHARE_REG
29856 + TH_LOG("Can't modify syscall return on this architecture");
29857 +-
29858 +-#elif defined(__xtensa__)
29859 +- regs.SYSCALL_RET(regs) = result;
29860 + #else
29861 +- regs.SYSCALL_RET = result;
29862 ++ SYSCALL_RET(regs) = result;
29863 + #endif
29864 +
29865 + #ifdef HAVE_GETREGS
29866 +@@ -3715,7 +3727,7 @@ TEST(user_notification_filter_empty)
29867 + if (pid == 0) {
29868 + int listener;
29869 +
29870 +- listener = user_notif_syscall(__NR_mknod, SECCOMP_FILTER_FLAG_NEW_LISTENER);
29871 ++ listener = user_notif_syscall(__NR_mknodat, SECCOMP_FILTER_FLAG_NEW_LISTENER);
29872 + if (listener < 0)
29873 + _exit(EXIT_FAILURE);
29874 +
29875 +diff --git a/tools/testing/selftests/vm/config b/tools/testing/selftests/vm/config
29876 +index 3ba674b64fa9f..69dd0d1aa30b2 100644
29877 +--- a/tools/testing/selftests/vm/config
29878 ++++ b/tools/testing/selftests/vm/config
29879 +@@ -3,3 +3,4 @@ CONFIG_USERFAULTFD=y
29880 + CONFIG_TEST_VMALLOC=m
29881 + CONFIG_DEVICE_PRIVATE=y
29882 + CONFIG_TEST_HMM=m
29883 ++CONFIG_GUP_BENCHMARK=y