From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.8 commit in: /
Date: Thu, 29 Oct 2020 11:20:50
Message-Id: 1603970430.fd9934805d4cef23e83f4c726a0cad4ab6d7a7a3.mpagano@gentoo
1 commit: fd9934805d4cef23e83f4c726a0cad4ab6d7a7a3
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Oct 29 11:20:30 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Oct 29 11:20:30 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fd993480
7
8 Linux patch 5.8.17
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1016_linux-5.8.17.patch | 22065 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 22069 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index e29fc26..333aabc 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -107,6 +107,10 @@ Patch: 1015_linux-5.8.16.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.8.16
23
24 +Patch: 1016_linux-5.8.17.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.8.17
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1016_linux-5.8.17.patch b/1016_linux-5.8.17.patch
33 new file mode 100644
34 index 0000000..362fc27
35 --- /dev/null
36 +++ b/1016_linux-5.8.17.patch
37 @@ -0,0 +1,22065 @@
38 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
39 +index fb95fad81c79a..6746f91ebc490 100644
40 +--- a/Documentation/admin-guide/kernel-parameters.txt
41 ++++ b/Documentation/admin-guide/kernel-parameters.txt
42 +@@ -577,7 +577,7 @@
43 + loops can be debugged more effectively on production
44 + systems.
45 +
46 +- clearcpuid=BITNUM [X86]
47 ++ clearcpuid=BITNUM[,BITNUM...] [X86]
48 + Disable CPUID feature X for the kernel. See
49 + arch/x86/include/asm/cpufeatures.h for the valid bit
50 + numbers. Note the Linux specific bits are not necessarily
51 +diff --git a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
52 +index fc823572bcff2..90c6d039b91b0 100644
53 +--- a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
54 ++++ b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
55 +@@ -23,8 +23,7 @@ properties:
56 + - items:
57 + - const: allwinner,sun7i-a20-crypto
58 + - const: allwinner,sun4i-a10-crypto
59 +- - items:
60 +- - const: allwinner,sun8i-a33-crypto
61 ++ - const: allwinner,sun8i-a33-crypto
62 +
63 + reg:
64 + maxItems: 1
65 +@@ -59,7 +58,9 @@ if:
66 + properties:
67 + compatible:
68 + contains:
69 +- const: allwinner,sun6i-a31-crypto
70 ++ enum:
71 ++ - allwinner,sun6i-a31-crypto
72 ++ - allwinner,sun8i-a33-crypto
73 +
74 + then:
75 + required:
76 +diff --git a/Documentation/devicetree/bindings/net/socionext-netsec.txt b/Documentation/devicetree/bindings/net/socionext-netsec.txt
77 +index 9d6c9feb12ff1..a3c1dffaa4bb4 100644
78 +--- a/Documentation/devicetree/bindings/net/socionext-netsec.txt
79 ++++ b/Documentation/devicetree/bindings/net/socionext-netsec.txt
80 +@@ -30,7 +30,9 @@ Optional properties: (See ethernet.txt file in the same directory)
81 + - max-frame-size: See ethernet.txt in the same directory.
82 +
83 + The MAC address will be determined using the optional properties
84 +-defined in ethernet.txt.
85 ++defined in ethernet.txt. The 'phy-mode' property is required, but may
86 ++be set to the empty string if the PHY configuration is programmed by
87 ++the firmware or set by hardware straps, and needs to be preserved.
88 +
89 + Example:
90 + eth0: ethernet@522d0000 {
91 +diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
92 +index 837d51f9e1fab..25e6673a085a0 100644
93 +--- a/Documentation/networking/ip-sysctl.rst
94 ++++ b/Documentation/networking/ip-sysctl.rst
95 +@@ -1142,13 +1142,15 @@ icmp_ratelimit - INTEGER
96 + icmp_msgs_per_sec - INTEGER
97 + Limit maximal number of ICMP packets sent per second from this host.
98 + Only messages whose type matches icmp_ratemask (see below) are
99 +- controlled by this limit.
100 ++ controlled by this limit. For security reasons, the precise count
101 ++ of messages per second is randomized.
102 +
103 + Default: 1000
104 +
105 + icmp_msgs_burst - INTEGER
106 + icmp_msgs_per_sec controls number of ICMP packets sent per second,
107 + while icmp_msgs_burst controls the burst size of these packets.
108 ++ For security reasons, the precise burst size is randomized.
109 +
110 + Default: 50
111 +
112 +diff --git a/Makefile b/Makefile
113 +index a4622ef65436e..9bdb93053ee93 100644
114 +--- a/Makefile
115 ++++ b/Makefile
116 +@@ -1,7 +1,7 @@
117 + # SPDX-License-Identifier: GPL-2.0
118 + VERSION = 5
119 + PATCHLEVEL = 8
120 +-SUBLEVEL = 16
121 ++SUBLEVEL = 17
122 + EXTRAVERSION =
123 + NAME = Kleptomaniac Octopus
124 +
125 +diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
126 +index ce81018345184..6b5c54576f54d 100644
127 +--- a/arch/arc/plat-hsdk/Kconfig
128 ++++ b/arch/arc/plat-hsdk/Kconfig
129 +@@ -8,5 +8,6 @@ menuconfig ARC_SOC_HSDK
130 + select ARC_HAS_ACCL_REGS
131 + select ARC_IRQ_NO_AUTOSAVE
132 + select CLK_HSDK
133 ++ select RESET_CONTROLLER
134 + select RESET_HSDK
135 + select HAVE_PCI
136 +diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
137 +index 911d8cf77f2c6..0339a46fa71c5 100644
138 +--- a/arch/arm/boot/dts/imx6sl.dtsi
139 ++++ b/arch/arm/boot/dts/imx6sl.dtsi
140 +@@ -939,8 +939,10 @@
141 + };
142 +
143 + rngb: rngb@21b4000 {
144 ++ compatible = "fsl,imx6sl-rngb", "fsl,imx25-rngb";
145 + reg = <0x021b4000 0x4000>;
146 + interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
147 ++ clocks = <&clks IMX6SL_CLK_DUMMY>;
148 + };
149 +
150 + weim: weim@21b8000 {
151 +diff --git a/arch/arm/boot/dts/iwg20d-q7-common.dtsi b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
152 +index ebbe1518ef8a6..63cafd220dba1 100644
153 +--- a/arch/arm/boot/dts/iwg20d-q7-common.dtsi
154 ++++ b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
155 +@@ -57,7 +57,7 @@
156 +
157 + lvds-receiver {
158 + compatible = "ti,ds90cf384a", "lvds-decoder";
159 +- powerdown-gpios = <&gpio7 25 GPIO_ACTIVE_LOW>;
160 ++ power-supply = <&vcc_3v3_tft1>;
161 +
162 + ports {
163 + #address-cells = <1>;
164 +@@ -81,6 +81,7 @@
165 + panel {
166 + compatible = "edt,etm0700g0dh6";
167 + backlight = <&lcd_backlight>;
168 ++ power-supply = <&vcc_3v3_tft1>;
169 +
170 + port {
171 + panel_in: endpoint {
172 +@@ -113,6 +114,17 @@
173 + };
174 + };
175 +
176 ++ vcc_3v3_tft1: regulator-panel {
177 ++ compatible = "regulator-fixed";
178 ++
179 ++ regulator-name = "vcc-3v3-tft1";
180 ++ regulator-min-microvolt = <3300000>;
181 ++ regulator-max-microvolt = <3300000>;
182 ++ enable-active-high;
183 ++ startup-delay-us = <500>;
184 ++ gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>;
185 ++ };
186 ++
187 + vcc_sdhi1: regulator-vcc-sdhi1 {
188 + compatible = "regulator-fixed";
189 +
190 +@@ -207,6 +219,7 @@
191 + reg = <0x38>;
192 + interrupt-parent = <&gpio2>;
193 + interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
194 ++ vcc-supply = <&vcc_3v3_tft1>;
195 + };
196 + };
197 +
198 +diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
199 +index eedb92526968a..a4ab8b96d0eb6 100644
200 +--- a/arch/arm/boot/dts/meson8.dtsi
201 ++++ b/arch/arm/boot/dts/meson8.dtsi
202 +@@ -239,8 +239,6 @@
203 + <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
204 + <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
205 + <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
206 +- <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
207 +- <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
208 + <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
209 + <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
210 + <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
211 +diff --git a/arch/arm/boot/dts/owl-s500.dtsi b/arch/arm/boot/dts/owl-s500.dtsi
212 +index 5ceb6cc4451d2..1dbe4e8b38ac7 100644
213 +--- a/arch/arm/boot/dts/owl-s500.dtsi
214 ++++ b/arch/arm/boot/dts/owl-s500.dtsi
215 +@@ -84,21 +84,21 @@
216 + global_timer: timer@b0020200 {
217 + compatible = "arm,cortex-a9-global-timer";
218 + reg = <0xb0020200 0x100>;
219 +- interrupts = <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
220 ++ interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
221 + status = "disabled";
222 + };
223 +
224 + twd_timer: timer@b0020600 {
225 + compatible = "arm,cortex-a9-twd-timer";
226 + reg = <0xb0020600 0x20>;
227 +- interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
228 ++ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
229 + status = "disabled";
230 + };
231 +
232 + twd_wdt: wdt@b0020620 {
233 + compatible = "arm,cortex-a9-twd-wdt";
234 + reg = <0xb0020620 0xe0>;
235 +- interrupts = <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
236 ++ interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
237 + status = "disabled";
238 + };
239 +
240 +diff --git a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
241 +index 5700e6b700d36..b85025d009437 100644
242 +--- a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
243 ++++ b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
244 +@@ -121,8 +121,6 @@
245 + reset-gpios = <&gpiog 0 GPIO_ACTIVE_LOW>; /* ETH_RST# */
246 + interrupt-parent = <&gpioa>;
247 + interrupts = <6 IRQ_TYPE_EDGE_FALLING>; /* ETH_MDINT# */
248 +- rxc-skew-ps = <1860>;
249 +- txc-skew-ps = <1860>;
250 + reset-assert-us = <10000>;
251 + reset-deassert-us = <300>;
252 + micrel,force-master;
253 +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
254 +index 7c4bd615b3115..e4e3c92eb30d3 100644
255 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
256 ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
257 +@@ -11,7 +11,6 @@
258 + serial0 = &uart4;
259 + serial1 = &usart3;
260 + serial2 = &uart8;
261 +- ethernet0 = &ethernet0;
262 + };
263 +
264 + chosen {
265 +@@ -26,23 +25,13 @@
266 +
267 + display_bl: display-bl {
268 + compatible = "pwm-backlight";
269 +- pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
270 ++ pwms = <&pwm2 3 500000 PWM_POLARITY_INVERTED>;
271 + brightness-levels = <0 16 22 30 40 55 75 102 138 188 255>;
272 + default-brightness-level = <8>;
273 + enable-gpios = <&gpioi 0 GPIO_ACTIVE_HIGH>;
274 + status = "okay";
275 + };
276 +
277 +- ethernet_vio: vioregulator {
278 +- compatible = "regulator-fixed";
279 +- regulator-name = "vio";
280 +- regulator-min-microvolt = <3300000>;
281 +- regulator-max-microvolt = <3300000>;
282 +- gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
283 +- regulator-always-on;
284 +- regulator-boot-on;
285 +- };
286 +-
287 + gpio-keys-polled {
288 + compatible = "gpio-keys-polled";
289 + #size-cells = <0>;
290 +@@ -141,28 +130,6 @@
291 + status = "okay";
292 + };
293 +
294 +-&ethernet0 {
295 +- status = "okay";
296 +- pinctrl-0 = <&ethernet0_rmii_pins_a>;
297 +- pinctrl-1 = <&ethernet0_rmii_sleep_pins_a>;
298 +- pinctrl-names = "default", "sleep";
299 +- phy-mode = "rmii";
300 +- max-speed = <100>;
301 +- phy-handle = <&phy0>;
302 +- st,eth-ref-clk-sel;
303 +- phy-reset-gpios = <&gpioh 15 GPIO_ACTIVE_LOW>;
304 +-
305 +- mdio0 {
306 +- #address-cells = <1>;
307 +- #size-cells = <0>;
308 +- compatible = "snps,dwmac-mdio";
309 +-
310 +- phy0: ethernet-phy@1 {
311 +- reg = <1>;
312 +- };
313 +- };
314 +-};
315 +-
316 + &i2c2 { /* Header X22 */
317 + pinctrl-names = "default";
318 + pinctrl-0 = <&i2c2_pins_a>;
319 +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
320 +index ba905196fb549..a87ebc4843963 100644
321 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
322 ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
323 +@@ -9,6 +9,10 @@
324 + #include <dt-bindings/mfd/st,stpmic1.h>
325 +
326 + / {
327 ++ aliases {
328 ++ ethernet0 = &ethernet0;
329 ++ };
330 ++
331 + memory@c0000000 {
332 + device_type = "memory";
333 + reg = <0xC0000000 0x40000000>;
334 +@@ -55,6 +59,16 @@
335 + no-map;
336 + };
337 + };
338 ++
339 ++ ethernet_vio: vioregulator {
340 ++ compatible = "regulator-fixed";
341 ++ regulator-name = "vio";
342 ++ regulator-min-microvolt = <3300000>;
343 ++ regulator-max-microvolt = <3300000>;
344 ++ gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
345 ++ regulator-always-on;
346 ++ regulator-boot-on;
347 ++ };
348 + };
349 +
350 + &adc {
351 +@@ -94,6 +108,28 @@
352 + status = "okay";
353 + };
354 +
355 ++&ethernet0 {
356 ++ status = "okay";
357 ++ pinctrl-0 = <&ethernet0_rmii_pins_a>;
358 ++ pinctrl-1 = <&ethernet0_rmii_sleep_pins_a>;
359 ++ pinctrl-names = "default", "sleep";
360 ++ phy-mode = "rmii";
361 ++ max-speed = <100>;
362 ++ phy-handle = <&phy0>;
363 ++ st,eth-ref-clk-sel;
364 ++ phy-reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>;
365 ++
366 ++ mdio0 {
367 ++ #address-cells = <1>;
368 ++ #size-cells = <0>;
369 ++ compatible = "snps,dwmac-mdio";
370 ++
371 ++ phy0: ethernet-phy@1 {
372 ++ reg = <1>;
373 ++ };
374 ++ };
375 ++};
376 ++
377 + &i2c4 {
378 + pinctrl-names = "default";
379 + pinctrl-0 = <&i2c4_pins_a>;
380 +@@ -249,7 +285,7 @@
381 + compatible = "ti,tsc2004";
382 + reg = <0x49>;
383 + vio-supply = <&v3v3>;
384 +- interrupts-extended = <&gpioh 3 IRQ_TYPE_EDGE_FALLING>;
385 ++ interrupts-extended = <&gpioh 15 IRQ_TYPE_EDGE_FALLING>;
386 + };
387 +
388 + eeprom@50 {
389 +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
390 +index 930202742a3f6..905cd7bb98cf0 100644
391 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
392 ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
393 +@@ -295,9 +295,9 @@
394 +
395 + &sdmmc2 {
396 + pinctrl-names = "default", "opendrain", "sleep";
397 +- pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_b>;
398 +- pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_b>;
399 +- pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_b>;
400 ++ pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_c>;
401 ++ pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_c>;
402 ++ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_c>;
403 + bus-width = <8>;
404 + mmc-ddr-1_8v;
405 + no-sd;
406 +diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
407 +index 42d62d1ba1dc7..ea15073f0c79c 100644
408 +--- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
409 ++++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
410 +@@ -223,16 +223,16 @@
411 + };
412 +
413 + &reg_dc1sw {
414 +- regulator-min-microvolt = <3000000>;
415 +- regulator-max-microvolt = <3000000>;
416 ++ regulator-min-microvolt = <3300000>;
417 ++ regulator-max-microvolt = <3300000>;
418 + regulator-name = "vcc-gmac-phy";
419 + };
420 +
421 + &reg_dcdc1 {
422 + regulator-always-on;
423 +- regulator-min-microvolt = <3000000>;
424 +- regulator-max-microvolt = <3000000>;
425 +- regulator-name = "vcc-3v0";
426 ++ regulator-min-microvolt = <3300000>;
427 ++ regulator-max-microvolt = <3300000>;
428 ++ regulator-name = "vcc-3v3";
429 + };
430 +
431 + &reg_dcdc2 {
432 +diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
433 +index 2aab043441e8f..eae8aaaadc3bf 100644
434 +--- a/arch/arm/mach-at91/pm.c
435 ++++ b/arch/arm/mach-at91/pm.c
436 +@@ -800,6 +800,7 @@ static void __init at91_pm_init(void (*pm_idle)(void))
437 +
438 + pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
439 + soc_pm.data.pmc = of_iomap(pmc_np, 0);
440 ++ of_node_put(pmc_np);
441 + if (!soc_pm.data.pmc) {
442 + pr_err("AT91: PM not supported, PMC not found\n");
443 + return;
444 +diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
445 +index 6f5f89711f256..a92d277f81a08 100644
446 +--- a/arch/arm/mach-omap2/cpuidle44xx.c
447 ++++ b/arch/arm/mach-omap2/cpuidle44xx.c
448 +@@ -174,8 +174,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
449 + */
450 + if (mpuss_can_lose_context) {
451 + error = cpu_cluster_pm_enter();
452 +- if (error)
453 ++ if (error) {
454 ++ omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
455 + goto cpu_cluster_pm_out;
456 ++ }
457 + }
458 + }
459 +
460 +diff --git a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c
461 +index 58c5ef3cf1d7e..2d370f7f75fa2 100644
462 +--- a/arch/arm/mach-s3c24xx/mach-at2440evb.c
463 ++++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c
464 +@@ -143,7 +143,7 @@ static struct gpiod_lookup_table at2440evb_mci_gpio_table = {
465 + .dev_id = "s3c2410-sdi",
466 + .table = {
467 + /* Card detect S3C2410_GPG(10) */
468 +- GPIO_LOOKUP("GPG", 10, "cd", GPIO_ACTIVE_LOW),
469 ++ GPIO_LOOKUP("GPIOG", 10, "cd", GPIO_ACTIVE_LOW),
470 + { },
471 + },
472 + };
473 +diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c
474 +index e1c372e5447b6..82cc37513779c 100644
475 +--- a/arch/arm/mach-s3c24xx/mach-h1940.c
476 ++++ b/arch/arm/mach-s3c24xx/mach-h1940.c
477 +@@ -468,9 +468,9 @@ static struct gpiod_lookup_table h1940_mmc_gpio_table = {
478 + .dev_id = "s3c2410-sdi",
479 + .table = {
480 + /* Card detect S3C2410_GPF(5) */
481 +- GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
482 ++ GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
483 + /* Write protect S3C2410_GPH(8) */
484 +- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
485 ++ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
486 + { },
487 + },
488 + };
489 +diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c
490 +index 9035f868fb34e..3a5b1124037b2 100644
491 +--- a/arch/arm/mach-s3c24xx/mach-mini2440.c
492 ++++ b/arch/arm/mach-s3c24xx/mach-mini2440.c
493 +@@ -244,9 +244,9 @@ static struct gpiod_lookup_table mini2440_mmc_gpio_table = {
494 + .dev_id = "s3c2410-sdi",
495 + .table = {
496 + /* Card detect S3C2410_GPG(8) */
497 +- GPIO_LOOKUP("GPG", 8, "cd", GPIO_ACTIVE_LOW),
498 ++ GPIO_LOOKUP("GPIOG", 8, "cd", GPIO_ACTIVE_LOW),
499 + /* Write protect S3C2410_GPH(8) */
500 +- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_HIGH),
501 ++ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_HIGH),
502 + { },
503 + },
504 + };
505 +diff --git a/arch/arm/mach-s3c24xx/mach-n30.c b/arch/arm/mach-s3c24xx/mach-n30.c
506 +index d856f23939aff..ffa20f52aa832 100644
507 +--- a/arch/arm/mach-s3c24xx/mach-n30.c
508 ++++ b/arch/arm/mach-s3c24xx/mach-n30.c
509 +@@ -359,9 +359,9 @@ static struct gpiod_lookup_table n30_mci_gpio_table = {
510 + .dev_id = "s3c2410-sdi",
511 + .table = {
512 + /* Card detect S3C2410_GPF(1) */
513 +- GPIO_LOOKUP("GPF", 1, "cd", GPIO_ACTIVE_LOW),
514 ++ GPIO_LOOKUP("GPIOF", 1, "cd", GPIO_ACTIVE_LOW),
515 + /* Write protect S3C2410_GPG(10) */
516 +- GPIO_LOOKUP("GPG", 10, "wp", GPIO_ACTIVE_LOW),
517 ++ GPIO_LOOKUP("GPIOG", 10, "wp", GPIO_ACTIVE_LOW),
518 + { },
519 + },
520 + };
521 +diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
522 +index fde98b175c752..c0a06f123cfea 100644
523 +--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
524 ++++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
525 +@@ -571,9 +571,9 @@ static struct gpiod_lookup_table rx1950_mmc_gpio_table = {
526 + .dev_id = "s3c2410-sdi",
527 + .table = {
528 + /* Card detect S3C2410_GPF(5) */
529 +- GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
530 ++ GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
531 + /* Write protect S3C2410_GPH(8) */
532 +- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
533 ++ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
534 + { },
535 + },
536 + };
537 +diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
538 +index 12c26eb88afbc..43d91bfd23600 100644
539 +--- a/arch/arm/mm/cache-l2x0.c
540 ++++ b/arch/arm/mm/cache-l2x0.c
541 +@@ -1249,20 +1249,28 @@ static void __init l2c310_of_parse(const struct device_node *np,
542 +
543 + ret = of_property_read_u32(np, "prefetch-data", &val);
544 + if (ret == 0) {
545 +- if (val)
546 ++ if (val) {
547 + prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
548 +- else
549 ++ *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
550 ++ } else {
551 + prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
552 ++ *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
553 ++ }
554 ++ *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
555 + } else if (ret != -EINVAL) {
556 + pr_err("L2C-310 OF prefetch-data property value is missing\n");
557 + }
558 +
559 + ret = of_property_read_u32(np, "prefetch-instr", &val);
560 + if (ret == 0) {
561 +- if (val)
562 ++ if (val) {
563 + prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
564 +- else
565 ++ *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
566 ++ } else {
567 + prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
568 ++ *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
569 ++ }
570 ++ *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
571 + } else if (ret != -EINVAL) {
572 + pr_err("L2C-310 OF prefetch-instr property value is missing\n");
573 + }
574 +diff --git a/arch/arm64/boot/dts/actions/s700.dtsi b/arch/arm64/boot/dts/actions/s700.dtsi
575 +index 2006ad5424fa6..f8eb72bb41254 100644
576 +--- a/arch/arm64/boot/dts/actions/s700.dtsi
577 ++++ b/arch/arm64/boot/dts/actions/s700.dtsi
578 +@@ -231,7 +231,7 @@
579 +
580 + pinctrl: pinctrl@e01b0000 {
581 + compatible = "actions,s700-pinctrl";
582 +- reg = <0x0 0xe01b0000 0x0 0x1000>;
583 ++ reg = <0x0 0xe01b0000 0x0 0x100>;
584 + clocks = <&cmu CLK_GPIO>;
585 + gpio-controller;
586 + gpio-ranges = <&pinctrl 0 0 136>;
587 +diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
588 +index 4462a68c06815..cdc4209f94d0e 100644
589 +--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
590 ++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
591 +@@ -125,8 +125,7 @@
592 + <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
593 + <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
594 + <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
595 +- <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
596 +- <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
597 ++ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
598 + interrupt-names = "gp",
599 + "gpmmu",
600 + "pp",
601 +@@ -137,8 +136,7 @@
602 + "pp2",
603 + "ppmmu2",
604 + "pp3",
605 +- "ppmmu3",
606 +- "pmu";
607 ++ "ppmmu3";
608 + clocks = <&ccu CLK_BUS_GPU>, <&ccu CLK_GPU>;
609 + clock-names = "bus", "core";
610 + resets = <&ccu RST_BUS_GPU>;
611 +diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
612 +index ff5ba85b7562e..833bbc3359c44 100644
613 +--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
614 ++++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
615 +@@ -41,13 +41,13 @@
616 +
617 + led-white {
618 + label = "vim3:white:sys";
619 +- gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>;
620 ++ gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_HIGH>;
621 + linux,default-trigger = "heartbeat";
622 + };
623 +
624 + led-red {
625 + label = "vim3:red";
626 +- gpios = <&gpio_expander 5 GPIO_ACTIVE_LOW>;
627 ++ gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>;
628 + };
629 + };
630 +
631 +diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
632 +index 66ac66856e7e8..077e12a0de3f9 100644
633 +--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
634 ++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
635 +@@ -614,6 +614,7 @@
636 + gpc: gpc@303a0000 {
637 + compatible = "fsl,imx8mq-gpc";
638 + reg = <0x303a0000 0x10000>;
639 ++ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
640 + interrupt-parent = <&gic>;
641 + interrupt-controller;
642 + #interrupt-cells = <3>;
643 +diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
644 +index a5a12b2599a4a..01522dd10603e 100644
645 +--- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
646 ++++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
647 +@@ -431,12 +431,11 @@
648 + status = "okay";
649 + pinctrl-names = "default";
650 + pinctrl-0 = <&nor_gpio1_pins>;
651 +- bus-width = <8>;
652 +- max-frequency = <50000000>;
653 +- non-removable;
654 ++
655 + flash@0 {
656 + compatible = "jedec,spi-nor";
657 + reg = <0>;
658 ++ spi-max-frequency = <50000000>;
659 + };
660 + };
661 +
662 +diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
663 +index 32bd140ac9fd4..103d2226c579b 100644
664 +--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
665 ++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
666 +@@ -228,14 +228,14 @@
667 + };
668 +
669 + thermal-zones {
670 +- cpu0_1-thermal {
671 ++ cpu0-1-thermal {
672 + polling-delay-passive = <250>;
673 + polling-delay = <1000>;
674 +
675 + thermal-sensors = <&tsens 5>;
676 +
677 + trips {
678 +- cpu0_1_alert0: trip-point@0 {
679 ++ cpu0_1_alert0: trip-point0 {
680 + temperature = <75000>;
681 + hysteresis = <2000>;
682 + type = "passive";
683 +@@ -258,7 +258,7 @@
684 + };
685 + };
686 +
687 +- cpu2_3-thermal {
688 ++ cpu2-3-thermal {
689 + polling-delay-passive = <250>;
690 + polling-delay = <1000>;
691 +
692 +@@ -1021,7 +1021,7 @@
693 + reg-names = "mdp_phys";
694 +
695 + interrupt-parent = <&mdss>;
696 +- interrupts = <0 0>;
697 ++ interrupts = <0>;
698 +
699 + clocks = <&gcc GCC_MDSS_AHB_CLK>,
700 + <&gcc GCC_MDSS_AXI_CLK>,
701 +@@ -1053,7 +1053,7 @@
702 + reg-names = "dsi_ctrl";
703 +
704 + interrupt-parent = <&mdss>;
705 +- interrupts = <4 0>;
706 ++ interrupts = <4>;
707 +
708 + assigned-clocks = <&gcc BYTE0_CLK_SRC>,
709 + <&gcc PCLK0_CLK_SRC>;
710 +diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
711 +index 0bcdf04711079..adf9a5988cdc2 100644
712 +--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
713 ++++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
714 +@@ -119,7 +119,7 @@
715 +
716 + wcd_codec: codec@f000 {
717 + compatible = "qcom,pm8916-wcd-analog-codec";
718 +- reg = <0xf000 0x200>;
719 ++ reg = <0xf000>;
720 + reg-names = "pmic-codec-core";
721 + clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
722 + clock-names = "mclk";
723 +diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
724 +index 31b9217bb5bfe..7f1b75b2bcee3 100644
725 +--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
726 ++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
727 +@@ -2193,7 +2193,7 @@
728 +
729 + system-cache-controller@9200000 {
730 + compatible = "qcom,sc7180-llcc";
731 +- reg = <0 0x09200000 0 0x200000>, <0 0x09600000 0 0x50000>;
732 ++ reg = <0 0x09200000 0 0x50000>, <0 0x09600000 0 0x50000>;
733 + reg-names = "llcc_base", "llcc_broadcast_base";
734 + interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
735 + };
736 +@@ -2357,7 +2357,7 @@
737 + <19200000>;
738 +
739 + interrupt-parent = <&mdss>;
740 +- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
741 ++ interrupts = <0>;
742 +
743 + status = "disabled";
744 +
745 +@@ -2380,7 +2380,7 @@
746 + reg-names = "dsi_ctrl";
747 +
748 + interrupt-parent = <&mdss>;
749 +- interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
750 ++ interrupts = <4>;
751 +
752 + clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
753 + <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
754 +diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
755 +index 42171190cce46..065e8fe3a071c 100644
756 +--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
757 ++++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
758 +@@ -1214,9 +1214,8 @@
759 + reg = <0 0xe6ea0000 0 0x0064>;
760 + interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
761 + clocks = <&cpg CPG_MOD 210>;
762 +- dmas = <&dmac1 0x43>, <&dmac1 0x42>,
763 +- <&dmac2 0x43>, <&dmac2 0x42>;
764 +- dma-names = "tx", "rx", "tx", "rx";
765 ++ dmas = <&dmac0 0x43>, <&dmac0 0x42>;
766 ++ dma-names = "tx", "rx";
767 + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
768 + resets = <&cpg 210>;
769 + #address-cells = <1>;
770 +diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
771 +index 1991bdc36792f..27f74df8efbde 100644
772 +--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
773 ++++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
774 +@@ -1192,9 +1192,8 @@
775 + reg = <0 0xe6ea0000 0 0x0064>;
776 + interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
777 + clocks = <&cpg CPG_MOD 210>;
778 +- dmas = <&dmac1 0x43>, <&dmac1 0x42>,
779 +- <&dmac2 0x43>, <&dmac2 0x42>;
780 +- dma-names = "tx", "rx", "tx", "rx";
781 ++ dmas = <&dmac0 0x43>, <&dmac0 0x42>;
782 ++ dma-names = "tx", "rx";
783 + power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
784 + resets = <&cpg 210>;
785 + #address-cells = <1>;
786 +diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
787 +index 9174ddc76bdc3..b8d04c5748bf3 100644
788 +--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
789 ++++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
790 +@@ -500,7 +500,7 @@
791 + };
792 +
793 + i2c0: i2c@ff020000 {
794 +- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
795 ++ compatible = "cdns,i2c-r1p14";
796 + status = "disabled";
797 + interrupt-parent = <&gic>;
798 + interrupts = <0 17 4>;
799 +@@ -511,7 +511,7 @@
800 + };
801 +
802 + i2c1: i2c@ff030000 {
803 +- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
804 ++ compatible = "cdns,i2c-r1p14";
805 + status = "disabled";
806 + interrupt-parent = <&gic>;
807 + interrupts = <0 18 4>;
808 +diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
809 +index 0bc46149e4917..4b39293d0f72d 100644
810 +--- a/arch/arm64/include/asm/insn.h
811 ++++ b/arch/arm64/include/asm/insn.h
812 +@@ -359,9 +359,13 @@ __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
813 + __AARCH64_INSN_FUNCS(exception, 0xFF000000, 0xD4000000)
814 + __AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
815 + __AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000)
816 ++__AARCH64_INSN_FUNCS(br_auth, 0xFEFFF800, 0xD61F0800)
817 + __AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000)
818 ++__AARCH64_INSN_FUNCS(blr_auth, 0xFEFFF800, 0xD63F0800)
819 + __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
820 ++__AARCH64_INSN_FUNCS(ret_auth, 0xFFFFFBFF, 0xD65F0BFF)
821 + __AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0)
822 ++__AARCH64_INSN_FUNCS(eret_auth, 0xFFFFFBFF, 0xD69F0BFF)
823 + __AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000)
824 + __AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F)
825 + __AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)
826 +diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
827 +index a1871bb32bb17..d207f63eb68e1 100644
828 +--- a/arch/arm64/include/asm/memory.h
829 ++++ b/arch/arm64/include/asm/memory.h
830 +@@ -163,7 +163,6 @@ extern u64 vabits_actual;
831 + #include <linux/bitops.h>
832 + #include <linux/mmdebug.h>
833 +
834 +-extern s64 physvirt_offset;
835 + extern s64 memstart_addr;
836 + /* PHYS_OFFSET - the physical address of the start of memory. */
837 + #define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
838 +@@ -239,7 +238,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
839 + */
840 + #define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
841 +
842 +-#define __lm_to_phys(addr) (((addr) + physvirt_offset))
843 ++#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
844 + #define __kimg_to_phys(addr) ((addr) - kimage_voffset)
845 +
846 + #define __virt_to_phys_nodebug(x) ({ \
847 +@@ -257,7 +256,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
848 + #define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
849 + #endif /* CONFIG_DEBUG_VIRTUAL */
850 +
851 +-#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset))
852 ++#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
853 + #define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
854 +
855 + /*
856 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
857 +index 758e2d1577d0c..a1745d6ea4b58 100644
858 +--- a/arch/arm64/include/asm/pgtable.h
859 ++++ b/arch/arm64/include/asm/pgtable.h
860 +@@ -23,6 +23,8 @@
861 + #define VMALLOC_START (MODULES_END)
862 + #define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
863 +
864 ++#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
865 ++
866 + #define FIRST_USER_ADDRESS 0UL
867 +
868 + #ifndef __ASSEMBLY__
869 +@@ -33,8 +35,6 @@
870 + #include <linux/mm_types.h>
871 + #include <linux/sched.h>
872 +
873 +-extern struct page *vmemmap;
874 +-
875 + extern void __pte_error(const char *file, int line, unsigned long val);
876 + extern void __pmd_error(const char *file, int line, unsigned long val);
877 + extern void __pud_error(const char *file, int line, unsigned long val);
878 +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
879 +index 422ed2e38a6c8..6e8a7eec667e8 100644
880 +--- a/arch/arm64/kernel/cpu_errata.c
881 ++++ b/arch/arm64/kernel/cpu_errata.c
882 +@@ -234,14 +234,17 @@ static int detect_harden_bp_fw(void)
883 + smccc_end = NULL;
884 + break;
885 +
886 +-#if IS_ENABLED(CONFIG_KVM)
887 + case SMCCC_CONDUIT_SMC:
888 + cb = call_smc_arch_workaround_1;
889 ++#if IS_ENABLED(CONFIG_KVM)
890 + smccc_start = __smccc_workaround_1_smc;
891 + smccc_end = __smccc_workaround_1_smc +
892 + __SMCCC_WORKAROUND_1_SMC_SZ;
893 +- break;
894 ++#else
895 ++ smccc_start = NULL;
896 ++ smccc_end = NULL;
897 + #endif
898 ++ break;
899 +
900 + default:
901 + return -1;
902 +diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
903 +index a107375005bc9..ccc8c9e22b258 100644
904 +--- a/arch/arm64/kernel/insn.c
905 ++++ b/arch/arm64/kernel/insn.c
906 +@@ -176,7 +176,7 @@ bool __kprobes aarch64_insn_uses_literal(u32 insn)
907 +
908 + bool __kprobes aarch64_insn_is_branch(u32 insn)
909 + {
910 +- /* b, bl, cb*, tb*, b.cond, br, blr */
911 ++ /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */
912 +
913 + return aarch64_insn_is_b(insn) ||
914 + aarch64_insn_is_bl(insn) ||
915 +@@ -185,8 +185,11 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
916 + aarch64_insn_is_tbz(insn) ||
917 + aarch64_insn_is_tbnz(insn) ||
918 + aarch64_insn_is_ret(insn) ||
919 ++ aarch64_insn_is_ret_auth(insn) ||
920 + aarch64_insn_is_br(insn) ||
921 ++ aarch64_insn_is_br_auth(insn) ||
922 + aarch64_insn_is_blr(insn) ||
923 ++ aarch64_insn_is_blr_auth(insn) ||
924 + aarch64_insn_is_bcond(insn);
925 + }
926 +
927 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
928 +index 581602413a130..c26d84ff0e224 100644
929 +--- a/arch/arm64/kernel/perf_event.c
930 ++++ b/arch/arm64/kernel/perf_event.c
931 +@@ -510,6 +510,11 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
932 +
933 + static inline void armv8pmu_enable_counter(u32 mask)
934 + {
935 ++ /*
936 ++ * Make sure event configuration register writes are visible before we
937 ++ * enable the counter.
938 ++ * */
939 ++ isb();
940 + write_sysreg(mask, pmcntenset_el0);
941 + }
942 +
943 +diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
944 +index 263d5fba4c8a3..c541fb48886e3 100644
945 +--- a/arch/arm64/kernel/probes/decode-insn.c
946 ++++ b/arch/arm64/kernel/probes/decode-insn.c
947 +@@ -29,7 +29,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
948 + aarch64_insn_is_msr_imm(insn) ||
949 + aarch64_insn_is_msr_reg(insn) ||
950 + aarch64_insn_is_exception(insn) ||
951 +- aarch64_insn_is_eret(insn))
952 ++ aarch64_insn_is_eret(insn) ||
953 ++ aarch64_insn_is_eret_auth(insn))
954 + return false;
955 +
956 + /*
957 +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
958 +index 1e93cfc7c47ad..ca4410eb230a3 100644
959 +--- a/arch/arm64/mm/init.c
960 ++++ b/arch/arm64/mm/init.c
961 +@@ -54,12 +54,6 @@
962 + s64 memstart_addr __ro_after_init = -1;
963 + EXPORT_SYMBOL(memstart_addr);
964 +
965 +-s64 physvirt_offset __ro_after_init;
966 +-EXPORT_SYMBOL(physvirt_offset);
967 +-
968 +-struct page *vmemmap __ro_after_init;
969 +-EXPORT_SYMBOL(vmemmap);
970 +-
971 + /*
972 + * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
973 + * memory as some devices, namely the Raspberry Pi 4, have peripherals with
974 +@@ -290,20 +284,6 @@ void __init arm64_memblock_init(void)
975 + memstart_addr = round_down(memblock_start_of_DRAM(),
976 + ARM64_MEMSTART_ALIGN);
977 +
978 +- physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
979 +-
980 +- vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
981 +-
982 +- /*
983 +- * If we are running with a 52-bit kernel VA config on a system that
984 +- * does not support it, we have to offset our vmemmap and physvirt_offset
985 +- * s.t. we avoid the 52-bit portion of the direct linear map
986 +- */
987 +- if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
988 +- vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
989 +- physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
990 +- }
991 +-
992 + /*
993 + * Remove the memory that we will not be able to cover with the
994 + * linear mapping. Take care not to clip the kernel which may be
995 +@@ -318,6 +298,16 @@ void __init arm64_memblock_init(void)
996 + memblock_remove(0, memstart_addr);
997 + }
998 +
999 ++ /*
1000 ++ * If we are running with a 52-bit kernel VA config on a system that
1001 ++ * does not support it, we have to place the available physical
1002 ++ * memory in the 48-bit addressable part of the linear region, i.e.,
1003 ++ * we have to move it upward. Since memstart_addr represents the
1004 ++ * physical address of PAGE_OFFSET, we have to *subtract* from it.
1005 ++ */
1006 ++ if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
1007 ++ memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
1008 ++
1009 + /*
1010 + * Apply the memory limit if it was set. Since the kernel may be loaded
1011 + * high up in memory, add back the kernel region that must be accessible
1012 +diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
1013 +index 9ef4ec0aea008..59f7dfe50a4d0 100644
1014 +--- a/arch/m68k/coldfire/device.c
1015 ++++ b/arch/m68k/coldfire/device.c
1016 +@@ -554,7 +554,7 @@ static struct platform_device mcf_edma = {
1017 + };
1018 + #endif /* IS_ENABLED(CONFIG_MCF_EDMA) */
1019 +
1020 +-#if IS_ENABLED(CONFIG_MMC)
1021 ++#ifdef MCFSDHC_BASE
1022 + static struct mcf_esdhc_platform_data mcf_esdhc_data = {
1023 + .max_bus_width = 4,
1024 + .cd_type = ESDHC_CD_NONE,
1025 +@@ -579,7 +579,7 @@ static struct platform_device mcf_esdhc = {
1026 + .resource = mcf_esdhc_resources,
1027 + .dev.platform_data = &mcf_esdhc_data,
1028 + };
1029 +-#endif /* IS_ENABLED(CONFIG_MMC) */
1030 ++#endif /* MCFSDHC_BASE */
1031 +
1032 + static struct platform_device *mcf_devices[] __initdata = {
1033 + &mcf_uart,
1034 +@@ -613,7 +613,7 @@ static struct platform_device *mcf_devices[] __initdata = {
1035 + #if IS_ENABLED(CONFIG_MCF_EDMA)
1036 + &mcf_edma,
1037 + #endif
1038 +-#if IS_ENABLED(CONFIG_MMC)
1039 ++#ifdef MCFSDHC_BASE
1040 + &mcf_esdhc,
1041 + #endif
1042 + };
1043 +diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
1044 +index 2e87a9b6d312f..63bce836b9f10 100644
1045 +--- a/arch/microblaze/include/asm/Kbuild
1046 ++++ b/arch/microblaze/include/asm/Kbuild
1047 +@@ -1,7 +1,6 @@
1048 + # SPDX-License-Identifier: GPL-2.0
1049 + generated-y += syscall_table.h
1050 + generic-y += extable.h
1051 +-generic-y += hw_irq.h
1052 + generic-y += kvm_para.h
1053 + generic-y += local64.h
1054 + generic-y += mcs_spinlock.h
1055 +diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
1056 +index 3f9ae3585ab98..80c9534148821 100644
1057 +--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
1058 ++++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
1059 +@@ -13,20 +13,19 @@
1060 + */
1061 + #define MAX_EA_BITS_PER_CONTEXT 46
1062 +
1063 +-#define REGION_SHIFT (MAX_EA_BITS_PER_CONTEXT - 2)
1064 +
1065 + /*
1066 +- * Our page table limit us to 64TB. Hence for the kernel mapping,
1067 +- * each MAP area is limited to 16 TB.
1068 +- * The four map areas are: linear mapping, vmap, IO and vmemmap
1069 ++ * Our page table limit us to 64TB. For 64TB physical memory, we only need 64GB
1070 ++ * of vmemmap space. To better support sparse memory layout, we use 61TB
1071 ++ * linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmememmap.
1072 + */
1073 ++#define REGION_SHIFT (40)
1074 + #define H_KERN_MAP_SIZE (ASM_CONST(1) << REGION_SHIFT)
1075 +
1076 + /*
1077 +- * Define the address range of the kernel non-linear virtual area
1078 +- * 16TB
1079 ++ * Define the address range of the kernel non-linear virtual area (61TB)
1080 + */
1081 +-#define H_KERN_VIRT_START ASM_CONST(0xc000100000000000)
1082 ++#define H_KERN_VIRT_START ASM_CONST(0xc0003d0000000000)
1083 +
1084 + #ifndef __ASSEMBLY__
1085 + #define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
1086 +diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
1087 +index 414d209f45bbe..c711fe8901109 100644
1088 +--- a/arch/powerpc/include/asm/drmem.h
1089 ++++ b/arch/powerpc/include/asm/drmem.h
1090 +@@ -8,14 +8,13 @@
1091 + #ifndef _ASM_POWERPC_LMB_H
1092 + #define _ASM_POWERPC_LMB_H
1093 +
1094 ++#include <linux/sched.h>
1095 ++
1096 + struct drmem_lmb {
1097 + u64 base_addr;
1098 + u32 drc_index;
1099 + u32 aa_index;
1100 + u32 flags;
1101 +-#ifdef CONFIG_MEMORY_HOTPLUG
1102 +- int nid;
1103 +-#endif
1104 + };
1105 +
1106 + struct drmem_lmb_info {
1107 +@@ -26,8 +25,22 @@ struct drmem_lmb_info {
1108 +
1109 + extern struct drmem_lmb_info *drmem_info;
1110 +
1111 ++static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
1112 ++ const struct drmem_lmb *start)
1113 ++{
1114 ++ /*
1115 ++ * DLPAR code paths can take several milliseconds per element
1116 ++ * when interacting with firmware. Ensure that we don't
1117 ++ * unfairly monopolize the CPU.
1118 ++ */
1119 ++ if (((++lmb - start) % 16) == 0)
1120 ++ cond_resched();
1121 ++
1122 ++ return lmb;
1123 ++}
1124 ++
1125 + #define for_each_drmem_lmb_in_range(lmb, start, end) \
1126 +- for ((lmb) = (start); (lmb) < (end); (lmb)++)
1127 ++ for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
1128 +
1129 + #define for_each_drmem_lmb(lmb) \
1130 + for_each_drmem_lmb_in_range((lmb), \
1131 +@@ -104,22 +117,4 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
1132 + lmb->aa_index = 0xffffffff;
1133 + }
1134 +
1135 +-#ifdef CONFIG_MEMORY_HOTPLUG
1136 +-static inline void lmb_set_nid(struct drmem_lmb *lmb)
1137 +-{
1138 +- lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr);
1139 +-}
1140 +-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
1141 +-{
1142 +- lmb->nid = -1;
1143 +-}
1144 +-#else
1145 +-static inline void lmb_set_nid(struct drmem_lmb *lmb)
1146 +-{
1147 +-}
1148 +-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
1149 +-{
1150 +-}
1151 +-#endif
1152 +-
1153 + #endif /* _ASM_POWERPC_LMB_H */
1154 +diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
1155 +index cb424799da0dc..5a00da670a407 100644
1156 +--- a/arch/powerpc/include/asm/hw_breakpoint.h
1157 ++++ b/arch/powerpc/include/asm/hw_breakpoint.h
1158 +@@ -40,6 +40,7 @@ struct arch_hw_breakpoint {
1159 + #else
1160 + #define HW_BREAKPOINT_SIZE 0x8
1161 + #endif
1162 ++#define HW_BREAKPOINT_SIZE_QUADWORD 0x10
1163 +
1164 + #define DABR_MAX_LEN 8
1165 + #define DAWR_MAX_LEN 512
1166 +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
1167 +index 88e6c78100d9b..c750afc62887c 100644
1168 +--- a/arch/powerpc/include/asm/reg.h
1169 ++++ b/arch/powerpc/include/asm/reg.h
1170 +@@ -815,7 +815,7 @@
1171 + #define THRM1_TIN (1 << 31)
1172 + #define THRM1_TIV (1 << 30)
1173 + #define THRM1_THRES(x) ((x&0x7f)<<23)
1174 +-#define THRM3_SITV(x) ((x&0x3fff)<<1)
1175 ++#define THRM3_SITV(x) ((x & 0x1fff) << 1)
1176 + #define THRM1_TID (1<<2)
1177 + #define THRM1_TIE (1<<1)
1178 + #define THRM1_V (1<<0)
1179 +diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h
1180 +index 85580b30aba48..7546402d796af 100644
1181 +--- a/arch/powerpc/include/asm/svm.h
1182 ++++ b/arch/powerpc/include/asm/svm.h
1183 +@@ -15,6 +15,8 @@ static inline bool is_secure_guest(void)
1184 + return mfmsr() & MSR_S;
1185 + }
1186 +
1187 ++void __init svm_swiotlb_init(void);
1188 ++
1189 + void dtl_cache_ctor(void *addr);
1190 + #define get_dtl_cache_ctor() (is_secure_guest() ? dtl_cache_ctor : NULL)
1191 +
1192 +@@ -25,6 +27,8 @@ static inline bool is_secure_guest(void)
1193 + return false;
1194 + }
1195 +
1196 ++static inline void svm_swiotlb_init(void) {}
1197 ++
1198 + #define get_dtl_cache_ctor() NULL
1199 +
1200 + #endif /* CONFIG_PPC_SVM */
1201 +diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
1202 +index 862985cf51804..cf87bbdcfdcb2 100644
1203 +--- a/arch/powerpc/include/asm/tlb.h
1204 ++++ b/arch/powerpc/include/asm/tlb.h
1205 +@@ -67,19 +67,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
1206 + return false;
1207 + return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
1208 + }
1209 +-static inline void mm_reset_thread_local(struct mm_struct *mm)
1210 +-{
1211 +- WARN_ON(atomic_read(&mm->context.copros) > 0);
1212 +- /*
1213 +- * It's possible for mm_access to take a reference on mm_users to
1214 +- * access the remote mm from another thread, but it's not allowed
1215 +- * to set mm_cpumask, so mm_users may be > 1 here.
1216 +- */
1217 +- WARN_ON(current->mm != mm);
1218 +- atomic_set(&mm->context.active_cpus, 1);
1219 +- cpumask_clear(mm_cpumask(mm));
1220 +- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
1221 +-}
1222 + #else /* CONFIG_PPC_BOOK3S_64 */
1223 + static inline int mm_is_thread_local(struct mm_struct *mm)
1224 + {
1225 +diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
1226 +index c55e67bab2710..2190be70c7fd9 100644
1227 +--- a/arch/powerpc/kernel/hw_breakpoint.c
1228 ++++ b/arch/powerpc/kernel/hw_breakpoint.c
1229 +@@ -519,9 +519,17 @@ static bool ea_hw_range_overlaps(unsigned long ea, int size,
1230 + struct arch_hw_breakpoint *info)
1231 + {
1232 + unsigned long hw_start_addr, hw_end_addr;
1233 ++ unsigned long align_size = HW_BREAKPOINT_SIZE;
1234 +
1235 +- hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
1236 +- hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
1237 ++ /*
1238 ++ * On p10 predecessors, quadword is handle differently then
1239 ++ * other instructions.
1240 ++ */
1241 ++ if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16)
1242 ++ align_size = HW_BREAKPOINT_SIZE_QUADWORD;
1243 ++
1244 ++ hw_start_addr = ALIGN_DOWN(info->address, align_size);
1245 ++ hw_end_addr = ALIGN(info->address + info->len, align_size);
1246 +
1247 + return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
1248 + }
1249 +@@ -635,6 +643,8 @@ static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
1250 + if (*type == CACHEOP) {
1251 + *size = cache_op_size();
1252 + *ea &= ~(*size - 1);
1253 ++ } else if (*type == LOAD_VMX || *type == STORE_VMX) {
1254 ++ *ea &= ~(*size - 1);
1255 + }
1256 + }
1257 +
1258 +diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
1259 +index 05b1cc0e009e4..3a22281a8264e 100644
1260 +--- a/arch/powerpc/kernel/irq.c
1261 ++++ b/arch/powerpc/kernel/irq.c
1262 +@@ -214,7 +214,7 @@ void replay_soft_interrupts(void)
1263 + struct pt_regs regs;
1264 +
1265 + ppc_save_regs(&regs);
1266 +- regs.softe = IRQS_ALL_DISABLED;
1267 ++ regs.softe = IRQS_ENABLED;
1268 +
1269 + again:
1270 + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
1271 +@@ -368,6 +368,12 @@ notrace void arch_local_irq_restore(unsigned long mask)
1272 + }
1273 + }
1274 +
1275 ++ /*
1276 ++ * Disable preempt here, so that the below preempt_enable will
1277 ++ * perform resched if required (a replayed interrupt may set
1278 ++ * need_resched).
1279 ++ */
1280 ++ preempt_disable();
1281 + irq_soft_mask_set(IRQS_ALL_DISABLED);
1282 + trace_hardirqs_off();
1283 +
1284 +@@ -377,6 +383,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
1285 + trace_hardirqs_on();
1286 + irq_soft_mask_set(IRQS_ENABLED);
1287 + __hard_irq_enable();
1288 ++ preempt_enable();
1289 + }
1290 + EXPORT_SYMBOL(arch_local_irq_restore);
1291 +
1292 +diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
1293 +index 697c7e4b5877f..8bd8d8de5c40b 100644
1294 +--- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c
1295 ++++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
1296 +@@ -219,6 +219,7 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf
1297 + brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE);
1298 + brk.type = HW_BRK_TYPE_TRANSLATE;
1299 + brk.len = DABR_MAX_LEN;
1300 ++ brk.hw_len = DABR_MAX_LEN;
1301 + if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
1302 + brk.type |= HW_BRK_TYPE_READ;
1303 + if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
1304 +diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
1305 +index e2ab8a111b693..0b4694b8d2482 100644
1306 +--- a/arch/powerpc/kernel/tau_6xx.c
1307 ++++ b/arch/powerpc/kernel/tau_6xx.c
1308 +@@ -13,13 +13,14 @@
1309 + */
1310 +
1311 + #include <linux/errno.h>
1312 +-#include <linux/jiffies.h>
1313 + #include <linux/kernel.h>
1314 + #include <linux/param.h>
1315 + #include <linux/string.h>
1316 + #include <linux/mm.h>
1317 + #include <linux/interrupt.h>
1318 + #include <linux/init.h>
1319 ++#include <linux/delay.h>
1320 ++#include <linux/workqueue.h>
1321 +
1322 + #include <asm/io.h>
1323 + #include <asm/reg.h>
1324 +@@ -39,9 +40,7 @@ static struct tau_temp
1325 + unsigned char grew;
1326 + } tau[NR_CPUS];
1327 +
1328 +-struct timer_list tau_timer;
1329 +-
1330 +-#undef DEBUG
1331 ++static bool tau_int_enable;
1332 +
1333 + /* TODO: put these in a /proc interface, with some sanity checks, and maybe
1334 + * dynamic adjustment to minimize # of interrupts */
1335 +@@ -50,72 +49,49 @@ struct timer_list tau_timer;
1336 + #define step_size 2 /* step size when temp goes out of range */
1337 + #define window_expand 1 /* expand the window by this much */
1338 + /* configurable values for shrinking the window */
1339 +-#define shrink_timer 2*HZ /* period between shrinking the window */
1340 ++#define shrink_timer 2000 /* period between shrinking the window */
1341 + #define min_window 2 /* minimum window size, degrees C */
1342 +
1343 + static void set_thresholds(unsigned long cpu)
1344 + {
1345 +-#ifdef CONFIG_TAU_INT
1346 +- /*
1347 +- * setup THRM1,
1348 +- * threshold, valid bit, enable interrupts, interrupt when below threshold
1349 +- */
1350 +- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
1351 ++ u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0;
1352 +
1353 +- /* setup THRM2,
1354 +- * threshold, valid bit, enable interrupts, interrupt when above threshold
1355 +- */
1356 +- mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
1357 +-#else
1358 +- /* same thing but don't enable interrupts */
1359 +- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
1360 +- mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
1361 +-#endif
1362 ++ /* setup THRM1, threshold, valid bit, interrupt when below threshold */
1363 ++ mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
1364 ++
1365 ++ /* setup THRM2, threshold, valid bit, interrupt when above threshold */
1366 ++ mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
1367 + }
1368 +
1369 + static void TAUupdate(int cpu)
1370 + {
1371 +- unsigned thrm;
1372 +-
1373 +-#ifdef DEBUG
1374 +- printk("TAUupdate ");
1375 +-#endif
1376 ++ u32 thrm;
1377 ++ u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V;
1378 +
1379 + /* if both thresholds are crossed, the step_sizes cancel out
1380 + * and the window winds up getting expanded twice. */
1381 +- if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
1382 +- if(thrm & THRM1_TIN){ /* crossed low threshold */
1383 +- if (tau[cpu].low >= step_size){
1384 +- tau[cpu].low -= step_size;
1385 +- tau[cpu].high -= (step_size - window_expand);
1386 +- }
1387 +- tau[cpu].grew = 1;
1388 +-#ifdef DEBUG
1389 +- printk("low threshold crossed ");
1390 +-#endif
1391 ++ thrm = mfspr(SPRN_THRM1);
1392 ++ if ((thrm & bits) == bits) {
1393 ++ mtspr(SPRN_THRM1, 0);
1394 ++
1395 ++ if (tau[cpu].low >= step_size) {
1396 ++ tau[cpu].low -= step_size;
1397 ++ tau[cpu].high -= (step_size - window_expand);
1398 + }
1399 ++ tau[cpu].grew = 1;
1400 ++ pr_debug("%s: low threshold crossed\n", __func__);
1401 + }
1402 +- if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
1403 +- if(thrm & THRM1_TIN){ /* crossed high threshold */
1404 +- if (tau[cpu].high <= 127-step_size){
1405 +- tau[cpu].low += (step_size - window_expand);
1406 +- tau[cpu].high += step_size;
1407 +- }
1408 +- tau[cpu].grew = 1;
1409 +-#ifdef DEBUG
1410 +- printk("high threshold crossed ");
1411 +-#endif
1412 ++ thrm = mfspr(SPRN_THRM2);
1413 ++ if ((thrm & bits) == bits) {
1414 ++ mtspr(SPRN_THRM2, 0);
1415 ++
1416 ++ if (tau[cpu].high <= 127 - step_size) {
1417 ++ tau[cpu].low += (step_size - window_expand);
1418 ++ tau[cpu].high += step_size;
1419 + }
1420 ++ tau[cpu].grew = 1;
1421 ++ pr_debug("%s: high threshold crossed\n", __func__);
1422 + }
1423 +-
1424 +-#ifdef DEBUG
1425 +- printk("grew = %d\n", tau[cpu].grew);
1426 +-#endif
1427 +-
1428 +-#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
1429 +- set_thresholds(cpu);
1430 +-#endif
1431 +-
1432 + }
1433 +
1434 + #ifdef CONFIG_TAU_INT
1435 +@@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs)
1436 + static void tau_timeout(void * info)
1437 + {
1438 + int cpu;
1439 +- unsigned long flags;
1440 + int size;
1441 + int shrink;
1442 +
1443 +- /* disabling interrupts *should* be okay */
1444 +- local_irq_save(flags);
1445 + cpu = smp_processor_id();
1446 +
1447 +-#ifndef CONFIG_TAU_INT
1448 +- TAUupdate(cpu);
1449 +-#endif
1450 ++ if (!tau_int_enable)
1451 ++ TAUupdate(cpu);
1452 ++
1453 ++ /* Stop thermal sensor comparisons and interrupts */
1454 ++ mtspr(SPRN_THRM3, 0);
1455 +
1456 + size = tau[cpu].high - tau[cpu].low;
1457 + if (size > min_window && ! tau[cpu].grew) {
1458 +@@ -173,32 +148,26 @@ static void tau_timeout(void * info)
1459 +
1460 + set_thresholds(cpu);
1461 +
1462 +- /*
1463 +- * Do the enable every time, since otherwise a bunch of (relatively)
1464 +- * complex sleep code needs to be added. One mtspr every time
1465 +- * tau_timeout is called is probably not a big deal.
1466 +- *
1467 +- * Enable thermal sensor and set up sample interval timer
1468 +- * need 20 us to do the compare.. until a nice 'cpu_speed' function
1469 +- * call is implemented, just assume a 500 mhz clock. It doesn't really
1470 +- * matter if we take too long for a compare since it's all interrupt
1471 +- * driven anyway.
1472 +- *
1473 +- * use a extra long time.. (60 us @ 500 mhz)
1474 ++ /* Restart thermal sensor comparisons and interrupts.
1475 ++ * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet"
1476 ++ * recommends that "the maximum value be set in THRM3 under all
1477 ++ * conditions."
1478 + */
1479 +- mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
1480 +-
1481 +- local_irq_restore(flags);
1482 ++ mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
1483 + }
1484 +
1485 +-static void tau_timeout_smp(struct timer_list *unused)
1486 +-{
1487 ++static struct workqueue_struct *tau_workq;
1488 +
1489 +- /* schedule ourselves to be run again */
1490 +- mod_timer(&tau_timer, jiffies + shrink_timer) ;
1491 ++static void tau_work_func(struct work_struct *work)
1492 ++{
1493 ++ msleep(shrink_timer);
1494 + on_each_cpu(tau_timeout, NULL, 0);
1495 ++ /* schedule ourselves to be run again */
1496 ++ queue_work(tau_workq, work);
1497 + }
1498 +
1499 ++DECLARE_WORK(tau_work, tau_work_func);
1500 ++
1501 + /*
1502 + * setup the TAU
1503 + *
1504 +@@ -231,21 +200,19 @@ static int __init TAU_init(void)
1505 + return 1;
1506 + }
1507 +
1508 ++ tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
1509 ++ !strcmp(cur_cpu_spec->platform, "ppc750");
1510 +
1511 +- /* first, set up the window shrinking timer */
1512 +- timer_setup(&tau_timer, tau_timeout_smp, 0);
1513 +- tau_timer.expires = jiffies + shrink_timer;
1514 +- add_timer(&tau_timer);
1515 ++ tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0);
1516 ++ if (!tau_workq)
1517 ++ return -ENOMEM;
1518 +
1519 + on_each_cpu(TAU_init_smp, NULL, 0);
1520 +
1521 +- printk("Thermal assist unit ");
1522 +-#ifdef CONFIG_TAU_INT
1523 +- printk("using interrupts, ");
1524 +-#else
1525 +- printk("using timers, ");
1526 +-#endif
1527 +- printk("shrink_timer: %d jiffies\n", shrink_timer);
1528 ++ queue_work(tau_workq, &tau_work);
1529 ++
1530 ++ pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n",
1531 ++ tau_int_enable ? "interrupts" : "workqueue", shrink_timer);
1532 + tau_initialized = 1;
1533 +
1534 + return 0;
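
Not part of the patch: the hunks above replace TAU's self-rearming timer with an unbound workqueue, so tau_timeout() runs in process context where msleep() and cross-CPU calls are legal. A minimal sketch of the same self-rearming pattern as a hypothetical module (all names illustrative, not from the patch):

    #include <linux/module.h>
    #include <linux/workqueue.h>
    #include <linux/delay.h>

    static struct workqueue_struct *poll_wq;
    static bool stopping;

    static void poll_fn(struct work_struct *work)
    {
            msleep(1000);              /* sleeping is fine: process context */
            pr_info("periodic poll\n");
            if (!READ_ONCE(stopping))
                    queue_work(poll_wq, work);   /* re-arm, like tau_work_func() */
    }
    static DECLARE_WORK(poll_work, poll_fn);

    static int __init poll_init(void)
    {
            poll_wq = alloc_workqueue("poll_demo", WQ_UNBOUND, 1);
            if (!poll_wq)
                    return -ENOMEM;
            queue_work(poll_wq, &poll_work);
            return 0;
    }

    static void __exit poll_exit(void)
    {
            WRITE_ONCE(stopping, true);
            cancel_work_sync(&poll_work);   /* wait out the last iteration */
            destroy_workqueue(poll_wq);
    }

    module_init(poll_init);
    module_exit(poll_exit);
    MODULE_LICENSE("GPL");
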
1535 +diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
1536 +index b5cc9b23cf024..277a07772e7d6 100644
1537 +--- a/arch/powerpc/mm/book3s64/radix_tlb.c
1538 ++++ b/arch/powerpc/mm/book3s64/radix_tlb.c
1539 +@@ -644,19 +644,29 @@ static void do_exit_flush_lazy_tlb(void *arg)
1540 + struct mm_struct *mm = arg;
1541 + unsigned long pid = mm->context.id;
1542 +
1543 ++ /*
1544 ++ * A kthread could have done a mmget_not_zero() after the flushing CPU
1545 ++ * checked mm_is_singlethreaded, and be in the process of
1546 ++ * kthread_use_mm when interrupted here. In that case, current->mm will
1547 ++ * be set to mm, because kthread_use_mm() setting ->mm and switching to
1548 ++ * the mm is done with interrupts off.
1549 ++ */
1550 + if (current->mm == mm)
1551 +- return; /* Local CPU */
1552 ++ goto out_flush;
1553 +
1554 + if (current->active_mm == mm) {
1555 +- /*
1556 +- * Must be a kernel thread because sender is single-threaded.
1557 +- */
1558 +- BUG_ON(current->mm);
1559 ++ WARN_ON_ONCE(current->mm != NULL);
1560 ++ /* Is a kernel thread and is using mm as the lazy tlb */
1561 + mmgrab(&init_mm);
1562 +- switch_mm(mm, &init_mm, current);
1563 + current->active_mm = &init_mm;
1564 ++ switch_mm_irqs_off(mm, &init_mm, current);
1565 + mmdrop(mm);
1566 + }
1567 ++
1568 ++ atomic_dec(&mm->context.active_cpus);
1569 ++ cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
1570 ++
1571 ++out_flush:
1572 + _tlbiel_pid(pid, RIC_FLUSH_ALL);
1573 + }
1574 +
1575 +@@ -671,7 +681,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
1576 + */
1577 + smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
1578 + (void *)mm, 1);
1579 +- mm_reset_thread_local(mm);
1580 + }
1581 +
1582 + void radix__flush_tlb_mm(struct mm_struct *mm)
1583 +diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
1584 +index 59327cefbc6a6..873fcfc7b8756 100644
1585 +--- a/arch/powerpc/mm/drmem.c
1586 ++++ b/arch/powerpc/mm/drmem.c
1587 +@@ -362,10 +362,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
1588 + if (!drmem_info->lmbs)
1589 + return;
1590 +
1591 +- for_each_drmem_lmb(lmb) {
1592 ++ for_each_drmem_lmb(lmb)
1593 + read_drconf_v1_cell(lmb, &prop);
1594 +- lmb_set_nid(lmb);
1595 +- }
1596 + }
1597 +
1598 + static void __init init_drmem_v2_lmbs(const __be32 *prop)
1599 +@@ -410,8 +408,6 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
1600 +
1601 + lmb->aa_index = dr_cell.aa_index;
1602 + lmb->flags = dr_cell.flags;
1603 +-
1604 +- lmb_set_nid(lmb);
1605 + }
1606 + }
1607 + }
1608 +diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
1609 +index 019b0c0bbbf31..ca91d04d0a7ae 100644
1610 +--- a/arch/powerpc/mm/kasan/kasan_init_32.c
1611 ++++ b/arch/powerpc/mm/kasan/kasan_init_32.c
1612 +@@ -121,8 +121,7 @@ void __init kasan_mmu_init(void)
1613 + {
1614 + int ret;
1615 +
1616 +- if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
1617 +- IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
1618 ++ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
1619 + ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
1620 +
1621 + if (ret)
1622 +@@ -133,11 +132,11 @@ void __init kasan_mmu_init(void)
1623 + void __init kasan_init(void)
1624 + {
1625 + struct memblock_region *reg;
1626 ++ int ret;
1627 +
1628 + for_each_memblock(memory, reg) {
1629 + phys_addr_t base = reg->base;
1630 + phys_addr_t top = min(base + reg->size, total_lowmem);
1631 +- int ret;
1632 +
1633 + if (base >= top)
1634 + continue;
1635 +@@ -147,6 +146,13 @@ void __init kasan_init(void)
1636 + panic("kasan: kasan_init_region() failed");
1637 + }
1638 +
1639 ++ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
1640 ++ ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
1641 ++
1642 ++ if (ret)
1643 ++ panic("kasan: kasan_init_shadow_page_tables() failed");
1644 ++ }
1645 ++
1646 + kasan_remap_early_shadow_ro();
1647 +
1648 + clear_page(kasan_early_shadow_page);
1649 +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
1650 +index c2c11eb8dcfca..0f21bcb16405a 100644
1651 +--- a/arch/powerpc/mm/mem.c
1652 ++++ b/arch/powerpc/mm/mem.c
1653 +@@ -50,6 +50,7 @@
1654 + #include <asm/swiotlb.h>
1655 + #include <asm/rtas.h>
1656 + #include <asm/kasan.h>
1657 ++#include <asm/svm.h>
1658 +
1659 + #include <mm/mmu_decl.h>
1660 +
1661 +@@ -290,7 +291,10 @@ void __init mem_init(void)
1662 + * back to to-down.
1663 + */
1664 + memblock_set_bottom_up(true);
1665 +- swiotlb_init(0);
1666 ++ if (is_secure_guest())
1667 ++ svm_swiotlb_init();
1668 ++ else
1669 ++ swiotlb_init(0);
1670 + #endif
1671 +
1672 + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
1673 +diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
1674 +index e608f9db12ddc..8965b4463d433 100644
1675 +--- a/arch/powerpc/perf/hv-gpci-requests.h
1676 ++++ b/arch/powerpc/perf/hv-gpci-requests.h
1677 +@@ -95,7 +95,7 @@ REQUEST(__field(0, 8, partition_id)
1678 +
1679 + #define REQUEST_NAME system_performance_capabilities
1680 + #define REQUEST_NUM 0x40
1681 +-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
1682 ++#define REQUEST_IDX_KIND "starting_index=0xffffffff"
1683 + #include I(REQUEST_BEGIN)
1684 + REQUEST(__field(0, 1, perf_collect_privileged)
1685 + __field(0x1, 1, capability_mask)
1686 +@@ -223,7 +223,7 @@ REQUEST(__field(0, 2, partition_id)
1687 +
1688 + #define REQUEST_NAME system_hypervisor_times
1689 + #define REQUEST_NUM 0xF0
1690 +-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
1691 ++#define REQUEST_IDX_KIND "starting_index=0xffffffff"
1692 + #include I(REQUEST_BEGIN)
1693 + REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
1694 + __count(0x8, 8, time_spent_processing_virtual_processor_timers)
1695 +@@ -234,7 +234,7 @@ REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
1696 +
1697 + #define REQUEST_NAME system_tlbie_count_and_time
1698 + #define REQUEST_NUM 0xF4
1699 +-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
1700 ++#define REQUEST_IDX_KIND "starting_index=0xffffffff"
1701 + #include I(REQUEST_BEGIN)
1702 + REQUEST(__count(0, 8, tlbie_instructions_issued)
1703 + /*
1704 +diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
1705 +index 4c86da5eb28ab..0b5c8f4fbdbfd 100644
1706 +--- a/arch/powerpc/perf/isa207-common.c
1707 ++++ b/arch/powerpc/perf/isa207-common.c
1708 +@@ -269,6 +269,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
1709 +
1710 + mask |= CNST_PMC_MASK(pmc);
1711 + value |= CNST_PMC_VAL(pmc);
1712 ++
1713 ++ /*
1714 ++ * PMC5 and PMC6 are used to count cycles and instructions and
1715 ++ * they do not support most of the constraint bits. Add a check
1716 ++ * to exclude PMC5/6 from most of the constraints except for
1717 ++ * EBB/BHRB.
1718 ++ */
1719 ++ if (pmc >= 5)
1720 ++ goto ebb_bhrb;
1721 + }
1722 +
1723 + if (pmc <= 4) {
1724 +@@ -335,6 +344,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
1725 + }
1726 + }
1727 +
1728 ++ebb_bhrb:
1729 + if (!pmc && ebb)
1730 + /* EBB events must specify the PMC */
1731 + return -1;
1732 +diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
1733 +index fb7515b4fa9c6..b439b027a42f1 100644
1734 +--- a/arch/powerpc/platforms/Kconfig
1735 ++++ b/arch/powerpc/platforms/Kconfig
1736 +@@ -223,12 +223,11 @@ config TAU
1737 + temperature within 2-4 degrees Celsius. This option shows the current
1738 + on-die temperature in /proc/cpuinfo if the cpu supports it.
1739 +
1740 +- Unfortunately, on some chip revisions, this sensor is very inaccurate
1741 +- and in many cases, does not work at all, so don't assume the cpu
1742 +- temp is actually what /proc/cpuinfo says it is.
1743 ++ Unfortunately, this sensor is very inaccurate when uncalibrated, so
1744 ++ don't assume the cpu temp is actually what /proc/cpuinfo says it is.
1745 +
1746 + config TAU_INT
1747 +- bool "Interrupt driven TAU driver (DANGEROUS)"
1748 ++ bool "Interrupt driven TAU driver (EXPERIMENTAL)"
1749 + depends on TAU
1750 + help
1751 + The TAU supports an interrupt driven mode which causes an interrupt
1752 +@@ -236,12 +235,7 @@ config TAU_INT
1753 + to get notified the temp has exceeded a range. With this option off,
1754 + a timer is used to re-check the temperature periodically.
1755 +
1756 +- However, on some cpus it appears that the TAU interrupt hardware
1757 +- is buggy and can cause a situation which would lead unexplained hard
1758 +- lockups.
1759 +-
1760 +- Unless you are extending the TAU driver, or enjoy kernel/hardware
1761 +- debugging, leave this option off.
1762 ++ If in doubt, say N here.
1763 +
1764 + config TAU_AVERAGE
1765 + bool "Average high and low temp"
1766 +diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
1767 +index 543c816fa99ef..0e6693bacb7e7 100644
1768 +--- a/arch/powerpc/platforms/powernv/opal-dump.c
1769 ++++ b/arch/powerpc/platforms/powernv/opal-dump.c
1770 +@@ -318,15 +318,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
1771 + return count;
1772 + }
1773 +
1774 +-static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
1775 +- uint32_t type)
1776 ++static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
1777 + {
1778 + struct dump_obj *dump;
1779 + int rc;
1780 +
1781 + dump = kzalloc(sizeof(*dump), GFP_KERNEL);
1782 + if (!dump)
1783 +- return NULL;
1784 ++ return;
1785 +
1786 + dump->kobj.kset = dump_kset;
1787 +
1788 +@@ -346,21 +345,39 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
1789 + rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
1790 + if (rc) {
1791 + kobject_put(&dump->kobj);
1792 +- return NULL;
1793 ++ return;
1794 + }
1795 +
1796 ++ /*
1797 ++ * As soon as the sysfs file for this dump is created/activated there is
1798 ++ * a chance the opal_errd daemon (or any userspace) might read and
1799 ++ * acknowledge the dump before kobject_uevent() is called. If that
1800 ++ * happens then there is a potential race between
1801 ++ * dump_ack_store->kobject_put() and kobject_uevent() which leads to a
1802 ++ * use-after-free of a kernfs object resulting in a kernel crash.
1803 ++ *
1804 ++ * To avoid that, we need to take a reference on behalf of the bin file,
1805 ++ * so that our reference remains valid while we call kobject_uevent().
1806 ++ * We then drop our reference before exiting the function, leaving the
1807 ++ * bin file to drop the last reference (if it hasn't already).
1808 ++ */
1809 ++
1810 ++ /* Take a reference for the bin file */
1811 ++ kobject_get(&dump->kobj);
1812 + rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
1813 +- if (rc) {
1814 ++ if (rc == 0) {
1815 ++ kobject_uevent(&dump->kobj, KOBJ_ADD);
1816 ++
1817 ++ pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
1818 ++ __func__, dump->id, dump->size);
1819 ++ } else {
1820 ++ /* Drop reference count taken for bin file */
1821 + kobject_put(&dump->kobj);
1822 +- return NULL;
1823 + }
1824 +
1825 +- pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
1826 +- __func__, dump->id, dump->size);
1827 +-
1828 +- kobject_uevent(&dump->kobj, KOBJ_ADD);
1829 +-
1830 +- return dump;
1831 ++ /* Drop our reference */
1832 ++ kobject_put(&dump->kobj);
1833 ++ return;
1834 + }
1835 +
1836 + static irqreturn_t process_dump(int irq, void *data)
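
Not part of the patch: the comment block in this hunk describes a general discipline — take a temporary reference before publishing an object to a context that may drop the last reference concurrently. A stripped-down userspace sketch of the same idea (C11 atomics standing in for the kobject API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj { atomic_int ref; };

    static struct obj *obj_get(struct obj *o)
    {
            atomic_fetch_add(&o->ref, 1);
            return o;
    }

    static void obj_put(struct obj *o)
    {
            if (atomic_fetch_sub(&o->ref, 1) == 1) {  /* last reference */
                    printf("object freed\n");
                    free(o);
            }
    }

    /* Stands in for userspace acking the dump via sysfs: it may consume
     * the object and drop its reference any time after publication. */
    static void concurrent_ack(struct obj *o)
    {
            obj_put(o);
    }

    int main(void)
    {
            struct obj *o = malloc(sizeof(*o));

            atomic_init(&o->ref, 1);   /* creator's reference */
            obj_get(o);                /* extra ref held across the "uevent" */
            concurrent_ack(o);         /* could run concurrently from here on */
            printf("uevent sent for a still-valid object\n");  /* safe: we hold a ref */
            obj_put(o);                /* drop ours; whoever is last frees */
            return 0;
    }
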
1837 +diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
1838 +index 8b748690dac22..9f236149b4027 100644
1839 +--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
1840 ++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
1841 +@@ -356,25 +356,32 @@ static int dlpar_add_lmb(struct drmem_lmb *);
1842 +
1843 + static int dlpar_remove_lmb(struct drmem_lmb *lmb)
1844 + {
1845 ++ struct memory_block *mem_block;
1846 + unsigned long block_sz;
1847 + int rc;
1848 +
1849 + if (!lmb_is_removable(lmb))
1850 + return -EINVAL;
1851 +
1852 ++ mem_block = lmb_to_memblock(lmb);
1853 ++ if (mem_block == NULL)
1854 ++ return -EINVAL;
1855 ++
1856 + rc = dlpar_offline_lmb(lmb);
1857 +- if (rc)
1858 ++ if (rc) {
1859 ++ put_device(&mem_block->dev);
1860 + return rc;
1861 ++ }
1862 +
1863 + block_sz = pseries_memory_block_size();
1864 +
1865 +- __remove_memory(lmb->nid, lmb->base_addr, block_sz);
1866 ++ __remove_memory(mem_block->nid, lmb->base_addr, block_sz);
1867 ++ put_device(&mem_block->dev);
1868 +
1869 + /* Update memory regions for memory remove */
1870 + memblock_remove(lmb->base_addr, block_sz);
1871 +
1872 + invalidate_lmb_associativity_index(lmb);
1873 +- lmb_clear_nid(lmb);
1874 + lmb->flags &= ~DRCONF_MEM_ASSIGNED;
1875 +
1876 + return 0;
1877 +@@ -631,7 +638,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
1878 + static int dlpar_add_lmb(struct drmem_lmb *lmb)
1879 + {
1880 + unsigned long block_sz;
1881 +- int rc;
1882 ++ int nid, rc;
1883 +
1884 + if (lmb->flags & DRCONF_MEM_ASSIGNED)
1885 + return -EINVAL;
1886 +@@ -642,11 +649,13 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
1887 + return rc;
1888 + }
1889 +
1890 +- lmb_set_nid(lmb);
1891 + block_sz = memory_block_size_bytes();
1892 +
1893 ++ /* Find the node id for this address. */
1894 ++ nid = memory_add_physaddr_to_nid(lmb->base_addr);
1895 ++
1896 + /* Add the memory */
1897 +- rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
1898 ++ rc = __add_memory(nid, lmb->base_addr, block_sz);
1899 + if (rc) {
1900 + invalidate_lmb_associativity_index(lmb);
1901 + return rc;
1902 +@@ -654,9 +663,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
1903 +
1904 + rc = dlpar_online_lmb(lmb);
1905 + if (rc) {
1906 +- __remove_memory(lmb->nid, lmb->base_addr, block_sz);
1907 ++ __remove_memory(nid, lmb->base_addr, block_sz);
1908 + invalidate_lmb_associativity_index(lmb);
1909 +- lmb_clear_nid(lmb);
1910 + } else {
1911 + lmb->flags |= DRCONF_MEM_ASSIGNED;
1912 + }
1913 +diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
1914 +index 9c569078a09fd..6c2c66450dac8 100644
1915 +--- a/arch/powerpc/platforms/pseries/papr_scm.c
1916 ++++ b/arch/powerpc/platforms/pseries/papr_scm.c
1917 +@@ -702,6 +702,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
1918 + p->bus_desc.of_node = p->pdev->dev.of_node;
1919 + p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);
1920 +
1921 ++ /* Set the dimm command family mask to accept PDSMs */
1922 ++ set_bit(NVDIMM_FAMILY_PAPR, &p->bus_desc.dimm_family_mask);
1923 ++
1924 + if (!p->bus_desc.provider_name)
1925 + return -ENOMEM;
1926 +
1927 +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
1928 +index 13c86a292c6d7..b2b245b25edba 100644
1929 +--- a/arch/powerpc/platforms/pseries/ras.c
1930 ++++ b/arch/powerpc/platforms/pseries/ras.c
1931 +@@ -521,18 +521,55 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
1932 + return 0; /* need to perform reset */
1933 + }
1934 +
1935 ++static int mce_handle_err_realmode(int disposition, u8 error_type)
1936 ++{
1937 ++#ifdef CONFIG_PPC_BOOK3S_64
1938 ++ if (disposition == RTAS_DISP_NOT_RECOVERED) {
1939 ++ switch (error_type) {
1940 ++ case MC_ERROR_TYPE_SLB:
1941 ++ case MC_ERROR_TYPE_ERAT:
1942 ++ /*
1943 ++ * Store the old slb content in paca before flushing.
1944 ++ * Print this when we go to virtual mode.
1945 ++ * There are chances that we may hit MCE again if there
1946 ++ * is a parity error on the SLB entry we trying to read
1947 ++ * for saving. Hence limit the slb saving to single
1948 ++ * level of recursion.
1949 ++ */
1950 ++ if (local_paca->in_mce == 1)
1951 ++ slb_save_contents(local_paca->mce_faulty_slbs);
1952 ++ flush_and_reload_slb();
1953 ++ disposition = RTAS_DISP_FULLY_RECOVERED;
1954 ++ break;
1955 ++ default:
1956 ++ break;
1957 ++ }
1958 ++ } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
1959 ++ /* Platform corrected itself but could be degraded */
1960 ++ pr_err("MCE: limited recovery, system may be degraded\n");
1961 ++ disposition = RTAS_DISP_FULLY_RECOVERED;
1962 ++ }
1963 ++#endif
1964 ++ return disposition;
1965 ++}
1966 +
1967 +-static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
1968 ++static int mce_handle_err_virtmode(struct pt_regs *regs,
1969 ++ struct rtas_error_log *errp,
1970 ++ struct pseries_mc_errorlog *mce_log,
1971 ++ int disposition)
1972 + {
1973 + struct mce_error_info mce_err = { 0 };
1974 +- unsigned long eaddr = 0, paddr = 0;
1975 +- struct pseries_errorlog *pseries_log;
1976 +- struct pseries_mc_errorlog *mce_log;
1977 +- int disposition = rtas_error_disposition(errp);
1978 + int initiator = rtas_error_initiator(errp);
1979 + int severity = rtas_error_severity(errp);
1980 ++ unsigned long eaddr = 0, paddr = 0;
1981 + u8 error_type, err_sub_type;
1982 +
1983 ++ if (!mce_log)
1984 ++ goto out;
1985 ++
1986 ++ error_type = mce_log->error_type;
1987 ++ err_sub_type = rtas_mc_error_sub_type(mce_log);
1988 ++
1989 + if (initiator == RTAS_INITIATOR_UNKNOWN)
1990 + mce_err.initiator = MCE_INITIATOR_UNKNOWN;
1991 + else if (initiator == RTAS_INITIATOR_CPU)
1992 +@@ -571,18 +608,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
1993 + mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
1994 + mce_err.error_class = MCE_ECLASS_UNKNOWN;
1995 +
1996 +- if (!rtas_error_extended(errp))
1997 +- goto out;
1998 +-
1999 +- pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
2000 +- if (pseries_log == NULL)
2001 +- goto out;
2002 +-
2003 +- mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
2004 +- error_type = mce_log->error_type;
2005 +- err_sub_type = rtas_mc_error_sub_type(mce_log);
2006 +-
2007 +- switch (mce_log->error_type) {
2008 ++ switch (error_type) {
2009 + case MC_ERROR_TYPE_UE:
2010 + mce_err.error_type = MCE_ERROR_TYPE_UE;
2011 + mce_common_process_ue(regs, &mce_err);
2012 +@@ -682,37 +708,31 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
2013 + mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
2014 + break;
2015 + }
2016 ++out:
2017 ++ save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
2018 ++ &mce_err, regs->nip, eaddr, paddr);
2019 ++ return disposition;
2020 ++}
2021 +
2022 +-#ifdef CONFIG_PPC_BOOK3S_64
2023 +- if (disposition == RTAS_DISP_NOT_RECOVERED) {
2024 +- switch (error_type) {
2025 +- case MC_ERROR_TYPE_SLB:
2026 +- case MC_ERROR_TYPE_ERAT:
2027 +- /*
2028 +- * Store the old slb content in paca before flushing.
2029 +- * Print this when we go to virtual mode.
2030 +- * There are chances that we may hit MCE again if there
2031 +- * is a parity error on the SLB entry we trying to read
2032 +- * for saving. Hence limit the slb saving to single
2033 +- * level of recursion.
2034 +- */
2035 +- if (local_paca->in_mce == 1)
2036 +- slb_save_contents(local_paca->mce_faulty_slbs);
2037 +- flush_and_reload_slb();
2038 +- disposition = RTAS_DISP_FULLY_RECOVERED;
2039 +- break;
2040 +- default:
2041 +- break;
2042 +- }
2043 +- } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
2044 +- /* Platform corrected itself but could be degraded */
2045 +- printk(KERN_ERR "MCE: limited recovery, system may "
2046 +- "be degraded\n");
2047 +- disposition = RTAS_DISP_FULLY_RECOVERED;
2048 +- }
2049 +-#endif
2050 ++static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
2051 ++{
2052 ++ struct pseries_errorlog *pseries_log;
2053 ++ struct pseries_mc_errorlog *mce_log = NULL;
2054 ++ int disposition = rtas_error_disposition(errp);
2055 ++ u8 error_type;
2056 ++
2057 ++ if (!rtas_error_extended(errp))
2058 ++ goto out;
2059 ++
2060 ++ pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
2061 ++ if (!pseries_log)
2062 ++ goto out;
2063 ++
2064 ++ mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
2065 ++ error_type = mce_log->error_type;
2066 ++
2067 ++ disposition = mce_handle_err_realmode(disposition, error_type);
2068 +
2069 +-out:
2070 + /*
2071 + * Enable translation as we will be accessing per-cpu variables
2072 + * in save_mce_event() which may fall outside RMO region, also
2073 +@@ -723,10 +743,10 @@ out:
2074 + * Note: All the realmode handling like flushing SLB entries for
2075 + * SLB multihit is done by now.
2076 + */
2077 ++out:
2078 + mtmsr(mfmsr() | MSR_IR | MSR_DR);
2079 +- save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
2080 +- &mce_err, regs->nip, eaddr, paddr);
2081 +-
2082 ++ disposition = mce_handle_err_virtmode(regs, errp, mce_log,
2083 ++ disposition);
2084 + return disposition;
2085 + }
2086 +
2087 +diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
2088 +index bbb97169bf63e..6268545947b83 100644
2089 +--- a/arch/powerpc/platforms/pseries/rng.c
2090 ++++ b/arch/powerpc/platforms/pseries/rng.c
2091 +@@ -36,6 +36,7 @@ static __init int rng_init(void)
2092 +
2093 + ppc_md.get_random_seed = pseries_get_random_long;
2094 +
2095 ++ of_node_put(dn);
2096 + return 0;
2097 + }
2098 + machine_subsys_initcall(pseries, rng_init);
2099 +diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
2100 +index 40c0637203d5b..81085eb8f2255 100644
2101 +--- a/arch/powerpc/platforms/pseries/svm.c
2102 ++++ b/arch/powerpc/platforms/pseries/svm.c
2103 +@@ -7,6 +7,7 @@
2104 + */
2105 +
2106 + #include <linux/mm.h>
2107 ++#include <linux/memblock.h>
2108 + #include <asm/machdep.h>
2109 + #include <asm/svm.h>
2110 + #include <asm/swiotlb.h>
2111 +@@ -34,6 +35,31 @@ static int __init init_svm(void)
2112 + }
2113 + machine_early_initcall(pseries, init_svm);
2114 +
2115 ++/*
2116 ++ * Initialize SWIOTLB. Essentially the same as swiotlb_init(), except that it
2117 ++ * can allocate the buffer anywhere in memory. Since the hypervisor doesn't have
2118 ++ * any addressing limitation, we don't need to allocate it in low addresses.
2119 ++ */
2120 ++void __init svm_swiotlb_init(void)
2121 ++{
2122 ++ unsigned char *vstart;
2123 ++ unsigned long bytes, io_tlb_nslabs;
2124 ++
2125 ++ io_tlb_nslabs = (swiotlb_size_or_default() >> IO_TLB_SHIFT);
2126 ++ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
2127 ++
2128 ++ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
2129 ++
2130 ++ vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
2131 ++ if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false))
2132 ++ return;
2133 ++
2134 ++ if (io_tlb_start)
2135 ++ memblock_free_early(io_tlb_start,
2136 ++ PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
2137 ++ panic("SVM: Cannot allocate SWIOTLB buffer");
2138 ++}
2139 ++
2140 + int set_memory_encrypted(unsigned long addr, int numpages)
2141 + {
2142 + if (!PAGE_ALIGNED(addr))
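
Not part of the patch: svm_swiotlb_init() sizes the bounce buffer in IO_TLB_SHIFT-sized slabs rounded up to a segment multiple. A quick userspace check of that arithmetic, assuming the 5.8-era constants (2 KiB slabs, 128-slab segments, 64 MiB default pool):

    #include <stdio.h>

    #define IO_TLB_SHIFT   11                 /* 2 KiB per slab (assumed) */
    #define IO_TLB_SEGSIZE 128                /* slabs per segment (assumed) */
    #define ALIGN(x, a)    (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long default_size = 64UL << 20;   /* 64 MiB default pool */
            unsigned long nslabs = default_size >> IO_TLB_SHIFT;

            nslabs = ALIGN(nslabs, IO_TLB_SEGSIZE);
            printf("nslabs=%lu bytes=%lu MiB=%lu\n",
                   nslabs, nslabs << IO_TLB_SHIFT,
                   (nslabs << IO_TLB_SHIFT) >> 20);
            return 0;
    }
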
2143 +diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
2144 +index ad8117148ea3b..21b9d1bf39ff6 100644
2145 +--- a/arch/powerpc/sysdev/xics/icp-hv.c
2146 ++++ b/arch/powerpc/sysdev/xics/icp-hv.c
2147 +@@ -174,6 +174,7 @@ int icp_hv_init(void)
2148 +
2149 + icp_ops = &icp_hv_ops;
2150 +
2151 ++ of_node_put(np);
2152 + return 0;
2153 + }
2154 +
2155 +diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
2156 +index 7efe4bc3ccf63..ac5862cee142a 100644
2157 +--- a/arch/powerpc/xmon/xmon.c
2158 ++++ b/arch/powerpc/xmon/xmon.c
2159 +@@ -962,6 +962,7 @@ static void insert_cpu_bpts(void)
2160 + brk.address = dabr[i].address;
2161 + brk.type = (dabr[i].enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2162 + brk.len = 8;
2163 ++ brk.hw_len = 8;
2164 + __set_breakpoint(i, &brk);
2165 + }
2166 + }
2167 +diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
2168 +index 5967f30141563..c93486a9989bc 100644
2169 +--- a/arch/s390/pci/pci_bus.c
2170 ++++ b/arch/s390/pci/pci_bus.c
2171 +@@ -197,9 +197,10 @@ void pcibios_bus_add_device(struct pci_dev *pdev)
2172 + * With pdev->no_vf_scan the common PCI probing code does not
2173 + * perform PF/VF linking.
2174 + */
2175 +- if (zdev->vfn)
2176 ++ if (zdev->vfn) {
2177 + zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
2178 +-
2179 ++ pdev->no_command_memory = 1;
2180 ++ }
2181 + }
2182 +
2183 + static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
2184 +diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
2185 +index 8735c468230a5..555203e3e7b45 100644
2186 +--- a/arch/um/drivers/vector_kern.c
2187 ++++ b/arch/um/drivers/vector_kern.c
2188 +@@ -1403,7 +1403,7 @@ static int vector_net_load_bpf_flash(struct net_device *dev,
2189 + kfree(vp->bpf->filter);
2190 + vp->bpf->filter = NULL;
2191 + } else {
2192 +- vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
2193 ++ vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
2194 + if (vp->bpf == NULL) {
2195 + netdev_err(dev, "failed to allocate memory for firmware\n");
2196 + goto flash_fail;
2197 +@@ -1415,7 +1415,7 @@ static int vector_net_load_bpf_flash(struct net_device *dev,
2198 + if (request_firmware(&fw, efl->data, &vdevice->pdev.dev))
2199 + goto flash_fail;
2200 +
2201 +- vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_KERNEL);
2202 ++ vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
2203 + if (!vp->bpf->filter)
2204 + goto free_buffer;
2205 +
2206 +diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
2207 +index 25eaa6a0c6583..c07436e89e599 100644
2208 +--- a/arch/um/kernel/time.c
2209 ++++ b/arch/um/kernel/time.c
2210 +@@ -70,13 +70,17 @@ static void time_travel_handle_message(struct um_timetravel_msg *msg,
2211 + * read of the message and write of the ACK.
2212 + */
2213 + if (mode != TTMH_READ) {
2214 ++ bool disabled = irqs_disabled();
2215 ++
2216 ++ BUG_ON(mode == TTMH_IDLE && !disabled);
2217 ++
2218 ++ if (disabled)
2219 ++ local_irq_enable();
2220 + while (os_poll(1, &time_travel_ext_fd) != 0) {
2221 +- if (mode == TTMH_IDLE) {
2222 +- BUG_ON(!irqs_disabled());
2223 +- local_irq_enable();
2224 +- local_irq_disable();
2225 +- }
2226 ++ /* nothing */
2227 + }
2228 ++ if (disabled)
2229 ++ local_irq_disable();
2230 + }
2231 +
2232 + ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
2233 +diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
2234 +index c8862696a47b9..7d0394f4ebf97 100644
2235 +--- a/arch/x86/boot/compressed/pgtable_64.c
2236 ++++ b/arch/x86/boot/compressed/pgtable_64.c
2237 +@@ -5,15 +5,6 @@
2238 + #include "pgtable.h"
2239 + #include "../string.h"
2240 +
2241 +-/*
2242 +- * __force_order is used by special_insns.h asm code to force instruction
2243 +- * serialization.
2244 +- *
2245 +- * It is not referenced from the code, but GCC < 5 with -fPIE would fail
2246 +- * due to an undefined symbol. Define it to make these ancient GCCs work.
2247 +- */
2248 +-unsigned long __force_order;
2249 +-
2250 + #define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */
2251 + #define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */
2252 +
2253 +diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
2254 +index fb616203ce427..be50ef8572cce 100644
2255 +--- a/arch/x86/events/amd/iommu.c
2256 ++++ b/arch/x86/events/amd/iommu.c
2257 +@@ -379,7 +379,7 @@ static __init int _init_events_attrs(void)
2258 + while (amd_iommu_v2_event_descs[i].attr.attr.name)
2259 + i++;
2260 +
2261 +- attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
2262 ++ attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
2263 + if (!attrs)
2264 + return -ENOMEM;
2265 +
2266 +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
2267 +index 4103665c6e032..29640b4079af0 100644
2268 +--- a/arch/x86/events/core.c
2269 ++++ b/arch/x86/events/core.c
2270 +@@ -1087,8 +1087,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
2271 +
2272 + cpuc->event_list[n] = event;
2273 + n++;
2274 +- if (is_counter_pair(&event->hw))
2275 ++ if (is_counter_pair(&event->hw)) {
2276 + cpuc->n_pair++;
2277 ++ cpuc->n_txn_pair++;
2278 ++ }
2279 + }
2280 + return n;
2281 + }
2282 +@@ -1953,6 +1955,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
2283 +
2284 + perf_pmu_disable(pmu);
2285 + __this_cpu_write(cpu_hw_events.n_txn, 0);
2286 ++ __this_cpu_write(cpu_hw_events.n_txn_pair, 0);
2287 + }
2288 +
2289 + /*
2290 +@@ -1978,6 +1981,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
2291 + */
2292 + __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
2293 + __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
2294 ++ __this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
2295 + perf_pmu_enable(pmu);
2296 + }
2297 +
2298 +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
2299 +index dc43cc124e096..221d1766d6e6c 100644
2300 +--- a/arch/x86/events/intel/ds.c
2301 ++++ b/arch/x86/events/intel/ds.c
2302 +@@ -670,9 +670,7 @@ unlock:
2303 +
2304 + static inline void intel_pmu_drain_pebs_buffer(void)
2305 + {
2306 +- struct pt_regs regs;
2307 +-
2308 +- x86_pmu.drain_pebs(&regs);
2309 ++ x86_pmu.drain_pebs(NULL);
2310 + }
2311 +
2312 + /*
2313 +@@ -1737,6 +1735,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
2314 + struct x86_perf_regs perf_regs;
2315 + struct pt_regs *regs = &perf_regs.regs;
2316 + void *at = get_next_pebs_record_by_bit(base, top, bit);
2317 ++ struct pt_regs dummy_iregs;
2318 +
2319 + if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
2320 + /*
2321 +@@ -1749,6 +1748,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
2322 + } else if (!intel_pmu_save_and_restart(event))
2323 + return;
2324 +
2325 ++ if (!iregs)
2326 ++ iregs = &dummy_iregs;
2327 ++
2328 + while (count > 1) {
2329 + setup_sample(event, iregs, at, &data, regs);
2330 + perf_event_output(event, &data, regs);
2331 +@@ -1758,16 +1760,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
2332 + }
2333 +
2334 + setup_sample(event, iregs, at, &data, regs);
2335 +-
2336 +- /*
2337 +- * All but the last records are processed.
2338 +- * The last one is left to be able to call the overflow handler.
2339 +- */
2340 +- if (perf_event_overflow(event, &data, regs)) {
2341 +- x86_pmu_stop(event, 0);
2342 +- return;
2343 ++ if (iregs == &dummy_iregs) {
2344 ++ /*
2345 ++ * The PEBS records may be drained in the non-overflow context,
2346 ++ * e.g., large PEBS + context switch. Perf should treat the
2347 ++ * last record the same as other PEBS records, and doesn't
2348 ++ * invoke the generic overflow handler.
2349 ++ */
2350 ++ perf_event_output(event, &data, regs);
2351 ++ } else {
2352 ++ /*
2353 ++ * All but the last records are processed.
2354 ++ * The last one is left to be able to call the overflow handler.
2355 ++ */
2356 ++ if (perf_event_overflow(event, &data, regs))
2357 ++ x86_pmu_stop(event, 0);
2358 + }
2359 +-
2360 + }
2361 +
2362 + static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
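
Not part of the patch: the drain path now passes NULL for the interrupt registers and __intel_pmu_pebs_event() substitutes a zeroed dummy, so only the "came from the overflow interrupt" decision needs the distinction. The NULL-sentinel-plus-dummy pattern, reduced to plain illustrative C:

    #include <stdio.h>

    struct regs { unsigned long ip; };

    /* iregs == NULL means "not called from the overflow interrupt";
     * substitute a zeroed dummy so the record loop has no special cases. */
    static void process_records(struct regs *iregs, int count)
    {
            struct regs dummy = { 0 };
            int from_overflow = (iregs != NULL);

            if (!iregs)
                    iregs = &dummy;

            while (count--)
                    printf("record ip=%#lx%s\n", iregs->ip,
                           (from_overflow && count == 0)
                                   ? " -> overflow handler" : "");
    }

    int main(void)
    {
            struct regs r = { .ip = 0x1000 };

            process_records(&r, 2);     /* interrupt path: last record may overflow */
            process_records(NULL, 2);   /* drain path (e.g. context switch) */
            return 0;
    }
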
2363 +diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
2364 +index 1038e9f1e3542..3b70c2ff177c0 100644
2365 +--- a/arch/x86/events/intel/uncore_snb.c
2366 ++++ b/arch/x86/events/intel/uncore_snb.c
2367 +@@ -115,6 +115,10 @@
2368 + #define ICL_UNC_CBO_0_PER_CTR0 0x702
2369 + #define ICL_UNC_CBO_MSR_OFFSET 0x8
2370 +
2371 ++/* ICL ARB register */
2372 ++#define ICL_UNC_ARB_PER_CTR 0x3b1
2373 ++#define ICL_UNC_ARB_PERFEVTSEL 0x3b3
2374 ++
2375 + DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
2376 + DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
2377 + DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
2378 +@@ -302,15 +306,21 @@ void skl_uncore_cpu_init(void)
2379 + snb_uncore_arb.ops = &skl_uncore_msr_ops;
2380 + }
2381 +
2382 ++static struct intel_uncore_ops icl_uncore_msr_ops = {
2383 ++ .disable_event = snb_uncore_msr_disable_event,
2384 ++ .enable_event = snb_uncore_msr_enable_event,
2385 ++ .read_counter = uncore_msr_read_counter,
2386 ++};
2387 ++
2388 + static struct intel_uncore_type icl_uncore_cbox = {
2389 + .name = "cbox",
2390 +- .num_counters = 4,
2391 ++ .num_counters = 2,
2392 + .perf_ctr_bits = 44,
2393 + .perf_ctr = ICL_UNC_CBO_0_PER_CTR0,
2394 + .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
2395 + .event_mask = SNB_UNC_RAW_EVENT_MASK,
2396 + .msr_offset = ICL_UNC_CBO_MSR_OFFSET,
2397 +- .ops = &skl_uncore_msr_ops,
2398 ++ .ops = &icl_uncore_msr_ops,
2399 + .format_group = &snb_uncore_format_group,
2400 + };
2401 +
2402 +@@ -339,13 +349,25 @@ static struct intel_uncore_type icl_uncore_clockbox = {
2403 + .single_fixed = 1,
2404 + .event_mask = SNB_UNC_CTL_EV_SEL_MASK,
2405 + .format_group = &icl_uncore_clock_format_group,
2406 +- .ops = &skl_uncore_msr_ops,
2407 ++ .ops = &icl_uncore_msr_ops,
2408 + .event_descs = icl_uncore_events,
2409 + };
2410 +
2411 ++static struct intel_uncore_type icl_uncore_arb = {
2412 ++ .name = "arb",
2413 ++ .num_counters = 1,
2414 ++ .num_boxes = 1,
2415 ++ .perf_ctr_bits = 44,
2416 ++ .perf_ctr = ICL_UNC_ARB_PER_CTR,
2417 ++ .event_ctl = ICL_UNC_ARB_PERFEVTSEL,
2418 ++ .event_mask = SNB_UNC_RAW_EVENT_MASK,
2419 ++ .ops = &icl_uncore_msr_ops,
2420 ++ .format_group = &snb_uncore_format_group,
2421 ++};
2422 ++
2423 + static struct intel_uncore_type *icl_msr_uncores[] = {
2424 + &icl_uncore_cbox,
2425 +- &snb_uncore_arb,
2426 ++ &icl_uncore_arb,
2427 + &icl_uncore_clockbox,
2428 + NULL,
2429 + };
2430 +@@ -363,7 +385,6 @@ void icl_uncore_cpu_init(void)
2431 + {
2432 + uncore_msr_uncores = icl_msr_uncores;
2433 + icl_uncore_cbox.num_boxes = icl_get_cbox_num();
2434 +- snb_uncore_arb.ops = &skl_uncore_msr_ops;
2435 + }
2436 +
2437 + enum {
2438 +diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
2439 +index 07652fa20ebbe..6a03fe8054a81 100644
2440 +--- a/arch/x86/events/intel/uncore_snbep.c
2441 ++++ b/arch/x86/events/intel/uncore_snbep.c
2442 +@@ -4550,10 +4550,10 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
2443 + INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
2444 +
2445 + INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
2446 +- INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"),
2447 ++ INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
2448 + INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
2449 + INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
2450 +- INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"),
2451 ++ INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
2452 + INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
2453 + { /* end: all zeroes */ },
2454 + };
2455 +@@ -5009,17 +5009,17 @@ static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
2456 + INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
2457 +
2458 + INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
2459 +- INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"),
2460 ++ INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
2461 + INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
2462 + INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
2463 +- INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"),
2464 ++ INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
2465 + INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
2466 +
2467 + INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"),
2468 +- INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "3.814697266e-6"),
2469 ++ INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"),
2470 + INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"),
2471 + INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"),
2472 +- INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "3.814697266e-6"),
2473 ++ INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"),
2474 + INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"),
2475 + { /* end: all zeroes */ },
2476 + };
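
Not part of the patch: the two scale constants are exactly 4/2^20 and 64/2^20, i.e. the corrected value treats each free-running IMC counter tick as a 64-byte cache line rather than 4 bytes (that interpretation is an assumption; the patch itself only changes the constant). A one-line check:

    #include <stdio.h>

    int main(void)
    {
            printf("%.9e\n", 4.0  / (1 << 20));   /* 3.814697266e-06 (old) */
            printf("%.9e\n", 64.0 / (1 << 20));   /* 6.103515625e-05 (new) */
            return 0;
    }
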
2477 +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
2478 +index e17a3d8a47ede..d4d482d16fe18 100644
2479 +--- a/arch/x86/events/perf_event.h
2480 ++++ b/arch/x86/events/perf_event.h
2481 +@@ -198,6 +198,7 @@ struct cpu_hw_events {
2482 + they've never been enabled yet */
2483 + int n_txn; /* the # last events in the below arrays;
2484 + added in the current transaction */
2485 ++ int n_txn_pair;
2486 + int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
2487 + u64 tags[X86_PMC_IDX_MAX];
2488 +
2489 +diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
2490 +index eb8e781c43539..b8f7c9659ef6b 100644
2491 +--- a/arch/x86/include/asm/special_insns.h
2492 ++++ b/arch/x86/include/asm/special_insns.h
2493 +@@ -11,45 +11,47 @@
2494 + #include <linux/jump_label.h>
2495 +
2496 + /*
2497 +- * Volatile isn't enough to prevent the compiler from reordering the
2498 +- * read/write functions for the control registers and messing everything up.
2499 +- * A memory clobber would solve the problem, but would prevent reordering of
2500 +- * all loads stores around it, which can hurt performance. Solution is to
2501 +- * use a variable and mimic reads and writes to it to enforce serialization
2502 ++ * The compiler should not reorder volatile asm statements with respect to each
2503 ++ * other: they should execute in program order. However GCC 4.9.x and 5.x have
2504 ++ * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
2505 ++ * volatile asm. The write functions are not affected since they have memory
2506 ++ * clobbers preventing reordering. To prevent reads from being reordered with
2507 ++ * respect to writes, use a dummy memory operand.
2508 + */
2509 +-extern unsigned long __force_order;
2510 ++
2511 ++#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
2512 +
2513 + void native_write_cr0(unsigned long val);
2514 +
2515 + static inline unsigned long native_read_cr0(void)
2516 + {
2517 + unsigned long val;
2518 +- asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
2519 ++ asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
2520 + return val;
2521 + }
2522 +
2523 + static __always_inline unsigned long native_read_cr2(void)
2524 + {
2525 + unsigned long val;
2526 +- asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
2527 ++ asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
2528 + return val;
2529 + }
2530 +
2531 + static __always_inline void native_write_cr2(unsigned long val)
2532 + {
2533 +- asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
2534 ++ asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
2535 + }
2536 +
2537 + static inline unsigned long __native_read_cr3(void)
2538 + {
2539 + unsigned long val;
2540 +- asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
2541 ++ asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
2542 + return val;
2543 + }
2544 +
2545 + static inline void native_write_cr3(unsigned long val)
2546 + {
2547 +- asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
2548 ++ asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
2549 + }
2550 +
2551 + static inline unsigned long native_read_cr4(void)
2552 +@@ -64,10 +66,10 @@ static inline unsigned long native_read_cr4(void)
2553 + asm volatile("1: mov %%cr4, %0\n"
2554 + "2:\n"
2555 + _ASM_EXTABLE(1b, 2b)
2556 +- : "=r" (val), "=m" (__force_order) : "0" (0));
2557 ++ : "=r" (val) : "0" (0), __FORCE_ORDER);
2558 + #else
2559 + /* CR4 always exists on x86_64. */
2560 +- asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
2561 ++ asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
2562 + #endif
2563 + return val;
2564 + }
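
Not part of the patch: the new __FORCE_ORDER is a dummy "m" input — each CR read asm appears to depend on memory, so the compiler cannot hoist it past the CR writes (which carry full "memory" clobbers), without the cost of a memory clobber on every read. The operand's address (0x1000) is never dereferenced because the asm template never references it. A standalone userspace illustration of the same trick (GCC/Clang inline asm, x86-64 assumed):

    #include <stdio.h>

    /* dummy input operand: the asm "may read some memory", nothing more */
    #define FORCE_ORDER "m"(*(unsigned int *)0x1000UL)

    static unsigned long rdtsc_lo(void)
    {
            unsigned long lo;

            /* %1 never appears in the template, so 0x1000 is never touched */
            asm volatile("rdtsc" : "=a" (lo) : FORCE_ORDER : "rdx");
            return lo;
    }

    int main(void)
    {
            printf("%lu\n%lu\n", rdtsc_lo(), rdtsc_lo());
            return 0;
    }
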
2565 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
2566 +index 95c090a45b4b4..d8ef789e00c15 100644
2567 +--- a/arch/x86/kernel/cpu/common.c
2568 ++++ b/arch/x86/kernel/cpu/common.c
2569 +@@ -358,7 +358,7 @@ void native_write_cr0(unsigned long val)
2570 + unsigned long bits_missing = 0;
2571 +
2572 + set_register:
2573 +- asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));
2574 ++ asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
2575 +
2576 + if (static_branch_likely(&cr_pinning)) {
2577 + if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
2578 +@@ -377,7 +377,7 @@ void native_write_cr4(unsigned long val)
2579 + unsigned long bits_changed = 0;
2580 +
2581 + set_register:
2582 +- asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
2583 ++ asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
2584 +
2585 + if (static_branch_likely(&cr_pinning)) {
2586 + if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
2587 +diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
2588 +index 14e4b4d17ee5b..07673a034d39c 100644
2589 +--- a/arch/x86/kernel/cpu/mce/core.c
2590 ++++ b/arch/x86/kernel/cpu/mce/core.c
2591 +@@ -370,42 +370,105 @@ static int msr_to_offset(u32 msr)
2592 + return -1;
2593 + }
2594 +
2595 ++__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
2596 ++ struct pt_regs *regs, int trapnr,
2597 ++ unsigned long error_code,
2598 ++ unsigned long fault_addr)
2599 ++{
2600 ++ pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
2601 ++ (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
2602 ++
2603 ++ show_stack_regs(regs);
2604 ++
2605 ++ panic("MCA architectural violation!\n");
2606 ++
2607 ++ while (true)
2608 ++ cpu_relax();
2609 ++
2610 ++ return true;
2611 ++}
2612 ++
2613 + /* MSR access wrappers used for error injection */
2614 +-static u64 mce_rdmsrl(u32 msr)
2615 ++static noinstr u64 mce_rdmsrl(u32 msr)
2616 + {
2617 +- u64 v;
2618 ++ DECLARE_ARGS(val, low, high);
2619 +
2620 + if (__this_cpu_read(injectm.finished)) {
2621 +- int offset = msr_to_offset(msr);
2622 ++ int offset;
2623 ++ u64 ret;
2624 +
2625 ++ instrumentation_begin();
2626 ++
2627 ++ offset = msr_to_offset(msr);
2628 + if (offset < 0)
2629 +- return 0;
2630 +- return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
2631 +- }
2632 ++ ret = 0;
2633 ++ else
2634 ++ ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
2635 +
2636 +- if (rdmsrl_safe(msr, &v)) {
2637 +- WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
2638 +- /*
2639 +- * Return zero in case the access faulted. This should
2640 +- * not happen normally but can happen if the CPU does
2641 +- * something weird, or if the code is buggy.
2642 +- */
2643 +- v = 0;
2644 ++ instrumentation_end();
2645 ++
2646 ++ return ret;
2647 + }
2648 +
2649 +- return v;
2650 ++ /*
2651 ++ * RDMSR on MCA MSRs should not fault. If they do, this is very much an
2652 ++ * architectural violation and needs to be reported to hw vendor. Panic
2653 ++ * the box to not allow any further progress.
2654 ++ */
2655 ++ asm volatile("1: rdmsr\n"
2656 ++ "2:\n"
2657 ++ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
2658 ++ : EAX_EDX_RET(val, low, high) : "c" (msr));
2659 ++
2660 ++
2661 ++ return EAX_EDX_VAL(val, low, high);
2662 ++}
2663 ++
2664 ++__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
2665 ++ struct pt_regs *regs, int trapnr,
2666 ++ unsigned long error_code,
2667 ++ unsigned long fault_addr)
2668 ++{
2669 ++ pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
2670 ++ (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
2671 ++ regs->ip, (void *)regs->ip);
2672 ++
2673 ++ show_stack_regs(regs);
2674 ++
2675 ++ panic("MCA architectural violation!\n");
2676 ++
2677 ++ while (true)
2678 ++ cpu_relax();
2679 ++
2680 ++ return true;
2681 + }
2682 +
2683 +-static void mce_wrmsrl(u32 msr, u64 v)
2684 ++static noinstr void mce_wrmsrl(u32 msr, u64 v)
2685 + {
2686 ++ u32 low, high;
2687 ++
2688 + if (__this_cpu_read(injectm.finished)) {
2689 +- int offset = msr_to_offset(msr);
2690 ++ int offset;
2691 ++
2692 ++ instrumentation_begin();
2693 +
2694 ++ offset = msr_to_offset(msr);
2695 + if (offset >= 0)
2696 + *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
2697 ++
2698 ++ instrumentation_end();
2699 ++
2700 + return;
2701 + }
2702 +- wrmsrl(msr, v);
2703 ++
2704 ++ low = (u32)v;
2705 ++ high = (u32)(v >> 32);
2706 ++
2707 ++ /* See comment in mce_rdmsrl() */
2708 ++ asm volatile("1: wrmsr\n"
2709 ++ "2:\n"
2710 ++ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
2711 ++ : : "c" (msr), "a"(low), "d" (high) : "memory");
2712 + }
2713 +
2714 + /*
2715 +diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
2716 +index 6473070b5da49..b122610e9046a 100644
2717 +--- a/arch/x86/kernel/cpu/mce/internal.h
2718 ++++ b/arch/x86/kernel/cpu/mce/internal.h
2719 +@@ -185,4 +185,14 @@ extern bool amd_filter_mce(struct mce *m);
2720 + static inline bool amd_filter_mce(struct mce *m) { return false; };
2721 + #endif
2722 +
2723 ++__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
2724 ++ struct pt_regs *regs, int trapnr,
2725 ++ unsigned long error_code,
2726 ++ unsigned long fault_addr);
2727 ++
2728 ++__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
2729 ++ struct pt_regs *regs, int trapnr,
2730 ++ unsigned long error_code,
2731 ++ unsigned long fault_addr);
2732 ++
2733 + #endif /* __X86_MCE_INTERNAL_H__ */
2734 +diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
2735 +index e1da619add192..567ce09a02868 100644
2736 +--- a/arch/x86/kernel/cpu/mce/severity.c
2737 ++++ b/arch/x86/kernel/cpu/mce/severity.c
2738 +@@ -9,9 +9,11 @@
2739 + #include <linux/seq_file.h>
2740 + #include <linux/init.h>
2741 + #include <linux/debugfs.h>
2742 +-#include <asm/mce.h>
2743 + #include <linux/uaccess.h>
2744 +
2745 ++#include <asm/mce.h>
2746 ++#include <asm/intel-family.h>
2747 ++
2748 + #include "internal.h"
2749 +
2750 + /*
2751 +@@ -40,9 +42,14 @@ static struct severity {
2752 + unsigned char context;
2753 + unsigned char excp;
2754 + unsigned char covered;
2755 ++ unsigned char cpu_model;
2756 ++ unsigned char cpu_minstepping;
2757 ++ unsigned char bank_lo, bank_hi;
2758 + char *msg;
2759 + } severities[] = {
2760 + #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
2761 ++#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
2762 ++#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
2763 + #define KERNEL .context = IN_KERNEL
2764 + #define USER .context = IN_USER
2765 + #define KERNEL_RECOV .context = IN_KERNEL_RECOV
2766 +@@ -97,7 +104,6 @@ static struct severity {
2767 + KEEP, "Corrected error",
2768 + NOSER, BITCLR(MCI_STATUS_UC)
2769 + ),
2770 +-
2771 + /*
2772 + * known AO MCACODs reported via MCE or CMC:
2773 + *
2774 +@@ -113,6 +119,18 @@ static struct severity {
2775 + AO, "Action optional: last level cache writeback error",
2776 + SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
2777 + ),
2778 ++ /*
2779 ++ * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured
2780 ++ * to report uncorrected errors using CMCI with a special signature.
2781 ++ * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported
2782 ++ * in one of the memory controller banks.
2783 ++ * Set severity to "AO" for same action as normal patrol scrub error.
2784 ++ */
2785 ++ MCESEV(
2786 ++ AO, "Uncorrected Patrol Scrub Error",
2787 ++ SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
2788 ++ MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
2789 ++ ),
2790 +
2791 + /* ignore OVER for UCNA */
2792 + MCESEV(
2793 +@@ -324,6 +342,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
2794 + continue;
2795 + if (s->excp && excp != s->excp)
2796 + continue;
2797 ++ if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
2798 ++ continue;
2799 ++ if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
2800 ++ continue;
2801 ++ if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi))
2802 ++ continue;
2803 + if (msg)
2804 + *msg = s->msg;
2805 + s->covered = 1;
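
Not part of the patch: each severity entry matches on (status & mask) == value, and the new model/stepping/bank fields narrow the quirk to the affected memory-controller banks. A toy reproduction of the patrol-scrub signature test, with the MCi_STATUS bit positions assumed from the SDM (ADDRV = bit 58, UC = bit 61):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MCI_STATUS_ADDRV (1ULL << 58)   /* assumed bit positions */
    #define MCI_STATUS_UC    (1ULL << 61)

    static bool sig_match(uint64_t status, uint64_t mask, uint64_t value)
    {
            return (status & mask) == value;   /* same idea as MASK() */
    }

    int main(void)
    {
            /* signature: UC=0, ADDRV=1, MSCOD=0x0010, MCACOD=0x00c0 */
            uint64_t status = MCI_STATUS_ADDRV | (0x0010ULL << 16) | 0x00c0;

            printf("matches: %d\n",
                   sig_match(status,
                             MCI_STATUS_UC | MCI_STATUS_ADDRV | 0xffffeff0,
                             MCI_STATUS_ADDRV | 0x001000c0));
            return 0;
    }
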
2806 +diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
2807 +index 7401cc12c3ccf..42679610c9bea 100644
2808 +--- a/arch/x86/kernel/dumpstack.c
2809 ++++ b/arch/x86/kernel/dumpstack.c
2810 +@@ -115,7 +115,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
2811 + unsigned long prologue = regs->ip - PROLOGUE_SIZE;
2812 +
2813 + if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
2814 +- printk("%sCode: Bad RIP value.\n", loglvl);
2815 ++ printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
2816 ++ loglvl, prologue);
2817 + } else {
2818 + printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
2819 + __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
2820 +diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
2821 +index 61ddc3a5e5c2b..f8ff895aaf7e1 100644
2822 +--- a/arch/x86/kernel/fpu/init.c
2823 ++++ b/arch/x86/kernel/fpu/init.c
2824 +@@ -243,9 +243,9 @@ static void __init fpu__init_system_ctx_switch(void)
2825 + */
2826 + static void __init fpu__init_parse_early_param(void)
2827 + {
2828 +- char arg[32];
2829 ++ char arg[128];
2830 + char *argptr = arg;
2831 +- int bit;
2832 ++ int arglen, res, bit;
2833 +
2834 + #ifdef CONFIG_X86_32
2835 + if (cmdline_find_option_bool(boot_command_line, "no387"))
2836 +@@ -268,12 +268,26 @@ static void __init fpu__init_parse_early_param(void)
2837 + if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
2838 + setup_clear_cpu_cap(X86_FEATURE_XSAVES);
2839 +
2840 +- if (cmdline_find_option(boot_command_line, "clearcpuid", arg,
2841 +- sizeof(arg)) &&
2842 +- get_option(&argptr, &bit) &&
2843 +- bit >= 0 &&
2844 +- bit < NCAPINTS * 32)
2845 +- setup_clear_cpu_cap(bit);
2846 ++ arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
2847 ++ if (arglen <= 0)
2848 ++ return;
2849 ++
2850 ++ pr_info("Clearing CPUID bits:");
2851 ++ do {
2852 ++ res = get_option(&argptr, &bit);
2853 ++ if (res == 0 || res == 3)
2854 ++ break;
2855 ++
2856 ++ /* If the argument was too long, the last bit may be cut off */
2857 ++ if (res == 1 && arglen >= sizeof(arg))
2858 ++ break;
2859 ++
2860 ++ if (bit >= 0 && bit < NCAPINTS * 32) {
2861 ++ pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
2862 ++ setup_clear_cpu_cap(bit);
2863 ++ }
2864 ++ } while (res == 2);
2865 ++ pr_cont("\n");
2866 + }
2867 +
2868 + /*
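
Not part of the patch: the rewritten loop walks a comma-separated clearcpuid= list via get_option(), which returns 2 while another number follows and 1 on the last one (0 and 3 abort the loop). A simplified userspace model of that contract (the real get_option() also returns 3 for range syntax, which this sketch omits):

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal stand-in for the kernel's get_option():
     * 0 = no number, 1 = last number, 2 = number followed by a comma. */
    static int get_option(char **str, int *val)
    {
            char *end;

            *val = (int)strtol(*str, &end, 0);
            if (end == *str)
                    return 0;
            *str = end;
            if (**str == ',') {
                    (*str)++;
                    return 2;
            }
            return 1;
    }

    int main(void)
    {
            char arg[] = "147,123,292";
            char *p = arg;
            int bit, res;

            do {
                    res = get_option(&p, &bit);
                    if (res == 0)
                            break;
                    printf("clearing CPUID bit %d\n", bit);
            } while (res == 2);
            return 0;
    }
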
2869 +diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
2870 +index d7c5e44b26f73..091752c3a19e2 100644
2871 +--- a/arch/x86/kernel/nmi.c
2872 ++++ b/arch/x86/kernel/nmi.c
2873 +@@ -102,7 +102,6 @@ fs_initcall(nmi_warning_debugfs);
2874 +
2875 + static void nmi_check_duration(struct nmiaction *action, u64 duration)
2876 + {
2877 +- u64 whole_msecs = READ_ONCE(action->max_duration);
2878 + int remainder_ns, decimal_msecs;
2879 +
2880 + if (duration < nmi_longest_ns || duration < action->max_duration)
2881 +@@ -110,12 +109,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
2882 +
2883 + action->max_duration = duration;
2884 +
2885 +- remainder_ns = do_div(whole_msecs, (1000 * 1000));
2886 ++ remainder_ns = do_div(duration, (1000 * 1000));
2887 + decimal_msecs = remainder_ns / 1000;
2888 +
2889 + printk_ratelimited(KERN_INFO
2890 + "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
2891 +- action->handler, whole_msecs, decimal_msecs);
2892 ++ action->handler, duration, decimal_msecs);
2893 + }
2894 +
2895 + static int nmi_handle(unsigned int type, struct pt_regs *regs)
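
Not part of the patch: the old code read action->max_duration into whole_msecs before storing the new record, so the message reported the previous maximum; reusing the local duration (already the new maximum) fixes that and drops a variable. The millisecond formatting itself is a divide with remainder:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t duration = 12345678;                    /* ns, new maximum */
            unsigned int remainder_ns = duration % 1000000;  /* do_div() analogue */

            duration /= 1000000;                             /* whole msecs */
            printf("took %llu.%03u msecs\n",                 /* "took 12.345 msecs" */
                   (unsigned long long)duration, remainder_ns / 1000);
            return 0;
    }
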
2896 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
2897 +index d0e2825ae6174..571cb8657e53e 100644
2898 +--- a/arch/x86/kvm/emulate.c
2899 ++++ b/arch/x86/kvm/emulate.c
2900 +@@ -3594,7 +3594,7 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
2901 + u64 tsc_aux = 0;
2902 +
2903 + if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
2904 +- return emulate_gp(ctxt, 0);
2905 ++ return emulate_ud(ctxt);
2906 + ctxt->dst.val = tsc_aux;
2907 + return X86EMUL_CONTINUE;
2908 + }
2909 +diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
2910 +index d057376bd3d33..698969e18fe35 100644
2911 +--- a/arch/x86/kvm/ioapic.c
2912 ++++ b/arch/x86/kvm/ioapic.c
2913 +@@ -197,12 +197,9 @@ static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
2914 +
2915 + /*
2916 + * If no longer has pending EOI in LAPICs, update
2917 +- * EOI for this vetor.
2918 ++ * EOI for this vector.
2919 + */
2920 + rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
2921 +- kvm_ioapic_update_eoi_one(vcpu, ioapic,
2922 +- entry->fields.trig_mode,
2923 +- irq);
2924 + break;
2925 + }
2926 + }
2927 +diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
2928 +index cfe83d4ae6252..ca0781b41df9d 100644
2929 +--- a/arch/x86/kvm/kvm_cache_regs.h
2930 ++++ b/arch/x86/kvm/kvm_cache_regs.h
2931 +@@ -7,7 +7,7 @@
2932 + #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
2933 + #define KVM_POSSIBLE_CR4_GUEST_BITS \
2934 + (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
2935 +- | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
2936 ++ | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD)
2937 +
2938 + #define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
2939 + static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
2940 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
2941 +index 4ce2ddd26c0b7..ccb72af1bcb5d 100644
2942 +--- a/arch/x86/kvm/lapic.c
2943 ++++ b/arch/x86/kvm/lapic.c
2944 +@@ -490,6 +490,12 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
2945 + }
2946 + }
2947 +
2948 ++void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
2949 ++{
2950 ++ apic_clear_irr(vec, vcpu->arch.apic);
2951 ++}
2952 ++EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
2953 ++
2954 + static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
2955 + {
2956 + struct kvm_vcpu *vcpu;
2957 +@@ -2462,6 +2468,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2958 + __apic_update_ppr(apic, &ppr);
2959 + return apic_has_interrupt_for_ppr(apic, ppr);
2960 + }
2961 ++EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
2962 +
2963 + int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2964 + {
2965 +diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
2966 +index 754f29beb83e3..4fb86e3a9dd3d 100644
2967 +--- a/arch/x86/kvm/lapic.h
2968 ++++ b/arch/x86/kvm/lapic.h
2969 +@@ -89,6 +89,7 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
2970 + bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
2971 + int shorthand, unsigned int dest, int dest_mode);
2972 + int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
2973 ++void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec);
2974 + bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr);
2975 + bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr);
2976 + void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
2977 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
2978 +index 1e6724c30cc05..57cd70801216f 100644
2979 +--- a/arch/x86/kvm/mmu/mmu.c
2980 ++++ b/arch/x86/kvm/mmu/mmu.c
2981 +@@ -6341,6 +6341,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
2982 + cond_resched_lock(&kvm->mmu_lock);
2983 + }
2984 + }
2985 ++ kvm_mmu_commit_zap_page(kvm, &invalid_list);
2986 +
2987 + spin_unlock(&kvm->mmu_lock);
2988 + srcu_read_unlock(&kvm->srcu, rcu_idx);
2989 +diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
2990 +index e80daa98682f5..b74722e0abb53 100644
2991 +--- a/arch/x86/kvm/svm/avic.c
2992 ++++ b/arch/x86/kvm/svm/avic.c
2993 +@@ -868,6 +868,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
2994 + * - Tell IOMMU to use legacy mode for this interrupt.
2995 + * - Retrieve ga_tag of prior interrupt remapping data.
2996 + */
2997 ++ pi.prev_ga_tag = 0;
2998 + pi.is_guest_mode = false;
2999 + ret = irq_set_vcpu_affinity(host_irq, &pi);
3000 +
3001 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
3002 +index a5810928b011f..27e41fac91965 100644
3003 +--- a/arch/x86/kvm/vmx/nested.c
3004 ++++ b/arch/x86/kvm/vmx/nested.c
3005 +@@ -2402,6 +2402,8 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
3006 + vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
3007 + vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
3008 + vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
3009 ++
3010 ++ vmx->segment_cache.bitmask = 0;
3011 + }
3012 +
3013 + if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
3014 +@@ -3295,8 +3297,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
3015 + prepare_vmcs02_early(vmx, vmcs12);
3016 +
3017 + if (from_vmentry) {
3018 +- if (unlikely(!nested_get_vmcs12_pages(vcpu)))
3019 ++ if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
3020 ++ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3021 + return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
3022 ++ }
3023 +
3024 + if (nested_vmx_check_vmentry_hw(vcpu)) {
3025 + vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3026 +@@ -3480,6 +3484,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3027 + if (unlikely(status != NVMX_VMENTRY_SUCCESS))
3028 + goto vmentry_failed;
3029 +
3030 ++ /* Emulate processing of posted interrupts on VM-Enter. */
3031 ++ if (nested_cpu_has_posted_intr(vmcs12) &&
3032 ++ kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
3033 ++ vmx->nested.pi_pending = true;
3034 ++ kvm_make_request(KVM_REQ_EVENT, vcpu);
3035 ++ kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
3036 ++ }
3037 ++
3038 + /* Hide L1D cache contents from the nested guest. */
3039 + vmx->vcpu.arch.l1tf_flush_l1d = true;
3040 +
3041 +diff --git a/block/blk-core.c b/block/blk-core.c
3042 +index 619a3dcd3f5e7..8d6435b731186 100644
3043 +--- a/block/blk-core.c
3044 ++++ b/block/blk-core.c
3045 +@@ -798,11 +798,10 @@ static void handle_bad_sector(struct bio *bio, sector_t maxsector)
3046 + {
3047 + char b[BDEVNAME_SIZE];
3048 +
3049 +- printk(KERN_INFO "attempt to access beyond end of device\n");
3050 +- printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
3051 +- bio_devname(bio, b), bio->bi_opf,
3052 +- (unsigned long long)bio_end_sector(bio),
3053 +- (long long)maxsector);
3054 ++ pr_info_ratelimited("attempt to access beyond end of device\n"
3055 ++ "%s: rw=%d, want=%llu, limit=%llu\n",
3056 ++ bio_devname(bio, b), bio->bi_opf,
3057 ++ bio_end_sector(bio), maxsector);
3058 + }
3059 +
3060 + #ifdef CONFIG_FAIL_MAKE_REQUEST
3061 +diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
3062 +index 062229395a507..7b52e7657b2d1 100644
3063 +--- a/block/blk-mq-sysfs.c
3064 ++++ b/block/blk-mq-sysfs.c
3065 +@@ -36,8 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
3066 + struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
3067 + kobj);
3068 +
3069 +- cancel_delayed_work_sync(&hctx->run_work);
3070 +-
3071 + if (hctx->flags & BLK_MQ_F_BLOCKING)
3072 + cleanup_srcu_struct(hctx->srcu);
3073 + blk_free_flush_queue(hctx->fq);
3074 +diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
3075 +index 02643e149d5e1..95fea6c18baf7 100644
3076 +--- a/block/blk-sysfs.c
3077 ++++ b/block/blk-sysfs.c
3078 +@@ -896,9 +896,16 @@ static void __blk_release_queue(struct work_struct *work)
3079 +
3080 + blk_free_queue_stats(q->stats);
3081 +
3082 +- if (queue_is_mq(q))
3083 ++ if (queue_is_mq(q)) {
3084 ++ struct blk_mq_hw_ctx *hctx;
3085 ++ int i;
3086 ++
3087 + cancel_delayed_work_sync(&q->requeue_work);
3088 +
3089 ++ queue_for_each_hw_ctx(q, hctx, i)
3090 ++ cancel_delayed_work_sync(&hctx->run_work);
3091 ++ }
3092 ++
3093 + blk_exit_queue(q);
3094 +
3095 + blk_queue_free_zone_bitmaps(q);
3096 +diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
3097 +index 43c6aa784858b..e62d735ed2660 100644
3098 +--- a/crypto/algif_aead.c
3099 ++++ b/crypto/algif_aead.c
3100 +@@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
3101 + SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
3102 +
3103 + skcipher_request_set_sync_tfm(skreq, null_tfm);
3104 +- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
3105 ++ skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
3106 + NULL, NULL);
3107 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
3108 +
3109 +@@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
3110 + areq->outlen = outlen;
3111 +
3112 + aead_request_set_callback(&areq->cra_u.aead_req,
3113 +- CRYPTO_TFM_REQ_MAY_BACKLOG,
3114 ++ CRYPTO_TFM_REQ_MAY_SLEEP,
3115 + af_alg_async_cb, areq);
3116 + err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
3117 + crypto_aead_decrypt(&areq->cra_u.aead_req);
3118 +
3119 + /* AIO operation in progress */
3120 +- if (err == -EINPROGRESS || err == -EBUSY)
3121 ++ if (err == -EINPROGRESS)
3122 + return -EIOCBQUEUED;
3123 +
3124 + sock_put(sk);
3125 + } else {
3126 + /* Synchronous operation */
3127 + aead_request_set_callback(&areq->cra_u.aead_req,
3128 ++ CRYPTO_TFM_REQ_MAY_SLEEP |
3129 + CRYPTO_TFM_REQ_MAY_BACKLOG,
3130 + crypto_req_done, &ctx->wait);
3131 + err = crypto_wait_req(ctx->enc ?
3132 +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
3133 +index 81c4022285a7c..30069a92a9b22 100644
3134 +--- a/crypto/algif_skcipher.c
3135 ++++ b/crypto/algif_skcipher.c
3136 +@@ -123,7 +123,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
3137 + crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
3138 +
3139 + /* AIO operation in progress */
3140 +- if (err == -EINPROGRESS || err == -EBUSY)
3141 ++ if (err == -EINPROGRESS)
3142 + return -EIOCBQUEUED;
3143 +
3144 + sock_put(sk);
3145 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
3146 +index 5b310eea9e527..adab46ca5dff7 100644
3147 +--- a/drivers/android/binder.c
3148 ++++ b/drivers/android/binder.c
3149 +@@ -223,7 +223,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
3150 + struct binder_work {
3151 + struct list_head entry;
3152 +
3153 +- enum {
3154 ++ enum binder_work_type {
3155 + BINDER_WORK_TRANSACTION = 1,
3156 + BINDER_WORK_TRANSACTION_COMPLETE,
3157 + BINDER_WORK_RETURN_ERROR,
3158 +@@ -885,27 +885,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked(
3159 + return w;
3160 + }
3161 +
3162 +-/**
3163 +- * binder_dequeue_work_head() - Dequeues the item at head of list
3164 +- * @proc: binder_proc associated with list
3165 +- * @list: list to dequeue head
3166 +- *
3167 +- * Removes the head of the list if there are items on the list
3168 +- *
3169 +- * Return: pointer dequeued binder_work, NULL if list was empty
3170 +- */
3171 +-static struct binder_work *binder_dequeue_work_head(
3172 +- struct binder_proc *proc,
3173 +- struct list_head *list)
3174 +-{
3175 +- struct binder_work *w;
3176 +-
3177 +- binder_inner_proc_lock(proc);
3178 +- w = binder_dequeue_work_head_ilocked(list);
3179 +- binder_inner_proc_unlock(proc);
3180 +- return w;
3181 +-}
3182 +-
3183 + static void
3184 + binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
3185 + static void binder_free_thread(struct binder_thread *thread);
3186 +@@ -2345,8 +2324,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
3187 + * file is done when the transaction is torn
3188 + * down.
3189 + */
3190 +- WARN_ON(failed_at &&
3191 +- proc->tsk == current->group_leader);
3192 + } break;
3193 + case BINDER_TYPE_PTR:
3194 + /*
3195 +@@ -4589,13 +4566,17 @@ static void binder_release_work(struct binder_proc *proc,
3196 + struct list_head *list)
3197 + {
3198 + struct binder_work *w;
3199 ++ enum binder_work_type wtype;
3200 +
3201 + while (1) {
3202 +- w = binder_dequeue_work_head(proc, list);
3203 ++ binder_inner_proc_lock(proc);
3204 ++ w = binder_dequeue_work_head_ilocked(list);
3205 ++ wtype = w ? w->type : 0;
3206 ++ binder_inner_proc_unlock(proc);
3207 + if (!w)
3208 + return;
3209 +
3210 +- switch (w->type) {
3211 ++ switch (wtype) {
3212 + case BINDER_WORK_TRANSACTION: {
3213 + struct binder_transaction *t;
3214 +
3215 +@@ -4629,9 +4610,11 @@ static void binder_release_work(struct binder_proc *proc,
3216 + kfree(death);
3217 + binder_stats_deleted(BINDER_STAT_DEATH);
3218 + } break;
3219 ++ case BINDER_WORK_NODE:
3220 ++ break;
3221 + default:
3222 + pr_err("unexpected work type, %d, not freed\n",
3223 +- w->type);
3224 ++ wtype);
3225 + break;
3226 + }
3227 + }
3228 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
3229 +index a5fef9aa419fd..91a0c84d55c97 100644
3230 +--- a/drivers/bluetooth/btusb.c
3231 ++++ b/drivers/bluetooth/btusb.c
3232 +@@ -2849,6 +2849,7 @@ static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
3233 + buf = kmalloc(size, GFP_KERNEL);
3234 + if (!buf) {
3235 + kfree(dr);
3236 ++ usb_free_urb(urb);
3237 + return -ENOMEM;
3238 + }
3239 +
3240 +diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
3241 +index 85a30fb9177bb..f83d67eafc9f0 100644
3242 +--- a/drivers/bluetooth/hci_ldisc.c
3243 ++++ b/drivers/bluetooth/hci_ldisc.c
3244 +@@ -538,6 +538,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
3245 + clear_bit(HCI_UART_PROTO_READY, &hu->flags);
3246 + percpu_up_write(&hu->proto_lock);
3247 +
3248 ++ cancel_work_sync(&hu->init_ready);
3249 + cancel_work_sync(&hu->write_work);
3250 +
3251 + if (hdev) {
3252 +diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
3253 +index 7b233312e723f..3977bba485c22 100644
3254 +--- a/drivers/bluetooth/hci_serdev.c
3255 ++++ b/drivers/bluetooth/hci_serdev.c
3256 +@@ -355,6 +355,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
3257 + struct hci_dev *hdev = hu->hdev;
3258 +
3259 + clear_bit(HCI_UART_PROTO_READY, &hu->flags);
3260 ++
3261 ++ cancel_work_sync(&hu->init_ready);
3262 + if (test_bit(HCI_UART_REGISTERED, &hu->flags))
3263 + hci_unregister_dev(hdev);
3264 + hci_free_dev(hdev);
3265 +diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile
3266 +index 66e2700c9032a..bc1469778cf87 100644
3267 +--- a/drivers/bus/mhi/core/Makefile
3268 ++++ b/drivers/bus/mhi/core/Makefile
3269 +@@ -1,3 +1,3 @@
3270 +-obj-$(CONFIG_MHI_BUS) := mhi.o
3271 ++obj-$(CONFIG_MHI_BUS) += mhi.o
3272 +
3273 + mhi-y := init.o main.o pm.o boot.o
3274 +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
3275 +index 77b8d551ae7fe..dd559661c15b3 100644
3276 +--- a/drivers/char/ipmi/ipmi_si_intf.c
3277 ++++ b/drivers/char/ipmi/ipmi_si_intf.c
3278 +@@ -1963,7 +1963,7 @@ static int try_smi_init(struct smi_info *new_smi)
3279 + /* Do this early so it's available for logs. */
3280 + if (!new_smi->io.dev) {
3281 + pr_err("IPMI interface added with no device\n");
3282 +- rv = EIO;
3283 ++ rv = -EIO;
3284 + goto out_err;
3285 + }
3286 +
3287 +diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
3288 +index 37c22667e8319..4313ecb2af5b2 100644
3289 +--- a/drivers/clk/at91/clk-main.c
3290 ++++ b/drivers/clk/at91/clk-main.c
3291 +@@ -437,12 +437,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
3292 + return -EINVAL;
3293 +
3294 + regmap_read(regmap, AT91_CKGR_MOR, &tmp);
3295 +- tmp &= ~MOR_KEY_MASK;
3296 +
3297 + if (index && !(tmp & AT91_PMC_MOSCSEL))
3298 +- regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
3299 ++ tmp = AT91_PMC_MOSCSEL;
3300 + else if (!index && (tmp & AT91_PMC_MOSCSEL))
3301 +- regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
3302 ++ tmp = 0;
3303 ++ else
3304 ++ return 0;
3305 ++
3306 ++ regmap_update_bits(regmap, AT91_CKGR_MOR,
3307 ++ AT91_PMC_MOSCSEL | MOR_KEY_MASK,
3308 ++ tmp | AT91_PMC_KEY);
3309 +
3310 + while (!clk_sam9x5_main_ready(regmap))
3311 + cpu_relax();
3312 +diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
3313 +index 011802f1a6df9..f18b4d9e9455b 100644
3314 +--- a/drivers/clk/bcm/clk-bcm2835.c
3315 ++++ b/drivers/clk/bcm/clk-bcm2835.c
3316 +@@ -1337,8 +1337,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
3317 + pll->hw.init = &init;
3318 +
3319 + ret = devm_clk_hw_register(cprman->dev, &pll->hw);
3320 +- if (ret)
3321 ++ if (ret) {
3322 ++ kfree(pll);
3323 + return NULL;
3324 ++ }
3325 + return &pll->hw;
3326 + }
3327 +
3328 +diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
3329 +index a64aace213c27..7762c5825e77d 100644
3330 +--- a/drivers/clk/imx/clk-imx8mq.c
3331 ++++ b/drivers/clk/imx/clk-imx8mq.c
3332 +@@ -157,10 +157,10 @@ static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys
3333 + "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
3334 +
3335 + static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
3336 +- "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
3337 ++ "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
3338 +
3339 + static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
3340 +- "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
3341 ++ "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
3342 +
3343 + static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out",
3344 + "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
3345 +diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
3346 +index 7edf8c8432b67..64ea895f1a7df 100644
3347 +--- a/drivers/clk/keystone/sci-clk.c
3348 ++++ b/drivers/clk/keystone/sci-clk.c
3349 +@@ -522,7 +522,7 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
3350 + np = of_find_node_with_property(np, *clk_name);
3351 + if (!np) {
3352 + clk_name++;
3353 +- break;
3354 ++ continue;
3355 + }
3356 +
3357 + if (!of_device_is_available(np))
3358 +diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
3359 +index 9766cccf5844c..6e0d3a1667291 100644
3360 +--- a/drivers/clk/mediatek/clk-mt6779.c
3361 ++++ b/drivers/clk/mediatek/clk-mt6779.c
3362 +@@ -919,6 +919,8 @@ static const struct mtk_gate infra_clks[] = {
3363 + "pwm_sel", 19),
3364 + GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
3365 + "pwm_sel", 21),
3366 ++ GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
3367 ++ "uart_sel", 22),
3368 + GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
3369 + "uart_sel", 23),
3370 + GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",
3371 +diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
3372 +index 53715e36326c6..9918cb375de30 100644
3373 +--- a/drivers/clk/meson/axg-audio.c
3374 ++++ b/drivers/clk/meson/axg-audio.c
3375 +@@ -1209,13 +1209,132 @@ static struct clk_hw_onecell_data sm1_audio_hw_onecell_data = {
3376 + };
3377 +
3378 +
3379 +-/* Convenience table to populate regmap in .probe()
3380 +- * Note that this table is shared between both AXG and G12A,
3381 +- * with spdifout_b clocks being exclusive to G12A. Since those
3382 +- * clocks are not declared within the AXG onecell table, we do not
3383 +- * feel the need to have separate AXG/G12A regmap tables.
3384 +- */
3385 ++/* Convenience table to populate regmap in .probe(). */
3386 + static struct clk_regmap *const axg_clk_regmaps[] = {
3387 ++ &ddr_arb,
3388 ++ &pdm,
3389 ++ &tdmin_a,
3390 ++ &tdmin_b,
3391 ++ &tdmin_c,
3392 ++ &tdmin_lb,
3393 ++ &tdmout_a,
3394 ++ &tdmout_b,
3395 ++ &tdmout_c,
3396 ++ &frddr_a,
3397 ++ &frddr_b,
3398 ++ &frddr_c,
3399 ++ &toddr_a,
3400 ++ &toddr_b,
3401 ++ &toddr_c,
3402 ++ &loopback,
3403 ++ &spdifin,
3404 ++ &spdifout,
3405 ++ &resample,
3406 ++ &power_detect,
3407 ++ &mst_a_mclk_sel,
3408 ++ &mst_b_mclk_sel,
3409 ++ &mst_c_mclk_sel,
3410 ++ &mst_d_mclk_sel,
3411 ++ &mst_e_mclk_sel,
3412 ++ &mst_f_mclk_sel,
3413 ++ &mst_a_mclk_div,
3414 ++ &mst_b_mclk_div,
3415 ++ &mst_c_mclk_div,
3416 ++ &mst_d_mclk_div,
3417 ++ &mst_e_mclk_div,
3418 ++ &mst_f_mclk_div,
3419 ++ &mst_a_mclk,
3420 ++ &mst_b_mclk,
3421 ++ &mst_c_mclk,
3422 ++ &mst_d_mclk,
3423 ++ &mst_e_mclk,
3424 ++ &mst_f_mclk,
3425 ++ &spdifout_clk_sel,
3426 ++ &spdifout_clk_div,
3427 ++ &spdifout_clk,
3428 ++ &spdifin_clk_sel,
3429 ++ &spdifin_clk_div,
3430 ++ &spdifin_clk,
3431 ++ &pdm_dclk_sel,
3432 ++ &pdm_dclk_div,
3433 ++ &pdm_dclk,
3434 ++ &pdm_sysclk_sel,
3435 ++ &pdm_sysclk_div,
3436 ++ &pdm_sysclk,
3437 ++ &mst_a_sclk_pre_en,
3438 ++ &mst_b_sclk_pre_en,
3439 ++ &mst_c_sclk_pre_en,
3440 ++ &mst_d_sclk_pre_en,
3441 ++ &mst_e_sclk_pre_en,
3442 ++ &mst_f_sclk_pre_en,
3443 ++ &mst_a_sclk_div,
3444 ++ &mst_b_sclk_div,
3445 ++ &mst_c_sclk_div,
3446 ++ &mst_d_sclk_div,
3447 ++ &mst_e_sclk_div,
3448 ++ &mst_f_sclk_div,
3449 ++ &mst_a_sclk_post_en,
3450 ++ &mst_b_sclk_post_en,
3451 ++ &mst_c_sclk_post_en,
3452 ++ &mst_d_sclk_post_en,
3453 ++ &mst_e_sclk_post_en,
3454 ++ &mst_f_sclk_post_en,
3455 ++ &mst_a_sclk,
3456 ++ &mst_b_sclk,
3457 ++ &mst_c_sclk,
3458 ++ &mst_d_sclk,
3459 ++ &mst_e_sclk,
3460 ++ &mst_f_sclk,
3461 ++ &mst_a_lrclk_div,
3462 ++ &mst_b_lrclk_div,
3463 ++ &mst_c_lrclk_div,
3464 ++ &mst_d_lrclk_div,
3465 ++ &mst_e_lrclk_div,
3466 ++ &mst_f_lrclk_div,
3467 ++ &mst_a_lrclk,
3468 ++ &mst_b_lrclk,
3469 ++ &mst_c_lrclk,
3470 ++ &mst_d_lrclk,
3471 ++ &mst_e_lrclk,
3472 ++ &mst_f_lrclk,
3473 ++ &tdmin_a_sclk_sel,
3474 ++ &tdmin_b_sclk_sel,
3475 ++ &tdmin_c_sclk_sel,
3476 ++ &tdmin_lb_sclk_sel,
3477 ++ &tdmout_a_sclk_sel,
3478 ++ &tdmout_b_sclk_sel,
3479 ++ &tdmout_c_sclk_sel,
3480 ++ &tdmin_a_sclk_pre_en,
3481 ++ &tdmin_b_sclk_pre_en,
3482 ++ &tdmin_c_sclk_pre_en,
3483 ++ &tdmin_lb_sclk_pre_en,
3484 ++ &tdmout_a_sclk_pre_en,
3485 ++ &tdmout_b_sclk_pre_en,
3486 ++ &tdmout_c_sclk_pre_en,
3487 ++ &tdmin_a_sclk_post_en,
3488 ++ &tdmin_b_sclk_post_en,
3489 ++ &tdmin_c_sclk_post_en,
3490 ++ &tdmin_lb_sclk_post_en,
3491 ++ &tdmout_a_sclk_post_en,
3492 ++ &tdmout_b_sclk_post_en,
3493 ++ &tdmout_c_sclk_post_en,
3494 ++ &tdmin_a_sclk,
3495 ++ &tdmin_b_sclk,
3496 ++ &tdmin_c_sclk,
3497 ++ &tdmin_lb_sclk,
3498 ++ &tdmout_a_sclk,
3499 ++ &tdmout_b_sclk,
3500 ++ &tdmout_c_sclk,
3501 ++ &tdmin_a_lrclk,
3502 ++ &tdmin_b_lrclk,
3503 ++ &tdmin_c_lrclk,
3504 ++ &tdmin_lb_lrclk,
3505 ++ &tdmout_a_lrclk,
3506 ++ &tdmout_b_lrclk,
3507 ++ &tdmout_c_lrclk,
3508 ++};
3509 ++
3510 ++static struct clk_regmap *const g12a_clk_regmaps[] = {
3511 + &ddr_arb,
3512 + &pdm,
3513 + &tdmin_a,
3514 +@@ -1713,8 +1832,8 @@ static const struct audioclk_data axg_audioclk_data = {
3515 + };
3516 +
3517 + static const struct audioclk_data g12a_audioclk_data = {
3518 +- .regmap_clks = axg_clk_regmaps,
3519 +- .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
3520 ++ .regmap_clks = g12a_clk_regmaps,
3521 ++ .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
3522 + .hw_onecell_data = &g12a_audio_hw_onecell_data,
3523 + .reset_offset = AUDIO_SW_RESET,
3524 + .reset_num = 26,
3525 +diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
3526 +index 30c15766ebb16..05d032be15c8f 100644
3527 +--- a/drivers/clk/meson/g12a.c
3528 ++++ b/drivers/clk/meson/g12a.c
3529 +@@ -298,6 +298,17 @@ static struct clk_regmap g12a_fclk_div2 = {
3530 + &g12a_fclk_div2_div.hw
3531 + },
3532 + .num_parents = 1,
3533 ++ /*
3534 ++ * Similar to fclk_div3, it seems that this clock is used by
3535 ++ * the resident firmware and is required by the platform to
3536 ++ * operate correctly.
3537 ++ * Until the following condition are met, we need this clock to
3538 ++ * be marked as critical:
3539 ++ * a) Mark the clock used by a firmware resource, if possible
3540 ++ * b) CCF has a clock hand-off mechanism to make the sure the
3541 ++ * clock stays on until the proper driver comes along
3542 ++ */
3543 ++ .flags = CLK_IS_CRITICAL,
3544 + },
3545 + };
3546 +
3547 +diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
3548 +index c6fb57cd576f5..aa5c0c6ead017 100644
3549 +--- a/drivers/clk/qcom/gcc-sdm660.c
3550 ++++ b/drivers/clk/qcom/gcc-sdm660.c
3551 +@@ -666,7 +666,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
3552 + .cmd_rcgr = 0x48044,
3553 + .mnd_width = 0,
3554 + .hid_width = 5,
3555 +- .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
3556 ++ .parent_map = gcc_parent_map_xo_gpll0,
3557 + .freq_tbl = ftbl_hmss_rbcpr_clk_src,
3558 + .clkr.hw.init = &(struct clk_init_data){
3559 + .name = "hmss_rbcpr_clk_src",
3560 +diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
3561 +index b333fc28c94b6..37c858d689e0d 100644
3562 +--- a/drivers/clk/rockchip/clk-half-divider.c
3563 ++++ b/drivers/clk/rockchip/clk-half-divider.c
3564 +@@ -166,7 +166,7 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
3565 + unsigned long flags,
3566 + spinlock_t *lock)
3567 + {
3568 +- struct clk *clk;
3569 ++ struct clk *clk = ERR_PTR(-ENOMEM);
3570 + struct clk_mux *mux = NULL;
3571 + struct clk_gate *gate = NULL;
3572 + struct clk_divider *div = NULL;
3573 +diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
3574 +index 09aa44cb8a91d..ba04cb381cd3f 100644
3575 +--- a/drivers/clocksource/hyperv_timer.c
3576 ++++ b/drivers/clocksource/hyperv_timer.c
3577 +@@ -341,7 +341,7 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
3578 + return read_hv_clock_tsc();
3579 + }
3580 +
3581 +-static u64 read_hv_sched_clock_tsc(void)
3582 ++static u64 notrace read_hv_sched_clock_tsc(void)
3583 + {
3584 + return (read_hv_clock_tsc() - hv_sched_clock_offset) *
3585 + (NSEC_PER_SEC / HV_CLOCK_HZ);
3586 +@@ -404,7 +404,7 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
3587 + return read_hv_clock_msr();
3588 + }
3589 +
3590 +-static u64 read_hv_sched_clock_msr(void)
3591 ++static u64 notrace read_hv_sched_clock_msr(void)
3592 + {
3593 + return (read_hv_clock_msr() - hv_sched_clock_offset) *
3594 + (NSEC_PER_SEC / HV_CLOCK_HZ);
3595 +diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
3596 +index df1c941260d14..b4af4094309b0 100644
3597 +--- a/drivers/cpufreq/armada-37xx-cpufreq.c
3598 ++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
3599 +@@ -484,6 +484,12 @@ remove_opp:
3600 + /* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
3601 + late_initcall(armada37xx_cpufreq_driver_init);
3602 +
3603 ++static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
3604 ++ { .compatible = "marvell,armada-3700-nb-pm" },
3605 ++ { },
3606 ++};
3607 ++MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
3608 ++
3609 + MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@××××××××××××××.com>");
3610 + MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
3611 + MODULE_LICENSE("GPL");
3612 +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
3613 +index 8646eb197cd96..31f5c4ebbac9f 100644
3614 +--- a/drivers/cpufreq/powernv-cpufreq.c
3615 ++++ b/drivers/cpufreq/powernv-cpufreq.c
3616 +@@ -884,12 +884,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
3617 + unsigned long action, void *unused)
3618 + {
3619 + int cpu;
3620 +- struct cpufreq_policy cpu_policy;
3621 ++ struct cpufreq_policy *cpu_policy;
3622 +
3623 + rebooting = true;
3624 + for_each_online_cpu(cpu) {
3625 +- cpufreq_get_policy(&cpu_policy, cpu);
3626 +- powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
3627 ++ cpu_policy = cpufreq_cpu_get(cpu);
3628 ++ if (!cpu_policy)
3629 ++ continue;
3630 ++ powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
3631 ++ cpufreq_cpu_put(cpu_policy);
3632 + }
3633 +
3634 + return NOTIFY_DONE;
3635 +diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
3636 +index b957061424a1f..8f3d6d31da52f 100644
3637 +--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
3638 ++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
3639 +@@ -120,7 +120,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
3640 + /* Be sure all data is written before enabling the task */
3641 + wmb();
3642 +
3643 +- v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
3644 ++ /* Only H6 needs to write a part of t_common_ctl along with "1", but since it is ignored
3645 ++ * on older SoCs, we have no reason to complicate things.
3646 ++ */
3647 ++ v = 1 | ((le32_to_cpu(ce->chanlist[flow].tl->t_common_ctl) & 0x7F) << 8);
3648 + writel(v, ce->base + CE_TLR);
3649 + mutex_unlock(&ce->mlock);
3650 +
3651 +diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
3652 +index bc35aa0ec07ae..d7f2840cf0a94 100644
3653 +--- a/drivers/crypto/caam/Kconfig
3654 ++++ b/drivers/crypto/caam/Kconfig
3655 +@@ -114,6 +114,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
3656 + select CRYPTO_AUTHENC
3657 + select CRYPTO_SKCIPHER
3658 + select CRYPTO_DES
3659 ++ select CRYPTO_XTS
3660 + help
3661 + Selecting this will use CAAM Queue Interface (QI) for sending
3662 + & receiving crypto jobs to/from CAAM. This gives better performance
3663 +diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
3664 +index 315d53499ce85..829d41a1e5da1 100644
3665 +--- a/drivers/crypto/caam/caamalg_qi.c
3666 ++++ b/drivers/crypto/caam/caamalg_qi.c
3667 +@@ -18,6 +18,8 @@
3668 + #include "qi.h"
3669 + #include "jr.h"
3670 + #include "caamalg_desc.h"
3671 ++#include <crypto/xts.h>
3672 ++#include <asm/unaligned.h>
3673 +
3674 + /*
3675 + * crypto alg
3676 +@@ -67,6 +69,12 @@ struct caam_ctx {
3677 + struct device *qidev;
3678 + spinlock_t lock; /* Protects multiple init of driver context */
3679 + struct caam_drv_ctx *drv_ctx[NUM_OP];
3680 ++ bool xts_key_fallback;
3681 ++ struct crypto_skcipher *fallback;
3682 ++};
3683 ++
3684 ++struct caam_skcipher_req_ctx {
3685 ++ struct skcipher_request fallback_req;
3686 + };
3687 +
3688 + static int aead_set_sh_desc(struct crypto_aead *aead)
3689 +@@ -726,12 +734,21 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
3690 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
3691 + struct device *jrdev = ctx->jrdev;
3692 + int ret = 0;
3693 ++ int err;
3694 +
3695 +- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
3696 ++ err = xts_verify_key(skcipher, key, keylen);
3697 ++ if (err) {
3698 + dev_dbg(jrdev, "key size mismatch\n");
3699 +- return -EINVAL;
3700 ++ return err;
3701 + }
3702 +
3703 ++ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
3704 ++ ctx->xts_key_fallback = true;
3705 ++
3706 ++ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
3707 ++ if (err)
3708 ++ return err;
3709 ++
3710 + ctx->cdata.keylen = keylen;
3711 + ctx->cdata.key_virt = key;
3712 + ctx->cdata.key_inline = true;
3713 +@@ -1373,6 +1390,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
3714 + return edesc;
3715 + }
3716 +
3717 ++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
3718 ++{
3719 ++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
3720 ++ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
3721 ++
3722 ++ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
3723 ++}
3724 ++
3725 + static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
3726 + {
3727 + struct skcipher_edesc *edesc;
3728 +@@ -1383,6 +1408,22 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
3729 + if (!req->cryptlen)
3730 + return 0;
3731 +
3732 ++ if (ctx->fallback && (xts_skcipher_ivsize(req) ||
3733 ++ ctx->xts_key_fallback)) {
3734 ++ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
3735 ++
3736 ++ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
3737 ++ skcipher_request_set_callback(&rctx->fallback_req,
3738 ++ req->base.flags,
3739 ++ req->base.complete,
3740 ++ req->base.data);
3741 ++ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
3742 ++ req->dst, req->cryptlen, req->iv);
3743 ++
3744 ++ return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
3745 ++ crypto_skcipher_decrypt(&rctx->fallback_req);
3746 ++ }
3747 ++
3748 + if (unlikely(caam_congested))
3749 + return -EAGAIN;
3750 +
3751 +@@ -1507,6 +1548,7 @@ static struct caam_skcipher_alg driver_algs[] = {
3752 + .base = {
3753 + .cra_name = "xts(aes)",
3754 + .cra_driver_name = "xts-aes-caam-qi",
3755 ++ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
3756 + .cra_blocksize = AES_BLOCK_SIZE,
3757 + },
3758 + .setkey = xts_skcipher_setkey,
3759 +@@ -2440,9 +2482,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
3760 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3761 + struct caam_skcipher_alg *caam_alg =
3762 + container_of(alg, typeof(*caam_alg), skcipher);
3763 ++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
3764 ++ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
3765 ++ int ret = 0;
3766 ++
3767 ++ if (alg_aai == OP_ALG_AAI_XTS) {
3768 ++ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
3769 ++ struct crypto_skcipher *fallback;
3770 ++
3771 ++ fallback = crypto_alloc_skcipher(tfm_name, 0,
3772 ++ CRYPTO_ALG_NEED_FALLBACK);
3773 ++ if (IS_ERR(fallback)) {
3774 ++ dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
3775 ++ tfm_name, PTR_ERR(fallback));
3776 ++ return PTR_ERR(fallback);
3777 ++ }
3778 ++
3779 ++ ctx->fallback = fallback;
3780 ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
3781 ++ crypto_skcipher_reqsize(fallback));
3782 ++ }
3783 ++
3784 ++ ret = caam_init_common(ctx, &caam_alg->caam, false);
3785 ++ if (ret && ctx->fallback)
3786 ++ crypto_free_skcipher(ctx->fallback);
3787 +
3788 +- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
3789 +- false);
3790 ++ return ret;
3791 + }
3792 +
3793 + static int caam_aead_init(struct crypto_aead *tfm)
3794 +@@ -2468,7 +2533,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
3795 +
3796 + static void caam_cra_exit(struct crypto_skcipher *tfm)
3797 + {
3798 +- caam_exit_common(crypto_skcipher_ctx(tfm));
3799 ++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
3800 ++
3801 ++ if (ctx->fallback)
3802 ++ crypto_free_skcipher(ctx->fallback);
3803 ++ caam_exit_common(ctx);
3804 + }
3805 +
3806 + static void caam_aead_exit(struct crypto_aead *tfm)
3807 +@@ -2502,7 +2571,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3808 + alg->base.cra_module = THIS_MODULE;
3809 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
3810 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3811 +- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
3812 ++ alg->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
3813 +
3814 + alg->init = caam_cra_init;
3815 + alg->exit = caam_cra_exit;
3816 +diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
3817 +index 64112c736810e..7234b95241e91 100644
3818 +--- a/drivers/crypto/ccp/ccp-ops.c
3819 ++++ b/drivers/crypto/ccp/ccp-ops.c
3820 +@@ -1746,7 +1746,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
3821 + break;
3822 + default:
3823 + ret = -EINVAL;
3824 +- goto e_ctx;
3825 ++ goto e_data;
3826 + }
3827 + } else {
3828 + /* Stash the context */
3829 +diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
3830 +index d39e1664fc7ed..3c65bf070c908 100644
3831 +--- a/drivers/crypto/ccree/cc_pm.c
3832 ++++ b/drivers/crypto/ccree/cc_pm.c
3833 +@@ -65,8 +65,12 @@ const struct dev_pm_ops ccree_pm = {
3834 + int cc_pm_get(struct device *dev)
3835 + {
3836 + int rc = pm_runtime_get_sync(dev);
3837 ++ if (rc < 0) {
3838 ++ pm_runtime_put_noidle(dev);
3839 ++ return rc;
3840 ++ }
3841 +
3842 +- return (rc == 1 ? 0 : rc);
3843 ++ return 0;
3844 + }
3845 +
3846 + void cc_pm_put_suspend(struct device *dev)
3847 +diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
3848 +index 54093115eb95d..bad8e90ba168d 100644
3849 +--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
3850 ++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
3851 +@@ -92,11 +92,13 @@ static void chtls_sock_release(struct kref *ref)
3852 + static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
3853 + struct sock *sk)
3854 + {
3855 ++ struct adapter *adap = pci_get_drvdata(cdev->pdev);
3856 + struct net_device *ndev = cdev->ports[0];
3857 + #if IS_ENABLED(CONFIG_IPV6)
3858 + struct net_device *temp;
3859 + int addr_type;
3860 + #endif
3861 ++ int i;
3862 +
3863 + switch (sk->sk_family) {
3864 + case PF_INET:
3865 +@@ -127,8 +129,12 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
3866 + return NULL;
3867 +
3868 + if (is_vlan_dev(ndev))
3869 +- return vlan_dev_real_dev(ndev);
3870 +- return ndev;
3871 ++ ndev = vlan_dev_real_dev(ndev);
3872 ++
3873 ++ for_each_port(adap, i)
3874 ++ if (cdev->ports[i] == ndev)
3875 ++ return ndev;
3876 ++ return NULL;
3877 + }
3878 +
3879 + static void assign_rxopt(struct sock *sk, unsigned int opt)
3880 +@@ -477,7 +483,6 @@ void chtls_destroy_sock(struct sock *sk)
3881 + chtls_purge_write_queue(sk);
3882 + free_tls_keyid(sk);
3883 + kref_put(&csk->kref, chtls_sock_release);
3884 +- csk->cdev = NULL;
3885 + if (sk->sk_family == AF_INET)
3886 + sk->sk_prot = &tcp_prot;
3887 + #if IS_ENABLED(CONFIG_IPV6)
3888 +@@ -736,14 +741,13 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
3889 +
3890 + #if IS_ENABLED(CONFIG_IPV6)
3891 + if (sk->sk_family == PF_INET6) {
3892 +- struct chtls_sock *csk;
3893 ++ struct net_device *ndev = chtls_find_netdev(cdev, sk);
3894 + int addr_type = 0;
3895 +
3896 +- csk = rcu_dereference_sk_user_data(sk);
3897 + addr_type = ipv6_addr_type((const struct in6_addr *)
3898 + &sk->sk_v6_rcv_saddr);
3899 + if (addr_type != IPV6_ADDR_ANY)
3900 +- cxgb4_clip_release(csk->egress_dev, (const u32 *)
3901 ++ cxgb4_clip_release(ndev, (const u32 *)
3902 + &sk->sk_v6_rcv_saddr, 1);
3903 + }
3904 + #endif
3905 +@@ -1156,6 +1160,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
3906 + ndev = n->dev;
3907 + if (!ndev)
3908 + goto free_dst;
3909 ++ if (is_vlan_dev(ndev))
3910 ++ ndev = vlan_dev_real_dev(ndev);
3911 ++
3912 + port_id = cxgb4_port_idx(ndev);
3913 +
3914 + csk = chtls_sock_create(cdev);
3915 +diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
3916 +index 2e9acae1cba3b..9fb5ca6682ea2 100644
3917 +--- a/drivers/crypto/chelsio/chtls/chtls_io.c
3918 ++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
3919 +@@ -902,9 +902,9 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk,
3920 + return 0;
3921 + }
3922 +
3923 +-static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
3924 ++static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
3925 + {
3926 +- return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
3927 ++ return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0);
3928 + }
3929 +
3930 + static int csk_wait_memory(struct chtls_dev *cdev,
3931 +@@ -1240,6 +1240,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
3932 + copied = 0;
3933 + csk = rcu_dereference_sk_user_data(sk);
3934 + cdev = csk->cdev;
3935 ++ lock_sock(sk);
3936 + timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
3937 +
3938 + err = sk_stream_wait_connect(sk, &timeo);
3939 +diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
3940 +index 64614a9bdf219..047826f18bd35 100644
3941 +--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
3942 ++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
3943 +@@ -332,11 +332,14 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
3944 + ret = sec_alloc_pbuf_resource(dev, res);
3945 + if (ret) {
3946 + dev_err(dev, "fail to alloc pbuf dma resource!\n");
3947 +- goto alloc_fail;
3948 ++ goto alloc_pbuf_fail;
3949 + }
3950 + }
3951 +
3952 + return 0;
3953 ++alloc_pbuf_fail:
3954 ++ if (ctx->alg_type == SEC_AEAD)
3955 ++ sec_free_mac_resource(dev, qp_ctx->res);
3956 + alloc_fail:
3957 + sec_free_civ_resource(dev, res);
3958 +
3959 +@@ -447,8 +450,10 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
3960 + ctx->fake_req_limit = QM_Q_DEPTH >> 1;
3961 + ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
3962 + GFP_KERNEL);
3963 +- if (!ctx->qp_ctx)
3964 +- return -ENOMEM;
3965 ++ if (!ctx->qp_ctx) {
3966 ++ ret = -ENOMEM;
3967 ++ goto err_destroy_qps;
3968 ++ }
3969 +
3970 + for (i = 0; i < sec->ctx_q_num; i++) {
3971 + ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
3972 +@@ -457,12 +462,15 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
3973 + }
3974 +
3975 + return 0;
3976 ++
3977 + err_sec_release_qp_ctx:
3978 + for (i = i - 1; i >= 0; i--)
3979 + sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
3980 +
3981 +- sec_destroy_qps(ctx->qps, sec->ctx_q_num);
3982 + kfree(ctx->qp_ctx);
3983 ++err_destroy_qps:
3984 ++ sec_destroy_qps(ctx->qps, sec->ctx_q_num);
3985 ++
3986 + return ret;
3987 + }
3988 +
3989 +diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
3990 +index ad73fc9466821..3be6e0db0f9fc 100644
3991 +--- a/drivers/crypto/ixp4xx_crypto.c
3992 ++++ b/drivers/crypto/ixp4xx_crypto.c
3993 +@@ -528,7 +528,7 @@ static void release_ixp_crypto(struct device *dev)
3994 +
3995 + if (crypt_virt) {
3996 + dma_free_coherent(dev,
3997 +- NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
3998 ++ NPE_QLEN * sizeof(struct crypt_ctl),
3999 + crypt_virt, crypt_phys);
4000 + }
4001 + }
4002 +diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
4003 +index 7e3ad085b5bdd..efce3a83b35a8 100644
4004 +--- a/drivers/crypto/mediatek/mtk-platform.c
4005 ++++ b/drivers/crypto/mediatek/mtk-platform.c
4006 +@@ -442,7 +442,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
4007 + static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
4008 + {
4009 + struct mtk_ring **ring = cryp->ring;
4010 +- int i, err = ENOMEM;
4011 ++ int i;
4012 +
4013 + for (i = 0; i < MTK_RING_MAX; i++) {
4014 + ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
4015 +@@ -469,14 +469,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
4016 + return 0;
4017 +
4018 + err_cleanup:
4019 +- for (; i--; ) {
4020 ++ do {
4021 + dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
4022 + ring[i]->res_base, ring[i]->res_dma);
4023 + dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
4024 + ring[i]->cmd_base, ring[i]->cmd_dma);
4025 + kfree(ring[i]);
4026 +- }
4027 +- return err;
4028 ++ } while (i--);
4029 ++ return -ENOMEM;
4030 + }
4031 +
4032 + static int mtk_crypto_probe(struct platform_device *pdev)
4033 +diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
4034 +index 82691a057d2a1..bc956dfb34de6 100644
4035 +--- a/drivers/crypto/omap-sham.c
4036 ++++ b/drivers/crypto/omap-sham.c
4037 +@@ -456,6 +456,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
4038 + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
4039 + u32 val, mask;
4040 +
4041 ++ if (likely(ctx->digcnt))
4042 ++ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
4043 ++
4044 + /*
4045 + * Setting ALGO_CONST only for the first iteration and
4046 + * CLOSE_HASH only for the last one. Note that flags mode bits
4047 +diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
4048 +index 7384e91c8b32b..0d32b641a7f9d 100644
4049 +--- a/drivers/crypto/picoxcell_crypto.c
4050 ++++ b/drivers/crypto/picoxcell_crypto.c
4051 +@@ -1666,11 +1666,6 @@ static int spacc_probe(struct platform_device *pdev)
4052 + goto err_clk_put;
4053 + }
4054 +
4055 +- ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
4056 +- if (ret)
4057 +- goto err_clk_disable;
4058 +-
4059 +-
4060 + /*
4061 + * Use an IRQ threshold of 50% as a default. This seems to be a
4062 + * reasonable trade off of latency against throughput but can be
4063 +@@ -1678,6 +1673,10 @@ static int spacc_probe(struct platform_device *pdev)
4064 + */
4065 + engine->stat_irq_thresh = (engine->fifo_sz / 2);
4066 +
4067 ++ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
4068 ++ if (ret)
4069 ++ goto err_clk_disable;
4070 ++
4071 + /*
4072 + * Configure the interrupts. We only use the STAT_CNT interrupt as we
4073 + * only submit a new packet for processing when we complete another in
4074 +diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
4075 +index 4ef3eb11361c2..4a4c3284ae1f3 100644
4076 +--- a/drivers/crypto/stm32/Kconfig
4077 ++++ b/drivers/crypto/stm32/Kconfig
4078 +@@ -3,6 +3,7 @@ config CRYPTO_DEV_STM32_CRC
4079 + tristate "Support for STM32 crc accelerators"
4080 + depends on ARCH_STM32
4081 + select CRYPTO_HASH
4082 ++ select CRC32
4083 + help
4084 + This enables support for the CRC32 hw accelerator which can be found
4085 + on STMicroelectronics STM32 SOC.
4086 +diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
4087 +index 3ba41148c2a46..2c13f5214d2cf 100644
4088 +--- a/drivers/crypto/stm32/stm32-crc32.c
4089 ++++ b/drivers/crypto/stm32/stm32-crc32.c
4090 +@@ -6,6 +6,7 @@
4091 +
4092 + #include <linux/bitrev.h>
4093 + #include <linux/clk.h>
4094 ++#include <linux/crc32.h>
4095 + #include <linux/crc32poly.h>
4096 + #include <linux/module.h>
4097 + #include <linux/mod_devicetable.h>
4098 +@@ -147,7 +148,6 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
4099 + struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
4100 + struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
4101 + struct stm32_crc *crc;
4102 +- unsigned long flags;
4103 +
4104 + crc = stm32_crc_get_next_crc();
4105 + if (!crc)
4106 +@@ -155,7 +155,15 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
4107 +
4108 + pm_runtime_get_sync(crc->dev);
4109 +
4110 +- spin_lock_irqsave(&crc->lock, flags);
4111 ++ if (!spin_trylock(&crc->lock)) {
4112 ++ /* Hardware is busy, calculate crc32 by software */
4113 ++ if (mctx->poly == CRC32_POLY_LE)
4114 ++ ctx->partial = crc32_le(ctx->partial, d8, length);
4115 ++ else
4116 ++ ctx->partial = __crc32c_le(ctx->partial, d8, length);
4117 ++
4118 ++ goto pm_out;
4119 ++ }
4120 +
4121 + /*
4122 + * Restore previously calculated CRC for this context as init value
4123 +@@ -195,8 +203,9 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
4124 + /* Store partial result */
4125 + ctx->partial = readl_relaxed(crc->regs + CRC_DR);
4126 +
4127 +- spin_unlock_irqrestore(&crc->lock, flags);
4128 ++ spin_unlock(&crc->lock);
4129 +
4130 ++pm_out:
4131 + pm_runtime_mark_last_busy(crc->dev);
4132 + pm_runtime_put_autosuspend(crc->dev);
4133 +
4134 +diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
4135 +index 323822372b4ce..7480fc1042093 100644
4136 +--- a/drivers/dma/dmatest.c
4137 ++++ b/drivers/dma/dmatest.c
4138 +@@ -1240,15 +1240,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
4139 + add_threaded_test(info);
4140 +
4141 + /* Check if channel was added successfully */
4142 +- dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
4143 +-
4144 +- if (dtc->chan) {
4145 ++ if (!list_empty(&info->channels)) {
4146 + /*
4147 + * if new channel was not successfully added, revert the
4148 + * "test_channel" string to the name of the last successfully
4149 + * added channel. exception for when users issues empty string
4150 + * to channel parameter.
4151 + */
4152 ++ dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
4153 + if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
4154 + && (strcmp("", strim(test_channel)) != 0)) {
4155 + ret = -EINVAL;
4156 +diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
4157 +index a1b56f52db2f2..5e7fdc0b6e3db 100644
4158 +--- a/drivers/dma/dw/core.c
4159 ++++ b/drivers/dma/dw/core.c
4160 +@@ -772,6 +772,10 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
4161 + if (dws->dma_dev != chan->device->dev)
4162 + return false;
4163 +
4164 ++ /* permit channels in accordance with the channels mask */
4165 ++ if (dws->channels && !(dws->channels & dwc->mask))
4166 ++ return false;
4167 ++
4168 + /* We have to copy data since dws can be temporary storage */
4169 + memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
4170 +
4171 +diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c
4172 +index 7a085b3c1854c..d9810980920a1 100644
4173 +--- a/drivers/dma/dw/dw.c
4174 ++++ b/drivers/dma/dw/dw.c
4175 +@@ -14,7 +14,7 @@
4176 + static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
4177 + {
4178 + struct dw_dma *dw = to_dw_dma(dwc->chan.device);
4179 +- u32 cfghi = DWC_CFGH_FIFO_MODE;
4180 ++ u32 cfghi = is_slave_direction(dwc->direction) ? 0 : DWC_CFGH_FIFO_MODE;
4181 + u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
4182 + bool hs_polarity = dwc->dws.hs_polarity;
4183 +
4184 +diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c
4185 +index 9e27831dee324..43e975fb67142 100644
4186 +--- a/drivers/dma/dw/of.c
4187 ++++ b/drivers/dma/dw/of.c
4188 +@@ -22,18 +22,21 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
4189 + };
4190 + dma_cap_mask_t cap;
4191 +
4192 +- if (dma_spec->args_count != 3)
4193 ++ if (dma_spec->args_count < 3 || dma_spec->args_count > 4)
4194 + return NULL;
4195 +
4196 + slave.src_id = dma_spec->args[0];
4197 + slave.dst_id = dma_spec->args[0];
4198 + slave.m_master = dma_spec->args[1];
4199 + slave.p_master = dma_spec->args[2];
4200 ++ if (dma_spec->args_count >= 4)
4201 ++ slave.channels = dma_spec->args[3];
4202 +
4203 + if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
4204 + slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
4205 + slave.m_master >= dw->pdata->nr_masters ||
4206 +- slave.p_master >= dw->pdata->nr_masters))
4207 ++ slave.p_master >= dw->pdata->nr_masters ||
4208 ++ slave.channels >= BIT(dw->pdata->nr_channels)))
4209 + return NULL;
4210 +
4211 + dma_cap_zero(cap);
4212 +diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
4213 +index fd782aee02d92..98c56606ab1a9 100644
4214 +--- a/drivers/dma/ioat/dma.c
4215 ++++ b/drivers/dma/ioat/dma.c
4216 +@@ -389,7 +389,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
4217 + struct ioat_descs *descs = &ioat_chan->descs[i];
4218 +
4219 + descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
4220 +- SZ_2M, &descs->hw, flags);
4221 ++ IOAT_CHUNK_SIZE, &descs->hw, flags);
4222 + if (!descs->virt) {
4223 + int idx;
4224 +
4225 +diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
4226 +index b194658b8b5c9..fbec28dc661d7 100644
4227 +--- a/drivers/edac/aspeed_edac.c
4228 ++++ b/drivers/edac/aspeed_edac.c
4229 +@@ -209,8 +209,8 @@ static int config_irq(void *ctx, struct platform_device *pdev)
4230 + /* register interrupt handler */
4231 + irq = platform_get_irq(pdev, 0);
4232 + dev_dbg(&pdev->dev, "got irq %d\n", irq);
4233 +- if (!irq)
4234 +- return -ENODEV;
4235 ++ if (irq < 0)
4236 ++ return irq;
4237 +
4238 + rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
4239 + DRV_NAME, ctx);
4240 +diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
4241 +index 191aa7c19ded7..324a46b8479b0 100644
4242 +--- a/drivers/edac/i5100_edac.c
4243 ++++ b/drivers/edac/i5100_edac.c
4244 +@@ -1061,16 +1061,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
4245 + PCI_DEVICE_ID_INTEL_5100_19, 0);
4246 + if (!einj) {
4247 + ret = -ENODEV;
4248 +- goto bail_einj;
4249 ++ goto bail_mc_free;
4250 + }
4251 +
4252 + rc = pci_enable_device(einj);
4253 + if (rc < 0) {
4254 + ret = rc;
4255 +- goto bail_disable_einj;
4256 ++ goto bail_einj;
4257 + }
4258 +
4259 +-
4260 + mci->pdev = &pdev->dev;
4261 +
4262 + priv = mci->pvt_info;
4263 +@@ -1136,14 +1135,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
4264 + bail_scrub:
4265 + priv->scrub_enable = 0;
4266 + cancel_delayed_work_sync(&(priv->i5100_scrubbing));
4267 +- edac_mc_free(mci);
4268 +-
4269 +-bail_disable_einj:
4270 + pci_disable_device(einj);
4271 +
4272 + bail_einj:
4273 + pci_dev_put(einj);
4274 +
4275 ++bail_mc_free:
4276 ++ edac_mc_free(mci);
4277 ++
4278 + bail_disable_ch1:
4279 + pci_disable_device(ch1mm);
4280 +
4281 +diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
4282 +index 8be3e89a510e4..d7419a90a2f5b 100644
4283 +--- a/drivers/edac/ti_edac.c
4284 ++++ b/drivers/edac/ti_edac.c
4285 +@@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev)
4286 +
4287 + /* add EMIF ECC error handler */
4288 + error_irq = platform_get_irq(pdev, 0);
4289 +- if (!error_irq) {
4290 ++ if (error_irq < 0) {
4291 ++ ret = error_irq;
4292 + edac_printk(KERN_ERR, EDAC_MOD_NAME,
4293 + "EMIF irq number not defined.\n");
4294 + goto err;
4295 +diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
4296 +index 6998dc86b5ce8..b797a713c3313 100644
4297 +--- a/drivers/firmware/arm_scmi/mailbox.c
4298 ++++ b/drivers/firmware/arm_scmi/mailbox.c
4299 +@@ -110,7 +110,7 @@ static int mailbox_chan_free(int id, void *p, void *data)
4300 + struct scmi_chan_info *cinfo = p;
4301 + struct scmi_mailbox *smbox = cinfo->transport_info;
4302 +
4303 +- if (!IS_ERR(smbox->chan)) {
4304 ++ if (smbox && !IS_ERR(smbox->chan)) {
4305 + mbox_free_channel(smbox->chan);
4306 + cinfo->transport_info = NULL;
4307 + smbox->chan = NULL;
4308 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
4309 +index 7c1cc0ba30a55..78cf9e4fddbdf 100644
4310 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
4311 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
4312 +@@ -8178,8 +8178,7 @@ static int dm_update_plane_state(struct dc *dc,
4313 + dm_old_plane_state->dc_state,
4314 + dm_state->context)) {
4315 +
4316 +- ret = EINVAL;
4317 +- return ret;
4318 ++ return -EINVAL;
4319 + }
4320 +
4321 +
4322 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
4323 +index d016f50e187c8..d261f425b80ec 100644
4324 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
4325 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
4326 +@@ -2538,7 +2538,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
4327 +
4328 + copy_stream_update_to_stream(dc, context, stream, stream_update);
4329 +
4330 +- if (update_type > UPDATE_TYPE_FAST) {
4331 ++ if (update_type >= UPDATE_TYPE_FULL) {
4332 + if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
4333 + DC_ERROR("Mode validation failed for stream update!\n");
4334 + dc_release_state(context);
4335 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
4336 +index ebff9b1e312e5..124c081a0f2ca 100644
4337 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
4338 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
4339 +@@ -75,7 +75,7 @@ static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *d
4340 + else
4341 + bl_pwm &= 0xFFFF;
4342 +
4343 +- current_backlight = bl_pwm << (1 + bl_int_count);
4344 ++ current_backlight = (uint64_t)bl_pwm << (1 + bl_int_count);
4345 +
4346 + if (bl_period == 0)
4347 + bl_period = 0xFFFF;
4348 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
4349 +index 20bdabebbc434..76cd4f3de4eaf 100644
4350 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
4351 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
4352 +@@ -3165,6 +3165,9 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
4353 + context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
4354 + dc->debug.enable_dram_clock_change_one_display_vactive;
4355 +
4356 ++ /*Unsafe due to current pipe merge and split logic*/
4357 ++ ASSERT(context != dc->current_state);
4358 ++
4359 + if (fast_validate) {
4360 + return dcn20_validate_bandwidth_internal(dc, context, true);
4361 + }
4362 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
4363 +index f00a568350848..c6ab3dee4fd69 100644
4364 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
4365 ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
4366 +@@ -1184,6 +1184,9 @@ bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
4367 +
4368 + BW_VAL_TRACE_COUNT();
4369 +
4370 ++ /*Unsafe due to current pipe merge and split logic*/
4371 ++ ASSERT(context != dc->current_state);
4372 ++
4373 + out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel);
4374 +
4375 + if (pipe_cnt == 0)
4376 +diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
4377 +index 5d67a41f7c3a8..3dd70d813f694 100644
4378 +--- a/drivers/gpu/drm/drm_debugfs_crc.c
4379 ++++ b/drivers/gpu/drm/drm_debugfs_crc.c
4380 +@@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
4381 + source[len - 1] = '\0';
4382 +
4383 + ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
4384 +- if (ret)
4385 ++ if (ret) {
4386 ++ kfree(source);
4387 + return ret;
4388 ++ }
4389 +
4390 + spin_lock_irq(&crc->lock);
4391 +
4392 +diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
4393 +index 8b2d5c945c95c..1d85af9a481ac 100644
4394 +--- a/drivers/gpu/drm/drm_gem_vram_helper.c
4395 ++++ b/drivers/gpu/drm/drm_gem_vram_helper.c
4396 +@@ -175,6 +175,10 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
4397 + }
4398 + }
4399 +
4400 ++/*
4401 ++ * Note that on error, drm_gem_vram_init will free the buffer object.
4402 ++ */
4403 ++
4404 + static int drm_gem_vram_init(struct drm_device *dev,
4405 + struct drm_gem_vram_object *gbo,
4406 + size_t size, unsigned long pg_align)
4407 +@@ -184,15 +188,19 @@ static int drm_gem_vram_init(struct drm_device *dev,
4408 + int ret;
4409 + size_t acc_size;
4410 +
4411 +- if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
4412 ++ if (WARN_ONCE(!vmm, "VRAM MM not initialized")) {
4413 ++ kfree(gbo);
4414 + return -EINVAL;
4415 ++ }
4416 + bdev = &vmm->bdev;
4417 +
4418 + gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
4419 +
4420 + ret = drm_gem_object_init(dev, &gbo->bo.base, size);
4421 +- if (ret)
4422 ++ if (ret) {
4423 ++ kfree(gbo);
4424 + return ret;
4425 ++ }
4426 +
4427 + acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
4428 +
4429 +@@ -203,13 +211,13 @@ static int drm_gem_vram_init(struct drm_device *dev,
4430 + &gbo->placement, pg_align, false, acc_size,
4431 + NULL, NULL, ttm_buffer_object_destroy);
4432 + if (ret)
4433 +- goto err_drm_gem_object_release;
4434 ++ /*
4435 ++ * A failing ttm_bo_init will call ttm_buffer_object_destroy
4436 ++ * to release gbo->bo.base and kfree gbo.
4437 ++ */
4438 ++ return ret;
4439 +
4440 + return 0;
4441 +-
4442 +-err_drm_gem_object_release:
4443 +- drm_gem_object_release(&gbo->bo.base);
4444 +- return ret;
4445 + }
4446 +
4447 + /**
4448 +@@ -243,13 +251,9 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
4449 +
4450 + ret = drm_gem_vram_init(dev, gbo, size, pg_align);
4451 + if (ret < 0)
4452 +- goto err_kfree;
4453 ++ return ERR_PTR(ret);
4454 +
4455 + return gbo;
4456 +-
4457 +-err_kfree:
4458 +- kfree(gbo);
4459 +- return ERR_PTR(ret);
4460 + }
4461 + EXPORT_SYMBOL(drm_gem_vram_create);
4462 +
4463 +diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
4464 +index f41cbb753bb46..720a767118c9c 100644
4465 +--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
4466 ++++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
4467 +@@ -2078,7 +2078,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
4468 + intel_dp->dpcd,
4469 + sizeof(intel_dp->dpcd));
4470 + cdv_intel_edp_panel_vdd_off(gma_encoder);
4471 +- if (ret == 0) {
4472 ++ if (ret <= 0) {
4473 + /* if this fails, presume the device is a ghost */
4474 + DRM_INFO("failed to retrieve link info, disabling eDP\n");
4475 + drm_encoder_cleanup(encoder);
4476 +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
4477 +index d8b43500f12d1..2d01a293aa782 100644
4478 +--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
4479 ++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
4480 +@@ -485,7 +485,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
4481 + mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
4482 + cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
4483 + cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
4484 +- cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
4485 ++ cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
4486 + mtk_crtc_ddp_config(crtc, cmdq_handle);
4487 + cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
4488 + }
4489 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
4490 +index d6023ba8033c0..3bb567812b990 100644
4491 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
4492 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
4493 +@@ -864,7 +864,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
4494 + int i;
4495 +
4496 + a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
4497 +- sizeof(a6xx_state->indexed_regs));
4498 ++ sizeof(*a6xx_state->indexed_regs));
4499 + if (!a6xx_state->indexed_regs)
4500 + return;
4501 +
4502 +diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
4503 +index a74ccc5b8220d..5b5809c0e44b3 100644
4504 +--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
4505 ++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
4506 +@@ -189,10 +189,16 @@ struct msm_gem_address_space *
4507 + adreno_iommu_create_address_space(struct msm_gpu *gpu,
4508 + struct platform_device *pdev)
4509 + {
4510 +- struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
4511 +- struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
4512 ++ struct iommu_domain *iommu;
4513 ++ struct msm_mmu *mmu;
4514 + struct msm_gem_address_space *aspace;
4515 +
4516 ++ iommu = iommu_domain_alloc(&platform_bus_type);
4517 ++ if (!iommu)
4518 ++ return NULL;
4519 ++
4520 ++ mmu = msm_iommu_new(&pdev->dev, iommu);
4521 ++
4522 + aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
4523 + 0xffffffff - SZ_16M);
4524 +
4525 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
4526 +index 1026e1e5bec10..4d81a0c73616f 100644
4527 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
4528 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
4529 +@@ -881,7 +881,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
4530 + struct drm_plane *plane;
4531 + struct drm_display_mode *mode;
4532 +
4533 +- int cnt = 0, rc = 0, mixer_width, i, z_pos;
4534 ++ int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
4535 +
4536 + struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
4537 + int multirect_count = 0;
4538 +@@ -914,9 +914,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
4539 +
4540 + memset(pipe_staged, 0, sizeof(pipe_staged));
4541 +
4542 +- mixer_width = mode->hdisplay / cstate->num_mixers;
4543 ++ if (cstate->num_mixers) {
4544 ++ mixer_width = mode->hdisplay / cstate->num_mixers;
4545 +
4546 +- _dpu_crtc_setup_lm_bounds(crtc, state);
4547 ++ _dpu_crtc_setup_lm_bounds(crtc, state);
4548 ++ }
4549 +
4550 + crtc_rect.x2 = mode->hdisplay;
4551 + crtc_rect.y2 = mode->vdisplay;
4552 +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
4553 +index 497cf443a9afa..0b02e65a89e79 100644
4554 +--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
4555 ++++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
4556 +@@ -26,6 +26,7 @@
4557 + #include <drm/drm_drv.h>
4558 + #include <drm/drm_fb_cma_helper.h>
4559 + #include <drm/drm_fb_helper.h>
4560 ++#include <drm/drm_fourcc.h>
4561 + #include <drm/drm_gem_cma_helper.h>
4562 + #include <drm/drm_gem_framebuffer_helper.h>
4563 + #include <drm/drm_irq.h>
4564 +@@ -87,8 +88,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
4565 + clk_disable_unprepare(mxsfb->clk_axi);
4566 + }
4567 +
4568 ++static struct drm_framebuffer *
4569 ++mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
4570 ++ const struct drm_mode_fb_cmd2 *mode_cmd)
4571 ++{
4572 ++ const struct drm_format_info *info;
4573 ++
4574 ++ info = drm_get_format_info(dev, mode_cmd);
4575 ++ if (!info)
4576 ++ return ERR_PTR(-EINVAL);
4577 ++
4578 ++ if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
4579 ++ dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
4580 ++ return ERR_PTR(-EINVAL);
4581 ++ }
4582 ++
4583 ++ return drm_gem_fb_create(dev, file_priv, mode_cmd);
4584 ++}
4585 ++
4586 + static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
4587 +- .fb_create = drm_gem_fb_create,
4588 ++ .fb_create = mxsfb_fb_create,
4589 + .atomic_check = drm_atomic_helper_check,
4590 + .atomic_commit = drm_atomic_helper_commit,
4591 + };
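
Aside: the new mxsfb_fb_create() rejects framebuffers whose pitch is not exactly width * cpp[0], since the scanout engine cannot skip padding bytes at the end of each line. A small stand-alone sketch of the same check, with illustrative values; not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* the only layout mxsfb can scan out: tightly packed lines */
static int pitch_ok(uint32_t width, uint32_t cpp, uint32_t pitch)
{
	return (uint64_t)width * cpp == pitch;
}

int main(void)
{
	/* XRGB8888 has cpp = 4, so a 1024-pixel-wide FB needs pitch 4096 */
	printf("%d\n", pitch_ok(1024, 4, 4096));	/* 1: accepted */
	printf("%d\n", pitch_ok(1024, 4, 4352));	/* 0: padded pitch rejected */
	return 0;
}
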
4592 +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
4593 +index 7debf2ca42522..4b4ca31a2d577 100644
4594 +--- a/drivers/gpu/drm/panel/panel-simple.c
4595 ++++ b/drivers/gpu/drm/panel/panel-simple.c
4596 +@@ -2862,12 +2862,12 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
4597 + static const struct panel_desc ortustech_com43h4m85ulc = {
4598 + .modes = &ortustech_com43h4m85ulc_mode,
4599 + .num_modes = 1,
4600 +- .bpc = 8,
4601 ++ .bpc = 6,
4602 + .size = {
4603 + .width = 56,
4604 + .height = 93,
4605 + },
4606 +- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
4607 ++ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
4608 + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
4609 + .connector_type = DRM_MODE_CONNECTOR_DPI,
4610 + };
4611 +diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
4612 +index c30c719a80594..3c4a85213c15f 100644
4613 +--- a/drivers/gpu/drm/panfrost/panfrost_device.h
4614 ++++ b/drivers/gpu/drm/panfrost/panfrost_device.h
4615 +@@ -69,6 +69,9 @@ struct panfrost_compatible {
4616 + int num_pm_domains;
4617 + /* Only required if num_pm_domains > 1. */
4618 + const char * const *pm_domain_names;
4619 ++
4620 ++ /* Vendor implementation quirks callback */
4621 ++ void (*vendor_quirk)(struct panfrost_device *pfdev);
4622 + };
4623 +
4624 + struct panfrost_device {
4625 +diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
4626 +index 882fecc33fdb1..6e11a73e81aa3 100644
4627 +--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
4628 ++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
4629 +@@ -667,7 +667,18 @@ static const struct panfrost_compatible default_data = {
4630 + .pm_domain_names = NULL,
4631 + };
4632 +
4633 ++static const struct panfrost_compatible amlogic_data = {
4634 ++ .num_supplies = ARRAY_SIZE(default_supplies),
4635 ++ .supply_names = default_supplies,
4636 ++ .vendor_quirk = panfrost_gpu_amlogic_quirk,
4637 ++};
4638 ++
4639 + static const struct of_device_id dt_match[] = {
4640 ++ /* Set first to probe before the generic compatibles */
4641 ++ { .compatible = "amlogic,meson-gxm-mali",
4642 ++ .data = &amlogic_data, },
4643 ++ { .compatible = "amlogic,meson-g12a-mali",
4644 ++ .data = &amlogic_data, },
4645 + { .compatible = "arm,mali-t604", .data = &default_data, },
4646 + { .compatible = "arm,mali-t624", .data = &default_data, },
4647 + { .compatible = "arm,mali-t628", .data = &default_data, },
4648 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
4649 +index f2c1ddc41a9bf..165403878ad9b 100644
4650 +--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
4651 ++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
4652 +@@ -75,6 +75,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
4653 + return 0;
4654 + }
4655 +
4656 ++void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
4657 ++{
4658 ++ /*
4659 ++ * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 needs
4660 ++ * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
4661 ++ * to operate correctly.
4662 ++ */
4663 ++ gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
4664 ++ gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
4665 ++}
4666 ++
4667 + static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
4668 + {
4669 + u32 quirks = 0;
4670 +@@ -135,6 +146,10 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
4671 +
4672 + if (quirks)
4673 + gpu_write(pfdev, GPU_JM_CONFIG, quirks);
4674 ++
4675 ++ /* Here goes platform specific quirks */
4676 ++ if (pfdev->comp->vendor_quirk)
4677 ++ pfdev->comp->vendor_quirk(pfdev);
4678 + }
4679 +
4680 + #define MAX_HW_REVS 6
4681 +@@ -304,16 +319,18 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
4682 + int ret;
4683 + u32 val;
4684 +
4685 ++ panfrost_gpu_init_quirks(pfdev);
4686 ++
4687 + /* Just turn on everything for now */
4688 + gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
4689 + ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
4690 +- val, val == pfdev->features.l2_present, 100, 1000);
4691 ++ val, val == pfdev->features.l2_present, 100, 20000);
4692 + if (ret)
4693 + dev_err(pfdev->dev, "error powering up gpu L2");
4694 +
4695 + gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
4696 + ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
4697 +- val, val == pfdev->features.shader_present, 100, 1000);
4698 ++ val, val == pfdev->features.shader_present, 100, 20000);
4699 + if (ret)
4700 + dev_err(pfdev->dev, "error powering up gpu shader");
4701 +
4702 +@@ -355,7 +372,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
4703 + return err;
4704 + }
4705 +
4706 +- panfrost_gpu_init_quirks(pfdev);
4707 + panfrost_gpu_power_on(pfdev);
4708 +
4709 + return 0;
4710 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
4711 +index 4112412087b27..468c51e7e46db 100644
4712 +--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
4713 ++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
4714 +@@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
4715 + void panfrost_gpu_power_on(struct panfrost_device *pfdev);
4716 + void panfrost_gpu_power_off(struct panfrost_device *pfdev);
4717 +
4718 ++void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev);
4719 ++
4720 + #endif
4721 +diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
4722 +index ea38ac60581c6..eddaa62ad8b0e 100644
4723 +--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
4724 ++++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
4725 +@@ -51,6 +51,10 @@
4726 + #define GPU_STATUS 0x34
4727 + #define GPU_STATUS_PRFCNT_ACTIVE BIT(2)
4728 + #define GPU_LATEST_FLUSH_ID 0x38
4729 ++#define GPU_PWR_KEY 0x50 /* (WO) Power manager key register */
4730 ++#define GPU_PWR_KEY_UNLOCK 0x2968A819
4731 ++#define GPU_PWR_OVERRIDE0 0x54 /* (RW) Power manager override settings */
4732 ++#define GPU_PWR_OVERRIDE1 0x58 /* (RW) Power manager override settings */
4733 + #define GPU_FAULT_STATUS 0x3C
4734 + #define GPU_FAULT_ADDRESS_LO 0x40
4735 + #define GPU_FAULT_ADDRESS_HI 0x44
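
Aside: the panfrost hunks above thread an optional vendor_quirk callback through the per-compatible data, so the Amlogic-specific GPU_PWR_KEY / GPU_PWR_OVERRIDE1 writes run only on matching SoCs. A user-space sketch of that optional-hook dispatch pattern follows; all names are hypothetical:

#include <stddef.h>
#include <stdio.h>

struct device_data {
	const char *name;
	void (*vendor_quirk)(void);	/* optional per-vendor hook */
};

static void amlogic_quirk(void)
{
	/* stands in for the undocumented power-override register writes */
	puts("applying amlogic power-override quirk");
}

static const struct device_data table[] = {
	{ .name = "amlogic,meson-g12a-mali", .vendor_quirk = amlogic_quirk },
	{ .name = "arm,mali-t860" },		/* no quirk: hook stays NULL */
};

static void init_quirks(const struct device_data *d)
{
	/* generic quirks would be applied here first ... */
	if (d->vendor_quirk)		/* vendor hook only when provided */
		d->vendor_quirk();
}

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		init_quirks(&table[i]);
	return 0;
}
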
4736 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
4737 +index f1a81c9b184d4..fa09b3ae8b9d4 100644
4738 +--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
4739 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
4740 +@@ -13,6 +13,7 @@
4741 + #include <drm/drm_fourcc.h>
4742 + #include <drm/drm_gem_cma_helper.h>
4743 + #include <drm/drm_gem_framebuffer_helper.h>
4744 ++#include <drm/drm_managed.h>
4745 + #include <drm/drm_plane_helper.h>
4746 + #include <drm/drm_vblank.h>
4747 +
4748 +@@ -341,6 +342,13 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
4749 + .atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
4750 + };
4751 +
4752 ++static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res)
4753 ++{
4754 ++ struct rcar_du_vsp *vsp = res;
4755 ++
4756 ++ put_device(vsp->vsp);
4757 ++}
4758 ++
4759 + int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
4760 + unsigned int crtcs)
4761 + {
4762 +@@ -357,6 +365,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
4763 +
4764 + vsp->vsp = &pdev->dev;
4765 +
4766 ++ ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp);
4767 ++ if (ret < 0)
4768 ++ return ret;
4769 ++
4770 + ret = vsp1_du_init(vsp->vsp);
4771 + if (ret < 0)
4772 + return ret;
4773 +diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
4774 +index fa39d140adc6c..94825ec3a09d8 100644
4775 +--- a/drivers/gpu/drm/vgem/vgem_drv.c
4776 ++++ b/drivers/gpu/drm/vgem/vgem_drv.c
4777 +@@ -471,8 +471,8 @@ static int __init vgem_init(void)
4778 +
4779 + out_put:
4780 + drm_dev_put(&vgem_device->drm);
4781 ++ platform_device_unregister(vgem_device->platform);
4782 + return ret;
4783 +-
4784 + out_unregister:
4785 + platform_device_unregister(vgem_device->platform);
4786 + out_free:
4787 +diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
4788 +index 0a5c8cf409fb8..dc8cb8dfce58e 100644
4789 +--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
4790 ++++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
4791 +@@ -80,8 +80,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
4792 + vgdev->capsets[i].id > 0, 5 * HZ);
4793 + if (ret == 0) {
4794 + DRM_ERROR("timed out waiting for cap set %d\n", i);
4795 ++ spin_lock(&vgdev->display_info_lock);
4796 + kfree(vgdev->capsets);
4797 + vgdev->capsets = NULL;
4798 ++ spin_unlock(&vgdev->display_info_lock);
4799 + return;
4800 + }
4801 + DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
4802 +diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
4803 +index 9e663a5d99526..2517450bf46ba 100644
4804 +--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
4805 ++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
4806 +@@ -684,9 +684,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
4807 + int i = le32_to_cpu(cmd->capset_index);
4808 +
4809 + spin_lock(&vgdev->display_info_lock);
4810 +- vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
4811 +- vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
4812 +- vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
4813 ++ if (vgdev->capsets) {
4814 ++ vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
4815 ++ vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
4816 ++ vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
4817 ++ } else {
4818 ++ DRM_ERROR("invalid capset memory.");
4819 ++ }
4820 + spin_unlock(&vgdev->display_info_lock);
4821 + wake_up(&vgdev->resp_wq);
4822 + }
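
Aside: the two virtio-gpu hunks close a race where the capset-info wait times out and frees vgdev->capsets while the response callback is still writing into it; both sides now hold display_info_lock and the callback re-checks the pointer under it. A user-space sketch with a pthread mutex standing in for the spinlock (names hypothetical, run single-threaded for brevity):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *capsets;

static void on_timeout(void)
{
	pthread_mutex_lock(&lock);
	free(capsets);
	capsets = NULL;		/* invalidate under the lock */
	pthread_mutex_unlock(&lock);
}

static void on_response(int value)
{
	pthread_mutex_lock(&lock);
	if (capsets)		/* re-check: may have been torn down */
		capsets[0] = value;
	else
		fprintf(stderr, "invalid capset memory\n");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	capsets = calloc(4, sizeof(*capsets));
	if (!capsets)
		return 1;
	on_response(7);		/* normal case: value stored */
	on_timeout();		/* waiter gives up, tears down */
	on_response(42);	/* late reply: safely rejected */
	return 0;
}
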
4823 +diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
4824 +index 4af2f19480f4f..b8b060354667e 100644
4825 +--- a/drivers/gpu/drm/vkms/vkms_composer.c
4826 ++++ b/drivers/gpu/drm/vkms/vkms_composer.c
4827 +@@ -33,7 +33,7 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
4828 + + (i * composer->pitch)
4829 + + (j * composer->cpp);
4830 + /* XRGB format ignores Alpha channel */
4831 +- memset(vaddr_out + src_offset + 24, 0, 8);
4832 ++ bitmap_clear(vaddr_out + src_offset, 24, 8);
4833 + crc = crc32_le(crc, vaddr_out + src_offset,
4834 + sizeof(u32));
4835 + }
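
Aside: the vkms hunk swaps memset() for bitmap_clear() because the old call zeroed 8 bytes starting 24 bytes past the pixel, corrupting the two pixels that follow, while the intent was to zero 8 bits starting at bit 24 — the unused X byte of an XRGB8888 pixel — before computing the CRC. A tiny sketch of the bit-level operation; clear_bits() is a hand-rolled stand-in, not the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* stand-in for bitmap_clear(addr, start, nbits) acting on one word */
static void clear_bits(uint32_t *word, unsigned int start, unsigned int nbits)
{
	uint32_t mask = (nbits >= 32 ? ~0u : ((1u << nbits) - 1)) << start;

	*word &= ~mask;
}

int main(void)
{
	uint32_t pixel = 0xFF123456;	/* X = 0xFF, RGB = 0x123456 */

	clear_bits(&pixel, 24, 8);	/* zero only the X byte */
	printf("0x%08X\n", (unsigned int)pixel);	/* 0x00123456 */
	return 0;
}
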
4836 +diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
4837 +index 1e8b2169d8341..e6a3ea1b399a7 100644
4838 +--- a/drivers/gpu/drm/vkms/vkms_drv.c
4839 ++++ b/drivers/gpu/drm/vkms/vkms_drv.c
4840 +@@ -188,8 +188,8 @@ static int __init vkms_init(void)
4841 +
4842 + out_put:
4843 + drm_dev_put(&vkms_device->drm);
4844 ++ platform_device_unregister(vkms_device->platform);
4845 + return ret;
4846 +-
4847 + out_unregister:
4848 + platform_device_unregister(vkms_device->platform);
4849 + out_free:
4850 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
4851 +index b49ec7dde6457..b269c792d25dc 100644
4852 +--- a/drivers/hid/hid-ids.h
4853 ++++ b/drivers/hid/hid-ids.h
4854 +@@ -726,6 +726,7 @@
4855 + #define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049
4856 + #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
4857 + #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
4858 ++#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
4859 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
4860 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
4861 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e
4862 +@@ -1122,6 +1123,7 @@
4863 + #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
4864 + #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
4865 + #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
4866 ++#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
4867 + #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
4868 +
4869 + #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
4870 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
4871 +index e3d475f4baf66..b2bff932c524f 100644
4872 +--- a/drivers/hid/hid-input.c
4873 ++++ b/drivers/hid/hid-input.c
4874 +@@ -797,7 +797,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
4875 + case 0x3b: /* Battery Strength */
4876 + hidinput_setup_battery(device, HID_INPUT_REPORT, field);
4877 + usage->type = EV_PWR;
4878 +- goto ignore;
4879 ++ return;
4880 +
4881 + case 0x3c: /* Invert */
4882 + map_key_clear(BTN_TOOL_RUBBER);
4883 +@@ -1059,7 +1059,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
4884 + case HID_DC_BATTERYSTRENGTH:
4885 + hidinput_setup_battery(device, HID_INPUT_REPORT, field);
4886 + usage->type = EV_PWR;
4887 +- goto ignore;
4888 ++ return;
4889 + }
4890 + goto unknown;
4891 +
4892 +diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
4893 +index 6c55682c59740..044a93f3c1178 100644
4894 +--- a/drivers/hid/hid-ite.c
4895 ++++ b/drivers/hid/hid-ite.c
4896 +@@ -44,6 +44,10 @@ static const struct hid_device_id ite_devices[] = {
4897 + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
4898 + USB_VENDOR_ID_SYNAPTICS,
4899 + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
4900 ++ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
4901 ++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
4902 ++ USB_VENDOR_ID_SYNAPTICS,
4903 ++ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
4904 + { }
4905 + };
4906 + MODULE_DEVICE_TABLE(hid, ite_devices);
4907 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
4908 +index e3152155c4b85..99f041afd5c0c 100644
4909 +--- a/drivers/hid/hid-multitouch.c
4910 ++++ b/drivers/hid/hid-multitouch.c
4911 +@@ -1973,6 +1973,12 @@ static const struct hid_device_id mt_devices[] = {
4912 + HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
4913 + USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },
4914 +
4915 ++ /* Lenovo X1 TAB Gen 3 */
4916 ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
4917 ++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
4918 ++ USB_VENDOR_ID_LENOVO,
4919 ++ USB_DEVICE_ID_LENOVO_X1_TAB3) },
4920 ++
4921 + /* MosArt panels */
4922 + { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
4923 + MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
4924 +diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
4925 +index 1a6e600197d0b..509b9bb1362cb 100644
4926 +--- a/drivers/hid/hid-roccat-kone.c
4927 ++++ b/drivers/hid/hid-roccat-kone.c
4928 +@@ -294,31 +294,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
4929 + struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
4930 + struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
4931 + int retval = 0, difference, old_profile;
4932 ++ struct kone_settings *settings = (struct kone_settings *)buf;
4933 +
4934 + /* I need to get my data in one piece */
4935 + if (off != 0 || count != sizeof(struct kone_settings))
4936 + return -EINVAL;
4937 +
4938 + mutex_lock(&kone->kone_lock);
4939 +- difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
4940 ++ difference = memcmp(settings, &kone->settings,
4941 ++ sizeof(struct kone_settings));
4942 + if (difference) {
4943 +- retval = kone_set_settings(usb_dev,
4944 +- (struct kone_settings const *)buf);
4945 +- if (retval) {
4946 +- mutex_unlock(&kone->kone_lock);
4947 +- return retval;
4948 ++ if (settings->startup_profile < 1 ||
4949 ++ settings->startup_profile > 5) {
4950 ++ retval = -EINVAL;
4951 ++ goto unlock;
4952 + }
4953 +
4954 ++ retval = kone_set_settings(usb_dev, settings);
4955 ++ if (retval)
4956 ++ goto unlock;
4957 ++
4958 + old_profile = kone->settings.startup_profile;
4959 +- memcpy(&kone->settings, buf, sizeof(struct kone_settings));
4960 ++ memcpy(&kone->settings, settings, sizeof(struct kone_settings));
4961 +
4962 + kone_profile_activated(kone, kone->settings.startup_profile);
4963 +
4964 + if (kone->settings.startup_profile != old_profile)
4965 + kone_profile_report(kone, kone->settings.startup_profile);
4966 + }
4967 ++unlock:
4968 + mutex_unlock(&kone->kone_lock);
4969 +
4970 ++ if (retval)
4971 ++ return retval;
4972 ++
4973 + return sizeof(struct kone_settings);
4974 + }
4975 + static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
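
Aside: the roccat-kone hunk validates the user-supplied startup_profile (the device supports profiles 1 to 5) before the settings are sent to the mouse and memcpy()'d into driver state, and funnels every exit through one shared unlock label. A minimal sketch of that validate-before-commit shape; the struct layout is a hypothetical reduction:

#include <stdio.h>
#include <string.h>

struct settings {
	unsigned char startup_profile;	/* valid values: 1..5 */
};

static struct settings stored;

static int write_settings(const struct settings *req)
{
	/* reject before anything is sent to hardware or cached */
	if (req->startup_profile < 1 || req->startup_profile > 5)
		return -1;			/* -EINVAL analogue */
	memcpy(&stored, req, sizeof(stored));
	return 0;
}

int main(void)
{
	struct settings bad = { .startup_profile = 9 };
	struct settings good = { .startup_profile = 2 };

	printf("bad: %d\n", write_settings(&bad));	/* -1: rejected */
	printf("good: %d\n", write_settings(&good));	/* 0: committed */
	return 0;
}
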
4976 +diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c
4977 +index 94698cae04971..3e1d56585b91a 100644
4978 +--- a/drivers/hwmon/bt1-pvt.c
4979 ++++ b/drivers/hwmon/bt1-pvt.c
4980 +@@ -13,6 +13,7 @@
4981 + #include <linux/bitops.h>
4982 + #include <linux/clk.h>
4983 + #include <linux/completion.h>
4984 ++#include <linux/delay.h>
4985 + #include <linux/device.h>
4986 + #include <linux/hwmon-sysfs.h>
4987 + #include <linux/hwmon.h>
4988 +@@ -476,6 +477,7 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
4989 + long *val)
4990 + {
4991 + struct pvt_cache *cache = &pvt->cache[type];
4992 ++ unsigned long timeout;
4993 + u32 data;
4994 + int ret;
4995 +
4996 +@@ -499,7 +501,14 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
4997 + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, 0);
4998 + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
4999 +
5000 +- wait_for_completion(&cache->conversion);
5001 ++ /*
5002 ++ * Wait with timeout since in case if the sensor is suddenly powered
5003 ++ * down the request won't be completed and the caller will hang up on
5004 ++ * this procedure until the power is back up again. Multiply the
5005 ++ * timeout by the factor of two to prevent a false timeout.
5006 ++ */
5007 ++ timeout = 2 * usecs_to_jiffies(ktime_to_us(pvt->timeout));
5008 ++ ret = wait_for_completion_timeout(&cache->conversion, timeout);
5009 +
5010 + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
5011 + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
5012 +@@ -509,6 +518,9 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
5013 +
5014 + mutex_unlock(&pvt->iface_mtx);
5015 +
5016 ++ if (!ret)
5017 ++ return -ETIMEDOUT;
5018 ++
5019 + if (type == PVT_TEMP)
5020 + *val = pvt_calc_poly(&poly_N_to_temp, data);
5021 + else
5022 +@@ -654,44 +666,16 @@ static int pvt_write_trim(struct pvt_hwmon *pvt, long val)
5023 +
5024 + static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
5025 + {
5026 +- unsigned long rate;
5027 +- ktime_t kt;
5028 +- u32 data;
5029 +-
5030 +- rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
5031 +- if (!rate)
5032 +- return -ENODEV;
5033 +-
5034 +- /*
5035 +- * Don't bother with mutex here, since we just read data from MMIO.
5036 +- * We also have to scale the ticks timeout up to compensate the
5037 +- * ms-ns-data translations.
5038 +- */
5039 +- data = readl(pvt->regs + PVT_TTIMEOUT) + 1;
5040 ++ int ret;
5041 +
5042 +- /*
5043 +- * Calculate ref-clock based delay (Ttotal) between two consecutive
5044 +- * data samples of the same sensor. So we first must calculate the
5045 +- * delay introduced by the internal ref-clock timer (Tref * Fclk).
5046 +- * Then add the constant timeout cuased by each conversion latency
5047 +- * (Tmin). The basic formulae for each conversion is following:
5048 +- * Ttotal = Tref * Fclk + Tmin
5049 +- * Note if alarms are enabled the sensors are polled one after
5050 +- * another, so in order to have the delay being applicable for each
5051 +- * sensor the requested value must be equally redistirbuted.
5052 +- */
5053 +-#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
5054 +- kt = ktime_set(PVT_SENSORS_NUM * (u64)data, 0);
5055 +- kt = ktime_divns(kt, rate);
5056 +- kt = ktime_add_ns(kt, PVT_SENSORS_NUM * PVT_TOUT_MIN);
5057 +-#else
5058 +- kt = ktime_set(data, 0);
5059 +- kt = ktime_divns(kt, rate);
5060 +- kt = ktime_add_ns(kt, PVT_TOUT_MIN);
5061 +-#endif
5062 ++ ret = mutex_lock_interruptible(&pvt->iface_mtx);
5063 ++ if (ret)
5064 ++ return ret;
5065 +
5066 + /* Return the result in msec as hwmon sysfs interface requires. */
5067 +- *val = ktime_to_ms(kt);
5068 ++ *val = ktime_to_ms(pvt->timeout);
5069 ++
5070 ++ mutex_unlock(&pvt->iface_mtx);
5071 +
5072 + return 0;
5073 + }
5074 +@@ -699,7 +683,7 @@ static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
5075 + static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
5076 + {
5077 + unsigned long rate;
5078 +- ktime_t kt;
5079 ++ ktime_t kt, cache;
5080 + u32 data;
5081 + int ret;
5082 +
5083 +@@ -712,7 +696,7 @@ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
5084 + * between all available sensors to have the requested delay
5085 + * applicable to each individual sensor.
5086 + */
5087 +- kt = ms_to_ktime(val);
5088 ++ cache = kt = ms_to_ktime(val);
5089 + #if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
5090 + kt = ktime_divns(kt, PVT_SENSORS_NUM);
5091 + #endif
5092 +@@ -741,6 +725,7 @@ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
5093 + return ret;
5094 +
5095 + pvt_set_tout(pvt, data);
5096 ++ pvt->timeout = cache;
5097 +
5098 + mutex_unlock(&pvt->iface_mtx);
5099 +
5100 +@@ -982,10 +967,52 @@ static int pvt_request_clks(struct pvt_hwmon *pvt)
5101 + return 0;
5102 + }
5103 +
5104 +-static void pvt_init_iface(struct pvt_hwmon *pvt)
5105 ++static int pvt_check_pwr(struct pvt_hwmon *pvt)
5106 + {
5107 ++ unsigned long tout;
5108 ++ int ret = 0;
5109 ++ u32 data;
5110 ++
5111 ++ /*
5112 ++ * Test out the sensor conversion functionality. If it is not done on
5113 ++ * time then the domain must have been unpowered and we won't be able
5114 ++ * to use the device later in this driver.
5115 ++ * Note If the power source is lost during the normal driver work the
5116 ++ * data read procedure will either return -ETIMEDOUT (for the
5117 ++ * alarm-less driver configuration) or just stop the repeated
5118 ++ * conversion. In the later case alas we won't be able to detect the
5119 ++ * problem.
5120 ++ */
5121 ++ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_ALL, PVT_INTR_ALL);
5122 ++ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
5123 ++ pvt_set_tout(pvt, 0);
5124 ++ readl(pvt->regs + PVT_DATA);
5125 ++
5126 ++ tout = PVT_TOUT_MIN / NSEC_PER_USEC;
5127 ++ usleep_range(tout, 2 * tout);
5128 ++
5129 ++ data = readl(pvt->regs + PVT_DATA);
5130 ++ if (!(data & PVT_DATA_VALID)) {
5131 ++ ret = -ENODEV;
5132 ++ dev_err(pvt->dev, "Sensor is powered down\n");
5133 ++ }
5134 ++
5135 ++ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
5136 ++
5137 ++ return ret;
5138 ++}
5139 ++
5140 ++static int pvt_init_iface(struct pvt_hwmon *pvt)
5141 ++{
5142 ++ unsigned long rate;
5143 + u32 trim, temp;
5144 +
5145 ++ rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
5146 ++ if (!rate) {
5147 ++ dev_err(pvt->dev, "Invalid reference clock rate\n");
5148 ++ return -ENODEV;
5149 ++ }
5150 ++
5151 + /*
5152 + * Make sure all interrupts and controller are disabled so not to
5153 + * accidentally have ISR executed before the driver data is fully
5154 +@@ -1000,12 +1027,37 @@ static void pvt_init_iface(struct pvt_hwmon *pvt)
5155 + pvt_set_mode(pvt, pvt_info[pvt->sensor].mode);
5156 + pvt_set_tout(pvt, PVT_TOUT_DEF);
5157 +
5158 ++ /*
5159 ++ * Preserve the current ref-clock based delay (Ttotal) between the
5160 ++ * sensors data samples in the driver data so not to recalculate it
5161 ++ * each time on the data requests and timeout reads. It consists of the
5162 ++ * delay introduced by the internal ref-clock timer (N / Fclk) and the
5163 ++ * constant timeout caused by each conversion latency (Tmin):
5164 ++ * Ttotal = N / Fclk + Tmin
5165 ++ * If alarms are enabled the sensors are polled one after another and
5166 ++ * in order to get the next measurement of a particular sensor the
5167 ++ * caller will have to wait for at most until all the others are
5168 ++ * polled. In that case the formulae will look a bit different:
5169 ++ * Ttotal = 5 * (N / Fclk + Tmin)
5170 ++ */
5171 ++#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
5172 ++ pvt->timeout = ktime_set(PVT_SENSORS_NUM * PVT_TOUT_DEF, 0);
5173 ++ pvt->timeout = ktime_divns(pvt->timeout, rate);
5174 ++ pvt->timeout = ktime_add_ns(pvt->timeout, PVT_SENSORS_NUM * PVT_TOUT_MIN);
5175 ++#else
5176 ++ pvt->timeout = ktime_set(PVT_TOUT_DEF, 0);
5177 ++ pvt->timeout = ktime_divns(pvt->timeout, rate);
5178 ++ pvt->timeout = ktime_add_ns(pvt->timeout, PVT_TOUT_MIN);
5179 ++#endif
5180 ++
5181 + trim = PVT_TRIM_DEF;
5182 + if (!of_property_read_u32(pvt->dev->of_node,
5183 + "baikal,pvt-temp-offset-millicelsius", &temp))
5184 + trim = pvt_calc_trim(temp);
5185 +
5186 + pvt_set_trim(pvt, trim);
5187 ++
5188 ++ return 0;
5189 + }
5190 +
5191 + static int pvt_request_irq(struct pvt_hwmon *pvt)
5192 +@@ -1109,7 +1161,13 @@ static int pvt_probe(struct platform_device *pdev)
5193 + if (ret)
5194 + return ret;
5195 +
5196 +- pvt_init_iface(pvt);
5197 ++ ret = pvt_check_pwr(pvt);
5198 ++ if (ret)
5199 ++ return ret;
5200 ++
5201 ++ ret = pvt_init_iface(pvt);
5202 ++ if (ret)
5203 ++ return ret;
5204 +
5205 + ret = pvt_request_irq(pvt);
5206 + if (ret)
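
Aside: the bt1-pvt changes cache the per-sensor update interval as Ttotal = N / Fclk + Tmin (scaled by the sensor count when alarms are enabled) and then have pvt_read_data() wait for 2 * Ttotal, so a slow-but-alive conversion is not misreported as -ETIMEDOUT. A worked example of the arithmetic; the tick count, clock rate, and Tmin below are made-up placeholders, not the driver's actual PVT_TOUT_DEF / PVT_TOUT_MIN values:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long long n = 65536;		/* hypothetical timer tick count */
	unsigned long long fclk = 25000000;	/* hypothetical 25 MHz ref clock */
	unsigned long long tmin_ns = 52000;	/* hypothetical conversion latency */

	/* Ttotal = N / Fclk + Tmin, computed in nanoseconds */
	unsigned long long ttotal_ns = n * NSEC_PER_SEC / fclk + tmin_ns;

	printf("Ttotal = %llu ns; completion wait = %llu ns\n",
	       ttotal_ns, 2 * ttotal_ns);	/* ~2.67 ms and ~5.35 ms */
	return 0;
}
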
5207 +diff --git a/drivers/hwmon/bt1-pvt.h b/drivers/hwmon/bt1-pvt.h
5208 +index 5eac73e948854..93b8dd5e7c944 100644
5209 +--- a/drivers/hwmon/bt1-pvt.h
5210 ++++ b/drivers/hwmon/bt1-pvt.h
5211 +@@ -10,6 +10,7 @@
5212 + #include <linux/completion.h>
5213 + #include <linux/hwmon.h>
5214 + #include <linux/kernel.h>
5215 ++#include <linux/ktime.h>
5216 + #include <linux/mutex.h>
5217 + #include <linux/seqlock.h>
5218 +
5219 +@@ -201,6 +202,7 @@ struct pvt_cache {
5220 + * if alarms are disabled).
5221 + * @sensor: current PVT sensor the data conversion is being performed for.
5222 + * @cache: data cache descriptor.
5223 ++ * @timeout: conversion timeout cache.
5224 + */
5225 + struct pvt_hwmon {
5226 + struct device *dev;
5227 +@@ -214,6 +216,7 @@ struct pvt_hwmon {
5228 + struct mutex iface_mtx;
5229 + enum pvt_sensor_type sensor;
5230 + struct pvt_cache cache[PVT_SENSORS_NUM];
5231 ++ ktime_t timeout;
5232 + };
5233 +
5234 + /*
5235 +diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
5236 +index 18b4e071067f7..de04dff28945b 100644
5237 +--- a/drivers/hwmon/pmbus/max34440.c
5238 ++++ b/drivers/hwmon/pmbus/max34440.c
5239 +@@ -388,7 +388,6 @@ static struct pmbus_driver_info max34440_info[] = {
5240 + .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
5241 + .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
5242 + .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
5243 +- .read_byte_data = max34440_read_byte_data,
5244 + .read_word_data = max34440_read_word_data,
5245 + .write_word_data = max34440_write_word_data,
5246 + },
5247 +@@ -419,7 +418,6 @@ static struct pmbus_driver_info max34440_info[] = {
5248 + .func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
5249 + .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
5250 + .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
5251 +- .read_byte_data = max34440_read_byte_data,
5252 + .read_word_data = max34440_read_word_data,
5253 + .write_word_data = max34440_write_word_data,
5254 + },
5255 +@@ -455,7 +453,6 @@ static struct pmbus_driver_info max34440_info[] = {
5256 + .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
5257 + .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
5258 + .func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
5259 +- .read_byte_data = max34440_read_byte_data,
5260 + .read_word_data = max34440_read_word_data,
5261 + .write_word_data = max34440_write_word_data,
5262 + },
5263 +diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
5264 +index 5a5120121e507..3964ceab2817c 100644
5265 +--- a/drivers/hwmon/w83627ehf.c
5266 ++++ b/drivers/hwmon/w83627ehf.c
5267 +@@ -1951,8 +1951,12 @@ static int w83627ehf_probe(struct platform_device *pdev)
5268 + data,
5269 + &w83627ehf_chip_info,
5270 + w83627ehf_groups);
5271 ++ if (IS_ERR(hwmon_dev)) {
5272 ++ err = PTR_ERR(hwmon_dev);
5273 ++ goto exit_release;
5274 ++ }
5275 +
5276 +- return PTR_ERR_OR_ZERO(hwmon_dev);
5277 ++ return 0;
5278 +
5279 + exit_release:
5280 + release_region(res->start, IOREGION_LENGTH);
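
Aside: the w83627ehf hunk plugs a resource leak — when the hwmon registration helper returns an error pointer, the probe routine must release the I/O region it claimed earlier rather than returning straight through PTR_ERR_OR_ZERO(). A compact sketch with IS_ERR()/PTR_ERR() re-implemented minimally just for illustration:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* minimal stand-ins for the kernel's IS_ERR()/PTR_ERR() */
static int is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static long ptr_err(const void *p)
{
	return (long)(uintptr_t)p;
}

static int region_claimed;

static void *register_device(int fail)
{
	return fail ? (void *)(uintptr_t)-ENOMEM : (void *)0x1000;
}

static int probe(int fail)
{
	void *dev;

	region_claimed = 1;		/* request_region() analogue */
	dev = register_device(fail);
	if (is_err(dev)) {
		region_claimed = 0;	/* release_region() on error */
		return (int)ptr_err(dev);
	}
	return 0;			/* region stays claimed while bound */
}

int main(void)
{
	int ret = probe(1);

	printf("ret=%d claimed=%d\n", ret, region_claimed);	/* ret=-12 claimed=0 */
	ret = probe(0);
	printf("ret=%d claimed=%d\n", ret, region_claimed);	/* ret=0 claimed=1 */
	return 0;
}
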
5281 +diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
5282 +index 3ccc703dc9409..167fbc2e7033f 100644
5283 +--- a/drivers/hwtracing/coresight/coresight-cti.c
5284 ++++ b/drivers/hwtracing/coresight/coresight-cti.c
5285 +@@ -86,22 +86,16 @@ void cti_write_all_hw_regs(struct cti_drvdata *drvdata)
5286 + CS_LOCK(drvdata->base);
5287 + }
5288 +
5289 +-static void cti_enable_hw_smp_call(void *info)
5290 +-{
5291 +- struct cti_drvdata *drvdata = info;
5292 +-
5293 +- cti_write_all_hw_regs(drvdata);
5294 +-}
5295 +-
5296 + /* write regs to hardware and enable */
5297 + static int cti_enable_hw(struct cti_drvdata *drvdata)
5298 + {
5299 + struct cti_config *config = &drvdata->config;
5300 + struct device *dev = &drvdata->csdev->dev;
5301 ++ unsigned long flags;
5302 + int rc = 0;
5303 +
5304 + pm_runtime_get_sync(dev->parent);
5305 +- spin_lock(&drvdata->spinlock);
5306 ++ spin_lock_irqsave(&drvdata->spinlock, flags);
5307 +
5308 + /* no need to do anything if enabled or unpowered*/
5309 + if (config->hw_enabled || !config->hw_powered)
5310 +@@ -112,19 +106,11 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
5311 + if (rc)
5312 + goto cti_err_not_enabled;
5313 +
5314 +- if (drvdata->ctidev.cpu >= 0) {
5315 +- rc = smp_call_function_single(drvdata->ctidev.cpu,
5316 +- cti_enable_hw_smp_call,
5317 +- drvdata, 1);
5318 +- if (rc)
5319 +- goto cti_err_not_enabled;
5320 +- } else {
5321 +- cti_write_all_hw_regs(drvdata);
5322 +- }
5323 ++ cti_write_all_hw_regs(drvdata);
5324 +
5325 + config->hw_enabled = true;
5326 + atomic_inc(&drvdata->config.enable_req_count);
5327 +- spin_unlock(&drvdata->spinlock);
5328 ++ spin_unlock_irqrestore(&drvdata->spinlock, flags);
5329 + return rc;
5330 +
5331 + cti_state_unchanged:
5332 +@@ -132,7 +118,7 @@ cti_state_unchanged:
5333 +
5334 + /* cannot enable due to error */
5335 + cti_err_not_enabled:
5336 +- spin_unlock(&drvdata->spinlock);
5337 ++ spin_unlock_irqrestore(&drvdata->spinlock, flags);
5338 + pm_runtime_put(dev->parent);
5339 + return rc;
5340 + }
5341 +@@ -141,9 +127,7 @@ cti_err_not_enabled:
5342 + static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
5343 + {
5344 + struct cti_config *config = &drvdata->config;
5345 +- struct device *dev = &drvdata->csdev->dev;
5346 +
5347 +- pm_runtime_get_sync(dev->parent);
5348 + spin_lock(&drvdata->spinlock);
5349 + config->hw_powered = true;
5350 +
5351 +@@ -163,7 +147,6 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
5352 + /* did not re-enable due to no claim / no request */
5353 + cti_hp_not_enabled:
5354 + spin_unlock(&drvdata->spinlock);
5355 +- pm_runtime_put(dev->parent);
5356 + }
5357 +
5358 + /* disable hardware */
5359 +@@ -511,12 +494,15 @@ static bool cti_add_sysfs_link(struct cti_drvdata *drvdata,
5360 + return !link_err;
5361 + }
5362 +
5363 +-static void cti_remove_sysfs_link(struct cti_trig_con *tc)
5364 ++static void cti_remove_sysfs_link(struct cti_drvdata *drvdata,
5365 ++ struct cti_trig_con *tc)
5366 + {
5367 + struct coresight_sysfs_link link_info;
5368 +
5369 ++ link_info.orig = drvdata->csdev;
5370 + link_info.orig_name = tc->con_dev_name;
5371 + link_info.target = tc->con_dev;
5372 ++ link_info.target_name = dev_name(&drvdata->csdev->dev);
5373 + coresight_remove_sysfs_link(&link_info);
5374 + }
5375 +
5376 +@@ -606,8 +592,8 @@ void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
5377 + ctidrv = csdev_to_cti_drvdata(csdev->ect_dev);
5378 + ctidev = &ctidrv->ctidev;
5379 + list_for_each_entry(tc, &ctidev->trig_cons, node) {
5380 +- if (tc->con_dev == csdev->ect_dev) {
5381 +- cti_remove_sysfs_link(tc);
5382 ++ if (tc->con_dev == csdev) {
5383 ++ cti_remove_sysfs_link(ctidrv, tc);
5384 + tc->con_dev = NULL;
5385 + break;
5386 + }
5387 +@@ -651,7 +637,7 @@ static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata)
5388 + if (tc->con_dev) {
5389 + coresight_set_assoc_ectdev_mutex(tc->con_dev,
5390 + NULL);
5391 +- cti_remove_sysfs_link(tc);
5392 ++ cti_remove_sysfs_link(drvdata, tc);
5393 + tc->con_dev = NULL;
5394 + }
5395 + }
5396 +@@ -742,7 +728,8 @@ static int cti_dying_cpu(unsigned int cpu)
5397 +
5398 + spin_lock(&drvdata->spinlock);
5399 + drvdata->config.hw_powered = false;
5400 +- coresight_disclaim_device(drvdata->base);
5401 ++ if (drvdata->config.hw_enabled)
5402 ++ coresight_disclaim_device(drvdata->base);
5403 + spin_unlock(&drvdata->spinlock);
5404 + return 0;
5405 + }
5406 +diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
5407 +index 84f1dcb698272..9b0c5d719232f 100644
5408 +--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
5409 ++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
5410 +@@ -126,10 +126,10 @@ static void free_sink_buffer(struct etm_event_data *event_data)
5411 + cpumask_t *mask = &event_data->mask;
5412 + struct coresight_device *sink;
5413 +
5414 +- if (WARN_ON(cpumask_empty(mask)))
5415 ++ if (!event_data->snk_config)
5416 + return;
5417 +
5418 +- if (!event_data->snk_config)
5419 ++ if (WARN_ON(cpumask_empty(mask)))
5420 + return;
5421 +
5422 + cpu = cpumask_first(mask);
5423 +@@ -310,6 +310,16 @@ static void etm_event_start(struct perf_event *event, int flags)
5424 + if (!event_data)
5425 + goto fail;
5426 +
5427 ++ /*
5428 ++ * Check if this ETM is allowed to trace, as decided
5429 ++ * at etm_setup_aux(). This could be due to an unreachable
5430 ++ * sink from this ETM. We can't do much in this case if
5431 ++ * the sink was specified or hinted to the driver. For
5432 ++ * now, simply don't record anything on this ETM.
5433 ++ */
5434 ++ if (!cpumask_test_cpu(cpu, &event_data->mask))
5435 ++ goto fail_end_stop;
5436 ++
5437 + path = etm_event_cpu_path(event_data, cpu);
5438 + /* We need a sink, no need to continue without one */
5439 + sink = coresight_get_sink(path);
5440 +diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
5441 +index b673e738bc9a8..a588cd6de01c7 100644
5442 +--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
5443 ++++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
5444 +@@ -206,7 +206,7 @@ static ssize_t reset_store(struct device *dev,
5445 + * each trace run.
5446 + */
5447 + config->vinst_ctrl = BIT(0);
5448 +- if (drvdata->nr_addr_cmp == true) {
5449 ++ if (drvdata->nr_addr_cmp > 0) {
5450 + config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
5451 + /* SSSTATUS, bit[9] */
5452 + config->vinst_ctrl |= BIT(9);
5453 +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
5454 +index 6089c481f8f19..d4e74b03c1e0f 100644
5455 +--- a/drivers/hwtracing/coresight/coresight-etm4x.c
5456 ++++ b/drivers/hwtracing/coresight/coresight-etm4x.c
5457 +@@ -48,12 +48,11 @@ module_param(pm_save_enable, int, 0444);
5458 + MODULE_PARM_DESC(pm_save_enable,
5459 + "Save/restore state on power down: 1 = never, 2 = self-hosted");
5460 +
5461 +-/* The number of ETMv4 currently registered */
5462 +-static int etm4_count;
5463 + static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
5464 + static void etm4_set_default_config(struct etmv4_config *config);
5465 + static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
5466 + struct perf_event *event);
5467 ++static u64 etm4_get_access_type(struct etmv4_config *config);
5468 +
5469 + static enum cpuhp_state hp_online;
5470 +
5471 +@@ -781,6 +780,22 @@ static void etm4_init_arch_data(void *info)
5472 + CS_LOCK(drvdata->base);
5473 + }
5474 +
5475 ++/* Set ELx trace filter access in the TRCVICTLR register */
5476 ++static void etm4_set_victlr_access(struct etmv4_config *config)
5477 ++{
5478 ++ u64 access_type;
5479 ++
5480 ++ config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK | ETM_EXLEVEL_NS_VICTLR_MASK);
5481 ++
5482 ++ /*
5483 ++ * TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering
5484 ++ * bits in vinst_ctrl, same bit pattern as TRCACATRn values returned by
5485 ++ * etm4_get_access_type() but with a relative shift in this register.
5486 ++ */
5487 ++ access_type = etm4_get_access_type(config) << ETM_EXLEVEL_LSHIFT_TRCVICTLR;
5488 ++ config->vinst_ctrl |= (u32)access_type;
5489 ++}
5490 ++
5491 + static void etm4_set_default_config(struct etmv4_config *config)
5492 + {
5493 + /* disable all events tracing */
5494 +@@ -798,6 +813,9 @@ static void etm4_set_default_config(struct etmv4_config *config)
5495 +
5496 + /* TRCVICTLR::EVENT = 0x01, select the always on logic */
5497 + config->vinst_ctrl = BIT(0);
5498 ++
5499 ++ /* TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering */
5500 ++ etm4_set_victlr_access(config);
5501 + }
5502 +
5503 + static u64 etm4_get_ns_access_type(struct etmv4_config *config)
5504 +@@ -1062,7 +1080,7 @@ out:
5505 +
5506 + void etm4_config_trace_mode(struct etmv4_config *config)
5507 + {
5508 +- u32 addr_acc, mode;
5509 ++ u32 mode;
5510 +
5511 + mode = config->mode;
5512 + mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
5513 +@@ -1074,15 +1092,7 @@ void etm4_config_trace_mode(struct etmv4_config *config)
5514 + if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
5515 + return;
5516 +
5517 +- addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
5518 +- /* clear default config */
5519 +- addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS |
5520 +- ETM_EXLEVEL_NS_HYP);
5521 +-
5522 +- addr_acc |= etm4_get_ns_access_type(config);
5523 +-
5524 +- config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
5525 +- config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
5526 ++ etm4_set_victlr_access(config);
5527 + }
5528 +
5529 + static int etm4_online_cpu(unsigned int cpu)
5530 +@@ -1179,7 +1189,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
5531 + state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
5532 + state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
5533 +
5534 +- for (i = 0; i < drvdata->nrseqstate; i++)
5535 ++ for (i = 0; i < drvdata->nrseqstate - 1; i++)
5536 + state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
5537 +
5538 + state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
5539 +@@ -1223,7 +1233,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
5540 + state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
5541 +
5542 + state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
5543 +- state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR1);
5544 ++ state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
5545 +
5546 + state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
5547 +
5548 +@@ -1284,7 +1294,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
5549 + writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
5550 + writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
5551 +
5552 +- for (i = 0; i < drvdata->nrseqstate; i++)
5553 ++ for (i = 0; i < drvdata->nrseqstate - 1; i++)
5554 + writel_relaxed(state->trcseqevr[i],
5555 + drvdata->base + TRCSEQEVRn(i));
5556 +
5557 +@@ -1333,7 +1343,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
5558 + writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
5559 +
5560 + writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
5561 +- writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR1);
5562 ++ writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
5563 +
5564 + writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
5565 +
5566 +@@ -1394,28 +1404,25 @@ static struct notifier_block etm4_cpu_pm_nb = {
5567 + .notifier_call = etm4_cpu_pm_notify,
5568 + };
5569 +
5570 +-/* Setup PM. Called with cpus locked. Deals with error conditions and counts */
5571 +-static int etm4_pm_setup_cpuslocked(void)
5572 ++/* Setup PM. Deals with error conditions and counts */
5573 ++static int __init etm4_pm_setup(void)
5574 + {
5575 + int ret;
5576 +
5577 +- if (etm4_count++)
5578 +- return 0;
5579 +-
5580 + ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb);
5581 + if (ret)
5582 +- goto reduce_count;
5583 ++ return ret;
5584 +
5585 +- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
5586 +- "arm/coresight4:starting",
5587 +- etm4_starting_cpu, etm4_dying_cpu);
5588 ++ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
5589 ++ "arm/coresight4:starting",
5590 ++ etm4_starting_cpu, etm4_dying_cpu);
5591 +
5592 + if (ret)
5593 + goto unregister_notifier;
5594 +
5595 +- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
5596 +- "arm/coresight4:online",
5597 +- etm4_online_cpu, NULL);
5598 ++ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
5599 ++ "arm/coresight4:online",
5600 ++ etm4_online_cpu, NULL);
5601 +
5602 + /* HP dyn state ID returned in ret on success */
5603 + if (ret > 0) {
5604 +@@ -1424,21 +1431,15 @@ static int etm4_pm_setup_cpuslocked(void)
5605 + }
5606 +
5607 + /* failed dyn state - remove others */
5608 +- cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING);
5609 ++ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
5610 +
5611 + unregister_notifier:
5612 + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
5613 +-
5614 +-reduce_count:
5615 +- --etm4_count;
5616 + return ret;
5617 + }
5618 +
5619 +-static void etm4_pm_clear(void)
5620 ++static void __init etm4_pm_clear(void)
5621 + {
5622 +- if (--etm4_count != 0)
5623 +- return;
5624 +-
5625 + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
5626 + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
5627 + if (hp_online) {
5628 +@@ -1491,22 +1492,12 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
5629 + if (!desc.name)
5630 + return -ENOMEM;
5631 +
5632 +- cpus_read_lock();
5633 + etmdrvdata[drvdata->cpu] = drvdata;
5634 +
5635 + if (smp_call_function_single(drvdata->cpu,
5636 + etm4_init_arch_data, drvdata, 1))
5637 + dev_err(dev, "ETM arch init failed\n");
5638 +
5639 +- ret = etm4_pm_setup_cpuslocked();
5640 +- cpus_read_unlock();
5641 +-
5642 +- /* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */
5643 +- if (ret) {
5644 +- etmdrvdata[drvdata->cpu] = NULL;
5645 +- return ret;
5646 +- }
5647 +-
5648 + if (etm4_arch_supported(drvdata->arch) == false) {
5649 + ret = -EINVAL;
5650 + goto err_arch_supported;
5651 +@@ -1553,7 +1544,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
5652 +
5653 + err_arch_supported:
5654 + etmdrvdata[drvdata->cpu] = NULL;
5655 +- etm4_pm_clear();
5656 + return ret;
5657 + }
5658 +
5659 +@@ -1591,4 +1581,23 @@ static struct amba_driver etm4x_driver = {
5660 + .probe = etm4_probe,
5661 + .id_table = etm4_ids,
5662 + };
5663 +-builtin_amba_driver(etm4x_driver);
5664 ++
5665 ++static int __init etm4x_init(void)
5666 ++{
5667 ++ int ret;
5668 ++
5669 ++ ret = etm4_pm_setup();
5670 ++
5671 ++ /* etm4_pm_setup() does its own cleanup - exit on error */
5672 ++ if (ret)
5673 ++ return ret;
5674 ++
5675 ++ ret = amba_driver_register(&etm4x_driver);
5676 ++ if (ret) {
5677 ++ pr_err("Error registering etm4x driver\n");
5678 ++ etm4_pm_clear();
5679 ++ }
5680 ++
5681 ++ return ret;
5682 ++}
5683 ++device_initcall(etm4x_init);
5684 +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
5685 +index 47729e04aac72..ab38f9afd821a 100644
5686 +--- a/drivers/hwtracing/coresight/coresight-etm4x.h
5687 ++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
5688 +@@ -192,6 +192,9 @@
5689 + #define ETM_EXLEVEL_NS_HYP BIT(14)
5690 + #define ETM_EXLEVEL_NS_NA BIT(15)
5691 +
5692 ++/* access level control in TRCVICTLR - same bits as TRCACATRn but shifted */
5693 ++#define ETM_EXLEVEL_LSHIFT_TRCVICTLR 8
5694 ++
5695 + /* secure / non secure masks - TRCVICTLR, IDR3 */
5696 + #define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16)
5697 + /* NS MON (EL3) mode never implemented */
5698 +diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
5699 +index e4912abda3aa2..85a6c099ddeb1 100644
5700 +--- a/drivers/hwtracing/coresight/coresight-platform.c
5701 ++++ b/drivers/hwtracing/coresight/coresight-platform.c
5702 +@@ -712,11 +712,11 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
5703 + return dir;
5704 +
5705 + if (dir == ACPI_CORESIGHT_LINK_MASTER) {
5706 +- if (ptr->outport > pdata->nr_outport)
5707 +- pdata->nr_outport = ptr->outport;
5708 ++ if (ptr->outport >= pdata->nr_outport)
5709 ++ pdata->nr_outport = ptr->outport + 1;
5710 + ptr++;
5711 + } else {
5712 +- WARN_ON(pdata->nr_inport == ptr->child_port);
5713 ++ WARN_ON(pdata->nr_inport == ptr->child_port + 1);
5714 + /*
5715 + * We do not track input port connections for a device.
5716 + * However we need the highest port number described,
5717 +@@ -724,8 +724,8 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
5718 + * record for an output connection. Hence, do not move
5719 + * the ptr for input connections
5720 + */
5721 +- if (ptr->child_port > pdata->nr_inport)
5722 +- pdata->nr_inport = ptr->child_port;
5723 ++ if (ptr->child_port >= pdata->nr_inport)
5724 ++ pdata->nr_inport = ptr->child_port + 1;
5725 + }
5726 + }
5727 +
5728 +diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
5729 +index f3efbb3b2b4d1..cf03af09c6ced 100644
5730 +--- a/drivers/hwtracing/coresight/coresight.c
5731 ++++ b/drivers/hwtracing/coresight/coresight.c
5732 +@@ -1023,7 +1023,6 @@ static void coresight_device_release(struct device *dev)
5733 + {
5734 + struct coresight_device *csdev = to_coresight_device(dev);
5735 +
5736 +- cti_remove_assoc_from_csdev(csdev);
5737 + fwnode_handle_put(csdev->dev.fwnode);
5738 + kfree(csdev->refcnt);
5739 + kfree(csdev);
5740 +@@ -1357,6 +1356,7 @@ void coresight_unregister(struct coresight_device *csdev)
5741 + {
5742 + etm_perf_del_symlink_sink(csdev);
5743 + /* Remove references of that device in the topology */
5744 ++ cti_remove_assoc_from_csdev(csdev);
5745 + coresight_remove_conns(csdev);
5746 + coresight_release_platform_data(csdev, csdev->pdata);
5747 + device_unregister(&csdev->dev);
5748 +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
5749 +index 735bf31a3fdff..6546d6cf3c24c 100644
5750 +--- a/drivers/i2c/busses/Kconfig
5751 ++++ b/drivers/i2c/busses/Kconfig
5752 +@@ -1191,6 +1191,7 @@ config I2C_RCAR
5753 + tristate "Renesas R-Car I2C Controller"
5754 + depends on ARCH_RENESAS || COMPILE_TEST
5755 + select I2C_SLAVE
5756 ++ select RESET_CONTROLLER if ARCH_RCAR_GEN3
5757 + help
5758 + If you say yes to this option, support will be included for the
5759 + R-Car I2C controller.
5760 +diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
5761 +index 2ade99b105b91..bbf8dd491d245 100644
5762 +--- a/drivers/i2c/i2c-core-acpi.c
5763 ++++ b/drivers/i2c/i2c-core-acpi.c
5764 +@@ -264,6 +264,7 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
5765 + void i2c_acpi_register_devices(struct i2c_adapter *adap)
5766 + {
5767 + acpi_status status;
5768 ++ acpi_handle handle;
5769 +
5770 + if (!has_acpi_companion(&adap->dev))
5771 + return;
5772 +@@ -274,6 +275,15 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
5773 + adap, NULL);
5774 + if (ACPI_FAILURE(status))
5775 + dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
5776 ++
5777 ++ if (!adap->dev.parent)
5778 ++ return;
5779 ++
5780 ++ handle = ACPI_HANDLE(adap->dev.parent);
5781 ++ if (!handle)
5782 ++ return;
5783 ++
5784 ++ acpi_walk_dep_device_list(handle);
5785 + }
5786 +
5787 + const struct acpi_device_id *
5788 +@@ -729,7 +739,6 @@ int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
5789 + return -ENOMEM;
5790 + }
5791 +
5792 +- acpi_walk_dep_device_list(handle);
5793 + return 0;
5794 + }
5795 +
5796 +diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
5797 +index 97f2e29265da7..cc7564446ccd2 100644
5798 +--- a/drivers/i3c/master.c
5799 ++++ b/drivers/i3c/master.c
5800 +@@ -1782,6 +1782,21 @@ static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
5801 + i3c_master_detach_free_devs(master);
5802 + }
5803 +
5804 ++static void i3c_master_attach_boardinfo(struct i3c_dev_desc *i3cdev)
5805 ++{
5806 ++ struct i3c_master_controller *master = i3cdev->common.master;
5807 ++ struct i3c_dev_boardinfo *i3cboardinfo;
5808 ++
5809 ++ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
5810 ++ if (i3cdev->info.pid != i3cboardinfo->pid)
5811 ++ continue;
5812 ++
5813 ++ i3cdev->boardinfo = i3cboardinfo;
5814 ++ i3cdev->info.static_addr = i3cboardinfo->static_addr;
5815 ++ return;
5816 ++ }
5817 ++}
5818 ++
5819 + static struct i3c_dev_desc *
5820 + i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
5821 + {
5822 +@@ -1837,10 +1852,10 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
5823 + if (ret)
5824 + goto err_detach_dev;
5825 +
5826 ++ i3c_master_attach_boardinfo(newdev);
5827 ++
5828 + olddev = i3c_master_search_i3c_dev_duplicate(newdev);
5829 + if (olddev) {
5830 +- newdev->boardinfo = olddev->boardinfo;
5831 +- newdev->info.static_addr = olddev->info.static_addr;
5832 + newdev->dev = olddev->dev;
5833 + if (newdev->dev)
5834 + newdev->dev->desc = newdev;
5835 +diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
5836 +index 3fee8bd7fe20b..3f2226928fe05 100644
5837 +--- a/drivers/i3c/master/i3c-master-cdns.c
5838 ++++ b/drivers/i3c/master/i3c-master-cdns.c
5839 +@@ -1635,8 +1635,10 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
5840 + master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
5841 + sizeof(*master->ibi.slots),
5842 + GFP_KERNEL);
5843 +- if (!master->ibi.slots)
5844 ++ if (!master->ibi.slots) {
5845 ++ ret = -ENOMEM;
5846 + goto err_disable_sysclk;
5847 ++ }
5848 +
5849 + writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
5850 + writel(MST_INT_IBIR_THR, master->regs + MST_IER);
5851 +diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
5852 +index 0e2068ec068b8..358636954619d 100644
5853 +--- a/drivers/iio/adc/stm32-adc-core.c
5854 ++++ b/drivers/iio/adc/stm32-adc-core.c
5855 +@@ -794,6 +794,13 @@ static int stm32_adc_core_runtime_resume(struct device *dev)
5856 + {
5857 + return stm32_adc_core_hw_start(dev);
5858 + }
5859 ++
5860 ++static int stm32_adc_core_runtime_idle(struct device *dev)
5861 ++{
5862 ++ pm_runtime_mark_last_busy(dev);
5863 ++
5864 ++ return 0;
5865 ++}
5866 + #endif
5867 +
5868 + static const struct dev_pm_ops stm32_adc_core_pm_ops = {
5869 +@@ -801,7 +808,7 @@ static const struct dev_pm_ops stm32_adc_core_pm_ops = {
5870 + pm_runtime_force_resume)
5871 + SET_RUNTIME_PM_OPS(stm32_adc_core_runtime_suspend,
5872 + stm32_adc_core_runtime_resume,
5873 +- NULL)
5874 ++ stm32_adc_core_runtime_idle)
5875 + };
5876 +
5877 + static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
5878 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
5879 +index 26de0dab60bbb..d28c7c6940b00 100644
5880 +--- a/drivers/infiniband/core/cma.c
5881 ++++ b/drivers/infiniband/core/cma.c
5882 +@@ -68,6 +68,9 @@ static const char * const cma_events[] = {
5883 + [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
5884 + };
5885 +
5886 ++static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
5887 ++ union ib_gid *mgid);
5888 ++
5889 + const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
5890 + {
5891 + size_t index = event;
5892 +@@ -345,13 +348,10 @@ struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
5893 +
5894 + struct cma_multicast {
5895 + struct rdma_id_private *id_priv;
5896 +- union {
5897 +- struct ib_sa_multicast *ib;
5898 +- } multicast;
5899 ++ struct ib_sa_multicast *sa_mc;
5900 + struct list_head list;
5901 + void *context;
5902 + struct sockaddr_storage addr;
5903 +- struct kref mcref;
5904 + u8 join_state;
5905 + };
5906 +
5907 +@@ -363,18 +363,6 @@ struct cma_work {
5908 + struct rdma_cm_event event;
5909 + };
5910 +
5911 +-struct cma_ndev_work {
5912 +- struct work_struct work;
5913 +- struct rdma_id_private *id;
5914 +- struct rdma_cm_event event;
5915 +-};
5916 +-
5917 +-struct iboe_mcast_work {
5918 +- struct work_struct work;
5919 +- struct rdma_id_private *id;
5920 +- struct cma_multicast *mc;
5921 +-};
5922 +-
5923 + union cma_ip_addr {
5924 + struct in6_addr ip6;
5925 + struct {
5926 +@@ -483,14 +471,6 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
5927 + rdma_start_port(cma_dev->device)];
5928 + }
5929 +
5930 +-static inline void release_mc(struct kref *kref)
5931 +-{
5932 +- struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
5933 +-
5934 +- kfree(mc->multicast.ib);
5935 +- kfree(mc);
5936 +-}
5937 +-
5938 + static void cma_release_dev(struct rdma_id_private *id_priv)
5939 + {
5940 + mutex_lock(&lock);
5941 +@@ -1783,19 +1763,30 @@ static void cma_release_port(struct rdma_id_private *id_priv)
5942 + mutex_unlock(&lock);
5943 + }
5944 +
5945 +-static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
5946 +- struct cma_multicast *mc)
5947 ++static void destroy_mc(struct rdma_id_private *id_priv,
5948 ++ struct cma_multicast *mc)
5949 + {
5950 +- struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
5951 +- struct net_device *ndev = NULL;
5952 ++ if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
5953 ++ ib_sa_free_multicast(mc->sa_mc);
5954 +
5955 +- if (dev_addr->bound_dev_if)
5956 +- ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
5957 +- if (ndev) {
5958 +- cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
5959 +- dev_put(ndev);
5960 ++ if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
5961 ++ struct rdma_dev_addr *dev_addr =
5962 ++ &id_priv->id.route.addr.dev_addr;
5963 ++ struct net_device *ndev = NULL;
5964 ++
5965 ++ if (dev_addr->bound_dev_if)
5966 ++ ndev = dev_get_by_index(dev_addr->net,
5967 ++ dev_addr->bound_dev_if);
5968 ++ if (ndev) {
5969 ++ union ib_gid mgid;
5970 ++
5971 ++ cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
5972 ++ &mgid);
5973 ++ cma_igmp_send(ndev, &mgid, false);
5974 ++ dev_put(ndev);
5975 ++ }
5976 + }
5977 +- kref_put(&mc->mcref, release_mc);
5978 ++ kfree(mc);
5979 + }
5980 +
5981 + static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
5982 +@@ -1803,16 +1794,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
5983 + struct cma_multicast *mc;
5984 +
5985 + while (!list_empty(&id_priv->mc_list)) {
5986 +- mc = container_of(id_priv->mc_list.next,
5987 +- struct cma_multicast, list);
5988 ++ mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
5989 ++ list);
5990 + list_del(&mc->list);
5991 +- if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
5992 +- id_priv->id.port_num)) {
5993 +- ib_sa_free_multicast(mc->multicast.ib);
5994 +- kfree(mc);
5995 +- } else {
5996 +- cma_leave_roce_mc_group(id_priv, mc);
5997 +- }
5998 ++ destroy_mc(id_priv, mc);
5999 + }
6000 + }
6001 +
6002 +@@ -2646,32 +2631,14 @@ static void cma_work_handler(struct work_struct *_work)
6003 + struct rdma_id_private *id_priv = work->id;
6004 +
6005 + mutex_lock(&id_priv->handler_mutex);
6006 +- if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
6007 ++ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
6008 ++ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
6009 + goto out_unlock;
6010 +-
6011 +- if (cma_cm_event_handler(id_priv, &work->event)) {
6012 +- cma_id_put(id_priv);
6013 +- destroy_id_handler_unlock(id_priv);
6014 +- goto out_free;
6015 ++ if (work->old_state != 0 || work->new_state != 0) {
6016 ++ if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
6017 ++ goto out_unlock;
6018 + }
6019 +
6020 +-out_unlock:
6021 +- mutex_unlock(&id_priv->handler_mutex);
6022 +- cma_id_put(id_priv);
6023 +-out_free:
6024 +- kfree(work);
6025 +-}
6026 +-
6027 +-static void cma_ndev_work_handler(struct work_struct *_work)
6028 +-{
6029 +- struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
6030 +- struct rdma_id_private *id_priv = work->id;
6031 +-
6032 +- mutex_lock(&id_priv->handler_mutex);
6033 +- if (id_priv->state == RDMA_CM_DESTROYING ||
6034 +- id_priv->state == RDMA_CM_DEVICE_REMOVAL)
6035 +- goto out_unlock;
6036 +-
6037 + if (cma_cm_event_handler(id_priv, &work->event)) {
6038 + cma_id_put(id_priv);
6039 + destroy_id_handler_unlock(id_priv);
6040 +@@ -2682,6 +2649,8 @@ out_unlock:
6041 + mutex_unlock(&id_priv->handler_mutex);
6042 + cma_id_put(id_priv);
6043 + out_free:
6044 ++ if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
6045 ++ rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
6046 + kfree(work);
6047 + }
6048 +
6049 +@@ -4295,63 +4264,66 @@ out:
6050 + }
6051 + EXPORT_SYMBOL(rdma_disconnect);
6052 +
6053 +-static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
6054 ++static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
6055 ++ struct ib_sa_multicast *multicast,
6056 ++ struct rdma_cm_event *event,
6057 ++ struct cma_multicast *mc)
6058 + {
6059 +- struct rdma_id_private *id_priv;
6060 +- struct cma_multicast *mc = multicast->context;
6061 +- struct rdma_cm_event event = {};
6062 +- int ret = 0;
6063 +-
6064 +- id_priv = mc->id_priv;
6065 +- mutex_lock(&id_priv->handler_mutex);
6066 +- if (id_priv->state != RDMA_CM_ADDR_BOUND &&
6067 +- id_priv->state != RDMA_CM_ADDR_RESOLVED)
6068 +- goto out;
6069 ++ struct rdma_dev_addr *dev_addr;
6070 ++ enum ib_gid_type gid_type;
6071 ++ struct net_device *ndev;
6072 +
6073 + if (!status)
6074 + status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
6075 + else
6076 + pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
6077 + status);
6078 +- mutex_lock(&id_priv->qp_mutex);
6079 +- if (!status && id_priv->id.qp) {
6080 +- status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
6081 +- be16_to_cpu(multicast->rec.mlid));
6082 +- if (status)
6083 +- pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
6084 +- status);
6085 ++
6086 ++ event->status = status;
6087 ++ event->param.ud.private_data = mc->context;
6088 ++ if (status) {
6089 ++ event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
6090 ++ return;
6091 + }
6092 +- mutex_unlock(&id_priv->qp_mutex);
6093 +
6094 +- event.status = status;
6095 +- event.param.ud.private_data = mc->context;
6096 +- if (!status) {
6097 +- struct rdma_dev_addr *dev_addr =
6098 +- &id_priv->id.route.addr.dev_addr;
6099 +- struct net_device *ndev =
6100 +- dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
6101 +- enum ib_gid_type gid_type =
6102 +- id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
6103 +- rdma_start_port(id_priv->cma_dev->device)];
6104 +-
6105 +- event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
6106 +- ret = ib_init_ah_from_mcmember(id_priv->id.device,
6107 +- id_priv->id.port_num,
6108 +- &multicast->rec,
6109 +- ndev, gid_type,
6110 +- &event.param.ud.ah_attr);
6111 +- if (ret)
6112 +- event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
6113 ++ dev_addr = &id_priv->id.route.addr.dev_addr;
6114 ++ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
6115 ++ gid_type =
6116 ++ id_priv->cma_dev
6117 ++ ->default_gid_type[id_priv->id.port_num -
6118 ++ rdma_start_port(
6119 ++ id_priv->cma_dev->device)];
6120 ++
6121 ++ event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
6122 ++ if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
6123 ++ &multicast->rec, ndev, gid_type,
6124 ++ &event->param.ud.ah_attr)) {
6125 ++ event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
6126 ++ goto out;
6127 ++ }
6128 +
6129 +- event.param.ud.qp_num = 0xFFFFFF;
6130 +- event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
6131 +- if (ndev)
6132 +- dev_put(ndev);
6133 +- } else
6134 +- event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
6135 ++ event->param.ud.qp_num = 0xFFFFFF;
6136 ++ event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
6137 +
6138 +- ret = cma_cm_event_handler(id_priv, &event);
6139 ++out:
6140 ++ if (ndev)
6141 ++ dev_put(ndev);
6142 ++}
6143 +
6144 ++static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
6145 ++{
6146 ++ struct cma_multicast *mc = multicast->context;
6147 ++ struct rdma_id_private *id_priv = mc->id_priv;
6148 ++ struct rdma_cm_event event = {};
6149 ++ int ret = 0;
6150 ++
6151 ++ mutex_lock(&id_priv->handler_mutex);
6152 ++ if (id_priv->state != RDMA_CM_ADDR_BOUND &&
6153 ++ id_priv->state != RDMA_CM_ADDR_RESOLVED)
6154 ++ goto out;
6155 ++
6156 ++ cma_make_mc_event(status, id_priv, multicast, &event, mc);
6157 ++ ret = cma_cm_event_handler(id_priv, &event);
6158 + rdma_destroy_ah_attr(&event.param.ud.ah_attr);
6159 + if (ret) {
6160 + destroy_id_handler_unlock(id_priv);
6161 +@@ -4441,23 +4413,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
6162 + IB_SA_MCMEMBER_REC_MTU |
6163 + IB_SA_MCMEMBER_REC_HOP_LIMIT;
6164 +
6165 +- mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
6166 +- id_priv->id.port_num, &rec,
6167 +- comp_mask, GFP_KERNEL,
6168 +- cma_ib_mc_handler, mc);
6169 +- return PTR_ERR_OR_ZERO(mc->multicast.ib);
6170 +-}
6171 +-
6172 +-static void iboe_mcast_work_handler(struct work_struct *work)
6173 +-{
6174 +- struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
6175 +- struct cma_multicast *mc = mw->mc;
6176 +- struct ib_sa_multicast *m = mc->multicast.ib;
6177 +-
6178 +- mc->multicast.ib->context = mc;
6179 +- cma_ib_mc_handler(0, m);
6180 +- kref_put(&mc->mcref, release_mc);
6181 +- kfree(mw);
6182 ++ mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
6183 ++ id_priv->id.port_num, &rec, comp_mask,
6184 ++ GFP_KERNEL, cma_ib_mc_handler, mc);
6185 ++ return PTR_ERR_OR_ZERO(mc->sa_mc);
6186 + }
6187 +
6188 + static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
6189 +@@ -4492,52 +4451,47 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
6190 + static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
6191 + struct cma_multicast *mc)
6192 + {
6193 +- struct iboe_mcast_work *work;
6194 ++ struct cma_work *work;
6195 + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
6196 + int err = 0;
6197 + struct sockaddr *addr = (struct sockaddr *)&mc->addr;
6198 + struct net_device *ndev = NULL;
6199 ++ struct ib_sa_multicast ib;
6200 + enum ib_gid_type gid_type;
6201 + bool send_only;
6202 +
6203 + send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
6204 +
6205 +- if (cma_zero_addr((struct sockaddr *)&mc->addr))
6206 ++ if (cma_zero_addr(addr))
6207 + return -EINVAL;
6208 +
6209 + work = kzalloc(sizeof *work, GFP_KERNEL);
6210 + if (!work)
6211 + return -ENOMEM;
6212 +
6213 +- mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
6214 +- if (!mc->multicast.ib) {
6215 +- err = -ENOMEM;
6216 +- goto out1;
6217 +- }
6218 +-
6219 + gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
6220 + rdma_start_port(id_priv->cma_dev->device)];
6221 +- cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type);
6222 ++ cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
6223 +
6224 +- mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
6225 ++ ib.rec.pkey = cpu_to_be16(0xffff);
6226 + if (id_priv->id.ps == RDMA_PS_UDP)
6227 +- mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
6228 ++ ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
6229 +
6230 + if (dev_addr->bound_dev_if)
6231 + ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
6232 + if (!ndev) {
6233 + err = -ENODEV;
6234 +- goto out2;
6235 ++ goto err_free;
6236 + }
6237 +- mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
6238 +- mc->multicast.ib->rec.hop_limit = 1;
6239 +- mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
6240 ++ ib.rec.rate = iboe_get_rate(ndev);
6241 ++ ib.rec.hop_limit = 1;
6242 ++ ib.rec.mtu = iboe_get_mtu(ndev->mtu);
6243 +
6244 + if (addr->sa_family == AF_INET) {
6245 + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
6246 +- mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
6247 ++ ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
6248 + if (!send_only) {
6249 +- err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
6250 ++ err = cma_igmp_send(ndev, &ib.rec.mgid,
6251 + true);
6252 + }
6253 + }
6254 +@@ -4546,24 +4500,22 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
6255 + err = -ENOTSUPP;
6256 + }
6257 + dev_put(ndev);
6258 +- if (err || !mc->multicast.ib->rec.mtu) {
6259 ++ if (err || !ib.rec.mtu) {
6260 + if (!err)
6261 + err = -EINVAL;
6262 +- goto out2;
6263 ++ goto err_free;
6264 + }
6265 + rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
6266 +- &mc->multicast.ib->rec.port_gid);
6267 ++ &ib.rec.port_gid);
6268 + work->id = id_priv;
6269 +- work->mc = mc;
6270 +- INIT_WORK(&work->work, iboe_mcast_work_handler);
6271 +- kref_get(&mc->mcref);
6272 ++ INIT_WORK(&work->work, cma_work_handler);
6273 ++ cma_make_mc_event(0, id_priv, &ib, &work->event, mc);
6274 ++ /* Balances with cma_id_put() in cma_work_handler */
6275 ++ cma_id_get(id_priv);
6276 + queue_work(cma_wq, &work->work);
6277 +-
6278 + return 0;
6279 +
6280 +-out2:
6281 +- kfree(mc->multicast.ib);
6282 +-out1:
6283 ++err_free:
6284 + kfree(work);
6285 + return err;
6286 + }
6287 +@@ -4575,6 +4527,10 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
6288 + struct cma_multicast *mc;
6289 + int ret;
6290 +
6291 ++ /* Not supported for kernel QPs */
6292 ++ if (WARN_ON(id->qp))
6293 ++ return -EINVAL;
6294 ++
6295 + if (!id->device)
6296 + return -EINVAL;
6297 +
6298 +@@ -4583,7 +4539,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
6299 + !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
6300 + return -EINVAL;
6301 +
6302 +- mc = kmalloc(sizeof *mc, GFP_KERNEL);
6303 ++ mc = kzalloc(sizeof(*mc), GFP_KERNEL);
6304 + if (!mc)
6305 + return -ENOMEM;
6306 +
6307 +@@ -4593,7 +4549,6 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
6308 + mc->join_state = join_state;
6309 +
6310 + if (rdma_protocol_roce(id->device, id->port_num)) {
6311 +- kref_init(&mc->mcref);
6312 + ret = cma_iboe_join_multicast(id_priv, mc);
6313 + if (ret)
6314 + goto out_err;
6315 +@@ -4625,25 +4580,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
6316 + id_priv = container_of(id, struct rdma_id_private, id);
6317 + spin_lock_irq(&id_priv->lock);
6318 + list_for_each_entry(mc, &id_priv->mc_list, list) {
6319 +- if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
6320 +- list_del(&mc->list);
6321 +- spin_unlock_irq(&id_priv->lock);
6322 +-
6323 +- if (id->qp)
6324 +- ib_detach_mcast(id->qp,
6325 +- &mc->multicast.ib->rec.mgid,
6326 +- be16_to_cpu(mc->multicast.ib->rec.mlid));
6327 +-
6328 +- BUG_ON(id_priv->cma_dev->device != id->device);
6329 +-
6330 +- if (rdma_cap_ib_mcast(id->device, id->port_num)) {
6331 +- ib_sa_free_multicast(mc->multicast.ib);
6332 +- kfree(mc);
6333 +- } else if (rdma_protocol_roce(id->device, id->port_num)) {
6334 +- cma_leave_roce_mc_group(id_priv, mc);
6335 +- }
6336 +- return;
6337 +- }
6338 ++ if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
6339 ++ continue;
6340 ++ list_del(&mc->list);
6341 ++ spin_unlock_irq(&id_priv->lock);
6342 ++
6343 ++ WARN_ON(id_priv->cma_dev->device != id->device);
6344 ++ destroy_mc(id_priv, mc);
6345 ++ return;
6346 + }
6347 + spin_unlock_irq(&id_priv->lock);
6348 + }
6349 +@@ -4652,7 +4596,7 @@ EXPORT_SYMBOL(rdma_leave_multicast);
6350 + static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
6351 + {
6352 + struct rdma_dev_addr *dev_addr;
6353 +- struct cma_ndev_work *work;
6354 ++ struct cma_work *work;
6355 +
6356 + dev_addr = &id_priv->id.route.addr.dev_addr;
6357 +
6358 +@@ -4665,7 +4609,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
6359 + if (!work)
6360 + return -ENOMEM;
6361 +
6362 +- INIT_WORK(&work->work, cma_ndev_work_handler);
6363 ++ INIT_WORK(&work->work, cma_work_handler);
6364 + work->id = id_priv;
6365 + work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
6366 + cma_id_get(id_priv);
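With cma_ndev_work and iboe_mcast_work removed, every deferred event in cma.c is funneled through cma_work_handler, where an all-zero old_state/new_state pair now means "deliver the event without attempting a state transition". A self-contained sketch of that dispatch rule, using stand-in types rather than the kernel structs:

#include <stdbool.h>

/* Stand-in state machine; the real code uses enum rdma_cm_state. */
enum cm_state { CM_IDLE, CM_ACTIVE, CM_DESTROYING, CM_DEVICE_REMOVAL };

struct work_sketch {
	enum cm_state old_state;	/* both zero: no transition wanted */
	enum cm_state new_state;
};

static bool should_deliver(enum cm_state cur, const struct work_sketch *w)
{
	/* Never deliver once teardown has begun. */
	if (cur == CM_DESTROYING || cur == CM_DEVICE_REMOVAL)
		return false;
	/* Pure notification, e.g. RDMA_CM_EVENT_ADDR_CHANGE. */
	if (w->old_state == 0 && w->new_state == 0)
		return true;
	/* Otherwise the ID must still be in the expected source state. */
	return cur == w->old_state;
}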
6367 +diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
6368 +index a92fc3f90bb5b..19e36e52181be 100644
6369 +--- a/drivers/infiniband/core/cq.c
6370 ++++ b/drivers/infiniband/core/cq.c
6371 +@@ -197,24 +197,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
6372 + }
6373 +
6374 + /**
6375 +- * __ib_alloc_cq_user - allocate a completion queue
6376 ++ * __ib_alloc_cq - allocate a completion queue
6377 + * @dev: device to allocate the CQ for
6378 + * @private: driver private data, accessible from cq->cq_context
6379 + * @nr_cqe: number of CQEs to allocate
6380 + * @comp_vector: HCA completion vectors for this CQ
6381 + * @poll_ctx: context to poll the CQ from.
6382 + * @caller: module owner name.
6383 +- * @udata: Valid user data or NULL for kernel object
6384 + *
6385 + * This is the proper interface to allocate a CQ for in-kernel users. A
6386 + * CQ allocated with this interface will automatically be polled from the
6387 + * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
6388 + * to use this CQ abstraction.
6389 + */
6390 +-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
6391 +- int nr_cqe, int comp_vector,
6392 +- enum ib_poll_context poll_ctx,
6393 +- const char *caller, struct ib_udata *udata)
6394 ++struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
6395 ++ int comp_vector, enum ib_poll_context poll_ctx,
6396 ++ const char *caller)
6397 + {
6398 + struct ib_cq_init_attr cq_attr = {
6399 + .cqe = nr_cqe,
6400 +@@ -277,7 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
6401 + out_destroy_cq:
6402 + rdma_dim_destroy(cq);
6403 + rdma_restrack_del(&cq->res);
6404 +- cq->device->ops.destroy_cq(cq, udata);
6405 ++ cq->device->ops.destroy_cq(cq, NULL);
6406 + out_free_wc:
6407 + kfree(cq->wc);
6408 + out_free_cq:
6409 +@@ -285,7 +283,7 @@ out_free_cq:
6410 + trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
6411 + return ERR_PTR(ret);
6412 + }
6413 +-EXPORT_SYMBOL(__ib_alloc_cq_user);
6414 ++EXPORT_SYMBOL(__ib_alloc_cq);
6415 +
6416 + /**
6417 + * __ib_alloc_cq_any - allocate a completion queue
6418 +@@ -310,18 +308,19 @@ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
6419 + atomic_inc_return(&counter) %
6420 + min_t(int, dev->num_comp_vectors, num_online_cpus());
6421 +
6422 +- return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
6423 +- caller, NULL);
6424 ++ return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
6425 ++ caller);
6426 + }
6427 + EXPORT_SYMBOL(__ib_alloc_cq_any);
6428 +
6429 + /**
6430 +- * ib_free_cq_user - free a completion queue
6431 ++ * ib_free_cq - free a completion queue
6432 + * @cq: completion queue to free.
6433 +- * @udata: User data or NULL for kernel object
6434 + */
6435 +-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
6436 ++void ib_free_cq(struct ib_cq *cq)
6437 + {
6438 ++ int ret;
6439 ++
6440 + if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
6441 + return;
6442 + if (WARN_ON_ONCE(cq->cqe_used))
6443 +@@ -343,12 +342,13 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
6444 +
6445 + rdma_dim_destroy(cq);
6446 + trace_cq_free(cq);
6447 ++ ret = cq->device->ops.destroy_cq(cq, NULL);
6448 ++ WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
6449 + rdma_restrack_del(&cq->res);
6450 +- cq->device->ops.destroy_cq(cq, udata);
6451 + kfree(cq->wc);
6452 + kfree(cq);
6453 + }
6454 +-EXPORT_SYMBOL(ib_free_cq_user);
6455 ++EXPORT_SYMBOL(ib_free_cq);
6456 +
6457 + void ib_cq_pool_init(struct ib_device *dev)
6458 + {
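Dropping the udata argument makes explicit that this allocator only ever produces kernel-owned CQs; in-kernel ULPs reach it through the ib_alloc_cq()/ib_free_cq() wrappers. A hedged usage sketch, with dev and priv as caller-supplied placeholders:

#include <rdma/ib_verbs.h>

static int foo_create_kernel_cq(struct ib_device *dev, void *priv,
				struct ib_cq **out)
{
	struct ib_cq *cq;

	cq = ib_alloc_cq(dev, priv, 128 /* nr_cqe */, 0 /* comp_vector */,
			 IB_POLL_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	*out = cq;	/* completions arrive via each WR's wr->wr_cqe */
	return 0;
}

Teardown is the matching ib_free_cq(cq); user-space CQs take the uverbs path instead.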
6459 +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
6460 +index d03dacaef7880..2643d5dbe1da8 100644
6461 +--- a/drivers/infiniband/core/ucma.c
6462 ++++ b/drivers/infiniband/core/ucma.c
6463 +@@ -586,6 +586,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
6464 + list_move_tail(&uevent->list, &list);
6465 + }
6466 + list_del(&ctx->list);
6467 ++ events_reported = ctx->events_reported;
6468 + mutex_unlock(&ctx->file->mut);
6469 +
6470 + list_for_each_entry_safe(uevent, tmp, &list, list) {
6471 +@@ -595,7 +596,6 @@ static int ucma_free_ctx(struct ucma_context *ctx)
6472 + kfree(uevent);
6473 + }
6474 +
6475 +- events_reported = ctx->events_reported;
6476 + mutex_destroy(&ctx->mutex);
6477 + kfree(ctx);
6478 + return events_reported;
6479 +@@ -1512,7 +1512,9 @@ static ssize_t ucma_process_join(struct ucma_file *file,
6480 + return 0;
6481 +
6482 + err3:
6483 ++ mutex_lock(&ctx->mutex);
6484 + rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
6485 ++ mutex_unlock(&ctx->mutex);
6486 + ucma_cleanup_mc_events(mc);
6487 + err2:
6488 + xa_erase(&multicast_table, mc->id);
6489 +@@ -1678,7 +1680,9 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
6490 +
6491 + cur_file = ctx->file;
6492 + if (cur_file == new_file) {
6493 ++ mutex_lock(&cur_file->mut);
6494 + resp.events_reported = ctx->events_reported;
6495 ++ mutex_unlock(&cur_file->mut);
6496 + goto response;
6497 + }
6498 +
6499 +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
6500 +index 82455a1392f1d..7e765fe211607 100644
6501 +--- a/drivers/infiniband/core/umem.c
6502 ++++ b/drivers/infiniband/core/umem.c
6503 +@@ -151,13 +151,24 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
6504 + dma_addr_t mask;
6505 + int i;
6506 +
6507 ++ /* rdma_for_each_block() has a bug if the page size is smaller than the
6508 ++ * page size used to build the umem. For now prevent smaller page sizes
6509 ++ * from being returned.
6510 ++ */
6511 ++ pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
6512 ++
6513 + /* At minimum, drivers must support PAGE_SIZE or smaller */
6514 + if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
6515 + return 0;
6516 +
6517 + va = virt;
6518 +- /* max page size not to exceed MR length */
6519 +- mask = roundup_pow_of_two(umem->length);
6520 ++ /* The best result is the smallest page size that results in the minimum
6521 ++ * number of required pages. Compute the largest page size that could
6522 ++ * work based on VA address bits that don't change.
6523 ++ */
6524 ++ mask = pgsz_bitmap &
6525 ++ GENMASK(BITS_PER_LONG - 1,
6526 ++ bits_per((umem->length - 1 + virt) ^ virt));
6527 + /* offset into first SGL */
6528 + pgoff = umem->address & ~PAGE_MASK;
6529 +
6530 +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
6531 +index 1b0ea945756f0..2e397d18dbf44 100644
6532 +--- a/drivers/infiniband/core/verbs.c
6533 ++++ b/drivers/infiniband/core/verbs.c
6534 +@@ -2016,16 +2016,21 @@ EXPORT_SYMBOL(rdma_set_cq_moderation);
6535 +
6536 + int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
6537 + {
6538 ++ int ret;
6539 ++
6540 + if (WARN_ON_ONCE(cq->shared))
6541 + return -EOPNOTSUPP;
6542 +
6543 + if (atomic_read(&cq->usecnt))
6544 + return -EBUSY;
6545 +
6546 ++ ret = cq->device->ops.destroy_cq(cq, udata);
6547 ++ if (ret)
6548 ++ return ret;
6549 ++
6550 + rdma_restrack_del(&cq->res);
6551 +- cq->device->ops.destroy_cq(cq, udata);
6552 + kfree(cq);
6553 +- return 0;
6554 ++ return ret;
6555 + }
6556 + EXPORT_SYMBOL(ib_destroy_cq_user);
6557 +
6558 +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
6559 +index cb6e873039df5..9f69abf01d331 100644
6560 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
6561 ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
6562 +@@ -2714,7 +2714,7 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
6563 + }
6564 +
6565 + /* Completion Queues */
6566 +-void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
6567 ++int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
6568 + {
6569 + struct bnxt_re_cq *cq;
6570 + struct bnxt_qplib_nq *nq;
6571 +@@ -2730,6 +2730,7 @@ void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
6572 + atomic_dec(&rdev->cq_count);
6573 + nq->budget--;
6574 + kfree(cq->cql);
6575 ++ return 0;
6576 + }
6577 +
6578 + int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
6579 +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
6580 +index e5fbbeba6d28d..f4a0ded67a8aa 100644
6581 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
6582 ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
6583 +@@ -193,7 +193,7 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
6584 + const struct ib_recv_wr **bad_recv_wr);
6585 + int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
6586 + struct ib_udata *udata);
6587 +-void bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
6588 ++int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
6589 + int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
6590 + int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
6591 + struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
6592 +diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
6593 +index b1bb61c65f4f6..7b076fc23cf38 100644
6594 +--- a/drivers/infiniband/hw/cxgb4/cq.c
6595 ++++ b/drivers/infiniband/hw/cxgb4/cq.c
6596 +@@ -967,7 +967,7 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
6597 + return !err || err == -ENODATA ? npolled : err;
6598 + }
6599 +
6600 +-void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
6601 ++int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
6602 + {
6603 + struct c4iw_cq *chp;
6604 + struct c4iw_ucontext *ucontext;
6605 +@@ -985,6 +985,7 @@ void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
6606 + ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
6607 + chp->destroy_skb, chp->wr_waitp);
6608 + c4iw_put_wr_wait(chp->wr_waitp);
6609 ++ return 0;
6610 + }
6611 +
6612 + int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
6613 +diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
6614 +index e8e11bd95e429..de0f278e31501 100644
6615 +--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
6616 ++++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
6617 +@@ -992,7 +992,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
6618 + struct ib_udata *udata);
6619 + struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
6620 + int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
6621 +-void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
6622 ++int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
6623 + int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
6624 + struct ib_udata *udata);
6625 + int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
6626 +diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
6627 +index 1889dd172a252..05f593940e7b0 100644
6628 +--- a/drivers/infiniband/hw/efa/efa.h
6629 ++++ b/drivers/infiniband/hw/efa/efa.h
6630 +@@ -139,7 +139,7 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
6631 + struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
6632 + struct ib_qp_init_attr *init_attr,
6633 + struct ib_udata *udata);
6634 +-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
6635 ++int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
6636 + int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
6637 + struct ib_udata *udata);
6638 + struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
6639 +diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
6640 +index 7dd082441333c..bd2caa2353c75 100644
6641 +--- a/drivers/infiniband/hw/efa/efa_verbs.c
6642 ++++ b/drivers/infiniband/hw/efa/efa_verbs.c
6643 +@@ -843,7 +843,7 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
6644 + return efa_com_destroy_cq(&dev->edev, &params);
6645 + }
6646 +
6647 +-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
6648 ++int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
6649 + {
6650 + struct efa_dev *dev = to_edev(ibcq->device);
6651 + struct efa_cq *cq = to_ecq(ibcq);
6652 +@@ -856,6 +856,7 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
6653 + efa_destroy_cq_idx(dev, cq->cq_idx);
6654 + efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
6655 + DMA_FROM_DEVICE);
6656 ++ return 0;
6657 + }
6658 +
6659 + static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
6660 +diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
6661 +index e87d616f79882..c5acf3332519b 100644
6662 +--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
6663 ++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
6664 +@@ -311,7 +311,7 @@ err_cq_buf:
6665 + return ret;
6666 + }
6667 +
6668 +-void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
6669 ++int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
6670 + {
6671 + struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
6672 + struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
6673 +@@ -322,6 +322,7 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
6674 + free_cq_buf(hr_dev, hr_cq);
6675 + free_cq_db(hr_dev, hr_cq, udata);
6676 + free_cqc(hr_dev, hr_cq);
6677 ++ return 0;
6678 + }
6679 +
6680 + void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
6681 +diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
6682 +index c69453a62767c..77ca55b559a0a 100644
6683 +--- a/drivers/infiniband/hw/hns/hns_roce_device.h
6684 ++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
6685 +@@ -929,7 +929,7 @@ struct hns_roce_hw {
6686 + int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
6687 + int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
6688 + struct ib_udata *udata);
6689 +- void (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
6690 ++ int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
6691 + int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
6692 + int (*init_eq)(struct hns_roce_dev *hr_dev);
6693 + void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
6694 +@@ -1246,7 +1246,7 @@ int to_hr_qp_type(int qp_type);
6695 + int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
6696 + struct ib_udata *udata);
6697 +
6698 +-void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
6699 ++int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
6700 + int hns_roce_db_map_user(struct hns_roce_ucontext *context,
6701 + struct ib_udata *udata, unsigned long virt,
6702 + struct hns_roce_db *db);
6703 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
6704 +index cf39f560b8001..5a0c90e0b367b 100644
6705 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
6706 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
6707 +@@ -271,7 +271,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
6708 + ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
6709 + break;
6710 + case IB_WR_LOCAL_INV:
6711 +- break;
6712 + case IB_WR_ATOMIC_CMP_AND_SWP:
6713 + case IB_WR_ATOMIC_FETCH_AND_ADD:
6714 + case IB_WR_LSO:
6715 +@@ -3573,7 +3572,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
6716 + return 0;
6717 + }
6718 +
6719 +-static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
6720 ++static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
6721 + {
6722 + struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
6723 + struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
6724 +@@ -3604,6 +3603,7 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
6725 + }
6726 + wait_time++;
6727 + }
6728 ++ return 0;
6729 + }
6730 +
6731 + static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
6732 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6733 +index 38a48ab3e1d02..37809a0b50e25 100644
6734 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6735 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6736 +@@ -1770,9 +1770,9 @@ static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
6737 + int *buf_page_size, int *bt_page_size, u32 hem_type)
6738 + {
6739 + u64 obj_per_chunk;
6740 +- int bt_chunk_size = 1 << PAGE_SHIFT;
6741 +- int buf_chunk_size = 1 << PAGE_SHIFT;
6742 +- int obj_per_chunk_default = buf_chunk_size / obj_size;
6743 ++ u64 bt_chunk_size = PAGE_SIZE;
6744 ++ u64 buf_chunk_size = PAGE_SIZE;
6745 ++ u64 obj_per_chunk_default = buf_chunk_size / obj_size;
6746 +
6747 + *buf_page_size = 0;
6748 + *bt_page_size = 0;
6749 +@@ -3640,9 +3640,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
6750 + V2_QPC_BYTE_76_SRQ_EN_S, 1);
6751 + }
6752 +
6753 +- roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
6754 +- V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
6755 +-
6756 + roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
6757 +
6758 + hr_qp->access_flags = attr->qp_access_flags;
6759 +@@ -3983,6 +3980,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
6760 + dma_addr_t trrl_ba;
6761 + dma_addr_t irrl_ba;
6762 + enum ib_mtu mtu;
6763 ++ u8 lp_pktn_ini;
6764 + u8 port_num;
6765 + u64 *mtts;
6766 + u8 *dmac;
6767 +@@ -4090,13 +4088,21 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
6768 + }
6769 +
6770 + #define MAX_LP_MSG_LEN 65536
6771 +- /* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
6772 ++ /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
6773 ++ lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
6774 ++
6775 + roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
6776 +- V2_QPC_BYTE_56_LP_PKTN_INI_S,
6777 +- ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
6778 ++ V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini);
6779 + roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
6780 + V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
6781 +
6782 ++ /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
6783 ++ roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
6784 ++ V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini);
6785 ++ roce_set_field(qpc_mask->byte_172_sq_psn,
6786 ++ V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
6787 ++ V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
6788 ++
6789 + roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
6790 + V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
6791 + roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
6792 +@@ -4287,11 +4293,19 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
6793 + V2_QPC_BYTE_28_FL_S, 0);
6794 + memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
6795 + memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
6796 ++
6797 ++ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
6798 ++ if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
6799 ++ ibdev_err(ibdev,
6800 ++ "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
6801 ++ hr_qp->sl, MAX_SERVICE_LEVEL);
6802 ++ return -EINVAL;
6803 ++ }
6804 ++
6805 + roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
6806 +- V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
6807 ++ V2_QPC_BYTE_28_SL_S, hr_qp->sl);
6808 + roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
6809 + V2_QPC_BYTE_28_SL_S, 0);
6810 +- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
6811 +
6812 + return 0;
6813 + }
6814 +@@ -4787,7 +4801,9 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
6815 + qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
6816 + V2_QPC_BYTE_212_RETRY_CNT_M,
6817 + V2_QPC_BYTE_212_RETRY_CNT_S);
6818 +- qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
6819 ++ qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
6820 ++ V2_QPC_BYTE_244_RNR_CNT_M,
6821 ++ V2_QPC_BYTE_244_RNR_CNT_S);
6822 +
6823 + done:
6824 + qp_attr->cur_qp_state = qp_attr->qp_state;
6825 +@@ -4803,6 +4819,7 @@ done:
6826 + }
6827 +
6828 + qp_init_attr->cap = qp_attr->cap;
6829 ++ qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
6830 +
6831 + out:
6832 + mutex_unlock(&hr_qp->mutex);
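In the LP_PKTN_INI hunk above, lp_pktn_ini is chosen so that MTU * 2^lp_pktn_ini stays within the 64 KB loopback cap, and ACK_REQ_FREQ is now programmed to the same value to satisfy the constraint noted in the in-code comment. Worked numbers for one assumed MTU:

/* Path MTU of 4096 bytes (IB_MTU_4096):
 *   MAX_LP_MSG_LEN / mtu = 65536 / 4096 = 16
 *   lp_pktn_ini = ilog2(16) = 4, so MTU * 2^4 = 64 KB exactly
 *   ACK_REQ_FREQ = 4, meeting "ACK_REQ_FREQ >= LP_PKTN_INI"
 */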
6833 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
6834 +index 4f840997c6c73..c6a280bdbfaaf 100644
6835 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
6836 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
6837 +@@ -1957,6 +1957,8 @@ struct hns_roce_eq_context {
6838 + #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
6839 + #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
6840 +
6841 ++#define MAX_SERVICE_LEVEL 0x7
6842 ++
6843 + struct hns_roce_wqe_atomic_seg {
6844 + __le64 fetchadd_swap_data;
6845 + __le64 cmp_data;
6846 +diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
6847 +index 4edea397b6b80..4486c9b7c3e43 100644
6848 +--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
6849 ++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
6850 +@@ -1171,8 +1171,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
6851 +
6852 + mutex_lock(&hr_qp->mutex);
6853 +
6854 +- cur_state = attr_mask & IB_QP_CUR_STATE ?
6855 +- attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
6856 ++ if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
6857 ++ goto out;
6858 ++
6859 ++ cur_state = hr_qp->state;
6860 + new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
6861 +
6862 + if (ibqp->uobject &&
6863 +diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
6864 +index 49d92638e0dbb..9a2b87cc3d301 100644
6865 +--- a/drivers/infiniband/hw/i40iw/i40iw.h
6866 ++++ b/drivers/infiniband/hw/i40iw/i40iw.h
6867 +@@ -409,8 +409,8 @@ static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
6868 + }
6869 +
6870 + /* i40iw.c */
6871 +-void i40iw_add_ref(struct ib_qp *);
6872 +-void i40iw_rem_ref(struct ib_qp *);
6873 ++void i40iw_qp_add_ref(struct ib_qp *ibqp);
6874 ++void i40iw_qp_rem_ref(struct ib_qp *ibqp);
6875 + struct ib_qp *i40iw_get_qp(struct ib_device *, int);
6876 +
6877 + void i40iw_flush_wqes(struct i40iw_device *iwdev,
6878 +@@ -554,9 +554,8 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
6879 + bool wait);
6880 + void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
6881 + void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
6882 +-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
6883 +- struct i40iw_qp *iwqp,
6884 +- u32 qp_num);
6885 ++void i40iw_free_qp_resources(struct i40iw_qp *iwqp);
6886 ++
6887 + enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
6888 + struct i40iw_dma_mem *memptr,
6889 + u32 size, u32 mask);
6890 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
6891 +index fa7a5ff498c73..56c1e9abc52dc 100644
6892 +--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
6893 ++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
6894 +@@ -2322,7 +2322,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
6895 + iwqp = cm_node->iwqp;
6896 + if (iwqp) {
6897 + iwqp->cm_node = NULL;
6898 +- i40iw_rem_ref(&iwqp->ibqp);
6899 ++ i40iw_qp_rem_ref(&iwqp->ibqp);
6900 + cm_node->iwqp = NULL;
6901 + } else if (cm_node->qhash_set) {
6902 + i40iw_get_addr_info(cm_node, &nfo);
6903 +@@ -3452,7 +3452,7 @@ void i40iw_cm_disconn(struct i40iw_qp *iwqp)
6904 + kfree(work);
6905 + return;
6906 + }
6907 +- i40iw_add_ref(&iwqp->ibqp);
6908 ++ i40iw_qp_add_ref(&iwqp->ibqp);
6909 + spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
6910 +
6911 + work->iwqp = iwqp;
6912 +@@ -3623,7 +3623,7 @@ static void i40iw_disconnect_worker(struct work_struct *work)
6913 +
6914 + kfree(dwork);
6915 + i40iw_cm_disconn_true(iwqp);
6916 +- i40iw_rem_ref(&iwqp->ibqp);
6917 ++ i40iw_qp_rem_ref(&iwqp->ibqp);
6918 + }
6919 +
6920 + /**
6921 +@@ -3745,7 +3745,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
6922 + cm_node->lsmm_size = accept.size + conn_param->private_data_len;
6923 + i40iw_cm_init_tsa_conn(iwqp, cm_node);
6924 + cm_id->add_ref(cm_id);
6925 +- i40iw_add_ref(&iwqp->ibqp);
6926 ++ i40iw_qp_add_ref(&iwqp->ibqp);
6927 +
6928 + attr.qp_state = IB_QPS_RTS;
6929 + cm_node->qhash_set = false;
6930 +@@ -3908,7 +3908,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
6931 + iwqp->cm_node = cm_node;
6932 + cm_node->iwqp = iwqp;
6933 + iwqp->cm_id = cm_id;
6934 +- i40iw_add_ref(&iwqp->ibqp);
6935 ++ i40iw_qp_add_ref(&iwqp->ibqp);
6936 +
6937 + if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
6938 + cm_node->state = I40IW_CM_STATE_SYN_SENT;
6939 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
6940 +index ae8b97c306657..a7512508f7e60 100644
6941 +--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
6942 ++++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
6943 +@@ -313,7 +313,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
6944 + __func__, info->qp_cq_id);
6945 + continue;
6946 + }
6947 +- i40iw_add_ref(&iwqp->ibqp);
6948 ++ i40iw_qp_add_ref(&iwqp->ibqp);
6949 + spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
6950 + qp = &iwqp->sc_qp;
6951 + spin_lock_irqsave(&iwqp->lock, flags);
6952 +@@ -427,7 +427,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
6953 + break;
6954 + }
6955 + if (info->qp)
6956 +- i40iw_rem_ref(&iwqp->ibqp);
6957 ++ i40iw_qp_rem_ref(&iwqp->ibqp);
6958 + } while (1);
6959 +
6960 + if (aeqcnt)
6961 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
6962 +index 016524683e17e..72db7c1dc2998 100644
6963 +--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
6964 ++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
6965 +@@ -479,25 +479,6 @@ void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
6966 + }
6967 + }
6968 +
6969 +-/**
6970 +- * i40iw_free_qp - callback after destroy cqp completes
6971 +- * @cqp_request: cqp request for destroy qp
6972 +- * @num: not used
6973 +- */
6974 +-static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
6975 +-{
6976 +- struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
6977 +- struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
6978 +- struct i40iw_device *iwdev;
6979 +- u32 qp_num = iwqp->ibqp.qp_num;
6980 +-
6981 +- iwdev = iwqp->iwdev;
6982 +-
6983 +- i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
6984 +- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
6985 +- i40iw_rem_devusecount(iwdev);
6986 +-}
6987 +-
6988 + /**
6989 + * i40iw_wait_event - wait for completion
6990 + * @iwdev: iwarp device
6991 +@@ -618,26 +599,23 @@ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
6992 + }
6993 +
6994 + /**
6995 +- * i40iw_add_ref - add refcount for qp
6996 ++ * i40iw_qp_add_ref - add refcount for qp
6997 + * @ibqp: iqarp qp
6998 + */
6999 +-void i40iw_add_ref(struct ib_qp *ibqp)
7000 ++void i40iw_qp_add_ref(struct ib_qp *ibqp)
7001 + {
7002 + struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;
7003 +
7004 +- atomic_inc(&iwqp->refcount);
7005 ++ refcount_inc(&iwqp->refcount);
7006 + }
7007 +
7008 + /**
7009 +- * i40iw_rem_ref - rem refcount for qp and free if 0
7010 ++ * i40iw_qp_rem_ref - rem refcount for qp and free if 0
7011 + * @ibqp: iqarp qp
7012 + */
7013 +-void i40iw_rem_ref(struct ib_qp *ibqp)
7014 ++void i40iw_qp_rem_ref(struct ib_qp *ibqp)
7015 + {
7016 + struct i40iw_qp *iwqp;
7017 +- enum i40iw_status_code status;
7018 +- struct i40iw_cqp_request *cqp_request;
7019 +- struct cqp_commands_info *cqp_info;
7020 + struct i40iw_device *iwdev;
7021 + u32 qp_num;
7022 + unsigned long flags;
7023 +@@ -645,7 +623,7 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
7024 + iwqp = to_iwqp(ibqp);
7025 + iwdev = iwqp->iwdev;
7026 + spin_lock_irqsave(&iwdev->qptable_lock, flags);
7027 +- if (!atomic_dec_and_test(&iwqp->refcount)) {
7028 ++ if (!refcount_dec_and_test(&iwqp->refcount)) {
7029 + spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
7030 + return;
7031 + }
7032 +@@ -653,25 +631,8 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
7033 + qp_num = iwqp->ibqp.qp_num;
7034 + iwdev->qp_table[qp_num] = NULL;
7035 + spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
7036 +- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
7037 +- if (!cqp_request)
7038 +- return;
7039 +-
7040 +- cqp_request->callback_fcn = i40iw_free_qp;
7041 +- cqp_request->param = (void *)&iwqp->sc_qp;
7042 +- cqp_info = &cqp_request->info;
7043 +- cqp_info->cqp_cmd = OP_QP_DESTROY;
7044 +- cqp_info->post_sq = 1;
7045 +- cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
7046 +- cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
7047 +- cqp_info->in.u.qp_destroy.remove_hash_idx = true;
7048 +- status = i40iw_handle_cqp_op(iwdev, cqp_request);
7049 +- if (!status)
7050 +- return;
7051 ++ complete(&iwqp->free_qp);
7052 +
7053 +- i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
7054 +- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
7055 +- i40iw_rem_devusecount(iwdev);
7056 + }
7057 +
7058 + /**
7059 +@@ -938,7 +899,7 @@ static void i40iw_terminate_timeout(struct timer_list *t)
7060 + struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
7061 +
7062 + i40iw_terminate_done(qp, 1);
7063 +- i40iw_rem_ref(&iwqp->ibqp);
7064 ++ i40iw_qp_rem_ref(&iwqp->ibqp);
7065 + }
7066 +
7067 + /**
7068 +@@ -950,7 +911,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
7069 + struct i40iw_qp *iwqp;
7070 +
7071 + iwqp = (struct i40iw_qp *)qp->back_qp;
7072 +- i40iw_add_ref(&iwqp->ibqp);
7073 ++ i40iw_qp_add_ref(&iwqp->ibqp);
7074 + timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
7075 + iwqp->terminate_timer.expires = jiffies + HZ;
7076 + add_timer(&iwqp->terminate_timer);
7077 +@@ -966,7 +927,7 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
7078 +
7079 + iwqp = (struct i40iw_qp *)qp->back_qp;
7080 + if (del_timer(&iwqp->terminate_timer))
7081 +- i40iw_rem_ref(&iwqp->ibqp);
7082 ++ i40iw_qp_rem_ref(&iwqp->ibqp);
7083 + }
7084 +
7085 + /**
7086 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
7087 +index 19af29a48c559..2419de36e943d 100644
7088 +--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
7089 ++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
7090 +@@ -364,11 +364,11 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
7091 + * @iwqp: qp ptr (user or kernel)
7092 + * @qp_num: qp number assigned
7093 + */
7094 +-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
7095 +- struct i40iw_qp *iwqp,
7096 +- u32 qp_num)
7097 ++void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
7098 + {
7099 + struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
7100 ++ struct i40iw_device *iwdev = iwqp->iwdev;
7101 ++ u32 qp_num = iwqp->ibqp.qp_num;
7102 +
7103 + i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
7104 + i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
7105 +@@ -402,6 +402,10 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
7106 + static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
7107 + {
7108 + struct i40iw_qp *iwqp = to_iwqp(ibqp);
7109 ++ struct ib_qp_attr attr;
7110 ++ struct i40iw_device *iwdev = iwqp->iwdev;
7111 ++
7112 ++ memset(&attr, 0, sizeof(attr));
7113 +
7114 + iwqp->destroyed = 1;
7115 +
7116 +@@ -416,7 +420,15 @@ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
7117 + }
7118 + }
7119 +
7120 +- i40iw_rem_ref(&iwqp->ibqp);
7121 ++ attr.qp_state = IB_QPS_ERR;
7122 ++ i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
7123 ++ i40iw_qp_rem_ref(&iwqp->ibqp);
7124 ++ wait_for_completion(&iwqp->free_qp);
7125 ++ i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
7126 ++ i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
7127 ++ i40iw_free_qp_resources(iwqp);
7128 ++ i40iw_rem_devusecount(iwdev);
7129 ++
7130 + return 0;
7131 + }
7132 +
7133 +@@ -577,6 +589,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
7134 + qp->back_qp = (void *)iwqp;
7135 + qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
7136 +
7137 ++ iwqp->iwdev = iwdev;
7138 + iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
7139 +
7140 + if (i40iw_allocate_dma_mem(dev->hw,
7141 +@@ -601,7 +614,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
7142 + goto error;
7143 + }
7144 +
7145 +- iwqp->iwdev = iwdev;
7146 + iwqp->iwpd = iwpd;
7147 + iwqp->ibqp.qp_num = qp_num;
7148 + qp = &iwqp->sc_qp;
7149 +@@ -715,7 +727,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
7150 + goto error;
7151 + }
7152 +
7153 +- i40iw_add_ref(&iwqp->ibqp);
7154 ++ refcount_set(&iwqp->refcount, 1);
7155 + spin_lock_init(&iwqp->lock);
7156 + iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
7157 + iwdev->qp_table[qp_num] = iwqp;
7158 +@@ -737,10 +749,11 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
7159 + }
7160 + init_completion(&iwqp->sq_drained);
7161 + init_completion(&iwqp->rq_drained);
7162 ++ init_completion(&iwqp->free_qp);
7163 +
7164 + return &iwqp->ibqp;
7165 + error:
7166 +- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
7167 ++ i40iw_free_qp_resources(iwqp);
7168 + return ERR_PTR(err_code);
7169 + }
7170 +
7171 +@@ -1053,7 +1066,7 @@ void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
7172 + * @ib_cq: cq pointer
7173 + * @udata: user data or NULL for kernel object
7174 + */
7175 +-static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
7176 ++static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
7177 + {
7178 + struct i40iw_cq *iwcq;
7179 + struct i40iw_device *iwdev;
7180 +@@ -1065,6 +1078,7 @@ static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
7181 + i40iw_cq_wq_destroy(iwdev, cq);
7182 + cq_free_resources(iwdev, iwcq);
7183 + i40iw_rem_devusecount(iwdev);
7184 ++ return 0;
7185 + }
7186 +
7187 + /**
7188 +@@ -2656,13 +2670,13 @@ static const struct ib_device_ops i40iw_dev_ops = {
7189 + .get_hw_stats = i40iw_get_hw_stats,
7190 + .get_port_immutable = i40iw_port_immutable,
7191 + .iw_accept = i40iw_accept,
7192 +- .iw_add_ref = i40iw_add_ref,
7193 ++ .iw_add_ref = i40iw_qp_add_ref,
7194 + .iw_connect = i40iw_connect,
7195 + .iw_create_listen = i40iw_create_listen,
7196 + .iw_destroy_listen = i40iw_destroy_listen,
7197 + .iw_get_qp = i40iw_get_qp,
7198 + .iw_reject = i40iw_reject,
7199 +- .iw_rem_ref = i40iw_rem_ref,
7200 ++ .iw_rem_ref = i40iw_qp_rem_ref,
7201 + .map_mr_sg = i40iw_map_mr_sg,
7202 + .mmap = i40iw_mmap,
7203 + .modify_qp = i40iw_modify_qp,
7204 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
7205 +index 331bc21cbcc73..bab71f3e56374 100644
7206 +--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
7207 ++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
7208 +@@ -139,7 +139,7 @@ struct i40iw_qp {
7209 + struct i40iw_qp_host_ctx_info ctx_info;
7210 + struct i40iwarp_offload_info iwarp_info;
7211 + void *allocated_buffer;
7212 +- atomic_t refcount;
7213 ++ refcount_t refcount;
7214 + struct iw_cm_id *cm_id;
7215 + void *cm_node;
7216 + struct ib_mr *lsmm_mr;
7217 +@@ -174,5 +174,6 @@ struct i40iw_qp {
7218 + struct i40iw_dma_mem ietf_mem;
7219 + struct completion sq_drained;
7220 + struct completion rq_drained;
7221 ++ struct completion free_qp;
7222 + };
7223 + #endif
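Taken together, the i40iw hunks replace the asynchronous CQP destroy callback with a synchronous scheme: the QP carries a refcount_t plus a free_qp completion, the final i40iw_qp_rem_ref() signals the completion, and i40iw_destroy_qp() waits on it before freeing anything. The bare pattern, as a sketch with stand-in types:

#include <linux/completion.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t refcount;		/* refcount_set(.., 1) at creation */
	struct completion free_done;	/* init_completion() at creation */
};

static void obj_put(struct obj *o)
{
	/* The last reference only signals; nothing is freed here, so this
	 * is safe from timer or interrupt-driven contexts. */
	if (refcount_dec_and_test(&o->refcount))
		complete(&o->free_done);
}

static void obj_destroy(struct obj *o)
{
	obj_put(o);				/* drop the creation ref */
	wait_for_completion(&o->free_done);	/* all users are gone */
	kfree(o);				/* synchronous teardown */
}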
7224 +diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
7225 +index b591861934b3c..81d6a3460b55d 100644
7226 +--- a/drivers/infiniband/hw/mlx4/cm.c
7227 ++++ b/drivers/infiniband/hw/mlx4/cm.c
7228 +@@ -280,6 +280,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
7229 + if (!sriov->is_going_down && !id->scheduled_delete) {
7230 + id->scheduled_delete = 1;
7231 + schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
7232 ++ } else if (id->scheduled_delete) {
7233 ++ /* Adjust timeout if already scheduled */
7234 ++ mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
7235 + }
7236 + spin_unlock_irqrestore(&sriov->going_down_lock, flags);
7237 + spin_unlock(&sriov->id_map_lock);
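The new else-branch covers a repeat call while a delete is already queued: instead of leaving the stale expiry in place, the deadline is pushed out again. mod_delayed_work() fits because it re-arms the timer whether or not the work item was still pending; condensed from the hunk above:

/* First caller schedules; later callers extend the deadline. */
if (!id->scheduled_delete) {
	id->scheduled_delete = 1;
	schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
} else {
	mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
}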
7238 +diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
7239 +index f8b936b76dcdf..3851316407ceb 100644
7240 +--- a/drivers/infiniband/hw/mlx4/cq.c
7241 ++++ b/drivers/infiniband/hw/mlx4/cq.c
7242 +@@ -475,7 +475,7 @@ out:
7243 + return err;
7244 + }
7245 +
7246 +-void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7247 ++int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7248 + {
7249 + struct mlx4_ib_dev *dev = to_mdev(cq->device);
7250 + struct mlx4_ib_cq *mcq = to_mcq(cq);
7251 +@@ -495,6 +495,7 @@ void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7252 + mlx4_db_free(dev->dev, &mcq->db);
7253 + }
7254 + ib_umem_release(mcq->umem);
7255 ++ return 0;
7256 + }
7257 +
7258 + static void dump_cqe(void *cqe)
7259 +diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
7260 +index abe68708d6d6e..2cbdba4da9dfe 100644
7261 +--- a/drivers/infiniband/hw/mlx4/mad.c
7262 ++++ b/drivers/infiniband/hw/mlx4/mad.c
7263 +@@ -1299,6 +1299,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
7264 + spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
7265 + }
7266 +
7267 ++static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
7268 ++{
7269 ++ unsigned long flags;
7270 ++ struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
7271 ++ struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
7272 ++
7273 ++ spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
7274 ++ if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
7275 ++ queue_work(ctx->wi_wq, &ctx->work);
7276 ++ spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
7277 ++}
7278 ++
7279 + static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
7280 + struct mlx4_ib_demux_pv_qp *tun_qp,
7281 + int index)
7282 +@@ -2001,7 +2013,8 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
7283 + cq_size *= 2;
7284 +
7285 + cq_attr.cqe = cq_size;
7286 +- ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
7287 ++ ctx->cq = ib_create_cq(ctx->ib_dev,
7288 ++ create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
7289 + NULL, ctx, &cq_attr);
7290 + if (IS_ERR(ctx->cq)) {
7291 + ret = PTR_ERR(ctx->cq);
7292 +@@ -2038,6 +2051,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
7293 + INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
7294 +
7295 + ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
7296 ++ ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
7297 +
7298 + ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
7299 + if (ret) {
7300 +@@ -2181,7 +2195,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
7301 + goto err_mcg;
7302 + }
7303 +
7304 +- snprintf(name, sizeof name, "mlx4_ibt%d", port);
7305 ++ snprintf(name, sizeof(name), "mlx4_ibt%d", port);
7306 + ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
7307 + if (!ctx->wq) {
7308 + pr_err("Failed to create tunnelling WQ for port %d\n", port);
7309 +@@ -2189,7 +2203,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
7310 + goto err_wq;
7311 + }
7312 +
7313 +- snprintf(name, sizeof name, "mlx4_ibud%d", port);
7314 ++ snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
7315 ++ ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
7316 ++ if (!ctx->wi_wq) {
7317 ++ pr_err("Failed to create wire WQ for port %d\n", port);
7318 ++ ret = -ENOMEM;
7319 ++ goto err_wiwq;
7320 ++ }
7321 ++
7322 ++ snprintf(name, sizeof(name), "mlx4_ibud%d", port);
7323 + ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
7324 + if (!ctx->ud_wq) {
7325 + pr_err("Failed to create up/down WQ for port %d\n", port);
7326 +@@ -2200,6 +2222,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
7327 + return 0;
7328 +
7329 + err_udwq:
7330 ++ destroy_workqueue(ctx->wi_wq);
7331 ++ ctx->wi_wq = NULL;
7332 ++
7333 ++err_wiwq:
7334 + destroy_workqueue(ctx->wq);
7335 + ctx->wq = NULL;
7336 +
7337 +@@ -2247,12 +2273,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
7338 + ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
7339 + }
7340 + flush_workqueue(ctx->wq);
7341 ++ flush_workqueue(ctx->wi_wq);
7342 + for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
7343 + destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
7344 + free_pv_object(dev, i, ctx->port);
7345 + }
7346 + kfree(ctx->tun);
7347 + destroy_workqueue(ctx->ud_wq);
7348 ++ destroy_workqueue(ctx->wi_wq);
7349 + destroy_workqueue(ctx->wq);
7350 + }
7351 + }
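These mad.c hunks give wire (non-tunnel) QP completions their own ordered workqueue, wi_wq, so they are no longer serialized behind tunnel work on ctx->wq, and teardown flushes both queues before destroying them. The allocation and unwind shape, with a hypothetical foo_wi name prefix:

char name[16];

snprintf(name, sizeof(name), "foo_wi%d", port);
wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!wi_wq)
	goto err_unwind_prev;	/* destroy the queues created earlier */

/* ... queue work from the CQ completion handler ... */

flush_workqueue(wi_wq);		/* drain queued handlers first */
destroy_workqueue(wi_wq);	/* only then free the queue itself */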
7352 +diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
7353 +index 6f4ea1067095e..bac526a703173 100644
7354 +--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
7355 ++++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
7356 +@@ -454,6 +454,7 @@ struct mlx4_ib_demux_pv_ctx {
7357 + struct ib_pd *pd;
7358 + struct work_struct work;
7359 + struct workqueue_struct *wq;
7360 ++ struct workqueue_struct *wi_wq;
7361 + struct mlx4_ib_demux_pv_qp qp[2];
7362 + };
7363 +
7364 +@@ -461,6 +462,7 @@ struct mlx4_ib_demux_ctx {
7365 + struct ib_device *ib_dev;
7366 + int port;
7367 + struct workqueue_struct *wq;
7368 ++ struct workqueue_struct *wi_wq;
7369 + struct workqueue_struct *ud_wq;
7370 + spinlock_t ud_lock;
7371 + atomic64_t subnet_prefix;
7372 +@@ -736,7 +738,7 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
7373 + int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
7374 + int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
7375 + struct ib_udata *udata);
7376 +-void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
7377 ++int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
7378 + int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
7379 + int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
7380 + void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
7381 +diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
7382 +index 0c18cb6a2f148..ec634085e1d9a 100644
7383 +--- a/drivers/infiniband/hw/mlx5/cq.c
7384 ++++ b/drivers/infiniband/hw/mlx5/cq.c
7385 +@@ -168,7 +168,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
7386 + {
7387 + enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
7388 + struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
7389 +- struct mlx5_ib_srq *srq;
7390 ++ struct mlx5_ib_srq *srq = NULL;
7391 + struct mlx5_ib_wq *wq;
7392 + u16 wqe_ctr;
7393 + u8 roce_packet_type;
7394 +@@ -180,7 +180,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
7395 +
7396 + if (qp->ibqp.xrcd) {
7397 + msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
7398 +- srq = to_mibsrq(msrq);
7399 ++ if (msrq)
7400 ++ srq = to_mibsrq(msrq);
7401 + } else {
7402 + srq = to_msrq(qp->ibqp.srq);
7403 + }
7404 +@@ -1023,16 +1024,21 @@ err_cqb:
7405 + return err;
7406 + }
7407 +
7408 +-void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7409 ++int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7410 + {
7411 + struct mlx5_ib_dev *dev = to_mdev(cq->device);
7412 + struct mlx5_ib_cq *mcq = to_mcq(cq);
7413 ++ int ret;
7414 ++
7415 ++ ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
7416 ++ if (ret)
7417 ++ return ret;
7418 +
7419 +- mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
7420 + if (udata)
7421 + destroy_cq_user(mcq, udata);
7422 + else
7423 + destroy_cq_kernel(dev, mcq);
7424 ++ return 0;
7425 + }
7426 +
7427 + static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
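
In the mlx5 hunk above, the destroy path now propagates failure from the firmware command: mlx5_core_destroy_cq() is issued first, and the host-side state is freed only when it succeeds, so a CQ whose hardware destroy failed is not left referencing freed memory. The ordering is the point; a sketch with invented demo_* helpers, not the mlx5 API:

struct demo_cq;
static int demo_hw_destroy_cq(struct demo_cq *cq);
static void demo_free_host_state(struct demo_cq *cq);

int demo_destroy_cq(struct demo_cq *cq)
{
	int err;

	err = demo_hw_destroy_cq(cq);	/* may fail: HW object still live */
	if (err)
		return err;		/* keep host state until HW is gone */

	demo_free_host_state(cq);	/* safe: HW can no longer DMA here */
	return 0;
}
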
7428 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
7429 +index 6f99ed03d88e7..1f4aa2647a6f3 100644
7430 +--- a/drivers/infiniband/hw/mlx5/main.c
7431 ++++ b/drivers/infiniband/hw/mlx5/main.c
7432 +@@ -867,7 +867,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
7433 + /* We support 'Gappy' memory registration too */
7434 + props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
7435 + }
7436 +- props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
7437 ++ /* IB_WR_REG_MR always requires changing the entity size with UMR */
7438 ++ if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
7439 ++ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
7440 + if (MLX5_CAP_GEN(mdev, sho)) {
7441 + props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
7442 + /* At this stage no support for signature handover */
7443 +diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
7444 +index 5dbe3eb0d9cb9..3825cdec6ac68 100644
7445 +--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
7446 ++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
7447 +@@ -1180,7 +1180,7 @@ int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
7448 + size_t buflen, size_t *bc);
7449 + int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
7450 + struct ib_udata *udata);
7451 +-void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
7452 ++int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
7453 + int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
7454 + int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
7455 + int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
7456 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
7457 +index 44683073be0c4..85c9a1ffdbb64 100644
7458 +--- a/drivers/infiniband/hw/mlx5/mr.c
7459 ++++ b/drivers/infiniband/hw/mlx5/mr.c
7460 +@@ -50,6 +50,29 @@ enum {
7461 + static void
7462 + create_mkey_callback(int status, struct mlx5_async_work *context);
7463 +
7464 ++static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
7465 ++ struct ib_pd *pd)
7466 ++{
7467 ++ struct mlx5_ib_dev *dev = to_mdev(pd->device);
7468 ++
7469 ++ MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
7470 ++ MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
7471 ++ MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
7472 ++ MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
7473 ++ MLX5_SET(mkc, mkc, lr, 1);
7474 ++
7475 ++ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
7476 ++ MLX5_SET(mkc, mkc, relaxed_ordering_write,
7477 ++ !!(acc & IB_ACCESS_RELAXED_ORDERING));
7478 ++ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
7479 ++ MLX5_SET(mkc, mkc, relaxed_ordering_read,
7480 ++ !!(acc & IB_ACCESS_RELAXED_ORDERING));
7481 ++
7482 ++ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
7483 ++ MLX5_SET(mkc, mkc, qpn, 0xffffff);
7484 ++ MLX5_SET64(mkc, mkc, start_addr, start_addr);
7485 ++}
7486 ++
7487 + static void
7488 + assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
7489 + u32 *in)
7490 +@@ -152,12 +175,12 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
7491 + mr->cache_ent = ent;
7492 + mr->dev = ent->dev;
7493 +
7494 ++ set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
7495 + MLX5_SET(mkc, mkc, free, 1);
7496 + MLX5_SET(mkc, mkc, umr_en, 1);
7497 + MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
7498 + MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
7499 +
7500 +- MLX5_SET(mkc, mkc, qpn, 0xffffff);
7501 + MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
7502 + MLX5_SET(mkc, mkc, log_page_size, ent->page);
7503 + return mr;
7504 +@@ -774,29 +797,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
7505 + return 0;
7506 + }
7507 +
7508 +-static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
7509 +- struct ib_pd *pd)
7510 +-{
7511 +- struct mlx5_ib_dev *dev = to_mdev(pd->device);
7512 +-
7513 +- MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
7514 +- MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
7515 +- MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
7516 +- MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
7517 +- MLX5_SET(mkc, mkc, lr, 1);
7518 +-
7519 +- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
7520 +- MLX5_SET(mkc, mkc, relaxed_ordering_write,
7521 +- !!(acc & IB_ACCESS_RELAXED_ORDERING));
7522 +- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
7523 +- MLX5_SET(mkc, mkc, relaxed_ordering_read,
7524 +- !!(acc & IB_ACCESS_RELAXED_ORDERING));
7525 +-
7526 +- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
7527 +- MLX5_SET(mkc, mkc, qpn, 0xffffff);
7528 +- MLX5_SET64(mkc, mkc, start_addr, start_addr);
7529 +-}
7530 +-
7531 + struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
7532 + {
7533 + struct mlx5_ib_dev *dev = to_mdev(pd->device);
7534 +@@ -1190,29 +1190,17 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
7535 + MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
7536 +
7537 + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
7538 ++ set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr,
7539 ++ populate ? pd : dev->umrc.pd);
7540 + MLX5_SET(mkc, mkc, free, !populate);
7541 + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
7542 +- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
7543 +- MLX5_SET(mkc, mkc, relaxed_ordering_write,
7544 +- !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
7545 +- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
7546 +- MLX5_SET(mkc, mkc, relaxed_ordering_read,
7547 +- !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
7548 +- MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
7549 +- MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
7550 +- MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
7551 +- MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
7552 +- MLX5_SET(mkc, mkc, lr, 1);
7553 + MLX5_SET(mkc, mkc, umr_en, 1);
7554 +
7555 +- MLX5_SET64(mkc, mkc, start_addr, virt_addr);
7556 + MLX5_SET64(mkc, mkc, len, length);
7557 +- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
7558 + MLX5_SET(mkc, mkc, bsf_octword_size, 0);
7559 + MLX5_SET(mkc, mkc, translations_octword_size,
7560 + get_octo_len(virt_addr, length, page_shift));
7561 + MLX5_SET(mkc, mkc, log_page_size, page_shift);
7562 +- MLX5_SET(mkc, mkc, qpn, 0xffffff);
7563 + if (populate) {
7564 + MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
7565 + get_octo_len(virt_addr, length, page_shift));
7566 +diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
7567 +index 9fa2f9164a47b..2ad15adf304e5 100644
7568 +--- a/drivers/infiniband/hw/mthca/mthca_provider.c
7569 ++++ b/drivers/infiniband/hw/mthca/mthca_provider.c
7570 +@@ -789,7 +789,7 @@ out:
7571 + return ret;
7572 + }
7573 +
7574 +-static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7575 ++static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7576 + {
7577 + if (udata) {
7578 + struct mthca_ucontext *context =
7579 +@@ -808,6 +808,7 @@ static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7580 + to_mcq(cq)->set_ci_db_index);
7581 + }
7582 + mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
7583 ++ return 0;
7584 + }
7585 +
7586 + static inline u32 convert_access(int acc)
7587 +diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
7588 +index d11c74390a124..927c70d1ffbc3 100644
7589 +--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
7590 ++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
7591 +@@ -1056,7 +1056,7 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
7592 + spin_unlock_irqrestore(&cq->cq_lock, flags);
7593 + }
7594 +
7595 +-void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
7596 ++int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
7597 + {
7598 + struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
7599 + struct ocrdma_eq *eq = NULL;
7600 +@@ -1081,6 +1081,7 @@ void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
7601 + ocrdma_get_db_addr(dev, pdid),
7602 + dev->nic_info.db_page_size);
7603 + }
7604 ++ return 0;
7605 + }
7606 +
7607 + static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
7608 +diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
7609 +index 3a5010881be5b..c46412dff924a 100644
7610 +--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
7611 ++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
7612 +@@ -72,7 +72,7 @@ void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
7613 + int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
7614 + struct ib_udata *udata);
7615 + int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
7616 +-void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
7617 ++int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
7618 +
7619 + struct ib_qp *ocrdma_create_qp(struct ib_pd *,
7620 + struct ib_qp_init_attr *attrs,
7621 +diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
7622 +index ccaedfd53e49e..679766abb436e 100644
7623 +--- a/drivers/infiniband/hw/qedr/main.c
7624 ++++ b/drivers/infiniband/hw/qedr/main.c
7625 +@@ -601,7 +601,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
7626 + qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
7627 +
7628 + /* Part 2 - check capabilities */
7629 +- page_size = ~dev->attr.page_size_caps + 1;
7630 ++ page_size = ~qed_attr->page_size_caps + 1;
7631 + if (page_size > PAGE_SIZE) {
7632 + DP_ERR(dev,
7633 + "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
7634 +diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
7635 +index 97fc7dd353b04..c7169d2c69e5b 100644
7636 +--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
7637 ++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
7638 +@@ -736,7 +736,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
7639 + struct qedr_dev *dev = ep->dev;
7640 + struct qedr_qp *qp;
7641 + struct qed_iwarp_accept_in params;
7642 +- int rc = 0;
7643 ++ int rc;
7644 +
7645 + DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
7646 +
7647 +@@ -759,8 +759,10 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
7648 + params.ord = conn_param->ord;
7649 +
7650 + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
7651 +- &qp->iwarp_cm_flags))
7652 ++ &qp->iwarp_cm_flags)) {
7653 ++ rc = -EINVAL;
7654 + goto err; /* QP already destroyed */
7655 ++ }
7656 +
7657 + rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
7658 + if (rc) {
7659 +diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
7660 +index 1a7f1f805be3e..41813e9d771ff 100644
7661 +--- a/drivers/infiniband/hw/qedr/verbs.c
7662 ++++ b/drivers/infiniband/hw/qedr/verbs.c
7663 +@@ -998,7 +998,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
7664 + /* Generate doorbell address. */
7665 + cq->db.data.icid = cq->icid;
7666 + cq->db_addr = dev->db_addr + db_offset;
7667 +- cq->db.data.params = DB_AGG_CMD_SET <<
7668 ++ cq->db.data.params = DB_AGG_CMD_MAX <<
7669 + RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
7670 +
7671 + /* point to the very last element, passing it we will toggle */
7672 +@@ -1050,7 +1050,7 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
7673 + #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
7674 + #define QEDR_DESTROY_CQ_ITER_DURATION (10)
7675 +
7676 +-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
7677 ++int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
7678 + {
7679 + struct qedr_dev *dev = get_qedr_dev(ibcq->device);
7680 + struct qed_rdma_destroy_cq_out_params oparams;
7681 +@@ -1065,7 +1065,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
7682 + /* GSIs CQs are handled by driver, so they don't exist in the FW */
7683 + if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
7684 + qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
7685 +- return;
7686 ++ return 0;
7687 + }
7688 +
7689 + iparams.icid = cq->icid;
7690 +@@ -1113,6 +1113,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
7691 + * Since the destroy CQ ramrod has also been received on the EQ we can
7692 + * be certain that there's no event handler in process.
7693 + */
7694 ++ return 0;
7695 + }
7696 +
7697 + static inline int get_gid_info_from_table(struct ib_qp *ibqp,
7698 +@@ -2112,6 +2113,28 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
7699 + return rc;
7700 + }
7701 +
7702 ++static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
7703 ++ struct ib_udata *udata)
7704 ++{
7705 ++ struct qedr_ucontext *ctx =
7706 ++ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
7707 ++ ibucontext);
7708 ++ int rc;
7709 ++
7710 ++ if (qp->qp_type != IB_QPT_GSI) {
7711 ++ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
7712 ++ if (rc)
7713 ++ return rc;
7714 ++ }
7715 ++
7716 ++ if (qp->create_type == QEDR_QP_CREATE_USER)
7717 ++ qedr_cleanup_user(dev, ctx, qp);
7718 ++ else
7719 ++ qedr_cleanup_kernel(dev, qp);
7720 ++
7721 ++ return 0;
7722 ++}
7723 ++
7724 + struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
7725 + struct ib_qp_init_attr *attrs,
7726 + struct ib_udata *udata)
7727 +@@ -2158,19 +2181,21 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
7728 + rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
7729 +
7730 + if (rc)
7731 +- goto err;
7732 ++ goto out_free_qp;
7733 +
7734 + qp->ibqp.qp_num = qp->qp_id;
7735 +
7736 + if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
7737 + rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
7738 + if (rc)
7739 +- goto err;
7740 ++ goto out_free_qp_resources;
7741 + }
7742 +
7743 + return &qp->ibqp;
7744 +
7745 +-err:
7746 ++out_free_qp_resources:
7747 ++ qedr_free_qp_resources(dev, qp, udata);
7748 ++out_free_qp:
7749 + kfree(qp);
7750 +
7751 + return ERR_PTR(-EFAULT);
7752 +@@ -2636,7 +2661,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
7753 + qp_attr->cap.max_recv_wr = qp->rq.max_wr;
7754 + qp_attr->cap.max_send_sge = qp->sq.max_sges;
7755 + qp_attr->cap.max_recv_sge = qp->rq.max_sges;
7756 +- qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
7757 ++ qp_attr->cap.max_inline_data = dev->attr.max_inline;
7758 + qp_init_attr->cap = qp_attr->cap;
7759 +
7760 + qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
7761 +@@ -2671,28 +2696,6 @@ err:
7762 + return rc;
7763 + }
7764 +
7765 +-static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
7766 +- struct ib_udata *udata)
7767 +-{
7768 +- struct qedr_ucontext *ctx =
7769 +- rdma_udata_to_drv_context(udata, struct qedr_ucontext,
7770 +- ibucontext);
7771 +- int rc;
7772 +-
7773 +- if (qp->qp_type != IB_QPT_GSI) {
7774 +- rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
7775 +- if (rc)
7776 +- return rc;
7777 +- }
7778 +-
7779 +- if (qp->create_type == QEDR_QP_CREATE_USER)
7780 +- qedr_cleanup_user(dev, ctx, qp);
7781 +- else
7782 +- qedr_cleanup_kernel(dev, qp);
7783 +-
7784 +- return 0;
7785 +-}
7786 +-
7787 + int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
7788 + {
7789 + struct qedr_qp *qp = get_qedr_qp(ibqp);
7790 +@@ -2752,6 +2755,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
7791 +
7792 + if (rdma_protocol_iwarp(&dev->ibdev, 1))
7793 + qedr_iw_qp_rem_ref(&qp->ibqp);
7794 ++ else
7795 ++ kfree(qp);
7796 +
7797 + return 0;
7798 + }
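
The qedr changes above move qedr_free_qp_resources() ahead of qedr_create_qp() so the create path can reuse it: when xa_insert() fails after the QP already exists in firmware, the QP is now fully torn down rather than merely kfree()d, and each goto label releases exactly what was acquired before the failure. The same reverse-order unwind pattern in skeletal form, with invented demo_* helpers:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_qp { int id; };
static int demo_hw_create(struct demo_qp *qp);
static void demo_hw_destroy(struct demo_qp *qp);
static int demo_register(struct demo_qp *qp);

/* Sketch: each label undoes everything acquired before its goto. */
static struct demo_qp *demo_create_qp(void)
{
	struct demo_qp *qp;
	int rc;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	rc = demo_hw_create(qp);		/* firmware-side object */
	if (rc)
		goto out_free_qp;

	rc = demo_register(qp);			/* e.g. an xarray insert */
	if (rc)
		goto out_free_resources;

	return qp;

out_free_resources:
	demo_hw_destroy(qp);			/* undo the firmware object */
out_free_qp:
	kfree(qp);
	return ERR_PTR(rc);
}
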
7799 +diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
7800 +index 5e02387e068d1..e0db3bc1653e2 100644
7801 +--- a/drivers/infiniband/hw/qedr/verbs.h
7802 ++++ b/drivers/infiniband/hw/qedr/verbs.h
7803 +@@ -52,7 +52,7 @@ void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
7804 + int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
7805 + struct ib_udata *udata);
7806 + int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
7807 +-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
7808 ++int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
7809 + int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
7810 + struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
7811 + struct ib_udata *);
7812 +diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
7813 +index b8a77ce115908..586ff16be1bb3 100644
7814 +--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
7815 ++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
7816 +@@ -596,9 +596,9 @@ int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
7817 + return 0;
7818 + }
7819 +
7820 +-void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7821 ++int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7822 + {
7823 +- return;
7824 ++ return 0;
7825 + }
7826 +
7827 + struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
7828 +diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
7829 +index 2aedf78c13cf2..f13b08c59b9a3 100644
7830 +--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
7831 ++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
7832 +@@ -60,7 +60,7 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
7833 + int attr_mask, struct ib_udata *udata);
7834 + int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
7835 + struct ib_udata *udata);
7836 +-void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
7837 ++int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
7838 + struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
7839 + u64 virt_addr, int access_flags,
7840 + struct ib_udata *udata);
7841 +diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
7842 +index 4f6cc0de7ef95..6d3e6389e47da 100644
7843 +--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
7844 ++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
7845 +@@ -235,7 +235,7 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
7846 + * @cq: the completion queue to destroy.
7847 + * @udata: user data or null for kernel object
7848 + */
7849 +-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7850 ++int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7851 + {
7852 + struct pvrdma_cq *vcq = to_vcq(cq);
7853 + union pvrdma_cmd_req req;
7854 +@@ -261,6 +261,7 @@ void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
7855 +
7856 + pvrdma_free_cq(dev, vcq);
7857 + atomic_dec(&dev->num_cqs);
7858 ++ return 0;
7859 + }
7860 +
7861 + static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
7862 +diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
7863 +index 267702226f108..af36e9f767eed 100644
7864 +--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
7865 ++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
7866 +@@ -411,7 +411,7 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
7867 + int sg_nents, unsigned int *sg_offset);
7868 + int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
7869 + struct ib_udata *udata);
7870 +-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
7871 ++int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
7872 + int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
7873 + int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
7874 + int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
7875 +diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
7876 +index 04d2e72017fed..19248be140933 100644
7877 +--- a/drivers/infiniband/sw/rdmavt/cq.c
7878 ++++ b/drivers/infiniband/sw/rdmavt/cq.c
7879 +@@ -315,7 +315,7 @@ bail_wc:
7880 + *
7881 + * Called by ib_destroy_cq() in the generic verbs code.
7882 + */
7883 +-void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
7884 ++int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
7885 + {
7886 + struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
7887 + struct rvt_dev_info *rdi = cq->rdi;
7888 +@@ -328,6 +328,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
7889 + kref_put(&cq->ip->ref, rvt_release_mmap_info);
7890 + else
7891 + vfree(cq->kqueue);
7892 ++ return 0;
7893 + }
7894 +
7895 + /**
7896 +diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
7897 +index 5e26a2eb19a4c..feb01e7ee0044 100644
7898 +--- a/drivers/infiniband/sw/rdmavt/cq.h
7899 ++++ b/drivers/infiniband/sw/rdmavt/cq.h
7900 +@@ -53,7 +53,7 @@
7901 +
7902 + int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
7903 + struct ib_udata *udata);
7904 +-void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
7905 ++int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
7906 + int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
7907 + int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
7908 + int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
7909 +diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
7910 +index f904bb34477ae..2d534c450f3c8 100644
7911 +--- a/drivers/infiniband/sw/rdmavt/vt.c
7912 ++++ b/drivers/infiniband/sw/rdmavt/vt.c
7913 +@@ -95,9 +95,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
7914 + if (!rdi)
7915 + return rdi;
7916 +
7917 +- rdi->ports = kcalloc(nports,
7918 +- sizeof(struct rvt_ibport **),
7919 +- GFP_KERNEL);
7920 ++ rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
7921 + if (!rdi->ports)
7922 + ib_dealloc_device(&rdi->ibdev);
7923 +
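
The rdmavt fix above corrects a classic sizing slip: rdi->ports is an array of struct rvt_ibport * pointers, so each element is a single pointer, not the pointer-to-pointer that sizeof(struct rvt_ibport **) suggested (both happen to be pointer-sized, which is why it never misbehaved). The sizeof(*ptr) idiom ties the element size to the declaration so it can never drift; in isolation, with a stand-in type:

#include <linux/slab.h>

struct demo_port;	/* stand-in for struct rvt_ibport */

static struct demo_port **demo_alloc_ports(int nports)
{
	/* sizeof(*ports) is the element type, one pointer, and stays
	 * in sync with the declaration automatically */
	struct demo_port **ports = kcalloc(nports, sizeof(*ports),
					   GFP_KERNEL);

	return ports;
}
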
7924 +diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
7925 +index 46e111c218fd4..9bfb98056fc2a 100644
7926 +--- a/drivers/infiniband/sw/rxe/rxe_recv.c
7927 ++++ b/drivers/infiniband/sw/rxe/rxe_recv.c
7928 +@@ -281,6 +281,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
7929 + struct rxe_mc_elem *mce;
7930 + struct rxe_qp *qp;
7931 + union ib_gid dgid;
7932 ++ struct sk_buff *per_qp_skb;
7933 ++ struct rxe_pkt_info *per_qp_pkt;
7934 + int err;
7935 +
7936 + if (skb->protocol == htons(ETH_P_IP))
7937 +@@ -309,21 +311,29 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
7938 + if (err)
7939 + continue;
7940 +
7941 +- /* if *not* the last qp in the list
7942 +- * increase the users of the skb then post to the next qp
7943 ++ /* for all but the last qp create a new clone of the
7944 ++ * skb and pass to the qp.
7945 + */
7946 + if (mce->qp_list.next != &mcg->qp_list)
7947 +- skb_get(skb);
7948 ++ per_qp_skb = skb_clone(skb, GFP_ATOMIC);
7949 ++ else
7950 ++ per_qp_skb = skb;
7951 ++
7952 ++ if (unlikely(!per_qp_skb))
7953 ++ continue;
7954 +
7955 +- pkt->qp = qp;
7956 ++ per_qp_pkt = SKB_TO_PKT(per_qp_skb);
7957 ++ per_qp_pkt->qp = qp;
7958 + rxe_add_ref(qp);
7959 +- rxe_rcv_pkt(pkt, skb);
7960 ++ rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
7961 + }
7962 +
7963 + spin_unlock_bh(&mcg->mcg_lock);
7964 +
7965 + rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
7966 +
7967 ++ return;
7968 ++
7969 + err1:
7970 + kfree_skb(skb);
7971 + }
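
In the rxe multicast receive path above, the old code shared one skb across every destination QP via skb_get(); since each receiver may consume or modify the packet, every QP except the last now gets its own skb_clone(), and a failed clone simply skips that QP. The fan-out shape in isolation, with a placeholder deliver() callback:

#include <linux/skbuff.h>

/* Clone for every consumer but the last, which takes the original. */
static void demo_fanout(struct sk_buff *skb, bool last,
			void (*deliver)(struct sk_buff *))
{
	struct sk_buff *s = last ? skb : skb_clone(skb, GFP_ATOMIC);

	if (unlikely(!s))
		return;		/* out of memory: drop for this consumer */
	deliver(s);		/* consumer owns s from here on */
}
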
7972 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
7973 +index 00ba6fb1e6763..452748b3854b5 100644
7974 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
7975 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
7976 +@@ -816,13 +816,14 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
7977 + return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
7978 + }
7979 +
7980 +-static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
7981 ++static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
7982 + {
7983 + struct rxe_cq *cq = to_rcq(ibcq);
7984 +
7985 + rxe_cq_disable(cq);
7986 +
7987 + rxe_drop_ref(cq);
7988 ++ return 0;
7989 + }
7990 +
7991 + static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
7992 +diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
7993 +index 987e2ba05dbc0..7e657f90ca4f4 100644
7994 +--- a/drivers/infiniband/sw/siw/siw_verbs.c
7995 ++++ b/drivers/infiniband/sw/siw/siw_verbs.c
7996 +@@ -1064,7 +1064,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
7997 + return rv > 0 ? 0 : rv;
7998 + }
7999 +
8000 +-void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
8001 ++int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
8002 + {
8003 + struct siw_cq *cq = to_siw_cq(base_cq);
8004 + struct siw_device *sdev = to_siw_dev(base_cq->device);
8005 +@@ -1082,6 +1082,7 @@ void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
8006 + atomic_dec(&sdev->num_cq);
8007 +
8008 + vfree(cq->queue);
8009 ++ return 0;
8010 + }
8011 +
8012 + /*
8013 +diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
8014 +index 1a731989fad60..b0b7488869104 100644
8015 +--- a/drivers/infiniband/sw/siw/siw_verbs.h
8016 ++++ b/drivers/infiniband/sw/siw/siw_verbs.h
8017 +@@ -63,7 +63,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
8018 + const struct ib_send_wr **bad_wr);
8019 + int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
8020 + const struct ib_recv_wr **bad_wr);
8021 +-void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
8022 ++int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
8023 + int siw_poll_cq(struct ib_cq *base_cq, int num_entries, struct ib_wc *wc);
8024 + int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags);
8025 + struct ib_mr *siw_reg_user_mr(struct ib_pd *base_pd, u64 start, u64 len,
8026 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
8027 +index ef60e8e4ae67b..7c0bb2642d232 100644
8028 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
8029 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
8030 +@@ -2470,6 +2470,8 @@ static struct net_device *ipoib_add_port(const char *format,
8031 + /* call event handler to ensure pkey in sync */
8032 + queue_work(ipoib_workqueue, &priv->flush_heavy);
8033 +
8034 ++ ndev->rtnl_link_ops = ipoib_get_link_ops();
8035 ++
8036 + result = register_netdev(ndev);
8037 + if (result) {
8038 + pr_warn("%s: couldn't register ipoib port %d; error %d\n",
8039 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
8040 +index 38c984d16996d..d5a90a66b45cf 100644
8041 +--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
8042 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
8043 +@@ -144,6 +144,16 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
8044 + return 0;
8045 + }
8046 +
8047 ++static void ipoib_del_child_link(struct net_device *dev, struct list_head *head)
8048 ++{
8049 ++ struct ipoib_dev_priv *priv = ipoib_priv(dev);
8050 ++
8051 ++ if (!priv->parent)
8052 ++ return;
8053 ++
8054 ++ unregister_netdevice_queue(dev, head);
8055 ++}
8056 ++
8057 + static size_t ipoib_get_size(const struct net_device *dev)
8058 + {
8059 + return nla_total_size(2) + /* IFLA_IPOIB_PKEY */
8060 +@@ -158,6 +168,7 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
8061 + .priv_size = sizeof(struct ipoib_dev_priv),
8062 + .setup = ipoib_setup_common,
8063 + .newlink = ipoib_new_child_link,
8064 ++ .dellink = ipoib_del_child_link,
8065 + .changelink = ipoib_changelink,
8066 + .get_size = ipoib_get_size,
8067 + .fill_info = ipoib_fill_info,
8068 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
8069 +index 30865605e0980..4c50a87ed7cc2 100644
8070 +--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
8071 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
8072 +@@ -195,6 +195,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
8073 + }
8074 + priv = ipoib_priv(ndev);
8075 +
8076 ++ ndev->rtnl_link_ops = ipoib_get_link_ops();
8077 ++
8078 + result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
8079 +
8080 + if (result && ndev->reg_state == NETREG_UNINITIALIZED)
8081 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
8082 +index 28f6414dfa3dc..d6f93601712e4 100644
8083 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
8084 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
8085 +@@ -16,6 +16,7 @@
8086 + #include "rtrs-srv.h"
8087 + #include "rtrs-log.h"
8088 + #include <rdma/ib_cm.h>
8089 ++#include <rdma/ib_verbs.h>
8090 +
8091 + MODULE_DESCRIPTION("RDMA Transport Server");
8092 + MODULE_LICENSE("GPL");
8093 +@@ -31,6 +32,7 @@ MODULE_LICENSE("GPL");
8094 + static struct rtrs_rdma_dev_pd dev_pd;
8095 + static mempool_t *chunk_pool;
8096 + struct class *rtrs_dev_class;
8097 ++static struct rtrs_srv_ib_ctx ib_ctx;
8098 +
8099 + static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
8100 + static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
8101 +@@ -2042,6 +2044,70 @@ static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
8102 + kfree(ctx);
8103 + }
8104 +
8105 ++static int rtrs_srv_add_one(struct ib_device *device)
8106 ++{
8107 ++ struct rtrs_srv_ctx *ctx;
8108 ++ int ret = 0;
8109 ++
8110 ++ mutex_lock(&ib_ctx.ib_dev_mutex);
8111 ++ if (ib_ctx.ib_dev_count)
8112 ++ goto out;
8113 ++
8114 ++ /*
8115 ++ * Since our CM IDs are NOT bound to any ib device we will create them
8116 ++ * only once
8117 ++ */
8118 ++ ctx = ib_ctx.srv_ctx;
8119 ++ ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
8120 ++ if (ret) {
8121 ++ /*
8122 ++ * We errored out here.
8123 ++ * According to the ib code, if we encounter an error here then the
8124 ++ * error code is ignored, and no more calls to our ops are made.
8125 ++ */
8126 ++ pr_err("Failed to initialize RDMA connection");
8127 ++ goto err_out;
8128 ++ }
8129 ++
8130 ++out:
8131 ++ /*
8132 ++ * Keep a track on the number of ib devices added
8133 ++ */
8134 ++ ib_ctx.ib_dev_count++;
8135 ++
8136 ++err_out:
8137 ++ mutex_unlock(&ib_ctx.ib_dev_mutex);
8138 ++ return ret;
8139 ++}
8140 ++
8141 ++static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
8142 ++{
8143 ++ struct rtrs_srv_ctx *ctx;
8144 ++
8145 ++ mutex_lock(&ib_ctx.ib_dev_mutex);
8146 ++ ib_ctx.ib_dev_count--;
8147 ++
8148 ++ if (ib_ctx.ib_dev_count)
8149 ++ goto out;
8150 ++
8151 ++ /*
8152 ++ * Since our CM IDs are NOT bound to any ib device we will remove them
8153 ++ * only once, when the last device is removed
8154 ++ */
8155 ++ ctx = ib_ctx.srv_ctx;
8156 ++ rdma_destroy_id(ctx->cm_id_ip);
8157 ++ rdma_destroy_id(ctx->cm_id_ib);
8158 ++
8159 ++out:
8160 ++ mutex_unlock(&ib_ctx.ib_dev_mutex);
8161 ++}
8162 ++
8163 ++static struct ib_client rtrs_srv_client = {
8164 ++ .name = "rtrs_server",
8165 ++ .add = rtrs_srv_add_one,
8166 ++ .remove = rtrs_srv_remove_one
8167 ++};
8168 ++
8169 + /**
8170 + * rtrs_srv_open() - open RTRS server context
8171 + * @ops: callback functions
8172 +@@ -2060,7 +2126,11 @@ struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
8173 + if (!ctx)
8174 + return ERR_PTR(-ENOMEM);
8175 +
8176 +- err = rtrs_srv_rdma_init(ctx, port);
8177 ++ mutex_init(&ib_ctx.ib_dev_mutex);
8178 ++ ib_ctx.srv_ctx = ctx;
8179 ++ ib_ctx.port = port;
8180 ++
8181 ++ err = ib_register_client(&rtrs_srv_client);
8182 + if (err) {
8183 + free_srv_ctx(ctx);
8184 + return ERR_PTR(err);
8185 +@@ -2099,8 +2169,8 @@ static void close_ctx(struct rtrs_srv_ctx *ctx)
8186 + */
8187 + void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
8188 + {
8189 +- rdma_destroy_id(ctx->cm_id_ip);
8190 +- rdma_destroy_id(ctx->cm_id_ib);
8191 ++ ib_unregister_client(&rtrs_srv_client);
8192 ++ mutex_destroy(&ib_ctx.ib_dev_mutex);
8193 + close_ctx(ctx);
8194 + free_srv_ctx(ctx);
8195 + }
8196 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
8197 +index dc95b0932f0df..08b0b8a6eebe6 100644
8198 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
8199 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
8200 +@@ -118,6 +118,13 @@ struct rtrs_srv_ctx {
8201 + struct list_head srv_list;
8202 + };
8203 +
8204 ++struct rtrs_srv_ib_ctx {
8205 ++ struct rtrs_srv_ctx *srv_ctx;
8206 ++ u16 port;
8207 ++ struct mutex ib_dev_mutex;
8208 ++ int ib_dev_count;
8209 ++};
8210 ++
8211 + extern struct class *rtrs_dev_class;
8212 +
8213 + void close_sess(struct rtrs_srv_sess *sess);
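
The rtrs-srv rework above stops creating the listening CM IDs eagerly in rtrs_srv_open() and instead registers an ib_client: the .add callback fires for each RDMA device, a mutex-protected ib_dev_count creates the listeners on the first device, and .remove destroys them with the last, so the server no longer depends on an HCA being present at load time. A bare-bones client with the same callback signatures as the backported code above (demo names only):

#include <rdma/ib_verbs.h>

static int demo_add_one(struct ib_device *device)
{
	/* runs for each existing or hot-plugged IB device */
	return 0;
}

static void demo_remove_one(struct ib_device *device, void *client_data)
{
	/* runs on device removal and on ib_unregister_client() */
}

static struct ib_client demo_client = {
	.name	= "demo",
	.add	= demo_add_one,
	.remove	= demo_remove_one,
};

/* pair ib_register_client(&demo_client) with ib_unregister_client() */
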
8214 +diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
8215 +index 7c70492d9d6b5..f831f01501d58 100644
8216 +--- a/drivers/input/keyboard/ep93xx_keypad.c
8217 ++++ b/drivers/input/keyboard/ep93xx_keypad.c
8218 +@@ -250,8 +250,8 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
8219 + }
8220 +
8221 + keypad->irq = platform_get_irq(pdev, 0);
8222 +- if (!keypad->irq) {
8223 +- err = -ENXIO;
8224 ++ if (keypad->irq < 0) {
8225 ++ err = keypad->irq;
8226 + goto failed_free;
8227 + }
8228 +
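
This ep93xx hunk is the first of a series below (omap4-keypad, twl4030_keypad, sun4i-ps2, mtk-cmdq) making the same correction: platform_get_irq() reports failure with a negative errno, not 0, so a `!irq` test both misses real failures and discards the errno, including -EPROBE_DEFER. Note that the twl4030 and cmdq hunks also widen the field holding the result from an unsigned type to int so the negative value survives. The canonical probe-time pattern:

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);	/* int, never unsigned */

	if (irq < 0)
		return irq;	/* keep -ENXIO, -EPROBE_DEFER, ... intact */

	/* ... request_irq(irq, ...) and the rest of probe ... */
	return 0;
}
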
8229 +diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
8230 +index 94c94d7f5155f..d6c924032aaa8 100644
8231 +--- a/drivers/input/keyboard/omap4-keypad.c
8232 ++++ b/drivers/input/keyboard/omap4-keypad.c
8233 +@@ -240,10 +240,8 @@ static int omap4_keypad_probe(struct platform_device *pdev)
8234 + }
8235 +
8236 + irq = platform_get_irq(pdev, 0);
8237 +- if (!irq) {
8238 +- dev_err(&pdev->dev, "no keyboard irq assigned\n");
8239 +- return -EINVAL;
8240 +- }
8241 ++ if (irq < 0)
8242 ++ return irq;
8243 +
8244 + keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL);
8245 + if (!keypad_data) {
8246 +diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
8247 +index af3a6824f1a4d..77e0743a3cf85 100644
8248 +--- a/drivers/input/keyboard/twl4030_keypad.c
8249 ++++ b/drivers/input/keyboard/twl4030_keypad.c
8250 +@@ -50,7 +50,7 @@ struct twl4030_keypad {
8251 + bool autorepeat;
8252 + unsigned int n_rows;
8253 + unsigned int n_cols;
8254 +- unsigned int irq;
8255 ++ int irq;
8256 +
8257 + struct device *dbg_dev;
8258 + struct input_dev *input;
8259 +@@ -376,10 +376,8 @@ static int twl4030_kp_probe(struct platform_device *pdev)
8260 + }
8261 +
8262 + kp->irq = platform_get_irq(pdev, 0);
8263 +- if (!kp->irq) {
8264 +- dev_err(&pdev->dev, "no keyboard irq assigned\n");
8265 +- return -EINVAL;
8266 +- }
8267 ++ if (kp->irq < 0)
8268 ++ return kp->irq;
8269 +
8270 + error = matrix_keypad_build_keymap(keymap_data, NULL,
8271 + TWL4030_MAX_ROWS,
8272 +diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
8273 +index a681a2c04e399..f15ed3dcdb9b2 100644
8274 +--- a/drivers/input/serio/sun4i-ps2.c
8275 ++++ b/drivers/input/serio/sun4i-ps2.c
8276 +@@ -211,7 +211,6 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
8277 + struct sun4i_ps2data *drvdata;
8278 + struct serio *serio;
8279 + struct device *dev = &pdev->dev;
8280 +- unsigned int irq;
8281 + int error;
8282 +
8283 + drvdata = kzalloc(sizeof(struct sun4i_ps2data), GFP_KERNEL);
8284 +@@ -264,14 +263,12 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
8285 + writel(0, drvdata->reg_base + PS2_REG_GCTL);
8286 +
8287 + /* Get IRQ for the device */
8288 +- irq = platform_get_irq(pdev, 0);
8289 +- if (!irq) {
8290 +- dev_err(dev, "no IRQ found\n");
8291 +- error = -ENXIO;
8292 ++ drvdata->irq = platform_get_irq(pdev, 0);
8293 ++ if (drvdata->irq < 0) {
8294 ++ error = drvdata->irq;
8295 + goto err_disable_clk;
8296 + }
8297 +
8298 +- drvdata->irq = irq;
8299 + drvdata->serio = serio;
8300 + drvdata->dev = dev;
8301 +
8302 +diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
8303 +index 5477a5718202a..db7f27d4734a9 100644
8304 +--- a/drivers/input/touchscreen/elants_i2c.c
8305 ++++ b/drivers/input/touchscreen/elants_i2c.c
8306 +@@ -90,7 +90,7 @@
8307 + /* FW read command, 0x53 0x?? 0x0, 0x01 */
8308 + #define E_ELAN_INFO_FW_VER 0x00
8309 + #define E_ELAN_INFO_BC_VER 0x10
8310 +-#define E_ELAN_INFO_REK 0xE0
8311 ++#define E_ELAN_INFO_REK 0xD0
8312 + #define E_ELAN_INFO_TEST_VER 0xE0
8313 + #define E_ELAN_INFO_FW_ID 0xF0
8314 + #define E_INFO_OSR 0xD6
8315 +diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
8316 +index 9ed258854349b..5e6ba5c4eca2a 100644
8317 +--- a/drivers/input/touchscreen/imx6ul_tsc.c
8318 ++++ b/drivers/input/touchscreen/imx6ul_tsc.c
8319 +@@ -530,20 +530,25 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
8320 +
8321 + mutex_lock(&input_dev->mutex);
8322 +
8323 +- if (input_dev->users) {
8324 +- retval = clk_prepare_enable(tsc->adc_clk);
8325 +- if (retval)
8326 +- goto out;
8327 +-
8328 +- retval = clk_prepare_enable(tsc->tsc_clk);
8329 +- if (retval) {
8330 +- clk_disable_unprepare(tsc->adc_clk);
8331 +- goto out;
8332 +- }
8333 ++ if (!input_dev->users)
8334 ++ goto out;
8335 +
8336 +- retval = imx6ul_tsc_init(tsc);
8337 ++ retval = clk_prepare_enable(tsc->adc_clk);
8338 ++ if (retval)
8339 ++ goto out;
8340 ++
8341 ++ retval = clk_prepare_enable(tsc->tsc_clk);
8342 ++ if (retval) {
8343 ++ clk_disable_unprepare(tsc->adc_clk);
8344 ++ goto out;
8345 + }
8346 +
8347 ++ retval = imx6ul_tsc_init(tsc);
8348 ++ if (retval) {
8349 ++ clk_disable_unprepare(tsc->tsc_clk);
8350 ++ clk_disable_unprepare(tsc->adc_clk);
8351 ++ goto out;
8352 ++ }
8353 + out:
8354 + mutex_unlock(&input_dev->mutex);
8355 + return retval;
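
The imx6ul_tsc resume rework above does two things: it flattens the nesting with an early `goto out`, and it finally unwinds both clocks when imx6ul_tsc_init() fails, where the old code left them running. Reverse-order rollback for chained clk_prepare_enable() calls, sketched with a placeholder init step:

#include <linux/clk.h>

static int demo_hw_init(void);	/* placeholder for controller re-init */

static int demo_resume(struct clk *adc_clk, struct clk *tsc_clk)
{
	int ret;

	ret = clk_prepare_enable(adc_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(tsc_clk);
	if (ret)
		goto err_adc;

	ret = demo_hw_init();
	if (ret)
		goto err_tsc;

	return 0;

err_tsc:
	clk_disable_unprepare(tsc_clk);
err_adc:
	clk_disable_unprepare(adc_clk);
	return ret;
}
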
8356 +diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
8357 +index b54cc64e4ea64..389356332c54a 100644
8358 +--- a/drivers/input/touchscreen/stmfts.c
8359 ++++ b/drivers/input/touchscreen/stmfts.c
8360 +@@ -479,7 +479,7 @@ static ssize_t stmfts_sysfs_hover_enable_write(struct device *dev,
8361 +
8362 + mutex_lock(&sdata->mutex);
8363 +
8364 +- if (value & sdata->hover_enabled)
8365 ++ if (value && sdata->hover_enabled)
8366 + goto out;
8367 +
8368 + if (sdata->running)
8369 +diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
8370 +index d176df569af8f..78d813bd0dcc8 100644
8371 +--- a/drivers/iommu/qcom_iommu.c
8372 ++++ b/drivers/iommu/qcom_iommu.c
8373 +@@ -578,8 +578,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
8374 + * index into qcom_iommu->ctxs:
8375 + */
8376 + if (WARN_ON(asid < 1) ||
8377 +- WARN_ON(asid > qcom_iommu->num_ctxs))
8378 ++ WARN_ON(asid > qcom_iommu->num_ctxs)) {
8379 ++ put_device(&iommu_pdev->dev);
8380 + return -EINVAL;
8381 ++ }
8382 +
8383 + if (!dev_iommu_priv_get(dev)) {
8384 + dev_iommu_priv_set(dev, qcom_iommu);
8385 +@@ -588,8 +590,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
8386 + * multiple different iommu devices. Multiple context
8387 + * banks are ok, but multiple devices are not:
8388 + */
8389 +- if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
8390 ++ if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
8391 ++ put_device(&iommu_pdev->dev);
8392 + return -EINVAL;
8393 ++ }
8394 + }
8395 +
8396 + return iommu_fwspec_add_ids(dev, &asid, 1);
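
Both qcom_iommu hunks above close the same leak: the xlate path holds a reference on the IOMMU's platform device, taken earlier in the function via of_find_device_by_node(), and the early -EINVAL returns skipped the balancing put_device(). The rule is mechanical, every successful get needs a put on every exit path; sketched with a hypothetical demo_ok() validation step:

#include <linux/of_platform.h>

static bool demo_ok(struct platform_device *pdev);	/* placeholder */

static int demo_xlate(struct device_node *np)
{
	struct platform_device *pdev = of_find_device_by_node(np); /* +1 ref */

	if (!pdev)
		return -ENODEV;

	if (!demo_ok(pdev)) {
		put_device(&pdev->dev);	/* balance the ref before bailing */
		return -EINVAL;
	}

	/* ... use pdev; later teardown does the final put_device() ... */
	return 0;
}
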
8397 +diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
8398 +index db38a68abb6c0..a6f4ca438bca1 100644
8399 +--- a/drivers/lightnvm/core.c
8400 ++++ b/drivers/lightnvm/core.c
8401 +@@ -1315,8 +1315,9 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
8402 + strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
8403 + i++;
8404 +
8405 +- if (i > 31) {
8406 +- pr_err("max 31 devices can be reported.\n");
8407 ++ if (i >= ARRAY_SIZE(devices->info)) {
8408 ++ pr_err("max %zd devices can be reported.\n",
8409 ++ ARRAY_SIZE(devices->info));
8410 + break;
8411 + }
8412 + }
8413 +diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
8414 +index 0b821a5b2db84..3e7d4b20ab34f 100644
8415 +--- a/drivers/mailbox/mailbox.c
8416 ++++ b/drivers/mailbox/mailbox.c
8417 +@@ -82,9 +82,12 @@ static void msg_submit(struct mbox_chan *chan)
8418 + exit:
8419 + spin_unlock_irqrestore(&chan->lock, flags);
8420 +
8421 +- if (!err && (chan->txdone_method & TXDONE_BY_POLL))
8422 +- /* kick start the timer immediately to avoid delays */
8423 +- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
8424 ++ /* kick start the timer immediately to avoid delays */
8425 ++ if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
8426 ++ /* but only if not already active */
8427 ++ if (!hrtimer_active(&chan->mbox->poll_hrt))
8428 ++ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
8429 ++ }
8430 + }
8431 +
8432 + static void tx_tick(struct mbox_chan *chan, int r)
8433 +@@ -122,11 +125,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
8434 + struct mbox_chan *chan = &mbox->chans[i];
8435 +
8436 + if (chan->active_req && chan->cl) {
8437 ++ resched = true;
8438 + txdone = chan->mbox->ops->last_tx_done(chan);
8439 + if (txdone)
8440 + tx_tick(chan, 0);
8441 +- else
8442 +- resched = true;
8443 + }
8444 + }
8445 +
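
Two cooperating mailbox fixes above: msg_submit() now arms the polling hrtimer only when it is not already queued or running, avoiding a restart race with the executing timer callback; and to compensate, txdone_hrtimer() re-arms whenever any channel still has an active request rather than only when a poll came back not-done, so a message submitted while the callback is running is still picked up. The guarded start on its own:

#include <linux/hrtimer.h>

/* re-arm the poll timer only if it is not already pending or running */
static void demo_kick_poll(struct hrtimer *poll_hrt)
{
	if (!hrtimer_active(poll_hrt))
		hrtimer_start(poll_hrt, 0, HRTIMER_MODE_REL);
}
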
8446 +diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
8447 +index b24822ad8409c..9963bb9cd74fa 100644
8448 +--- a/drivers/mailbox/mtk-cmdq-mailbox.c
8449 ++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
8450 +@@ -69,7 +69,7 @@ struct cmdq_task {
8451 + struct cmdq {
8452 + struct mbox_controller mbox;
8453 + void __iomem *base;
8454 +- u32 irq;
8455 ++ int irq;
8456 + u32 thread_nr;
8457 + u32 irq_mask;
8458 + struct cmdq_thread *thread;
8459 +@@ -466,10 +466,8 @@ static int cmdq_probe(struct platform_device *pdev)
8460 + }
8461 +
8462 + cmdq->irq = platform_get_irq(pdev, 0);
8463 +- if (!cmdq->irq) {
8464 +- dev_err(dev, "failed to get irq\n");
8465 +- return -EINVAL;
8466 +- }
8467 ++ if (cmdq->irq < 0)
8468 ++ return cmdq->irq;
8469 +
8470 + cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev);
8471 + cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
8472 +diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
8473 +index 95a5f3757fa30..19b2601be3c5e 100644
8474 +--- a/drivers/md/md-bitmap.c
8475 ++++ b/drivers/md/md-bitmap.c
8476 +@@ -1949,6 +1949,7 @@ out:
8477 + }
8478 + EXPORT_SYMBOL_GPL(md_bitmap_load);
8479 +
8480 ++/* caller need to free returned bitmap with md_bitmap_free() */
8481 + struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
8482 + {
8483 + int rv = 0;
8484 +@@ -2012,6 +2013,7 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
8485 + md_bitmap_unplug(mddev->bitmap);
8486 + *low = lo;
8487 + *high = hi;
8488 ++ md_bitmap_free(bitmap);
8489 +
8490 + return rv;
8491 + }
8492 +@@ -2615,4 +2617,3 @@ struct attribute_group md_bitmap_group = {
8493 + .name = "bitmap",
8494 + .attrs = md_bitmap_attrs,
8495 + };
8496 +-
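
The md-bitmap hunks above fix a leak by pinning down ownership: get_bitmap_from_slot() returns a temporary bitmap that the caller must release with md_bitmap_free(), which md_bitmap_copy_from_slot() (and the md-cluster resize path in the next hunk) previously never did. The comment added above the function documents the contract; roughly, the caller side reduces to this sketch, with locking and details elided:

/* assumes the declarations from drivers/md/md-bitmap.h */
static int demo_copy_from_slot(struct mddev *mddev, int slot)
{
	struct bitmap *bitmap = get_bitmap_from_slot(mddev, slot);

	if (IS_ERR(bitmap))
		return PTR_ERR(bitmap);

	/* ... read counters out of the temporary bitmap ... */

	md_bitmap_free(bitmap);		/* caller owns it, caller frees it */
	return 0;
}
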
8497 +diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
8498 +index d50737ec40394..afbbc552c3275 100644
8499 +--- a/drivers/md/md-cluster.c
8500 ++++ b/drivers/md/md-cluster.c
8501 +@@ -1166,6 +1166,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
8502 + * can't resize bitmap
8503 + */
8504 + goto out;
8505 ++ md_bitmap_free(bitmap);
8506 + }
8507 +
8508 + return 0;
8509 +diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c
8510 +index 3f1ca40b9b987..8a8585261bb80 100644
8511 +--- a/drivers/media/firewire/firedtv-fw.c
8512 ++++ b/drivers/media/firewire/firedtv-fw.c
8513 +@@ -272,8 +272,10 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
8514 +
8515 + name_len = fw_csr_string(unit->directory, CSR_MODEL,
8516 + name, sizeof(name));
8517 +- if (name_len < 0)
8518 +- return name_len;
8519 ++ if (name_len < 0) {
8520 ++ err = name_len;
8521 ++ goto fail_free;
8522 ++ }
8523 + for (i = ARRAY_SIZE(model_names); --i; )
8524 + if (strlen(model_names[i]) <= name_len &&
8525 + strncmp(name, model_names[i], name_len) == 0)
8526 +diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
8527 +index de295114ca482..21666d705e372 100644
8528 +--- a/drivers/media/i2c/m5mols/m5mols_core.c
8529 ++++ b/drivers/media/i2c/m5mols/m5mols_core.c
8530 +@@ -764,7 +764,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
8531 +
8532 + ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
8533 + if (ret) {
8534 +- info->set_power(&client->dev, 0);
8535 ++ if (info->set_power)
8536 ++ info->set_power(&client->dev, 0);
8537 + return ret;
8538 + }
8539 +
8540 +diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
8541 +index 2fe4a7ac05929..3a4268aa5f023 100644
8542 +--- a/drivers/media/i2c/ov5640.c
8543 ++++ b/drivers/media/i2c/ov5640.c
8544 +@@ -34,6 +34,8 @@
8545 + #define OV5640_REG_SYS_RESET02 0x3002
8546 + #define OV5640_REG_SYS_CLOCK_ENABLE02 0x3006
8547 + #define OV5640_REG_SYS_CTRL0 0x3008
8548 ++#define OV5640_REG_SYS_CTRL0_SW_PWDN 0x42
8549 ++#define OV5640_REG_SYS_CTRL0_SW_PWUP 0x02
8550 + #define OV5640_REG_CHIP_ID 0x300a
8551 + #define OV5640_REG_IO_MIPI_CTRL00 0x300e
8552 + #define OV5640_REG_PAD_OUTPUT_ENABLE01 0x3017
8553 +@@ -274,8 +276,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
8554 + /* YUV422 UYVY VGA@30fps */
8555 + static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
8556 + {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
8557 +- {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0},
8558 +- {0x3630, 0x36, 0, 0},
8559 ++ {0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0},
8560 + {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
8561 + {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
8562 + {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
8563 +@@ -751,7 +752,7 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
8564 + * +->| PLL Root Div | - reg 0x3037, bit 4
8565 + * +-+------------+
8566 + * | +---------+
8567 +- * +->| Bit Div | - reg 0x3035, bits 0-3
8568 ++ * +->| Bit Div | - reg 0x3034, bits 0-3
8569 + * +-+-------+
8570 + * | +-------------+
8571 + * +->| SCLK Div | - reg 0x3108, bits 0-1
8572 +@@ -1120,6 +1121,12 @@ static int ov5640_load_regs(struct ov5640_dev *sensor,
8573 + val = regs->val;
8574 + mask = regs->mask;
8575 +
8576 ++ /* remain in power down mode for DVP */
8577 ++ if (regs->reg_addr == OV5640_REG_SYS_CTRL0 &&
8578 ++ val == OV5640_REG_SYS_CTRL0_SW_PWUP &&
8579 ++ sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
8580 ++ continue;
8581 ++
8582 + if (mask)
8583 + ret = ov5640_mod_reg(sensor, reg_addr, mask, val);
8584 + else
8585 +@@ -1275,31 +1282,9 @@ static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on)
8586 + if (ret)
8587 + return ret;
8588 +
8589 +- /*
8590 +- * enable VSYNC/HREF/PCLK DVP control lines
8591 +- * & D[9:6] DVP data lines
8592 +- *
8593 +- * PAD OUTPUT ENABLE 01
8594 +- * - 6: VSYNC output enable
8595 +- * - 5: HREF output enable
8596 +- * - 4: PCLK output enable
8597 +- * - [3:0]: D[9:6] output enable
8598 +- */
8599 +- ret = ov5640_write_reg(sensor,
8600 +- OV5640_REG_PAD_OUTPUT_ENABLE01,
8601 +- on ? 0x7f : 0);
8602 +- if (ret)
8603 +- return ret;
8604 +-
8605 +- /*
8606 +- * enable D[5:0] DVP data lines
8607 +- *
8608 +- * PAD OUTPUT ENABLE 02
8609 +- * - [7:2]: D[5:0] output enable
8610 +- */
8611 +- return ov5640_write_reg(sensor,
8612 +- OV5640_REG_PAD_OUTPUT_ENABLE02,
8613 +- on ? 0xfc : 0);
8614 ++ return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ?
8615 ++ OV5640_REG_SYS_CTRL0_SW_PWUP :
8616 ++ OV5640_REG_SYS_CTRL0_SW_PWDN);
8617 + }
8618 +
8619 + static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on)
8620 +@@ -2001,6 +1986,95 @@ static void ov5640_set_power_off(struct ov5640_dev *sensor)
8621 + clk_disable_unprepare(sensor->xclk);
8622 + }
8623 +
8624 ++static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
8625 ++{
8626 ++ int ret;
8627 ++
8628 ++ if (!on) {
8629 ++ /* Reset MIPI bus settings to their default values. */
8630 ++ ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58);
8631 ++ ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x04);
8632 ++ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x00);
8633 ++ return 0;
8634 ++ }
8635 ++
8636 ++ /*
8637 ++ * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
8638 ++ *
8639 ++ * 0x300e = 0x40
8640 ++ * [7:5] = 010 : 2 data lanes mode (see FIXME note in
8641 ++ * "ov5640_set_stream_mipi()")
8642 ++ * [4] = 0 : Power up MIPI HS Tx
8643 ++ * [3] = 0 : Power up MIPI LS Rx
8644 ++ * [2] = 0 : MIPI interface disabled
8645 ++ */
8646 ++ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40);
8647 ++ if (ret)
8648 ++ return ret;
8649 ++
8650 ++ /*
8651 ++ * Gate clock and set LP11 in 'no packets mode' (idle)
8652 ++ *
8653 ++ * 0x4800 = 0x24
8654 ++ * [5] = 1 : Gate clock when 'no packets'
8655 ++ * [2] = 1 : MIPI bus in LP11 when 'no packets'
8656 ++ */
8657 ++ ret = ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x24);
8658 ++ if (ret)
8659 ++ return ret;
8660 ++
8661 ++ /*
8662 ++ * Set data lanes and clock in LP11 when 'sleeping'
8663 ++ *
8664 ++ * 0x3019 = 0x70
8665 ++ * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
8666 ++ * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
8667 ++ * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
8668 ++ */
8669 ++ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x70);
8670 ++ if (ret)
8671 ++ return ret;
8672 ++
8673 ++ /* Give lanes some time to coax into LP11 state. */
8674 ++ usleep_range(500, 1000);
8675 ++
8676 ++ return 0;
8677 ++}
8678 ++
8679 ++static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on)
8680 ++{
8681 ++ int ret;
8682 ++
8683 ++ if (!on) {
8684 ++ /* Reset settings to their default values. */
8685 ++ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x00);
8686 ++ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0x00);
8687 ++ return 0;
8688 ++ }
8689 ++
8690 ++ /*
8691 ++ * enable VSYNC/HREF/PCLK DVP control lines
8692 ++ * & D[9:6] DVP data lines
8693 ++ *
8694 ++ * PAD OUTPUT ENABLE 01
8695 ++ * - 6: VSYNC output enable
8696 ++ * - 5: HREF output enable
8697 ++ * - 4: PCLK output enable
8698 ++ * - [3:0]: D[9:6] output enable
8699 ++ */
8700 ++ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x7f);
8701 ++ if (ret)
8702 ++ return ret;
8703 ++
8704 ++ /*
8705 ++ * enable D[5:0] DVP data lines
8706 ++ *
8707 ++ * PAD OUTPUT ENABLE 02
8708 ++ * - [7:2]: D[5:0] output enable
8709 ++ */
8710 ++ return ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0xfc);
8711 ++}
8712 ++
8713 + static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
8714 + {
8715 + int ret = 0;
8716 +@@ -2013,67 +2087,17 @@ static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
8717 + ret = ov5640_restore_mode(sensor);
8718 + if (ret)
8719 + goto power_off;
8720 ++ }
8721 +
8722 +- /* We're done here for DVP bus, while CSI-2 needs setup. */
8723 +- if (sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
8724 +- return 0;
8725 +-
8726 +- /*
8727 +- * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
8728 +- *
8729 +- * 0x300e = 0x40
8730 +- * [7:5] = 010 : 2 data lanes mode (see FIXME note in
8731 +- * "ov5640_set_stream_mipi()")
8732 +- * [4] = 0 : Power up MIPI HS Tx
8733 +- * [3] = 0 : Power up MIPI LS Rx
8734 +- * [2] = 0 : MIPI interface disabled
8735 +- */
8736 +- ret = ov5640_write_reg(sensor,
8737 +- OV5640_REG_IO_MIPI_CTRL00, 0x40);
8738 +- if (ret)
8739 +- goto power_off;
8740 +-
8741 +- /*
8742 +- * Gate clock and set LP11 in 'no packets mode' (idle)
8743 +- *
8744 +- * 0x4800 = 0x24
8745 +- * [5] = 1 : Gate clock when 'no packets'
8746 +- * [2] = 1 : MIPI bus in LP11 when 'no packets'
8747 +- */
8748 +- ret = ov5640_write_reg(sensor,
8749 +- OV5640_REG_MIPI_CTRL00, 0x24);
8750 +- if (ret)
8751 +- goto power_off;
8752 +-
8753 +- /*
8754 +- * Set data lanes and clock in LP11 when 'sleeping'
8755 +- *
8756 +- * 0x3019 = 0x70
8757 +- * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
8758 +- * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
8759 +- * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
8760 +- */
8761 +- ret = ov5640_write_reg(sensor,
8762 +- OV5640_REG_PAD_OUTPUT00, 0x70);
8763 +- if (ret)
8764 +- goto power_off;
8765 +-
8766 +- /* Give lanes some time to coax into LP11 state. */
8767 +- usleep_range(500, 1000);
8768 +-
8769 +- } else {
8770 +- if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
8771 +- /* Reset MIPI bus settings to their default values. */
8772 +- ov5640_write_reg(sensor,
8773 +- OV5640_REG_IO_MIPI_CTRL00, 0x58);
8774 +- ov5640_write_reg(sensor,
8775 +- OV5640_REG_MIPI_CTRL00, 0x04);
8776 +- ov5640_write_reg(sensor,
8777 +- OV5640_REG_PAD_OUTPUT00, 0x00);
8778 +- }
8779 ++ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
8780 ++ ret = ov5640_set_power_mipi(sensor, on);
8781 ++ else
8782 ++ ret = ov5640_set_power_dvp(sensor, on);
8783 ++ if (ret)
8784 ++ goto power_off;
8785 +
8786 ++ if (!on)
8787 + ov5640_set_power_off(sensor);
8788 +- }
8789 +
8790 + return 0;
8791 +
8792 +diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
8793 +index dbbab75f135ec..cff99cf61ed4d 100644
8794 +--- a/drivers/media/i2c/tc358743.c
8795 ++++ b/drivers/media/i2c/tc358743.c
8796 +@@ -919,8 +919,8 @@ static const struct cec_adap_ops tc358743_cec_adap_ops = {
8797 + .adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable,
8798 + };
8799 +
8800 +-static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
8801 +- bool *handled)
8802 ++static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus,
8803 ++ bool *handled)
8804 + {
8805 + struct tc358743_state *state = to_state(sd);
8806 + unsigned int cec_rxint, cec_txint;
8807 +@@ -953,7 +953,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
8808 + cec_transmit_attempt_done(state->cec_adap,
8809 + CEC_TX_STATUS_ERROR);
8810 + }
8811 +- *handled = true;
8812 ++ if (handled)
8813 ++ *handled = true;
8814 + }
8815 + if ((intstatus & MASK_CEC_RINT) &&
8816 + (cec_rxint & MASK_CECRIEND)) {
8817 +@@ -968,7 +969,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
8818 + msg.msg[i] = v & 0xff;
8819 + }
8820 + cec_received_msg(state->cec_adap, &msg);
8821 +- *handled = true;
8822 ++ if (handled)
8823 ++ *handled = true;
8824 + }
8825 + i2c_wr16(sd, INTSTATUS,
8826 + intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
8827 +@@ -1432,7 +1434,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
8828 +
8829 + #ifdef CONFIG_VIDEO_TC358743_CEC
8830 + if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) {
8831 +- tc358743_cec_isr(sd, intstatus, handled);
8832 ++ tc358743_cec_handler(sd, intstatus, handled);
8833 + i2c_wr16(sd, INTSTATUS,
8834 + intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
8835 + intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT);
8836 +@@ -1461,7 +1463,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
8837 + static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
8838 + {
8839 + struct tc358743_state *state = dev_id;
8840 +- bool handled;
8841 ++ bool handled = false;
8842 +
8843 + tc358743_isr(&state->sd, 0, &handled);
8844 +
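
Two defensive changes combine in the tc358743 hunks: the IRQ handler now
initializes `handled` before passing its address down, and the renamed
CEC helper only writes through the pointer when one was supplied. A
standalone model of the rule (illustrative names):

#include <stdbool.h>
#include <stdio.h>

static void cec_handler(unsigned int status, bool *handled)
{
	if (status & 0x1) {
		/* ... service the event ... */
		if (handled)		/* tolerate callers passing NULL */
			*handled = true;
	}
}

int main(void)
{
	bool handled = false;	/* was left uninitialized before the fix */

	cec_handler(0x1, &handled);
	cec_handler(0x1, NULL);	/* caller wants no out-flag: must not crash */
	printf("handled=%d\n", handled);
	return 0;
}
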
8845 +diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
8846 +index 9144f795fb933..b721720f9845a 100644
8847 +--- a/drivers/media/pci/bt8xx/bttv-driver.c
8848 ++++ b/drivers/media/pci/bt8xx/bttv-driver.c
8849 +@@ -4013,11 +4013,13 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
8850 + btv->id = dev->device;
8851 + if (pci_enable_device(dev)) {
8852 + pr_warn("%d: Can't enable device\n", btv->c.nr);
8853 +- return -EIO;
8854 ++ result = -EIO;
8855 ++ goto free_mem;
8856 + }
8857 + if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
8858 + pr_warn("%d: No suitable DMA available\n", btv->c.nr);
8859 +- return -EIO;
8860 ++ result = -EIO;
8861 ++ goto free_mem;
8862 + }
8863 + if (!request_mem_region(pci_resource_start(dev,0),
8864 + pci_resource_len(dev,0),
8865 +@@ -4025,7 +4027,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
8866 + pr_warn("%d: can't request iomem (0x%llx)\n",
8867 + btv->c.nr,
8868 + (unsigned long long)pci_resource_start(dev, 0));
8869 +- return -EBUSY;
8870 ++ result = -EBUSY;
8871 ++ goto free_mem;
8872 + }
8873 + pci_set_master(dev);
8874 + pci_set_command(dev);
8875 +@@ -4211,6 +4214,10 @@ fail0:
8876 + release_mem_region(pci_resource_start(btv->c.pci,0),
8877 + pci_resource_len(btv->c.pci,0));
8878 + pci_disable_device(btv->c.pci);
8879 ++
8880 ++free_mem:
8881 ++ bttvs[btv->c.nr] = NULL;
8882 ++ kfree(btv);
8883 + return result;
8884 + }
8885 +
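
Before this fix the three early bttv_probe() failures returned without
releasing the just-allocated btv, leaking it and leaving a stale
bttvs[] slot behind. The goto-cleanup shape, reduced to a userspace
sketch with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct dev_state { int id; };

static struct dev_state *registered;

static int probe(int fail_early)
{
	int result = 0;
	struct dev_state *st = malloc(sizeof(*st));

	if (!st)
		return -1;

	if (fail_early) {
		result = -5;		/* -EIO in the driver */
		goto free_mem;		/* instead of a bare "return -EIO" */
	}

	registered = st;	/* success: the device keeps the memory */
	return 0;

free_mem:
	free(st);		/* every error path releases it */
	return result;
}

int main(void)
{
	printf("early failure: %d\n", probe(1));
	printf("success: %d\n", probe(0));
	free(registered);
	return 0;
}
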
8886 +diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
8887 +index 79e1afb710758..5cc4ef21f9d37 100644
8888 +--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
8889 ++++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
8890 +@@ -683,7 +683,8 @@ int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value)
8891 + {
8892 + int err;
8893 +
8894 +- audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", reg << 2, value);
8895 ++ audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n",
8896 ++ (reg << 2) & 0xffffffff, value);
8897 + err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR);
8898 + if (err < 0)
8899 + return err;
8900 +diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
8901 +index cde0d254ec1c4..a77c49b185115 100644
8902 +--- a/drivers/media/platform/exynos4-is/fimc-isp.c
8903 ++++ b/drivers/media/platform/exynos4-is/fimc-isp.c
8904 +@@ -305,8 +305,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
8905 +
8906 + if (on) {
8907 + ret = pm_runtime_get_sync(&is->pdev->dev);
8908 +- if (ret < 0)
8909 ++ if (ret < 0) {
8910 ++ pm_runtime_put(&is->pdev->dev);
8911 + return ret;
8912 ++ }
8913 + set_bit(IS_ST_PWR_ON, &is->state);
8914 +
8915 + ret = fimc_is_start_firmware(is);
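
This is the first of many hunks in this patch (media-dev, mipi-csis,
csiphy, rcar-fcp, rcar-vin, rga, s5p-mfc, hva, vpe, vsp1, ...) applying
the same rule: pm_runtime_get_sync() increments the device usage
counter even when it fails, so an error return must be paired with a
put or the device can never runtime-suspend again. A toy refcount model
of the invariant (not kernel code):

#include <stdio.h>

static int usage;

static int get_sync(int fail)
{
	usage++;			/* counted even on failure */
	return fail ? -13 : 0;
}

static void put(void)
{
	usage--;
}

static int s_power_on(int fail)
{
	int ret = get_sync(fail);

	if (ret < 0) {
		put();			/* the fix: rebalance on error */
		return ret;
	}
	/* ... power-dependent work; put() again on power-off ... */
	return 0;
}

int main(void)
{
	s_power_on(1);
	printf("usage after failed power-on: %d\n", usage);	/* 0 */
	return 0;
}
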
8916 +diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
8917 +index 394e0818f2d5c..92130d7791378 100644
8918 +--- a/drivers/media/platform/exynos4-is/fimc-lite.c
8919 ++++ b/drivers/media/platform/exynos4-is/fimc-lite.c
8920 +@@ -470,7 +470,7 @@ static int fimc_lite_open(struct file *file)
8921 + set_bit(ST_FLITE_IN_USE, &fimc->state);
8922 + ret = pm_runtime_get_sync(&fimc->pdev->dev);
8923 + if (ret < 0)
8924 +- goto unlock;
8925 ++ goto err_pm;
8926 +
8927 + ret = v4l2_fh_open(file);
8928 + if (ret < 0)
8929 +diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
8930 +index 9c31d950cddf7..a07d796f63df0 100644
8931 +--- a/drivers/media/platform/exynos4-is/media-dev.c
8932 ++++ b/drivers/media/platform/exynos4-is/media-dev.c
8933 +@@ -484,8 +484,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
8934 + return -ENXIO;
8935 +
8936 + ret = pm_runtime_get_sync(fmd->pmf);
8937 +- if (ret < 0)
8938 ++ if (ret < 0) {
8939 ++ pm_runtime_put(fmd->pmf);
8940 + return ret;
8941 ++ }
8942 +
8943 + fmd->num_sensors = 0;
8944 +
8945 +@@ -1268,11 +1270,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd)
8946 + if (IS_ERR(pctl->state_default))
8947 + return PTR_ERR(pctl->state_default);
8948 +
8949 ++ /* PINCTRL_STATE_IDLE is optional */
8950 + pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
8951 + PINCTRL_STATE_IDLE);
8952 +- if (IS_ERR(pctl->state_idle))
8953 +- return PTR_ERR(pctl->state_idle);
8954 +-
8955 + return 0;
8956 + }
8957 +
8958 +diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
8959 +index 540151bbf58f2..1aac167abb175 100644
8960 +--- a/drivers/media/platform/exynos4-is/mipi-csis.c
8961 ++++ b/drivers/media/platform/exynos4-is/mipi-csis.c
8962 +@@ -510,8 +510,10 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
8963 + if (enable) {
8964 + s5pcsis_clear_counters(state);
8965 + ret = pm_runtime_get_sync(&state->pdev->dev);
8966 +- if (ret && ret != 1)
8967 ++ if (ret && ret != 1) {
8968 ++ pm_runtime_put_noidle(&state->pdev->dev);
8969 + return ret;
8970 ++ }
8971 + }
8972 +
8973 + mutex_lock(&state->lock);
8974 +diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
8975 +index df78df59da456..08a5473b56104 100644
8976 +--- a/drivers/media/platform/mx2_emmaprp.c
8977 ++++ b/drivers/media/platform/mx2_emmaprp.c
8978 +@@ -852,8 +852,11 @@ static int emmaprp_probe(struct platform_device *pdev)
8979 + platform_set_drvdata(pdev, pcdev);
8980 +
8981 + irq = platform_get_irq(pdev, 0);
8982 +- if (irq < 0)
8983 +- return irq;
8984 ++ if (irq < 0) {
8985 ++ ret = irq;
8986 ++ goto rel_vdev;
8987 ++ }
8988 ++
8989 + ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0,
8990 + dev_name(&pdev->dev), pcdev);
8991 + if (ret)
8992 +diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
8993 +index b91e472ee764e..de066757726de 100644
8994 +--- a/drivers/media/platform/omap3isp/isp.c
8995 ++++ b/drivers/media/platform/omap3isp/isp.c
8996 +@@ -2328,8 +2328,10 @@ static int isp_probe(struct platform_device *pdev)
8997 + mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
8998 + isp->mmio_base[map_idx] =
8999 + devm_ioremap_resource(isp->dev, mem);
9000 +- if (IS_ERR(isp->mmio_base[map_idx]))
9001 +- return PTR_ERR(isp->mmio_base[map_idx]);
9002 ++ if (IS_ERR(isp->mmio_base[map_idx])) {
9003 ++ ret = PTR_ERR(isp->mmio_base[map_idx]);
9004 ++ goto error;
9005 ++ }
9006 + }
9007 +
9008 + ret = isp_get_clocks(isp);
9009 +diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
9010 +index 008afb85023be..3c5b9082ad723 100644
9011 +--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
9012 ++++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
9013 +@@ -176,8 +176,10 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on)
9014 + int ret;
9015 +
9016 + ret = pm_runtime_get_sync(dev);
9017 +- if (ret < 0)
9018 ++ if (ret < 0) {
9019 ++ pm_runtime_put_sync(dev);
9020 + return ret;
9021 ++ }
9022 +
9023 + ret = csiphy_set_clock_rates(csiphy);
9024 + if (ret < 0) {
9025 +diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
9026 +index 203c6538044fb..321ad77cb6cf4 100644
9027 +--- a/drivers/media/platform/qcom/venus/core.c
9028 ++++ b/drivers/media/platform/qcom/venus/core.c
9029 +@@ -224,13 +224,15 @@ static int venus_probe(struct platform_device *pdev)
9030 +
9031 + ret = dma_set_mask_and_coherent(dev, core->res->dma_mask);
9032 + if (ret)
9033 +- return ret;
9034 ++ goto err_core_put;
9035 +
9036 + if (!dev->dma_parms) {
9037 + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
9038 + GFP_KERNEL);
9039 +- if (!dev->dma_parms)
9040 +- return -ENOMEM;
9041 ++ if (!dev->dma_parms) {
9042 ++ ret = -ENOMEM;
9043 ++ goto err_core_put;
9044 ++ }
9045 + }
9046 + dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
9047 +
9048 +@@ -242,11 +244,11 @@ static int venus_probe(struct platform_device *pdev)
9049 + IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
9050 + "venus", core);
9051 + if (ret)
9052 +- return ret;
9053 ++ goto err_core_put;
9054 +
9055 + ret = hfi_create(core, &venus_core_ops);
9056 + if (ret)
9057 +- return ret;
9058 ++ goto err_core_put;
9059 +
9060 + pm_runtime_enable(dev);
9061 +
9062 +@@ -287,8 +289,10 @@ static int venus_probe(struct platform_device *pdev)
9063 + goto err_core_deinit;
9064 +
9065 + ret = pm_runtime_put_sync(dev);
9066 +- if (ret)
9067 ++ if (ret) {
9068 ++ pm_runtime_get_noresume(dev);
9069 + goto err_dev_unregister;
9070 ++ }
9071 +
9072 + return 0;
9073 +
9074 +@@ -299,9 +303,13 @@ err_core_deinit:
9075 + err_venus_shutdown:
9076 + venus_shutdown(core);
9077 + err_runtime_disable:
9078 ++ pm_runtime_put_noidle(dev);
9079 + pm_runtime_set_suspended(dev);
9080 + pm_runtime_disable(dev);
9081 + hfi_destroy(core);
9082 ++err_core_put:
9083 ++ if (core->pm_ops->core_put)
9084 ++ core->pm_ops->core_put(dev);
9085 + return ret;
9086 + }
9087 +
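
The venus_probe() changes extend an existing unwind chain: resources are
released in reverse order of acquisition by falling through stacked
labels, and the new err_core_put tail catches the early failures that
previously returned directly. Skeleton of the idiom (illustrative
names):

#include <stdio.h>

static int acquire(const char *what, int fail)
{
	printf("acquire %s\n", what);
	return fail ? -1 : 0;
}

static void release(const char *what)
{
	printf("release %s\n", what);
}

static int probe(void)
{
	int ret;

	ret = acquire("core", 0);
	if (ret)
		return ret;
	ret = acquire("irq", 0);
	if (ret)
		goto err_core_put;
	ret = acquire("firmware", 1);
	if (ret)
		goto err_irq;	/* jump to the deepest matching label */

	return 0;

err_irq:
	release("irq");
err_core_put:			/* the newly appended tail label */
	release("core");
	return ret;
}

int main(void)
{
	return probe();
}
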
9088 +diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
9089 +index 7c4c483d54389..76be14efbfb09 100644
9090 +--- a/drivers/media/platform/qcom/venus/vdec.c
9091 ++++ b/drivers/media/platform/qcom/venus/vdec.c
9092 +@@ -1088,8 +1088,6 @@ static int vdec_stop_capture(struct venus_inst *inst)
9093 + break;
9094 + }
9095 +
9096 +- INIT_LIST_HEAD(&inst->registeredbufs);
9097 +-
9098 + return ret;
9099 + }
9100 +
9101 +@@ -1189,6 +1187,14 @@ static int vdec_buf_init(struct vb2_buffer *vb)
9102 + static void vdec_buf_cleanup(struct vb2_buffer *vb)
9103 + {
9104 + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
9105 ++ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
9106 ++ struct venus_buffer *buf = to_venus_buffer(vbuf);
9107 ++
9108 ++ mutex_lock(&inst->lock);
9109 ++ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
9110 ++ if (!list_empty(&inst->registeredbufs))
9111 ++ list_del_init(&buf->reg_list);
9112 ++ mutex_unlock(&inst->lock);
9113 +
9114 + inst->buf_count--;
9115 + if (!inst->buf_count)
9116 +diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
9117 +index 5c6b00737fe75..05c712e00a2a7 100644
9118 +--- a/drivers/media/platform/rcar-fcp.c
9119 ++++ b/drivers/media/platform/rcar-fcp.c
9120 +@@ -103,8 +103,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp)
9121 + return 0;
9122 +
9123 + ret = pm_runtime_get_sync(fcp->dev);
9124 +- if (ret < 0)
9125 ++ if (ret < 0) {
9126 ++ pm_runtime_put_noidle(fcp->dev);
9127 + return ret;
9128 ++ }
9129 +
9130 + return 0;
9131 + }
9132 +diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
9133 +index 151e6a90c5fbc..d9bc8cef7db58 100644
9134 +--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
9135 ++++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
9136 +@@ -361,7 +361,6 @@ struct rcar_csi2 {
9137 + struct media_pad pads[NR_OF_RCAR_CSI2_PAD];
9138 +
9139 + struct v4l2_async_notifier notifier;
9140 +- struct v4l2_async_subdev asd;
9141 + struct v4l2_subdev *remote;
9142 +
9143 + struct v4l2_mbus_framefmt mf;
9144 +@@ -810,6 +809,8 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
9145 +
9146 + static int rcsi2_parse_dt(struct rcar_csi2 *priv)
9147 + {
9148 ++ struct v4l2_async_subdev *asd;
9149 ++ struct fwnode_handle *fwnode;
9150 + struct device_node *ep;
9151 + struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
9152 + int ret;
9153 +@@ -833,24 +834,19 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
9154 + return ret;
9155 + }
9156 +
9157 +- priv->asd.match.fwnode =
9158 +- fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
9159 +- priv->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
9160 +-
9161 ++ fwnode = fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
9162 + of_node_put(ep);
9163 +
9164 +- v4l2_async_notifier_init(&priv->notifier);
9165 +-
9166 +- ret = v4l2_async_notifier_add_subdev(&priv->notifier, &priv->asd);
9167 +- if (ret) {
9168 +- fwnode_handle_put(priv->asd.match.fwnode);
9169 +- return ret;
9170 +- }
9171 ++ dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode));
9172 +
9173 ++ v4l2_async_notifier_init(&priv->notifier);
9174 + priv->notifier.ops = &rcar_csi2_notify_ops;
9175 +
9176 +- dev_dbg(priv->dev, "Found '%pOF'\n",
9177 +- to_of_node(priv->asd.match.fwnode));
9178 ++ asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode,
9179 ++ sizeof(*asd));
9180 ++ fwnode_handle_put(fwnode);
9181 ++ if (IS_ERR(asd))
9182 ++ return PTR_ERR(asd);
9183 +
9184 + ret = v4l2_async_subdev_notifier_register(&priv->subdev,
9185 + &priv->notifier);
9186 +diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
9187 +index 1a30cd0363711..95bc9e0e87926 100644
9188 +--- a/drivers/media/platform/rcar-vin/rcar-dma.c
9189 ++++ b/drivers/media/platform/rcar-vin/rcar-dma.c
9190 +@@ -1392,8 +1392,10 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
9191 + int ret;
9192 +
9193 + ret = pm_runtime_get_sync(vin->dev);
9194 +- if (ret < 0)
9195 ++ if (ret < 0) {
9196 ++ pm_runtime_put_noidle(vin->dev);
9197 + return ret;
9198 ++ }
9199 +
9200 + /* Make register writes take effect immediately. */
9201 + vnmc = rvin_read(vin, VNMC_REG);
9202 +diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
9203 +index 3d2451ac347d7..f318cd4b8086f 100644
9204 +--- a/drivers/media/platform/rcar_drif.c
9205 ++++ b/drivers/media/platform/rcar_drif.c
9206 +@@ -185,7 +185,6 @@ struct rcar_drif_frame_buf {
9207 + /* OF graph endpoint's V4L2 async data */
9208 + struct rcar_drif_graph_ep {
9209 + struct v4l2_subdev *subdev; /* Async matched subdev */
9210 +- struct v4l2_async_subdev asd; /* Async sub-device descriptor */
9211 + };
9212 +
9213 + /* DMA buffer */
9214 +@@ -1109,12 +1108,6 @@ static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
9215 + struct rcar_drif_sdr *sdr =
9216 + container_of(notifier, struct rcar_drif_sdr, notifier);
9217 +
9218 +- if (sdr->ep.asd.match.fwnode !=
9219 +- of_fwnode_handle(subdev->dev->of_node)) {
9220 +- rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name);
9221 +- return -EINVAL;
9222 +- }
9223 +-
9224 + v4l2_set_subdev_hostdata(subdev, sdr);
9225 + sdr->ep.subdev = subdev;
9226 + rdrif_dbg(sdr, "bound asd %s\n", subdev->name);
9227 +@@ -1218,7 +1211,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
9228 + {
9229 + struct v4l2_async_notifier *notifier = &sdr->notifier;
9230 + struct fwnode_handle *fwnode, *ep;
9231 +- int ret;
9232 ++ struct v4l2_async_subdev *asd;
9233 +
9234 + v4l2_async_notifier_init(notifier);
9235 +
9236 +@@ -1227,26 +1220,21 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
9237 + if (!ep)
9238 + return 0;
9239 +
9240 ++ /* Get the endpoint properties */
9241 ++ rcar_drif_get_ep_properties(sdr, ep);
9242 ++
9243 + fwnode = fwnode_graph_get_remote_port_parent(ep);
9244 ++ fwnode_handle_put(ep);
9245 + if (!fwnode) {
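
The flexcan rework turns chip-stop into one worker carrying a
disable_on_error policy flag plus two thin wrappers: the strict variant
propagates the first failure (unfreezing again if only the freeze had
succeeded), while the close() path forces the shutdown through
regardless. Reduced sketch with hypothetical stubs:

#include <stdbool.h>
#include <stdio.h>

static int chip_freeze(void)    { return 0; }
static int chip_disable(void)   { return -5; }	/* simulate a failure */
static void chip_unfreeze(void) { }

static int __chip_stop(bool disable_on_error)
{
	int err;

	err = chip_freeze();
	if (err && !disable_on_error)
		return err;
	err = chip_disable();
	if (err && !disable_on_error) {
		chip_unfreeze();	/* undo the freeze on strict failure */
		return err;
	}
	/* ... mask interrupts, disable transceiver, mark STOPPED ... */
	return 0;
}

static int chip_stop(void)                  { return __chip_stop(false); }
static int chip_stop_disable_on_error(void) { return __chip_stop(true); }

int main(void)
{
	printf("strict: %d, forced: %d\n",
	       chip_stop(), chip_stop_disable_on_error());
	return 0;
}
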
9246 + dev_warn(sdr->dev, "bad remote port parent\n");
9247 +- fwnode_handle_put(ep);
9248 + return -EINVAL;
9249 + }
9250 +
9251 +- sdr->ep.asd.match.fwnode = fwnode;
9252 +- sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
9253 +- ret = v4l2_async_notifier_add_subdev(notifier, &sdr->ep.asd);
9254 +- if (ret) {
9255 +- fwnode_handle_put(fwnode);
9256 +- return ret;
9257 +- }
9258 +-
9259 +- /* Get the endpoint properties */
9260 +- rcar_drif_get_ep_properties(sdr, ep);
9261 +-
9262 ++ asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
9263 ++ sizeof(*asd));
9264 + fwnode_handle_put(fwnode);
9265 +- fwnode_handle_put(ep);
9266 ++ if (IS_ERR(asd))
9267 ++ return PTR_ERR(asd);
9268 +
9269 + return 0;
9270 + }
9271 +diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
9272 +index 36b821ccc1dba..bf9a75b75083b 100644
9273 +--- a/drivers/media/platform/rockchip/rga/rga-buf.c
9274 ++++ b/drivers/media/platform/rockchip/rga/rga-buf.c
9275 +@@ -81,6 +81,7 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
9276 +
9277 + ret = pm_runtime_get_sync(rga->dev);
9278 + if (ret < 0) {
9279 ++ pm_runtime_put_noidle(rga->dev);
9280 + rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
9281 + return ret;
9282 + }
9283 +diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
9284 +index c6fbcd7036d6d..ee624804862e2 100644
9285 +--- a/drivers/media/platform/s3c-camif/camif-core.c
9286 ++++ b/drivers/media/platform/s3c-camif/camif-core.c
9287 +@@ -464,7 +464,7 @@ static int s3c_camif_probe(struct platform_device *pdev)
9288 +
9289 + ret = camif_media_dev_init(camif);
9290 + if (ret < 0)
9291 +- goto err_alloc;
9292 ++ goto err_pm;
9293 +
9294 + ret = camif_register_sensor(camif);
9295 + if (ret < 0)
9296 +@@ -498,10 +498,9 @@ err_sens:
9297 + media_device_unregister(&camif->media_dev);
9298 + media_device_cleanup(&camif->media_dev);
9299 + camif_unregister_media_entities(camif);
9300 +-err_alloc:
9301 ++err_pm:
9302 + pm_runtime_put(dev);
9303 + pm_runtime_disable(dev);
9304 +-err_pm:
9305 + camif_clk_put(camif);
9306 + err_clk:
9307 + s3c_camif_unregister_subdev(camif);
9308 +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
9309 +index 7d52431c2c837..62d2320a72186 100644
9310 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
9311 ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
9312 +@@ -79,8 +79,10 @@ int s5p_mfc_power_on(void)
9313 + int i, ret = 0;
9314 +
9315 + ret = pm_runtime_get_sync(pm->device);
9316 +- if (ret < 0)
9317 ++ if (ret < 0) {
9318 ++ pm_runtime_put_noidle(pm->device);
9319 + return ret;
9320 ++ }
9321 +
9322 + /* clock control */
9323 + for (i = 0; i < pm->num_clocks; i++) {
9324 +diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
9325 +index af2d5eb782cee..e1d150584bdc2 100644
9326 +--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
9327 ++++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
9328 +@@ -1371,7 +1371,7 @@ static int bdisp_probe(struct platform_device *pdev)
9329 + ret = pm_runtime_get_sync(dev);
9330 + if (ret < 0) {
9331 + dev_err(dev, "failed to set PM\n");
9332 +- goto err_dbg;
9333 ++ goto err_pm;
9334 + }
9335 +
9336 + /* Filters */
9337 +@@ -1399,7 +1399,6 @@ err_filter:
9338 + bdisp_hw_free_filters(bdisp->dev);
9339 + err_pm:
9340 + pm_runtime_put(dev);
9341 +-err_dbg:
9342 + bdisp_debugfs_remove(bdisp);
9343 + err_v4l2:
9344 + v4l2_device_unregister(&bdisp->v4l2_dev);
9345 +diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
9346 +index 2503224eeee51..c691b3d81549d 100644
9347 +--- a/drivers/media/platform/sti/delta/delta-v4l2.c
9348 ++++ b/drivers/media/platform/sti/delta/delta-v4l2.c
9349 +@@ -954,8 +954,10 @@ static void delta_run_work(struct work_struct *work)
9350 + /* enable the hardware */
9351 + if (!dec->pm) {
9352 + ret = delta_get_sync(ctx);
9353 +- if (ret)
9354 ++ if (ret) {
9355 ++ delta_put_autosuspend(ctx);
9356 + goto err;
9357 ++ }
9358 + }
9359 +
9360 + /* decode this access unit */
9361 +diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
9362 +index 401aaafa17109..43f279e2a6a38 100644
9363 +--- a/drivers/media/platform/sti/hva/hva-hw.c
9364 ++++ b/drivers/media/platform/sti/hva/hva-hw.c
9365 +@@ -272,6 +272,7 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
9366 +
9367 + if (pm_runtime_get_sync(dev) < 0) {
9368 + dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
9369 ++ pm_runtime_put_noidle(dev);
9370 + mutex_unlock(&hva->protect_mutex);
9371 + return -EFAULT;
9372 + }
9373 +@@ -388,7 +389,7 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
9374 + ret = pm_runtime_get_sync(dev);
9375 + if (ret < 0) {
9376 + dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
9377 +- goto err_clk;
9378 ++ goto err_pm;
9379 + }
9380 +
9381 + /* check IP hardware version */
9382 +@@ -553,6 +554,7 @@ void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
9383 +
9384 + if (pm_runtime_get_sync(dev) < 0) {
9385 + seq_puts(s, "Cannot wake up IP\n");
9386 ++ pm_runtime_put_noidle(dev);
9387 + mutex_unlock(&hva->protect_mutex);
9388 + return;
9389 + }
9390 +diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
9391 +index b8931490b83b7..fd1c41cba52fc 100644
9392 +--- a/drivers/media/platform/stm32/stm32-dcmi.c
9393 ++++ b/drivers/media/platform/stm32/stm32-dcmi.c
9394 +@@ -733,7 +733,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
9395 + if (ret < 0) {
9396 + dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
9397 + __func__, ret);
9398 +- goto err_release_buffers;
9399 ++ goto err_pm_put;
9400 + }
9401 +
9402 + ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline);
9403 +@@ -837,8 +837,6 @@ err_media_pipeline_stop:
9404 +
9405 + err_pm_put:
9406 + pm_runtime_put(dcmi->dev);
9407 +-
9408 +-err_release_buffers:
9409 + spin_lock_irq(&dcmi->irqlock);
9410 + /*
9411 + * Return all buffers to vb2 in QUEUED state.
9412 +diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
9413 +index cff2fcd6d812a..82d3ee45e2e90 100644
9414 +--- a/drivers/media/platform/ti-vpe/vpe.c
9415 ++++ b/drivers/media/platform/ti-vpe/vpe.c
9416 +@@ -2475,6 +2475,8 @@ static int vpe_runtime_get(struct platform_device *pdev)
9417 +
9418 + r = pm_runtime_get_sync(&pdev->dev);
9419 + WARN_ON(r < 0);
9420 ++ if (r)
9421 ++ pm_runtime_put_noidle(&pdev->dev);
9422 + return r < 0 ? r : 0;
9423 + }
9424 +
9425 +diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
9426 +index c650e45bb0ad1..dc62533cf32ce 100644
9427 +--- a/drivers/media/platform/vsp1/vsp1_drv.c
9428 ++++ b/drivers/media/platform/vsp1/vsp1_drv.c
9429 +@@ -562,7 +562,12 @@ int vsp1_device_get(struct vsp1_device *vsp1)
9430 + int ret;
9431 +
9432 + ret = pm_runtime_get_sync(vsp1->dev);
9433 +- return ret < 0 ? ret : 0;
9434 ++ if (ret < 0) {
9435 ++ pm_runtime_put_noidle(vsp1->dev);
9436 ++ return ret;
9437 ++ }
9438 ++
9439 ++ return 0;
9440 + }
9441 +
9442 + /*
9443 +@@ -845,12 +850,12 @@ static int vsp1_probe(struct platform_device *pdev)
9444 + /* Configure device parameters based on the version register. */
9445 + pm_runtime_enable(&pdev->dev);
9446 +
9447 +- ret = pm_runtime_get_sync(&pdev->dev);
9448 ++ ret = vsp1_device_get(vsp1);
9449 + if (ret < 0)
9450 + goto done;
9451 +
9452 + vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
9453 +- pm_runtime_put_sync(&pdev->dev);
9454 ++ vsp1_device_put(vsp1);
9455 +
9456 + for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
9457 + if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
9458 +diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
9459 +index 9cdef17b4793f..c12dda73cdd53 100644
9460 +--- a/drivers/media/rc/ati_remote.c
9461 ++++ b/drivers/media/rc/ati_remote.c
9462 +@@ -835,6 +835,10 @@ static int ati_remote_probe(struct usb_interface *interface,
9463 + err("%s: endpoint_in message size==0? \n", __func__);
9464 + return -ENODEV;
9465 + }
9466 ++ if (!usb_endpoint_is_int_out(endpoint_out)) {
9467 ++ err("%s: Unexpected endpoint_out\n", __func__);
9468 ++ return -ENODEV;
9469 ++ }
9470 +
9471 + ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL);
9472 + rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
9473 +diff --git a/drivers/media/test-drivers/vivid/vivid-meta-out.c b/drivers/media/test-drivers/vivid/vivid-meta-out.c
9474 +index ff8a039aba72e..95835b52b58fc 100644
9475 +--- a/drivers/media/test-drivers/vivid/vivid-meta-out.c
9476 ++++ b/drivers/media/test-drivers/vivid/vivid-meta-out.c
9477 +@@ -164,10 +164,11 @@ void vivid_meta_out_process(struct vivid_dev *dev,
9478 + {
9479 + struct vivid_meta_out_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
9480 +
9481 +- tpg_s_brightness(&dev->tpg, meta->brightness);
9482 +- tpg_s_contrast(&dev->tpg, meta->contrast);
9483 +- tpg_s_saturation(&dev->tpg, meta->saturation);
9484 +- tpg_s_hue(&dev->tpg, meta->hue);
9485 ++ v4l2_ctrl_s_ctrl(dev->brightness, meta->brightness);
9486 ++ v4l2_ctrl_s_ctrl(dev->contrast, meta->contrast);
9487 ++ v4l2_ctrl_s_ctrl(dev->saturation, meta->saturation);
9488 ++ v4l2_ctrl_s_ctrl(dev->hue, meta->hue);
9489 ++
9490 + dprintk(dev, 2, " %s brightness %u contrast %u saturation %u hue %d\n",
9491 + __func__, meta->brightness, meta->contrast,
9492 + meta->saturation, meta->hue);
9493 +diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
9494 +index b6e70fada3fb2..8fb186b25d6af 100644
9495 +--- a/drivers/media/tuners/tuner-simple.c
9496 ++++ b/drivers/media/tuners/tuner-simple.c
9497 +@@ -500,7 +500,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
9498 + case TUNER_TENA_9533_DI:
9499 + case TUNER_YMEC_TVF_5533MF:
9500 + tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n");
9501 +- return 0;
9502 ++ return -EINVAL;
9503 + case TUNER_PHILIPS_FM1216ME_MK3:
9504 + case TUNER_PHILIPS_FM1236_MK3:
9505 + case TUNER_PHILIPS_FMD1216ME_MK3:
9506 +@@ -702,7 +702,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
9507 + TUNER_RATIO_SELECT_50; /* 50 kHz step */
9508 +
9509 + /* Bandswitch byte */
9510 +- simple_radio_bandswitch(fe, &buffer[0]);
9511 ++ if (simple_radio_bandswitch(fe, &buffer[0]))
9512 ++ return 0;
9513 +
9514 + /* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps
9515 + freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) =
9516 +diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
9517 +index e399b9fad7574..a30a8a731eda8 100644
9518 +--- a/drivers/media/usb/uvc/uvc_ctrl.c
9519 ++++ b/drivers/media/usb/uvc/uvc_ctrl.c
9520 +@@ -773,12 +773,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
9521 + offset &= 7;
9522 + mask = ((1LL << bits) - 1) << offset;
9523 +
9524 +- for (; bits > 0; data++) {
9525 ++ while (1) {
9526 + u8 byte = *data & mask;
9527 + value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
9528 + bits -= 8 - (offset > 0 ? offset : 0);
9529 ++ if (bits <= 0)
9530 ++ break;
9531 ++
9532 + offset -= 8;
9533 + mask = (1 << bits) - 1;
9534 ++ data++;
9535 + }
9536 +
9537 + /* Sign-extend the value if needed. */
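
The uvc_get_le_value() rewrite breaks out of the loop on the remaining
bit count before updating offset and mask and before advancing data, so
the old final pass can no longer evaluate `mask = (1 << bits) - 1` with
bits already <= 0 (a negative shift, i.e. undefined behaviour) or step
the pointer past the last byte it needed. The extraction logic itself is
unchanged and small enough to exercise standalone (sign extension
omitted):

#include <stdint.h>
#include <stdio.h>

static int32_t get_le_value(const uint8_t *data, int offset, int bits)
{
	int32_t value = 0;
	uint8_t mask;

	data += offset / 8;
	offset &= 7;
	mask = ((1LL << bits) - 1) << offset;

	while (1) {
		uint8_t byte = *data & mask;

		value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
		bits -= 8 - (offset > 0 ? offset : 0);
		if (bits <= 0)
			break;

		offset -= 8;
		mask = (1 << bits) - 1;
		data++;
	}
	return value;
}

int main(void)
{
	const uint8_t buf[] = { 0xB4, 0x01 };	/* little-endian 0x01B4 */

	/* 9-bit field at bit offset 2 of 0x01B4: (0x1B4 >> 2) = 109 */
	printf("%d\n", get_le_value(buf, 2, 9));
	return 0;
}
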
9538 +diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
9539 +index b4499cddeffe5..ca3a9c2eec271 100644
9540 +--- a/drivers/media/usb/uvc/uvc_entity.c
9541 ++++ b/drivers/media/usb/uvc/uvc_entity.c
9542 +@@ -73,10 +73,45 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain,
9543 + int ret;
9544 +
9545 + if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
9546 ++ u32 function;
9547 ++
9548 + v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
9549 + strscpy(entity->subdev.name, entity->name,
9550 + sizeof(entity->subdev.name));
9551 +
9552 ++ switch (UVC_ENTITY_TYPE(entity)) {
9553 ++ case UVC_VC_SELECTOR_UNIT:
9554 ++ function = MEDIA_ENT_F_VID_MUX;
9555 ++ break;
9556 ++ case UVC_VC_PROCESSING_UNIT:
9557 ++ case UVC_VC_EXTENSION_UNIT:
9558 ++ /* For lack of a better option. */
9559 ++ function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
9560 ++ break;
9561 ++ case UVC_COMPOSITE_CONNECTOR:
9562 ++ case UVC_COMPONENT_CONNECTOR:
9563 ++ function = MEDIA_ENT_F_CONN_COMPOSITE;
9564 ++ break;
9565 ++ case UVC_SVIDEO_CONNECTOR:
9566 ++ function = MEDIA_ENT_F_CONN_SVIDEO;
9567 ++ break;
9568 ++ case UVC_ITT_CAMERA:
9569 ++ function = MEDIA_ENT_F_CAM_SENSOR;
9570 ++ break;
9571 ++ case UVC_TT_VENDOR_SPECIFIC:
9572 ++ case UVC_ITT_VENDOR_SPECIFIC:
9573 ++ case UVC_ITT_MEDIA_TRANSPORT_INPUT:
9574 ++ case UVC_OTT_VENDOR_SPECIFIC:
9575 ++ case UVC_OTT_DISPLAY:
9576 ++ case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
9577 ++ case UVC_EXTERNAL_VENDOR_SPECIFIC:
9578 ++ default:
9579 ++ function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
9580 ++ break;
9581 ++ }
9582 ++
9583 ++ entity->subdev.entity.function = function;
9584 ++
9585 + ret = media_entity_pads_init(&entity->subdev.entity,
9586 + entity->num_pads, entity->pads);
9587 +
9588 +diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
9589 +index 0335e69b70abe..5e6f3153b5ff8 100644
9590 +--- a/drivers/media/usb/uvc/uvc_v4l2.c
9591 ++++ b/drivers/media/usb/uvc/uvc_v4l2.c
9592 +@@ -247,11 +247,41 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
9593 + if (ret < 0)
9594 + goto done;
9595 +
9596 ++ /* After the probe, update fmt with the values returned from
9597 ++ * negotiation with the device.
9598 ++ */
9599 ++ for (i = 0; i < stream->nformats; ++i) {
9600 ++ if (probe->bFormatIndex == stream->format[i].index) {
9601 ++ format = &stream->format[i];
9602 ++ break;
9603 ++ }
9604 ++ }
9605 ++
9606 ++ if (i == stream->nformats) {
9607 ++ uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n",
9608 ++ probe->bFormatIndex);
9609 ++ return -EINVAL;
9610 ++ }
9611 ++
9612 ++ for (i = 0; i < format->nframes; ++i) {
9613 ++ if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
9614 ++ frame = &format->frame[i];
9615 ++ break;
9616 ++ }
9617 ++ }
9618 ++
9619 ++ if (i == format->nframes) {
9620 ++ uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n",
9621 ++ probe->bFrameIndex);
9622 ++ return -EINVAL;
9623 ++ }
9624 ++
9625 + fmt->fmt.pix.width = frame->wWidth;
9626 + fmt->fmt.pix.height = frame->wHeight;
9627 + fmt->fmt.pix.field = V4L2_FIELD_NONE;
9628 + fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
9629 + fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize;
9630 ++ fmt->fmt.pix.pixelformat = format->fcc;
9631 + fmt->fmt.pix.colorspace = format->colorspace;
9632 +
9633 + if (uvc_format != NULL)
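
After the probe round-trip the device may have settled on a different
format or frame than the one requested, so the fix re-resolves the
returned bFormatIndex/bFrameIndex against the driver's own tables
(failing with -EINVAL on an unknown index) before filling in the V4L2
format, including the possibly-changed pixelformat. The
lookup-and-validate shape in isolation:

#include <stdio.h>

struct format { unsigned int index; const char *name; };

static const struct format formats[] = {
	{ 1, "YUYV" }, { 2, "MJPEG" },
};

static const struct format *find_format(unsigned int index)
{
	unsigned int i;

	for (i = 0; i < sizeof(formats) / sizeof(formats[0]); i++)
		if (formats[i].index == index)
			return &formats[i];
	return NULL;	/* unknown index: the caller returns -EINVAL */
}

int main(void)
{
	const struct format *f = find_format(2);

	printf("%s\n", f ? f->name : "unknown");
	return 0;
}
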
9634 +diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
9635 +index 0b0ed72016da8..0309bd5a18008 100644
9636 +--- a/drivers/memory/fsl-corenet-cf.c
9637 ++++ b/drivers/memory/fsl-corenet-cf.c
9638 +@@ -211,10 +211,8 @@ static int ccf_probe(struct platform_device *pdev)
9639 + dev_set_drvdata(&pdev->dev, ccf);
9640 +
9641 + irq = platform_get_irq(pdev, 0);
9642 +- if (!irq) {
9643 +- dev_err(&pdev->dev, "%s: no irq\n", __func__);
9644 +- return -ENXIO;
9645 +- }
9646 ++ if (irq < 0)
9647 ++ return irq;
9648 +
9649 + ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf);
9650 + if (ret) {
9651 +diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
9652 +index eff26c1b13940..27bc417029e11 100644
9653 +--- a/drivers/memory/omap-gpmc.c
9654 ++++ b/drivers/memory/omap-gpmc.c
9655 +@@ -949,7 +949,7 @@ static int gpmc_cs_remap(int cs, u32 base)
9656 + int ret;
9657 + u32 old_base, size;
9658 +
9659 +- if (cs > gpmc_cs_num) {
9660 ++ if (cs >= gpmc_cs_num) {
9661 + pr_err("%s: requested chip-select is disabled\n", __func__);
9662 + return -ENODEV;
9663 + }
9664 +@@ -984,7 +984,7 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
9665 + struct resource *res = &gpmc->mem;
9666 + int r = -1;
9667 +
9668 +- if (cs > gpmc_cs_num) {
9669 ++ if (cs >= gpmc_cs_num) {
9670 + pr_err("%s: requested chip-select is disabled\n", __func__);
9671 + return -ENODEV;
9672 + }
9673 +@@ -2274,6 +2274,10 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
9674 + }
9675 + }
9676 + #else
9677 ++void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
9678 ++{
9679 ++ memset(p, 0, sizeof(*p));
9680 ++}
9681 + static int gpmc_probe_dt(struct platform_device *pdev)
9682 + {
9683 + return 0;
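
The two gpmc hunks above and the stm32_fmc2 one further down cure the
same off-by-one: with N chip-selects the valid indices are 0..N-1, so
the rejection test must be `cs >= N`, not `cs > N`. (The last gpmc hunk
separately adds a !CONFIG_OF stub of gpmc_read_settings_dt() that zeroes
the settings.) The boundary case in miniature:

#include <stdio.h>

#define NUM_CS 4

static int cs_valid(int cs)
{
	return cs >= 0 && cs < NUM_CS;	/* i.e. reject cs >= NUM_CS */
}

int main(void)
{
	/* cs == NUM_CS slipped through the old `cs > NUM_CS` test. */
	printf("cs=%d valid? %d\n", NUM_CS, cs_valid(NUM_CS));
	return 0;
}
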
9684 +diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
9685 +index ccd62b9639528..6d2f4a0a901dc 100644
9686 +--- a/drivers/mfd/sm501.c
9687 ++++ b/drivers/mfd/sm501.c
9688 +@@ -1415,8 +1415,14 @@ static int sm501_plat_probe(struct platform_device *dev)
9689 + goto err_claim;
9690 + }
9691 +
9692 +- return sm501_init_dev(sm);
9693 ++ ret = sm501_init_dev(sm);
9694 ++ if (ret)
9695 ++ goto err_unmap;
9696 ++
9697 ++ return 0;
9698 +
9699 ++ err_unmap:
9700 ++ iounmap(sm->regs);
9701 + err_claim:
9702 + release_mem_region(sm->io_res->start, 0x100);
9703 + err_res:
9704 +diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
9705 +index 0d5928bc1b6d7..82246f7aec6fb 100644
9706 +--- a/drivers/misc/cardreader/rtsx_pcr.c
9707 ++++ b/drivers/misc/cardreader/rtsx_pcr.c
9708 +@@ -1536,12 +1536,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
9709 + ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
9710 + ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
9711 + if (ret < 0)
9712 +- goto disable_irq;
9713 ++ goto free_slots;
9714 +
9715 + schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
9716 +
9717 + return 0;
9718 +
9719 ++free_slots:
9720 ++ kfree(pcr->slots);
9721 + disable_irq:
9722 + free_irq(pcr->irq, (void *)pcr);
9723 + disable_msi:
9724 +diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
9725 +index cde9a2fc13250..490ff49d11ede 100644
9726 +--- a/drivers/misc/eeprom/at25.c
9727 ++++ b/drivers/misc/eeprom/at25.c
9728 +@@ -358,7 +358,7 @@ static int at25_probe(struct spi_device *spi)
9729 + at25->nvmem_config.reg_read = at25_ee_read;
9730 + at25->nvmem_config.reg_write = at25_ee_write;
9731 + at25->nvmem_config.priv = at25;
9732 +- at25->nvmem_config.stride = 4;
9733 ++ at25->nvmem_config.stride = 1;
9734 + at25->nvmem_config.word_size = 1;
9735 + at25->nvmem_config.size = chip.byte_len;
9736 +
9737 +diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
9738 +index ca183733847b6..bcc45bf7af2c8 100644
9739 +--- a/drivers/misc/habanalabs/gaudi/gaudi.c
9740 ++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
9741 +@@ -6285,7 +6285,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
9742 + is_idle &= is_eng_idle;
9743 +
9744 + if (mask)
9745 +- *mask |= !is_eng_idle <<
9746 ++ *mask |= ((u64) !is_eng_idle) <<
9747 + (GAUDI_ENGINE_ID_DMA_0 + dma_id);
9748 + if (s)
9749 + seq_printf(s, fmt, dma_id,
9750 +@@ -6308,7 +6308,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
9751 + is_idle &= is_eng_idle;
9752 +
9753 + if (mask)
9754 +- *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_TPC_0 + i);
9755 ++ *mask |= ((u64) !is_eng_idle) <<
9756 ++ (GAUDI_ENGINE_ID_TPC_0 + i);
9757 + if (s)
9758 + seq_printf(s, fmt, i,
9759 + is_eng_idle ? "Y" : "N",
9760 +@@ -6336,7 +6337,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
9761 + is_idle &= is_eng_idle;
9762 +
9763 + if (mask)
9764 +- *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_MME_0 + i);
9765 ++ *mask |= ((u64) !is_eng_idle) <<
9766 ++ (GAUDI_ENGINE_ID_MME_0 + i);
9767 + if (s) {
9768 + if (!is_slave)
9769 + seq_printf(s, fmt, i,
9770 +diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
9771 +index c179085ced7b8..a8041a39fae31 100644
9772 +--- a/drivers/misc/habanalabs/goya/goya.c
9773 ++++ b/drivers/misc/habanalabs/goya/goya.c
9774 +@@ -5098,7 +5098,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
9775 + is_idle &= is_eng_idle;
9776 +
9777 + if (mask)
9778 +- *mask |= !is_eng_idle << (GOYA_ENGINE_ID_DMA_0 + i);
9779 ++ *mask |= ((u64) !is_eng_idle) <<
9780 ++ (GOYA_ENGINE_ID_DMA_0 + i);
9781 + if (s)
9782 + seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
9783 + qm_glbl_sts0, dma_core_sts0);
9784 +@@ -5121,7 +5122,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
9785 + is_idle &= is_eng_idle;
9786 +
9787 + if (mask)
9788 +- *mask |= !is_eng_idle << (GOYA_ENGINE_ID_TPC_0 + i);
9789 ++ *mask |= ((u64) !is_eng_idle) <<
9790 ++ (GOYA_ENGINE_ID_TPC_0 + i);
9791 + if (s)
9792 + seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
9793 + qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
9794 +@@ -5141,7 +5143,7 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
9795 + is_idle &= is_eng_idle;
9796 +
9797 + if (mask)
9798 +- *mask |= !is_eng_idle << GOYA_ENGINE_ID_MME_0;
9799 ++ *mask |= ((u64) !is_eng_idle) << GOYA_ENGINE_ID_MME_0;
9800 + if (s) {
9801 + seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
9802 + cmdq_glbl_sts0, mme_arch_sts);
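
All of the habanalabs hunks fix the same C pitfall: `!is_eng_idle` is an
int, so for engine bits of 32 and above the shift is undefined and the
result could never reach the upper half of the 64-bit idle mask; the
operand has to be widened before shifting. In miniature:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int busy = 1;			/* !is_eng_idle in the driver */
	int engine_bit = 40;		/* an engine ID above bit 31 */
	uint64_t mask = 0;

	/* `busy << engine_bit` would shift a 32-bit int by 40 bits:
	 * undefined behaviour. Widen first, then shift: */
	mask |= (uint64_t)busy << engine_bit;

	printf("mask = 0x%016llx\n", (unsigned long long)mask);
	return 0;
}
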
9803 +diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
9804 +index 406cd5abfa726..56c784699eb8e 100644
9805 +--- a/drivers/misc/mic/scif/scif_rma.c
9806 ++++ b/drivers/misc/mic/scif/scif_rma.c
9807 +@@ -1384,6 +1384,8 @@ retry:
9808 + (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
9809 + pinned_pages->pages);
9810 + if (nr_pages != pinned_pages->nr_pages) {
9811 ++ if (pinned_pages->nr_pages < 0)
9812 ++ pinned_pages->nr_pages = 0;
9813 + if (try_upgrade) {
9814 + if (ulimit)
9815 + __scif_dec_pinned_vm_lock(mm, nr_pages);
9816 +@@ -1400,7 +1402,6 @@ retry:
9817 +
9818 + if (pinned_pages->nr_pages < nr_pages) {
9819 + err = -EFAULT;
9820 +- pinned_pages->nr_pages = nr_pages;
9821 + goto dec_pinned;
9822 + }
9823 +
9824 +@@ -1413,7 +1414,6 @@ dec_pinned:
9825 + __scif_dec_pinned_vm_lock(mm, nr_pages);
9826 + /* Something went wrong! Rollback */
9827 + error_unmap:
9828 +- pinned_pages->nr_pages = nr_pages;
9829 + scif_destroy_pinned_pages(pinned_pages);
9830 + *pages = NULL;
9831 + dev_dbg(scif_info.mdev.this_device,
9832 +diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
9833 +index 85942f6717c57..8aadc6055df17 100644
9834 +--- a/drivers/misc/mic/vop/vop_main.c
9835 ++++ b/drivers/misc/mic/vop/vop_main.c
9836 +@@ -320,7 +320,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
9837 + /* First assign the vring's allocated in host memory */
9838 + vqconfig = _vop_vq_config(vdev->desc) + index;
9839 + memcpy_fromio(&config, vqconfig, sizeof(config));
9840 +- _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
9841 ++ _vr_size = round_up(vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN), 4);
9842 + vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
9843 + va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
9844 + if (!va)
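
Both vop hunks round the vring size up to a 4-byte boundary before the
trailing struct _mic_vring_info is appended, so that info block can no
longer land on a misaligned address when the ring length is odd. For a
power-of-two alignment, round_up() reduces to a mask:

#include <stdio.h>

#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long vr_size = 4109;	/* e.g. an odd vring_size() result */

	/* 4109 -> 4112, the next multiple of 4 */
	printf("%lu -> %lu\n", vr_size, ROUND_UP(vr_size, 4UL));
	return 0;
}
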
9845 +diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
9846 +index 30eac172f0170..7014ffe88632e 100644
9847 +--- a/drivers/misc/mic/vop/vop_vringh.c
9848 ++++ b/drivers/misc/mic/vop/vop_vringh.c
9849 +@@ -296,7 +296,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
9850 +
9851 + num = le16_to_cpu(vqconfig[i].num);
9852 + mutex_init(&vvr->vr_mutex);
9853 +- vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
9854 ++ vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) +
9855 + sizeof(struct _mic_vring_info));
9856 + vr->va = (void *)
9857 + __get_free_pages(GFP_KERNEL | __GFP_ZERO,
9858 +@@ -308,7 +308,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
9859 + goto err;
9860 + }
9861 + vr->len = vr_size;
9862 +- vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
9863 ++ vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4);
9864 + vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
9865 + vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
9866 + DMA_BIDIRECTIONAL);
9867 +@@ -602,6 +602,7 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
9868 + size_t partlen;
9869 + bool dma = VOP_USE_DMA && vi->dma_ch;
9870 + int err = 0;
9871 ++ size_t offset = 0;
9872 +
9873 + if (dma) {
9874 + dma_alignment = 1 << vi->dma_ch->device->copy_align;
9875 +@@ -655,13 +656,20 @@ memcpy:
9876 + * We are copying to IO below and should ideally use something
9877 + * like copy_from_user_toio(..) if it existed.
9878 + */
9879 +- if (copy_from_user((void __force *)dbuf, ubuf, len)) {
9880 +- err = -EFAULT;
9881 +- dev_err(vop_dev(vdev), "%s %d err %d\n",
9882 +- __func__, __LINE__, err);
9883 +- goto err;
9884 ++ while (len) {
9885 ++ partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
9886 ++
9887 ++ if (copy_from_user(vvr->buf, ubuf + offset, partlen)) {
9888 ++ err = -EFAULT;
9889 ++ dev_err(vop_dev(vdev), "%s %d err %d\n",
9890 ++ __func__, __LINE__, err);
9891 ++ goto err;
9892 ++ }
9893 ++ memcpy_toio(dbuf + offset, vvr->buf, partlen);
9894 ++ offset += partlen;
9895 ++ vdev->out_bytes += partlen;
9896 ++ len -= partlen;
9897 + }
9898 +- vdev->out_bytes += len;
9899 + err = 0;
9900 + err:
9901 + vpdev->hw_ops->unmap(vpdev, dbuf);
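
The copy path above previously issued a single copy_from_user() sized by
the whole request, which could overrun the fixed-size bounce buffer; it
now moves the data in buffer-sized slices. The same shape with plain
memcpy() standing in for copy_from_user() and memcpy_toio():

#include <stdio.h>
#include <string.h>

#define BOUNCE_SIZE 8	/* VOP_INT_DMA_BUF_SIZE in the driver */

static char bounce[BOUNCE_SIZE];

static void copy_in_chunks(char *dst, const char *src, size_t len)
{
	size_t offset = 0;

	while (len) {
		size_t partlen = len < BOUNCE_SIZE ? len : BOUNCE_SIZE;

		memcpy(bounce, src + offset, partlen);	/* copy_from_user() */
		memcpy(dst + offset, bounce, partlen);	/* memcpy_toio() */
		offset += partlen;
		len -= partlen;
	}
}

int main(void)
{
	char dst[33] = { 0 };

	copy_in_chunks(dst, "a message longer than the buffer", 32);
	printf("%s\n", dst);
	return 0;
}
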
9902 +diff --git a/drivers/misc/ocxl/Kconfig b/drivers/misc/ocxl/Kconfig
9903 +index 2d2266c1439ef..51b51f3774701 100644
9904 +--- a/drivers/misc/ocxl/Kconfig
9905 ++++ b/drivers/misc/ocxl/Kconfig
9906 +@@ -9,9 +9,8 @@ config OCXL_BASE
9907 +
9908 + config OCXL
9909 + tristate "OpenCAPI coherent accelerator support"
9910 +- depends on PPC_POWERNV && PCI && EEH
9911 ++ depends on PPC_POWERNV && PCI && EEH && HOTPLUG_PCI_POWERNV
9912 + select OCXL_BASE
9913 +- select HOTPLUG_PCI_POWERNV
9914 + default m
9915 + help
9916 + Select this option to enable the ocxl driver for Open
9917 +diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
9918 +index 8531ae7811956..c49065887e8f5 100644
9919 +--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
9920 ++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
9921 +@@ -657,8 +657,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
9922 + if (retval < (int)produce_q->kernel_if->num_pages) {
9923 + pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
9924 + retval);
9925 +- qp_release_pages(produce_q->kernel_if->u.h.header_page,
9926 +- retval, false);
9927 ++ if (retval > 0)
9928 ++ qp_release_pages(produce_q->kernel_if->u.h.header_page,
9929 ++ retval, false);
9930 + err = VMCI_ERROR_NO_MEM;
9931 + goto out;
9932 + }
9933 +@@ -670,8 +671,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
9934 + if (retval < (int)consume_q->kernel_if->num_pages) {
9935 + pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
9936 + retval);
9937 +- qp_release_pages(consume_q->kernel_if->u.h.header_page,
9938 +- retval, false);
9939 ++ if (retval > 0)
9940 ++ qp_release_pages(consume_q->kernel_if->u.h.header_page,
9941 ++ retval, false);
9942 + qp_release_pages(produce_q->kernel_if->u.h.header_page,
9943 + produce_q->kernel_if->num_pages, false);
9944 + err = VMCI_ERROR_NO_MEM;
9945 +diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
9946 +index e0655278c5c32..3efaa9534a777 100644
9947 +--- a/drivers/mmc/core/sdio_cis.c
9948 ++++ b/drivers/mmc/core/sdio_cis.c
9949 +@@ -26,6 +26,9 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
9950 + unsigned i, nr_strings;
9951 + char **buffer, *string;
9952 +
9953 ++ if (size < 2)
9954 ++ return 0;
9955 ++
9956 + /* Find all null-terminated (including zero length) strings in
9957 + the TPLLV1_INFO field. Trailing garbage is ignored. */
9958 + buf += 2;
9959 +diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
9960 +index f350a0809f880..a808fa28cd9a1 100644
9961 +--- a/drivers/mtd/hyperbus/hbmc-am654.c
9962 ++++ b/drivers/mtd/hyperbus/hbmc-am654.c
9963 +@@ -70,7 +70,8 @@ static int am654_hbmc_probe(struct platform_device *pdev)
9964 +
9965 + platform_set_drvdata(pdev, priv);
9966 +
9967 +- ret = of_address_to_resource(np, 0, &res);
9968 ++ priv->hbdev.np = of_get_next_child(np, NULL);
9969 ++ ret = of_address_to_resource(priv->hbdev.np, 0, &res);
9970 + if (ret)
9971 + return ret;
9972 +
9973 +@@ -103,7 +104,6 @@ static int am654_hbmc_probe(struct platform_device *pdev)
9974 + priv->ctlr.dev = dev;
9975 + priv->ctlr.ops = &am654_hbmc_ops;
9976 + priv->hbdev.ctlr = &priv->ctlr;
9977 +- priv->hbdev.np = of_get_next_child(dev->of_node, NULL);
9978 + ret = hyperbus_register_device(&priv->hbdev);
9979 + if (ret) {
9980 + dev_err(dev, "failed to register controller\n");
9981 +diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
9982 +index 0f1547f09d08b..72f5c7b300790 100644
9983 +--- a/drivers/mtd/lpddr/lpddr2_nvm.c
9984 ++++ b/drivers/mtd/lpddr/lpddr2_nvm.c
9985 +@@ -393,6 +393,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
9986 + return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
9987 + }
9988 +
9989 ++static const struct mtd_info lpddr2_nvm_mtd_info = {
9990 ++ .type = MTD_RAM,
9991 ++ .writesize = 1,
9992 ++ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
9993 ++ ._read = lpddr2_nvm_read,
9994 ++ ._write = lpddr2_nvm_write,
9995 ++ ._erase = lpddr2_nvm_erase,
9996 ++ ._unlock = lpddr2_nvm_unlock,
9997 ++ ._lock = lpddr2_nvm_lock,
9998 ++};
9999 ++
10000 + /*
10001 + * lpddr2_nvm driver probe method
10002 + */
10003 +@@ -433,6 +444,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
10004 + .pfow_base = OW_BASE_ADDRESS,
10005 + .fldrv_priv = pcm_data,
10006 + };
10007 ++
10008 + if (IS_ERR(map->virt))
10009 + return PTR_ERR(map->virt);
10010 +
10011 +@@ -444,22 +456,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
10012 + return PTR_ERR(pcm_data->ctl_regs);
10013 +
10014 + /* Populate mtd_info data structure */
10015 +- *mtd = (struct mtd_info) {
10016 +- .dev = { .parent = &pdev->dev },
10017 +- .name = pdev->dev.init_name,
10018 +- .type = MTD_RAM,
10019 +- .priv = map,
10020 +- .size = resource_size(add_range),
10021 +- .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width,
10022 +- .writesize = 1,
10023 +- .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width,
10024 +- .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
10025 +- ._read = lpddr2_nvm_read,
10026 +- ._write = lpddr2_nvm_write,
10027 +- ._erase = lpddr2_nvm_erase,
10028 +- ._unlock = lpddr2_nvm_unlock,
10029 +- ._lock = lpddr2_nvm_lock,
10030 +- };
10031 ++ *mtd = lpddr2_nvm_mtd_info;
10032 ++ mtd->dev.parent = &pdev->dev;
10033 ++ mtd->name = pdev->dev.init_name;
10034 ++ mtd->priv = map;
10035 ++ mtd->size = resource_size(add_range);
10036 ++ mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width;
10037 ++ mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width;
10038 +
10039 + /* Verify the presence of the device looking for PFOW string */
10040 + if (!lpddr2_nvm_pfow_present(map)) {
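
The lpddr2_nvm change swaps a large compound-literal assignment for a
static const template: the invariant mtd_info fields live in rodata and
the probe copies the template, then fills in only the per-device fields.
The pattern, stripped down:

#include <stdio.h>

struct mtd_info { int type; int writesize; const char *name; };

static const struct mtd_info mtd_template = {
	.type = 1,		/* the fixed fields, set once */
	.writesize = 1,
};

int main(void)
{
	struct mtd_info mtd = mtd_template;	/* copy invariant fields */

	mtd.name = "lpddr2_nvm";		/* then the per-device ones */
	printf("%s type=%d writesize=%d\n", mtd.name, mtd.type, mtd.writesize);
	return 0;
}
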
10041 +diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
10042 +index 4ced68be7ed7e..774970bfcf859 100644
10043 +--- a/drivers/mtd/mtdoops.c
10044 ++++ b/drivers/mtd/mtdoops.c
10045 +@@ -279,12 +279,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
10046 + kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
10047 + record_size - MTDOOPS_HEADER_SIZE, NULL);
10048 +
10049 +- /* Panics must be written immediately */
10050 +- if (reason != KMSG_DUMP_OOPS)
10051 ++ if (reason != KMSG_DUMP_OOPS) {
10052 ++ /* Panics must be written immediately */
10053 + mtdoops_write(cxt, 1);
10054 +-
10055 +- /* For other cases, schedule work to write it "nicely" */
10056 +- schedule_work(&cxt->work_write);
10057 ++ } else {
10058 ++ /* For other cases, schedule work to write it "nicely" */
10059 ++ schedule_work(&cxt->work_write);
10060 ++ }
10061 + }
10062 +
10063 + static void mtdoops_notify_add(struct mtd_info *mtd)
10064 +diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
10065 +index 3711e7a0436cd..b3390028c6bfb 100644
10066 +--- a/drivers/mtd/nand/raw/ams-delta.c
10067 ++++ b/drivers/mtd/nand/raw/ams-delta.c
10068 +@@ -400,12 +400,14 @@ static int gpio_nand_remove(struct platform_device *pdev)
10069 + return 0;
10070 + }
10071 +
10072 ++#ifdef CONFIG_OF
10073 + static const struct of_device_id gpio_nand_of_id_table[] = {
10074 + {
10075 + /* sentinel */
10076 + },
10077 + };
10078 + MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
10079 ++#endif
10080 +
10081 + static const struct platform_device_id gpio_nand_plat_id_table[] = {
10082 + {
10083 +diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
10084 +index 65c9d17b25a3c..dce6d7a10a364 100644
10085 +--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
10086 ++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
10087 +@@ -1791,7 +1791,7 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
10088 + return ret;
10089 + }
10090 +
10091 +- if (cs > FMC2_MAX_CE) {
10092 ++ if (cs >= FMC2_MAX_CE) {
10093 + dev_err(nfc->dev, "invalid reg value: %d\n", cs);
10094 + return -EINVAL;
10095 + }
10096 +diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
10097 +index 7248c59011836..fcca45e2abe20 100644
10098 +--- a/drivers/mtd/nand/raw/vf610_nfc.c
10099 ++++ b/drivers/mtd/nand/raw/vf610_nfc.c
10100 +@@ -852,8 +852,10 @@ static int vf610_nfc_probe(struct platform_device *pdev)
10101 + }
10102 +
10103 + of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
10104 +- if (!of_id)
10105 +- return -ENODEV;
10106 ++ if (!of_id) {
10107 ++ err = -ENODEV;
10108 ++ goto err_disable_clk;
10109 ++ }
10110 +
10111 + nfc->variant = (enum vf610_nfc_variant)of_id->data;
10112 +
10113 +diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
10114 +index d219c970042a2..0b7667e60780f 100644
10115 +--- a/drivers/mtd/nand/spi/gigadevice.c
10116 ++++ b/drivers/mtd/nand/spi/gigadevice.c
10117 +@@ -21,7 +21,7 @@
10118 + #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
10119 +
10120 + static SPINAND_OP_VARIANTS(read_cache_variants,
10121 +- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
10122 ++ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
10123 + SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
10124 + SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
10125 + SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
10126 +@@ -29,7 +29,7 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
10127 + SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
10128 +
10129 + static SPINAND_OP_VARIANTS(read_cache_variants_f,
10130 +- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
10131 ++ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
10132 + SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
10133 + SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
10134 + SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
10135 +@@ -202,7 +202,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
10136 + SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
10137 + &write_cache_variants,
10138 + &update_cache_variants),
10139 +- 0,
10140 ++ SPINAND_HAS_QE_BIT,
10141 + SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
10142 + gd5fxgq4xa_ecc_get_status)),
10143 + SPINAND_INFO("GD5F2GQ4xA",
10144 +@@ -212,7 +212,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
10145 + SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
10146 + &write_cache_variants,
10147 + &update_cache_variants),
10148 +- 0,
10149 ++ SPINAND_HAS_QE_BIT,
10150 + SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
10151 + gd5fxgq4xa_ecc_get_status)),
10152 + SPINAND_INFO("GD5F4GQ4xA",
10153 +@@ -222,7 +222,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
10154 + SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
10155 + &write_cache_variants,
10156 + &update_cache_variants),
10157 +- 0,
10158 ++ SPINAND_HAS_QE_BIT,
10159 + SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
10160 + gd5fxgq4xa_ecc_get_status)),
10161 + SPINAND_INFO("GD5F1GQ4UExxG",
10162 +@@ -232,7 +232,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
10163 + SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
10164 + &write_cache_variants,
10165 + &update_cache_variants),
10166 +- 0,
10167 ++ SPINAND_HAS_QE_BIT,
10168 + SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
10169 + gd5fxgq4uexxg_ecc_get_status)),
10170 + SPINAND_INFO("GD5F1GQ4UFxxG",
10171 +@@ -242,7 +242,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
10172 + SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
10173 + &write_cache_variants,
10174 + &update_cache_variants),
10175 +- 0,
10176 ++ SPINAND_HAS_QE_BIT,
10177 + SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
10178 + gd5fxgq4ufxxg_ecc_get_status)),
10179 + };
10180 +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
10181 +index 94d10ec954a05..2ac7a667bde35 100644
10182 +--- a/drivers/net/can/flexcan.c
10183 ++++ b/drivers/net/can/flexcan.c
10184 +@@ -1260,18 +1260,23 @@ static int flexcan_chip_start(struct net_device *dev)
10185 + return err;
10186 + }
10187 +
10188 +-/* flexcan_chip_stop
10189 ++/* __flexcan_chip_stop
10190 + *
10191 +- * this functions is entered with clocks enabled
10192 ++ * this function is entered with clocks enabled
10193 + */
10194 +-static void flexcan_chip_stop(struct net_device *dev)
10195 ++static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
10196 + {
10197 + struct flexcan_priv *priv = netdev_priv(dev);
10198 + struct flexcan_regs __iomem *regs = priv->regs;
10199 ++ int err;
10200 +
10201 + /* freeze + disable module */
10202 +- flexcan_chip_freeze(priv);
10203 +- flexcan_chip_disable(priv);
10204 ++ err = flexcan_chip_freeze(priv);
10205 ++ if (err && !disable_on_error)
10206 ++ return err;
10207 ++ err = flexcan_chip_disable(priv);
10208 ++ if (err && !disable_on_error)
10209 ++ goto out_chip_unfreeze;
10210 +
10211 + /* Disable all interrupts */
10212 + priv->write(0, &regs->imask2);
10213 +@@ -1281,6 +1286,23 @@ static void flexcan_chip_stop(struct net_device *dev)
10214 +
10215 + flexcan_transceiver_disable(priv);
10216 + priv->can.state = CAN_STATE_STOPPED;
10217 ++
10218 ++ return 0;
10219 ++
10220 ++ out_chip_unfreeze:
10221 ++ flexcan_chip_unfreeze(priv);
10222 ++
10223 ++ return err;
10224 ++}
10225 ++
10226 ++static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev)
10227 ++{
10228 ++ return __flexcan_chip_stop(dev, true);
10229 ++}
10230 ++
10231 ++static inline int flexcan_chip_stop(struct net_device *dev)
10232 ++{
10233 ++ return __flexcan_chip_stop(dev, false);
10234 + }
10235 +
10236 + static int flexcan_open(struct net_device *dev)
10237 +@@ -1362,7 +1384,7 @@ static int flexcan_close(struct net_device *dev)
10238 +
10239 + netif_stop_queue(dev);
10240 + can_rx_offload_disable(&priv->offload);
10241 +- flexcan_chip_stop(dev);
10242 ++ flexcan_chip_stop_disable_on_error(dev);
10243 +
10244 + can_rx_offload_del(&priv->offload);
10245 + free_irq(dev->irq, dev);
10246 +diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
10247 +index 38ea5e600fb84..e6d0cb9ee02f0 100644
10248 +--- a/drivers/net/can/m_can/m_can_platform.c
10249 ++++ b/drivers/net/can/m_can/m_can_platform.c
10250 +@@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev)
10251 + struct net_device *ndev = dev_get_drvdata(dev);
10252 + struct m_can_classdev *mcan_class = netdev_priv(ndev);
10253 +
10254 +- m_can_class_suspend(dev);
10255 +-
10256 + clk_disable_unprepare(mcan_class->cclk);
10257 + clk_disable_unprepare(mcan_class->hclk);
10258 +
10259 +diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
10260 +index 7b6c0dce75360..ee433abc2d4b5 100644
10261 +--- a/drivers/net/dsa/microchip/ksz_common.c
10262 ++++ b/drivers/net/dsa/microchip/ksz_common.c
10263 +@@ -103,14 +103,8 @@ void ksz_init_mib_timer(struct ksz_device *dev)
10264 +
10265 + INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
10266 +
10267 +- /* Read MIB counters every 30 seconds to avoid overflow. */
10268 +- dev->mib_read_interval = msecs_to_jiffies(30000);
10269 +-
10270 + for (i = 0; i < dev->mib_port_cnt; i++)
10271 + dev->dev_ops->port_init_cnt(dev, i);
10272 +-
10273 +- /* Start the timer 2 seconds later. */
10274 +- schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
10275 + }
10276 + EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
10277 +
10278 +@@ -144,7 +138,9 @@ void ksz_adjust_link(struct dsa_switch *ds, int port,
10279 + /* Read all MIB counters when the link is going down. */
10280 + if (!phydev->link) {
10281 + p->read = true;
10282 +- schedule_delayed_work(&dev->mib_read, 0);
10283 ++ /* timer started */
10284 ++ if (dev->mib_read_interval)
10285 ++ schedule_delayed_work(&dev->mib_read, 0);
10286 + }
10287 + mutex_lock(&dev->dev_mutex);
10288 + if (!phydev->link)
10289 +@@ -460,6 +456,12 @@ int ksz_switch_register(struct ksz_device *dev,
10290 + return ret;
10291 + }
10292 +
10293 ++ /* Read MIB counters every 30 seconds to avoid overflow. */
10294 ++ dev->mib_read_interval = msecs_to_jiffies(30000);
10295 ++
10296 ++ /* Start the MIB timer. */
10297 ++ schedule_delayed_work(&dev->mib_read, 0);
10298 ++
10299 + return 0;
10300 + }
10301 + EXPORT_SYMBOL(ksz_switch_register);
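The ksz hunks above stop scheduling the MIB work from the early init helper and instead start it in ksz_switch_register(), with mib_read_interval doubling as a "worker started" flag so a link-down event cannot poke the work queue before registration. A self-contained sketch of that gating pattern, with all names invented for illustration:

	#include <stdio.h>

	/* Hypothetical device state; interval doubles as a "started" flag. */
	struct dev_state {
		unsigned long mib_read_interval;	/* 0 == worker not started */
	};

	static void schedule_mib_read(struct dev_state *dev)
	{
		printf("MIB read scheduled (interval %lu)\n", dev->mib_read_interval);
	}

	/* Early init: prepare the work but do NOT start it yet. */
	static void init_mib_timer(struct dev_state *dev)
	{
		dev->mib_read_interval = 0;
	}

	/* Link-down hook: only poke the worker if it was actually started. */
	static void on_link_down(struct dev_state *dev)
	{
		if (dev->mib_read_interval)
			schedule_mib_read(dev);
		else
			printf("link down before registration: nothing scheduled\n");
	}

	/* Registration: now it is safe to start the periodic work. */
	static void switch_register(struct dev_state *dev)
	{
		dev->mib_read_interval = 30000;	/* upstream: 30s in jiffies */
		schedule_mib_read(dev);
	}

	int main(void)
	{
		struct dev_state dev;

		init_mib_timer(&dev);
		on_link_down(&dev);	/* harmless before registration */
		switch_register(&dev);
		on_link_down(&dev);	/* now schedules a read */
		return 0;
	}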
10302 +diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
10303 +index 9a63b51e1d82f..6f2dab7e33d65 100644
10304 +--- a/drivers/net/dsa/realtek-smi-core.h
10305 ++++ b/drivers/net/dsa/realtek-smi-core.h
10306 +@@ -25,6 +25,9 @@ struct rtl8366_mib_counter {
10307 + const char *name;
10308 + };
10309 +
10310 ++/**
10311 ++ * struct rtl8366_vlan_mc - Virtual LAN member configuration
10312 ++ */
10313 + struct rtl8366_vlan_mc {
10314 + u16 vid;
10315 + u16 untag;
10316 +@@ -119,7 +122,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi);
10317 + int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
10318 + int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
10319 + u32 untag, u32 fid);
10320 +-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
10321 + int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
10322 + unsigned int vid);
10323 + int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
10324 +diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
10325 +index 99cdb2f18fa2f..49c626a336803 100644
10326 +--- a/drivers/net/dsa/rtl8366.c
10327 ++++ b/drivers/net/dsa/rtl8366.c
10328 +@@ -36,12 +36,113 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
10329 + }
10330 + EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
10331 +
10332 ++/**
10333 ++ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
10334 ++ * @smi: the Realtek SMI device instance
10335 ++ * @vid: the VLAN ID to look up or allocate
10336 ++ * @vlanmc: the pointer will be assigned to a pointer to a valid member config
10337 ++ * if successful
10338 ++ * @return: index of a new member config or negative error number
10339 ++ */
10340 ++static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
10341 ++ struct rtl8366_vlan_mc *vlanmc)
10342 ++{
10343 ++ struct rtl8366_vlan_4k vlan4k;
10344 ++ int ret;
10345 ++ int i;
10346 ++
10347 ++ /* Try to find an existing member config entry for this VID */
10348 ++ for (i = 0; i < smi->num_vlan_mc; i++) {
10349 ++ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
10350 ++ if (ret) {
10351 ++ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
10352 ++ i, vid);
10353 ++ return ret;
10354 ++ }
10355 ++
10356 ++ if (vid == vlanmc->vid)
10357 ++ return i;
10358 ++ }
10359 ++
10360 ++ /* We have no MC entry for this VID, try to find an empty one */
10361 ++ for (i = 0; i < smi->num_vlan_mc; i++) {
10362 ++ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
10363 ++ if (ret) {
10364 ++ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
10365 ++ i, vid);
10366 ++ return ret;
10367 ++ }
10368 ++
10369 ++ if (vlanmc->vid == 0 && vlanmc->member == 0) {
10370 ++ /* Update the entry from the 4K table */
10371 ++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
10372 ++ if (ret) {
10373 ++ dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
10374 ++ i, vid);
10375 ++ return ret;
10376 ++ }
10377 ++
10378 ++ vlanmc->vid = vid;
10379 ++ vlanmc->member = vlan4k.member;
10380 ++ vlanmc->untag = vlan4k.untag;
10381 ++ vlanmc->fid = vlan4k.fid;
10382 ++ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
10383 ++ if (ret) {
10384 ++ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
10385 ++ i, vid);
10386 ++ return ret;
10387 ++ }
10388 ++
10389 ++ dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
10390 ++ i, vid);
10391 ++ return i;
10392 ++ }
10393 ++ }
10394 ++
10395 ++ /* MC table is full, try to find an unused entry and replace it */
10396 ++ for (i = 0; i < smi->num_vlan_mc; i++) {
10397 ++ int used;
10398 ++
10399 ++ ret = rtl8366_mc_is_used(smi, i, &used);
10400 ++ if (ret)
10401 ++ return ret;
10402 ++
10403 ++ if (!used) {
10404 ++ /* Update the entry from the 4K table */
10405 ++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
10406 ++ if (ret)
10407 ++ return ret;
10408 ++
10409 ++ vlanmc->vid = vid;
10410 ++ vlanmc->member = vlan4k.member;
10411 ++ vlanmc->untag = vlan4k.untag;
10412 ++ vlanmc->fid = vlan4k.fid;
10413 ++ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
10414 ++ if (ret) {
10415 ++ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
10416 ++ i, vid);
10417 ++ return ret;
10418 ++ }
10419 ++ dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
10420 ++ i, vid);
10421 ++ return i;
10422 ++ }
10423 ++ }
10424 ++
10425 ++ dev_err(smi->dev, "all VLAN member configurations are in use\n");
10426 ++ return -ENOSPC;
10427 ++}
10428 ++
10429 + int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
10430 + u32 untag, u32 fid)
10431 + {
10432 ++ struct rtl8366_vlan_mc vlanmc;
10433 + struct rtl8366_vlan_4k vlan4k;
10434 ++ int mc;
10435 + int ret;
10436 +- int i;
10437 ++
10438 ++ if (!smi->ops->is_vlan_valid(smi, vid))
10439 ++ return -EINVAL;
10440 +
10441 + dev_dbg(smi->dev,
10442 + "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
10443 +@@ -63,133 +164,58 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
10444 + "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
10445 + vid, vlan4k.member, vlan4k.untag);
10446 +
10447 +- /* Try to find an existing MC entry for this VID */
10448 +- for (i = 0; i < smi->num_vlan_mc; i++) {
10449 +- struct rtl8366_vlan_mc vlanmc;
10450 +-
10451 +- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
10452 +- if (ret)
10453 +- return ret;
10454 +-
10455 +- if (vid == vlanmc.vid) {
10456 +- /* update the MC entry */
10457 +- vlanmc.member |= member;
10458 +- vlanmc.untag |= untag;
10459 +- vlanmc.fid = fid;
10460 +-
10461 +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
10462 ++ /* Find or allocate a member config for this VID */
10463 ++ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
10464 ++ if (ret < 0)
10465 ++ return ret;
10466 ++ mc = ret;
10467 +
10468 +- dev_dbg(smi->dev,
10469 +- "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
10470 +- vid, vlanmc.member, vlanmc.untag);
10471 ++ /* Update the MC entry */
10472 ++ vlanmc.member |= member;
10473 ++ vlanmc.untag |= untag;
10474 ++ vlanmc.fid = fid;
10475 +
10476 +- break;
10477 +- }
10478 +- }
10479 ++ /* Commit updates to the MC entry */
10480 ++ ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
10481 ++ if (ret)
10482 ++ dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
10483 ++ mc, vid);
10484 ++ else
10485 ++ dev_dbg(smi->dev,
10486 ++ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
10487 ++ vid, vlanmc.member, vlanmc.untag);
10488 +
10489 + return ret;
10490 + }
10491 + EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
10492 +
10493 +-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
10494 +-{
10495 +- struct rtl8366_vlan_mc vlanmc;
10496 +- int ret;
10497 +- int index;
10498 +-
10499 +- ret = smi->ops->get_mc_index(smi, port, &index);
10500 +- if (ret)
10501 +- return ret;
10502 +-
10503 +- ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
10504 +- if (ret)
10505 +- return ret;
10506 +-
10507 +- *val = vlanmc.vid;
10508 +- return 0;
10509 +-}
10510 +-EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
10511 +-
10512 + int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
10513 + unsigned int vid)
10514 + {
10515 + struct rtl8366_vlan_mc vlanmc;
10516 +- struct rtl8366_vlan_4k vlan4k;
10517 ++ int mc;
10518 + int ret;
10519 +- int i;
10520 +-
10521 +- /* Try to find an existing MC entry for this VID */
10522 +- for (i = 0; i < smi->num_vlan_mc; i++) {
10523 +- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
10524 +- if (ret)
10525 +- return ret;
10526 +-
10527 +- if (vid == vlanmc.vid) {
10528 +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
10529 +- if (ret)
10530 +- return ret;
10531 +-
10532 +- ret = smi->ops->set_mc_index(smi, port, i);
10533 +- return ret;
10534 +- }
10535 +- }
10536 +-
10537 +- /* We have no MC entry for this VID, try to find an empty one */
10538 +- for (i = 0; i < smi->num_vlan_mc; i++) {
10539 +- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
10540 +- if (ret)
10541 +- return ret;
10542 +-
10543 +- if (vlanmc.vid == 0 && vlanmc.member == 0) {
10544 +- /* Update the entry from the 4K table */
10545 +- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
10546 +- if (ret)
10547 +- return ret;
10548 +
10549 +- vlanmc.vid = vid;
10550 +- vlanmc.member = vlan4k.member;
10551 +- vlanmc.untag = vlan4k.untag;
10552 +- vlanmc.fid = vlan4k.fid;
10553 +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
10554 +- if (ret)
10555 +- return ret;
10556 +-
10557 +- ret = smi->ops->set_mc_index(smi, port, i);
10558 +- return ret;
10559 +- }
10560 +- }
10561 +-
10562 +- /* MC table is full, try to find an unused entry and replace it */
10563 +- for (i = 0; i < smi->num_vlan_mc; i++) {
10564 +- int used;
10565 +-
10566 +- ret = rtl8366_mc_is_used(smi, i, &used);
10567 +- if (ret)
10568 +- return ret;
10569 +-
10570 +- if (!used) {
10571 +- /* Update the entry from the 4K table */
10572 +- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
10573 +- if (ret)
10574 +- return ret;
10575 ++ if (!smi->ops->is_vlan_valid(smi, vid))
10576 ++ return -EINVAL;
10577 +
10578 +- vlanmc.vid = vid;
10579 +- vlanmc.member = vlan4k.member;
10580 +- vlanmc.untag = vlan4k.untag;
10581 +- vlanmc.fid = vlan4k.fid;
10582 +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
10583 +- if (ret)
10584 +- return ret;
10585 ++ /* Find or allocate a member config for this VID */
10586 ++ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
10587 ++ if (ret < 0)
10588 ++ return ret;
10589 ++ mc = ret;
10590 +
10591 +- ret = smi->ops->set_mc_index(smi, port, i);
10592 +- return ret;
10593 +- }
10594 ++ ret = smi->ops->set_mc_index(smi, port, mc);
10595 ++ if (ret) {
10596 ++ dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
10597 ++ mc, port);
10598 ++ return ret;
10599 + }
10600 +
10601 +- dev_err(smi->dev,
10602 +- "all VLAN member configurations are in use\n");
10603 ++ dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
10604 ++ port, vid, mc);
10605 +
10606 +- return -ENOSPC;
10607 ++ return 0;
10608 + }
10609 + EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
10610 +
10611 +@@ -389,7 +415,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
10612 + if (!smi->ops->is_vlan_valid(smi, vid))
10613 + return;
10614 +
10615 +- dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
10616 ++ dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
10617 ++ vlan->vid_begin,
10618 + port,
10619 + untagged ? "untagged" : "tagged",
10620 + pvid ? " PVID" : "no PVID");
10621 +@@ -398,34 +425,29 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
10622 + dev_err(smi->dev, "port is DSA or CPU port\n");
10623 +
10624 + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
10625 +- int pvid_val = 0;
10626 +-
10627 +- dev_info(smi->dev, "add VLAN %04x\n", vid);
10628 + member |= BIT(port);
10629 +
10630 + if (untagged)
10631 + untag |= BIT(port);
10632 +
10633 +- /* To ensure that we have a valid MC entry for this VLAN,
10634 +- * initialize the port VLAN ID here.
10635 +- */
10636 +- ret = rtl8366_get_pvid(smi, port, &pvid_val);
10637 +- if (ret < 0) {
10638 +- dev_err(smi->dev, "could not lookup PVID for port %d\n",
10639 +- port);
10640 +- return;
10641 +- }
10642 +- if (pvid_val == 0) {
10643 +- ret = rtl8366_set_pvid(smi, port, vid);
10644 +- if (ret < 0)
10645 +- return;
10646 +- }
10647 +-
10648 + ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
10649 + if (ret)
10650 + dev_err(smi->dev,
10651 + "failed to set up VLAN %04x",
10652 + vid);
10653 ++
10654 ++ if (!pvid)
10655 ++ continue;
10656 ++
10657 ++ ret = rtl8366_set_pvid(smi, port, vid);
10658 ++ if (ret)
10659 ++ dev_err(smi->dev,
10660 ++ "failed to set PVID on port %d to VLAN %04x",
10661 ++ port, vid);
10662 ++
10663 ++ if (!ret)
10664 ++ dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
10665 ++ vid, port);
10666 + }
10667 + }
10668 + EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
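The new rtl8366_obtain_mc() helper above deduplicates a three-pass search that both set_vlan and set_pvid previously open-coded: reuse an existing entry for the VID, else claim an empty slot, else recycle a slot no port references, else fail with -ENOSPC. A compilable toy version of the same find-or-allocate-or-recycle strategy over a fixed table (slot layout and names are invented, not the driver's):

	#include <stdio.h>
	#include <errno.h>

	#define NUM_SLOTS 4

	/* Hypothetical member-config slot: vid 0 with no members == free. */
	struct slot {
		int vid;
		unsigned member;
		int in_use_elsewhere;	/* stand-in for rtl8366_mc_is_used() */
	};

	static struct slot table[NUM_SLOTS];

	/*
	 * Same three-pass strategy as rtl8366_obtain_mc():
	 * 1) return an existing entry for this VID,
	 * 2) else claim an empty entry,
	 * 3) else recycle an entry no port references.
	 */
	static int obtain_slot(int vid)
	{
		int i;

		for (i = 0; i < NUM_SLOTS; i++)
			if (table[i].vid == vid)
				return i;

		for (i = 0; i < NUM_SLOTS; i++)
			if (table[i].vid == 0 && table[i].member == 0) {
				table[i].vid = vid;
				return i;
			}

		for (i = 0; i < NUM_SLOTS; i++)
			if (!table[i].in_use_elsewhere) {
				table[i].vid = vid;
				table[i].member = 0;
				return i;
			}

		return -ENOSPC;	/* every slot is referenced: give up */
	}

	int main(void)
	{
		printf("vid 10 -> slot %d\n", obtain_slot(10));
		printf("vid 20 -> slot %d\n", obtain_slot(20));
		printf("vid 10 -> slot %d (reused)\n", obtain_slot(10));
		return 0;
	}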
10669 +diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
10670 +index fd1977590cb4b..c83b332656a4b 100644
10671 +--- a/drivers/net/dsa/rtl8366rb.c
10672 ++++ b/drivers/net/dsa/rtl8366rb.c
10673 +@@ -1270,7 +1270,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
10674 + if (smi->vlan4k_enabled)
10675 + max = RTL8366RB_NUM_VIDS - 1;
10676 +
10677 +- if (vlan == 0 || vlan >= max)
10678 ++ if (vlan == 0 || vlan > max)
10679 + return false;
10680 +
10681 + return true;
10682 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
10683 +index 59b65d4db086e..dff564e1cfc7f 100644
10684 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
10685 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
10686 +@@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[] = {
10687 + PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
10688 + };
10689 +
10690 ++static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
10691 ++ /* Default supported NAT modes */
10692 ++ {
10693 ++ .chip = CHELSIO_T5,
10694 ++ .flags = CXGB4_ACTION_NATMODE_NONE,
10695 ++ .natmode = NAT_MODE_NONE,
10696 ++ },
10697 ++ {
10698 ++ .chip = CHELSIO_T5,
10699 ++ .flags = CXGB4_ACTION_NATMODE_DIP,
10700 ++ .natmode = NAT_MODE_DIP,
10701 ++ },
10702 ++ {
10703 ++ .chip = CHELSIO_T5,
10704 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
10705 ++ .natmode = NAT_MODE_DIP_DP,
10706 ++ },
10707 ++ {
10708 ++ .chip = CHELSIO_T5,
10709 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
10710 ++ CXGB4_ACTION_NATMODE_SIP,
10711 ++ .natmode = NAT_MODE_DIP_DP_SIP,
10712 ++ },
10713 ++ {
10714 ++ .chip = CHELSIO_T5,
10715 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
10716 ++ CXGB4_ACTION_NATMODE_SPORT,
10717 ++ .natmode = NAT_MODE_DIP_DP_SP,
10718 ++ },
10719 ++ {
10720 ++ .chip = CHELSIO_T5,
10721 ++ .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
10722 ++ .natmode = NAT_MODE_SIP_SP,
10723 ++ },
10724 ++ {
10725 ++ .chip = CHELSIO_T5,
10726 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
10727 ++ CXGB4_ACTION_NATMODE_SPORT,
10728 ++ .natmode = NAT_MODE_DIP_SIP_SP,
10729 ++ },
10730 ++ {
10731 ++ .chip = CHELSIO_T5,
10732 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
10733 ++ CXGB4_ACTION_NATMODE_DPORT |
10734 ++ CXGB4_ACTION_NATMODE_SPORT,
10735 ++ .natmode = NAT_MODE_ALL,
10736 ++ },
10737 ++ /* T6+ can ignore L4 ports when they're disabled. */
10738 ++ {
10739 ++ .chip = CHELSIO_T6,
10740 ++ .flags = CXGB4_ACTION_NATMODE_SIP,
10741 ++ .natmode = NAT_MODE_SIP_SP,
10742 ++ },
10743 ++ {
10744 ++ .chip = CHELSIO_T6,
10745 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
10746 ++ .natmode = NAT_MODE_DIP_DP_SP,
10747 ++ },
10748 ++ {
10749 ++ .chip = CHELSIO_T6,
10750 ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
10751 ++ .natmode = NAT_MODE_ALL,
10752 ++ },
10753 ++};
10754 ++
10755 ++static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
10756 ++ u8 natmode_flags)
10757 ++{
10758 ++ u8 i = 0;
10759 ++
10760 ++ /* Translate the enabled NAT 4-tuple fields to one of the
10761 ++ * hardware supported NAT mode configurations. This ensures
10762 ++ * that we pick a valid combination, where the disabled fields
10763 ++ * do not get overwritten to 0.
10764 ++ */
10765 ++ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
10766 ++ if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
10767 ++ fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
10768 ++ return;
10769 ++ }
10770 ++ }
10771 ++}
10772 ++
10773 + static struct ch_tc_flower_entry *allocate_flower_entry(void)
10774 + {
10775 + struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
10776 +@@ -287,7 +370,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
10777 + }
10778 +
10779 + static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
10780 +- u32 mask, u32 offset, u8 htype)
10781 ++ u32 mask, u32 offset, u8 htype,
10782 ++ u8 *natmode_flags)
10783 + {
10784 + switch (htype) {
10785 + case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
10786 +@@ -312,60 +396,94 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
10787 + switch (offset) {
10788 + case PEDIT_IP4_SRC:
10789 + offload_pedit(fs, val, mask, IP4_SRC);
10790 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
10791 + break;
10792 + case PEDIT_IP4_DST:
10793 + offload_pedit(fs, val, mask, IP4_DST);
10794 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
10795 + }
10796 +- fs->nat_mode = NAT_MODE_ALL;
10797 + break;
10798 + case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
10799 + switch (offset) {
10800 + case PEDIT_IP6_SRC_31_0:
10801 + offload_pedit(fs, val, mask, IP6_SRC_31_0);
10802 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
10803 + break;
10804 + case PEDIT_IP6_SRC_63_32:
10805 + offload_pedit(fs, val, mask, IP6_SRC_63_32);
10806 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
10807 + break;
10808 + case PEDIT_IP6_SRC_95_64:
10809 + offload_pedit(fs, val, mask, IP6_SRC_95_64);
10810 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
10811 + break;
10812 + case PEDIT_IP6_SRC_127_96:
10813 + offload_pedit(fs, val, mask, IP6_SRC_127_96);
10814 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
10815 + break;
10816 + case PEDIT_IP6_DST_31_0:
10817 + offload_pedit(fs, val, mask, IP6_DST_31_0);
10818 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
10819 + break;
10820 + case PEDIT_IP6_DST_63_32:
10821 + offload_pedit(fs, val, mask, IP6_DST_63_32);
10822 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
10823 + break;
10824 + case PEDIT_IP6_DST_95_64:
10825 + offload_pedit(fs, val, mask, IP6_DST_95_64);
10826 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
10827 + break;
10828 + case PEDIT_IP6_DST_127_96:
10829 + offload_pedit(fs, val, mask, IP6_DST_127_96);
10830 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
10831 + }
10832 +- fs->nat_mode = NAT_MODE_ALL;
10833 + break;
10834 + case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
10835 + switch (offset) {
10836 + case PEDIT_TCP_SPORT_DPORT:
10837 +- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
10838 ++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
10839 + fs->nat_fport = val;
10840 +- else
10841 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
10842 ++ } else {
10843 + fs->nat_lport = val >> 16;
10844 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
10845 ++ }
10846 + }
10847 +- fs->nat_mode = NAT_MODE_ALL;
10848 + break;
10849 + case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
10850 + switch (offset) {
10851 + case PEDIT_UDP_SPORT_DPORT:
10852 +- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
10853 ++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
10854 + fs->nat_fport = val;
10855 +- else
10856 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
10857 ++ } else {
10858 + fs->nat_lport = val >> 16;
10859 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
10860 ++ }
10861 + }
10862 +- fs->nat_mode = NAT_MODE_ALL;
10863 ++ break;
10864 ++ }
10865 ++}
10866 ++
10867 ++static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
10868 ++ struct netlink_ext_ack *extack)
10869 ++{
10870 ++ u8 i = 0;
10871 ++
10872 ++ /* Extract the NAT mode to enable based on what 4-tuple fields
10873 ++ * are enabled to be overwritten. This ensures that the
10874 ++ * disabled fields don't get overwritten to 0.
10875 ++ */
10876 ++ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
10877 ++ const struct cxgb4_natmode_config *c;
10878 ++
10879 ++ c = &cxgb4_natmode_config_array[i];
10880 ++ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
10881 ++ natmode_flags == c->flags)
10882 ++ return 0;
10883 + }
10884 ++ NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
10885 ++ return -EOPNOTSUPP;
10886 + }
10887 +
10888 + void cxgb4_process_flow_actions(struct net_device *in,
10889 +@@ -373,6 +491,7 @@ void cxgb4_process_flow_actions(struct net_device *in,
10890 + struct ch_filter_specification *fs)
10891 + {
10892 + struct flow_action_entry *act;
10893 ++ u8 natmode_flags = 0;
10894 + int i;
10895 +
10896 + flow_action_for_each(i, act, actions) {
10897 +@@ -423,13 +542,17 @@ void cxgb4_process_flow_actions(struct net_device *in,
10898 + val = act->mangle.val;
10899 + offset = act->mangle.offset;
10900 +
10901 +- process_pedit_field(fs, val, mask, offset, htype);
10902 ++ process_pedit_field(fs, val, mask, offset, htype,
10903 ++ &natmode_flags);
10904 + }
10905 + break;
10906 + default:
10907 + break;
10908 + }
10909 + }
10910 ++ if (natmode_flags)
10911 ++ cxgb4_action_natmode_tweak(fs, natmode_flags);
10912 ++
10913 + }
10914 +
10915 + static bool valid_l4_mask(u32 mask)
10916 +@@ -446,7 +569,8 @@ static bool valid_l4_mask(u32 mask)
10917 + }
10918 +
10919 + static bool valid_pedit_action(struct net_device *dev,
10920 +- const struct flow_action_entry *act)
10921 ++ const struct flow_action_entry *act,
10922 ++ u8 *natmode_flags)
10923 + {
10924 + u32 mask, offset;
10925 + u8 htype;
10926 +@@ -471,7 +595,10 @@ static bool valid_pedit_action(struct net_device *dev,
10927 + case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
10928 + switch (offset) {
10929 + case PEDIT_IP4_SRC:
10930 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
10931 ++ break;
10932 + case PEDIT_IP4_DST:
10933 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
10934 + break;
10935 + default:
10936 + netdev_err(dev, "%s: Unsupported pedit field\n",
10937 +@@ -485,10 +612,13 @@ static bool valid_pedit_action(struct net_device *dev,
10938 + case PEDIT_IP6_SRC_63_32:
10939 + case PEDIT_IP6_SRC_95_64:
10940 + case PEDIT_IP6_SRC_127_96:
10941 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
10942 ++ break;
10943 + case PEDIT_IP6_DST_31_0:
10944 + case PEDIT_IP6_DST_63_32:
10945 + case PEDIT_IP6_DST_95_64:
10946 + case PEDIT_IP6_DST_127_96:
10947 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
10948 + break;
10949 + default:
10950 + netdev_err(dev, "%s: Unsupported pedit field\n",
10951 +@@ -504,6 +634,10 @@ static bool valid_pedit_action(struct net_device *dev,
10952 + __func__);
10953 + return false;
10954 + }
10955 ++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
10956 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
10957 ++ else
10958 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
10959 + break;
10960 + default:
10961 + netdev_err(dev, "%s: Unsupported pedit field\n",
10962 +@@ -519,6 +653,10 @@ static bool valid_pedit_action(struct net_device *dev,
10963 + __func__);
10964 + return false;
10965 + }
10966 ++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
10967 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
10968 ++ else
10969 ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
10970 + break;
10971 + default:
10972 + netdev_err(dev, "%s: Unsupported pedit field\n",
10973 +@@ -537,10 +675,12 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
10974 + struct flow_action *actions,
10975 + struct netlink_ext_ack *extack)
10976 + {
10977 ++ struct adapter *adap = netdev2adap(dev);
10978 + struct flow_action_entry *act;
10979 + bool act_redir = false;
10980 + bool act_pedit = false;
10981 + bool act_vlan = false;
10982 ++ u8 natmode_flags = 0;
10983 + int i;
10984 +
10985 + if (!flow_action_basic_hw_stats_check(actions, extack))
10986 +@@ -553,7 +693,6 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
10987 + /* Do nothing */
10988 + break;
10989 + case FLOW_ACTION_REDIRECT: {
10990 +- struct adapter *adap = netdev2adap(dev);
10991 + struct net_device *n_dev, *target_dev;
10992 + unsigned int i;
10993 + bool found = false;
10994 +@@ -603,7 +742,8 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
10995 + }
10996 + break;
10997 + case FLOW_ACTION_MANGLE: {
10998 +- bool pedit_valid = valid_pedit_action(dev, act);
10999 ++ bool pedit_valid = valid_pedit_action(dev, act,
11000 ++ &natmode_flags);
11001 +
11002 + if (!pedit_valid)
11003 + return -EOPNOTSUPP;
11004 +@@ -622,6 +762,15 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
11005 + return -EINVAL;
11006 + }
11007 +
11008 ++ if (act_pedit) {
11009 ++ int ret;
11010 ++
11011 ++ ret = cxgb4_action_natmode_validate(adap, natmode_flags,
11012 ++ extack);
11013 ++ if (ret)
11014 ++ return ret;
11015 ++ }
11016 ++
11017 + return 0;
11018 + }
11019 +
11020 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
11021 +index 0a30c96b81ffa..95142b1a88af6 100644
11022 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
11023 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
11024 +@@ -108,6 +108,21 @@ struct ch_tc_pedit_fields {
11025 + #define PEDIT_TCP_SPORT_DPORT 0x0
11026 + #define PEDIT_UDP_SPORT_DPORT 0x0
11027 +
11028 ++enum cxgb4_action_natmode_flags {
11029 ++ CXGB4_ACTION_NATMODE_NONE = 0,
11030 ++ CXGB4_ACTION_NATMODE_DIP = (1 << 0),
11031 ++ CXGB4_ACTION_NATMODE_SIP = (1 << 1),
11032 ++ CXGB4_ACTION_NATMODE_DPORT = (1 << 2),
11033 ++ CXGB4_ACTION_NATMODE_SPORT = (1 << 3),
11034 ++};
11035 ++
11036 ++/* TC PEDIT action to NATMODE translation entry */
11037 ++struct cxgb4_natmode_config {
11038 ++ enum chip_type chip;
11039 ++ u8 flags;
11040 ++ u8 natmode;
11041 ++};
11042 ++
11043 + void cxgb4_process_flow_actions(struct net_device *in,
11044 + struct flow_action *actions,
11045 + struct ch_filter_specification *fs);
11046 +diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
11047 +index 18f3aeb88f22a..c67a16a48d624 100644
11048 +--- a/drivers/net/ethernet/cisco/enic/enic.h
11049 ++++ b/drivers/net/ethernet/cisco/enic/enic.h
11050 +@@ -169,6 +169,7 @@ struct enic {
11051 + u16 num_vfs;
11052 + #endif
11053 + spinlock_t enic_api_lock;
11054 ++ bool enic_api_busy;
11055 + struct enic_port_profile *pp;
11056 +
11057 + /* work queue cache line section */
11058 +diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
11059 +index b161f24522b87..b028ea2dec2b9 100644
11060 +--- a/drivers/net/ethernet/cisco/enic/enic_api.c
11061 ++++ b/drivers/net/ethernet/cisco/enic/enic_api.c
11062 +@@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
11063 + struct vnic_dev *vdev = enic->vdev;
11064 +
11065 + spin_lock(&enic->enic_api_lock);
11066 ++ while (enic->enic_api_busy) {
11067 ++ spin_unlock(&enic->enic_api_lock);
11068 ++ cpu_relax();
11069 ++ spin_lock(&enic->enic_api_lock);
11070 ++ }
11071 ++
11072 + spin_lock_bh(&enic->devcmd_lock);
11073 +
11074 + vnic_dev_cmd_proxy_by_index_start(vdev, vf);
11075 +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
11076 +index cd5fe4f6b54ce..21093f33d2d73 100644
11077 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c
11078 ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
11079 +@@ -2140,8 +2140,6 @@ static int enic_dev_wait(struct vnic_dev *vdev,
11080 + int done;
11081 + int err;
11082 +
11083 +- BUG_ON(in_interrupt());
11084 +-
11085 + err = start(vdev, arg);
11086 + if (err)
11087 + return err;
11088 +@@ -2329,6 +2327,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
11089 + rss_hash_bits, rss_base_cpu, rss_enable);
11090 + }
11091 +
11092 ++static void enic_set_api_busy(struct enic *enic, bool busy)
11093 ++{
11094 ++ spin_lock(&enic->enic_api_lock);
11095 ++ enic->enic_api_busy = busy;
11096 ++ spin_unlock(&enic->enic_api_lock);
11097 ++}
11098 ++
11099 + static void enic_reset(struct work_struct *work)
11100 + {
11101 + struct enic *enic = container_of(work, struct enic, reset);
11102 +@@ -2338,7 +2343,9 @@ static void enic_reset(struct work_struct *work)
11103 +
11104 + rtnl_lock();
11105 +
11106 +- spin_lock(&enic->enic_api_lock);
11107 ++ /* Stop any activity from infiniband */
11108 ++ enic_set_api_busy(enic, true);
11109 ++
11110 + enic_stop(enic->netdev);
11111 + enic_dev_soft_reset(enic);
11112 + enic_reset_addr_lists(enic);
11113 +@@ -2346,7 +2353,10 @@ static void enic_reset(struct work_struct *work)
11114 + enic_set_rss_nic_cfg(enic);
11115 + enic_dev_set_ig_vlan_rewrite_mode(enic);
11116 + enic_open(enic->netdev);
11117 +- spin_unlock(&enic->enic_api_lock);
11118 ++
11119 ++ /* Allow infiniband to fiddle with the device again */
11120 ++ enic_set_api_busy(enic, false);
11121 ++
11122 + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
11123 +
11124 + rtnl_unlock();
11125 +@@ -2358,7 +2368,9 @@ static void enic_tx_hang_reset(struct work_struct *work)
11126 +
11127 + rtnl_lock();
11128 +
11129 +- spin_lock(&enic->enic_api_lock);
11130 ++ /* Stop any activity from infiniband */
11131 ++ enic_set_api_busy(enic, true);
11132 ++
11133 + enic_dev_hang_notify(enic);
11134 + enic_stop(enic->netdev);
11135 + enic_dev_hang_reset(enic);
11136 +@@ -2367,7 +2379,10 @@ static void enic_tx_hang_reset(struct work_struct *work)
11137 + enic_set_rss_nic_cfg(enic);
11138 + enic_dev_set_ig_vlan_rewrite_mode(enic);
11139 + enic_open(enic->netdev);
11140 +- spin_unlock(&enic->enic_api_lock);
11141 ++
11142 ++ /* Allow infiniband to fiddle with the device again */
11143 ++ enic_set_api_busy(enic, false);
11144 ++
11145 + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
11146 +
11147 + rtnl_unlock();
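The enic changes above stop holding enic_api_lock across the whole reset (which can sleep in enic_stop/enic_open) and instead set a busy flag under a short-held lock, with the API path spin-waiting on that flag. A runnable pthread sketch of the same pattern, assuming invented names throughout (compile with -lpthread):

	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>

	/* Hypothetical adapter: a short-held lock protects a "busy" flag. */
	struct adapter {
		pthread_mutex_t lock;
		int busy;
	};

	static void set_busy(struct adapter *a, int busy)
	{
		pthread_mutex_lock(&a->lock);
		a->busy = busy;
		pthread_mutex_unlock(&a->lock);
	}

	/* API path: spin until the reset path clears the busy flag. */
	static void api_call(struct adapter *a)
	{
		pthread_mutex_lock(&a->lock);
		while (a->busy) {
			pthread_mutex_unlock(&a->lock);
			sched_yield();		/* cpu_relax() in the driver */
			pthread_mutex_lock(&a->lock);
		}
		/* ... issue the device command under the lock ... */
		pthread_mutex_unlock(&a->lock);
		puts("api call completed");
	}

	/* Reset path: mark busy, do the long work unlocked, clear busy. */
	static void *reset_thread(void *arg)
	{
		struct adapter *a = arg;

		set_busy(a, 1);
		/* the long stop/reset/reopen sequence runs WITHOUT the lock,
		 * so it is free to sleep while API callers poll the flag */
		set_busy(a, 0);
		return NULL;
	}

	int main(void)
	{
		struct adapter a = { PTHREAD_MUTEX_INITIALIZER, 0 };
		pthread_t t;

		pthread_create(&t, NULL, reset_thread, &a);
		api_call(&a);
		pthread_join(&t, NULL);
		return 0;
	}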
11148 +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
11149 +index 87236206366fd..00024dd411471 100644
11150 +--- a/drivers/net/ethernet/faraday/ftgmac100.c
11151 ++++ b/drivers/net/ethernet/faraday/ftgmac100.c
11152 +@@ -1817,6 +1817,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
11153 + priv->rxdes0_edorr_mask = BIT(30);
11154 + priv->txdes0_edotr_mask = BIT(30);
11155 + priv->is_aspeed = true;
11156 ++ /* Disable ast2600 problematic HW arbitration */
11157 ++ if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
11158 ++ iowrite32(FTGMAC100_TM_DEFAULT,
11159 ++ priv->base + FTGMAC100_OFFSET_TM);
11160 ++ }
11161 + } else {
11162 + priv->rxdes0_edorr_mask = BIT(15);
11163 + priv->txdes0_edotr_mask = BIT(15);
11164 +diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
11165 +index e5876a3fda91d..63b3e02fab162 100644
11166 +--- a/drivers/net/ethernet/faraday/ftgmac100.h
11167 ++++ b/drivers/net/ethernet/faraday/ftgmac100.h
11168 +@@ -169,6 +169,14 @@
11169 + #define FTGMAC100_MACCR_FAST_MODE (1 << 19)
11170 + #define FTGMAC100_MACCR_SW_RST (1 << 31)
11171 +
11172 ++/*
11173 ++ * test mode control register
11174 ++ */
11175 ++#define FTGMAC100_TM_RQ_TX_VALID_DIS (1 << 28)
11176 ++#define FTGMAC100_TM_RQ_RR_IDLE_PREV (1 << 27)
11177 ++#define FTGMAC100_TM_DEFAULT \
11178 ++ (FTGMAC100_TM_RQ_TX_VALID_DIS | FTGMAC100_TM_RQ_RR_IDLE_PREV)
11179 ++
11180 + /*
11181 + * PHY control register
11182 + */
11183 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
11184 +index 534fcc71a2a53..e1cd795556294 100644
11185 +--- a/drivers/net/ethernet/freescale/fec_main.c
11186 ++++ b/drivers/net/ethernet/freescale/fec_main.c
11187 +@@ -1913,6 +1913,27 @@ out:
11188 + return ret;
11189 + }
11190 +
11191 ++static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
11192 ++{
11193 ++ struct fec_enet_private *fep = netdev_priv(ndev);
11194 ++ struct phy_device *phy_dev = ndev->phydev;
11195 ++
11196 ++ if (phy_dev) {
11197 ++ phy_reset_after_clk_enable(phy_dev);
11198 ++ } else if (fep->phy_node) {
11199 ++ /*
11200 ++ * If the PHY still is not bound to the MAC, but there is
11201 ++ * OF PHY node and a matching PHY device instance already,
11202 ++ * use the OF PHY node to obtain the PHY device instance,
11203 ++ * and then use that PHY device instance when triggering
11204 ++ * the PHY reset.
11205 ++ */
11206 ++ phy_dev = of_phy_find_device(fep->phy_node);
11207 ++ phy_reset_after_clk_enable(phy_dev);
11208 ++ put_device(&phy_dev->mdio.dev);
11209 ++ }
11210 ++}
11211 ++
11212 + static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
11213 + {
11214 + struct fec_enet_private *fep = netdev_priv(ndev);
11215 +@@ -1939,7 +1960,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
11216 + if (ret)
11217 + goto failed_clk_ref;
11218 +
11219 +- phy_reset_after_clk_enable(ndev->phydev);
11220 ++ fec_enet_phy_reset_after_clk_enable(ndev);
11221 + } else {
11222 + clk_disable_unprepare(fep->clk_enet_out);
11223 + if (fep->clk_ptp) {
11224 +@@ -2985,16 +3006,16 @@ fec_enet_open(struct net_device *ndev)
11225 + /* Init MAC prior to mii bus probe */
11226 + fec_restart(ndev);
11227 +
11228 +- /* Probe and connect to PHY when open the interface */
11229 +- ret = fec_enet_mii_probe(ndev);
11230 +- if (ret)
11231 +- goto err_enet_mii_probe;
11232 +-
11233 + /* Call phy_reset_after_clk_enable() again if it failed during
11234 + * phy_reset_after_clk_enable() before because the PHY wasn't probed.
11235 + */
11236 + if (reset_again)
11237 +- phy_reset_after_clk_enable(ndev->phydev);
11238 ++ fec_enet_phy_reset_after_clk_enable(ndev);
11239 ++
11240 ++ /* Probe and connect to PHY when open the interface */
11241 ++ ret = fec_enet_mii_probe(ndev);
11242 ++ if (ret)
11243 ++ goto err_enet_mii_probe;
11244 +
11245 + if (fep->quirks & FEC_QUIRK_ERR006687)
11246 + imx6q_cpuidle_fec_irqs_used();
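The fec hunks above add a fallback for resetting a PHY that is not yet bound to the MAC: look the device up through its OF node, use it, then drop the temporary reference with put_device(). A toy sketch of that bound-handle-or-lookup-and-put refcounting shape, with every name invented for illustration:

	#include <stdio.h>

	/* Hypothetical refcounted PHY object. */
	struct phy {
		int refcnt;
	};

	static struct phy registry = { .refcnt = 1 };

	static struct phy *phy_find(void)	/* lookup takes a reference */
	{
		registry.refcnt++;
		return &registry;
	}

	static void phy_put(struct phy *p)	/* drop it again when done */
	{
		p->refcnt--;
	}

	static void phy_reset(struct phy *p)
	{
		printf("reset PHY (refcnt %d)\n", p->refcnt);
	}

	/*
	 * Shape of fec_enet_phy_reset_after_clk_enable(): use the already
	 * bound handle when there is one, otherwise do a lookup that takes
	 * a temporary reference and release it after the reset.
	 */
	static void reset_after_clk_enable(struct phy *bound)
	{
		if (bound) {
			phy_reset(bound);
		} else {
			struct phy *p = phy_find();

			phy_reset(p);
			phy_put(p);	/* put_device() in the driver */
		}
	}

	int main(void)
	{
		reset_after_clk_enable(NULL);		/* not bound: lookup path */
		reset_after_clk_enable(&registry);	/* bound: direct path */
		return 0;
	}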
11247 +diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
11248 +index c5c732601e35e..7ef3369953b6a 100644
11249 +--- a/drivers/net/ethernet/ibm/ibmveth.c
11250 ++++ b/drivers/net/ethernet/ibm/ibmveth.c
11251 +@@ -1349,6 +1349,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
11252 + int offset = ibmveth_rxq_frame_offset(adapter);
11253 + int csum_good = ibmveth_rxq_csum_good(adapter);
11254 + int lrg_pkt = ibmveth_rxq_large_packet(adapter);
11255 ++ __sum16 iph_check = 0;
11256 +
11257 + skb = ibmveth_rxq_get_buffer(adapter);
11258 +
11259 +@@ -1385,16 +1386,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
11260 + skb_put(skb, length);
11261 + skb->protocol = eth_type_trans(skb, netdev);
11262 +
11263 +- if (csum_good) {
11264 +- skb->ip_summed = CHECKSUM_UNNECESSARY;
11265 +- ibmveth_rx_csum_helper(skb, adapter);
11266 ++ /* PHYP without PLSO support places a -1 in the ip
11267 ++ * checksum for large send frames.
11268 ++ */
11269 ++ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
11270 ++ struct iphdr *iph = (struct iphdr *)skb->data;
11271 ++
11272 ++ iph_check = iph->check;
11273 + }
11274 +
11275 +- if (length > netdev->mtu + ETH_HLEN) {
11276 ++ if ((length > netdev->mtu + ETH_HLEN) ||
11277 ++ lrg_pkt || iph_check == 0xffff) {
11278 + ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
11279 + adapter->rx_large_packets++;
11280 + }
11281 +
11282 ++ if (csum_good) {
11283 ++ skb->ip_summed = CHECKSUM_UNNECESSARY;
11284 ++ ibmveth_rx_csum_helper(skb, adapter);
11285 ++ }
11286 ++
11287 + napi_gro_receive(napi, skb); /* send it up */
11288 +
11289 + netdev->stats.rx_packets++;
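The ibmveth hunk above widens the large-send detection: besides the over-MTU length and the hypervisor's lrg_pkt flag, it now also treats an IPv4 checksum of 0xffff (the -1 sentinel written by PHYP without PLSO support) as a large-send frame, and it does so before the checksum-good handling. A small compilable sketch of that sentinel test, with a cut-down header struct invented for the example:

	#include <stdio.h>
	#include <stdint.h>

	/* Minimal IPv4 header prefix; only the checksum field matters here. */
	struct iphdr_min {
		uint8_t  ver_ihl;
		uint8_t  tos;
		uint16_t tot_len;
		uint16_t id;
		uint16_t frag_off;
		uint8_t  ttl;
		uint8_t  protocol;
		uint16_t check;		/* 0xffff marks a PHYP large-send frame */
	};

	/*
	 * Mirrors the hunk: large-send when the frame exceeds MTU plus the
	 * 14-byte Ethernet header, when the hypervisor flagged it, or when
	 * the IP checksum carries the -1 sentinel.
	 */
	static int is_large_send(unsigned len, unsigned mtu, int lrg_pkt_flag,
				 const struct iphdr_min *iph)
	{
		return len > mtu + 14 || lrg_pkt_flag || iph->check == 0xffff;
	}

	int main(void)
	{
		struct iphdr_min iph = { .check = 0xffff };

		printf("sentinel frame: %s\n",
		       is_large_send(600, 1500, 0, &iph) ? "large send" : "normal");
		iph.check = 0x1c46;
		printf("normal frame: %s\n",
		       is_large_send(600, 1500, 0, &iph) ? "large send" : "normal");
		return 0;
	}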
11290 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
11291 +index 1b702a43a5d01..3e0aab04d86fb 100644
11292 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
11293 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
11294 +@@ -4194,8 +4194,13 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
11295 + dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
11296 + goto out;
11297 + }
11298 ++ /* crq->change_mac_addr.mac_addr is the requested one
11299 ++ * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
11300 ++ */
11301 + ether_addr_copy(netdev->dev_addr,
11302 + &crq->change_mac_addr_rsp.mac_addr[0]);
11303 ++ ether_addr_copy(adapter->mac_addr,
11304 ++ &crq->change_mac_addr_rsp.mac_addr[0]);
11305 + out:
11306 + complete(&adapter->fw_done);
11307 + return rc;
11308 +@@ -4605,7 +4610,7 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
11309 + case IBMVNIC_1GBPS:
11310 + adapter->speed = SPEED_1000;
11311 + break;
11312 +- case IBMVNIC_10GBP:
11313 ++ case IBMVNIC_10GBPS:
11314 + adapter->speed = SPEED_10000;
11315 + break;
11316 + case IBMVNIC_25GBPS:
11317 +@@ -4620,6 +4625,9 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
11318 + case IBMVNIC_100GBPS:
11319 + adapter->speed = SPEED_100000;
11320 + break;
11321 ++ case IBMVNIC_200GBPS:
11322 ++ adapter->speed = SPEED_200000;
11323 ++ break;
11324 + default:
11325 + if (netif_carrier_ok(netdev))
11326 + netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
11327 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
11328 +index f8416e1d4cf09..43feb96b0a68a 100644
11329 +--- a/drivers/net/ethernet/ibm/ibmvnic.h
11330 ++++ b/drivers/net/ethernet/ibm/ibmvnic.h
11331 +@@ -373,7 +373,7 @@ struct ibmvnic_phys_parms {
11332 + #define IBMVNIC_10MBPS 0x40000000
11333 + #define IBMVNIC_100MBPS 0x20000000
11334 + #define IBMVNIC_1GBPS 0x10000000
11335 +-#define IBMVNIC_10GBP 0x08000000
11336 ++#define IBMVNIC_10GBPS 0x08000000
11337 + #define IBMVNIC_40GBPS 0x04000000
11338 + #define IBMVNIC_100GBPS 0x02000000
11339 + #define IBMVNIC_25GBPS 0x01000000
11340 +diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
11341 +index 03e034918d147..bf48f0ded9c7d 100644
11342 +--- a/drivers/net/ethernet/korina.c
11343 ++++ b/drivers/net/ethernet/korina.c
11344 +@@ -1113,7 +1113,7 @@ out:
11345 + return rc;
11346 +
11347 + probe_err_register:
11348 +- kfree(lp->td_ring);
11349 ++ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
11350 + probe_err_td_ring:
11351 + iounmap(lp->tx_dma_regs);
11352 + probe_err_dma_tx:
11353 +@@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev)
11354 + iounmap(lp->eth_regs);
11355 + iounmap(lp->rx_dma_regs);
11356 + iounmap(lp->tx_dma_regs);
11357 ++ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
11358 +
11359 + unregister_netdev(bif->dev);
11360 + free_netdev(bif->dev);
11361 +diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
11362 +index 62a820b1eb163..3362b148de23c 100644
11363 +--- a/drivers/net/ethernet/mediatek/Kconfig
11364 ++++ b/drivers/net/ethernet/mediatek/Kconfig
11365 +@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC
11366 + config NET_MEDIATEK_STAR_EMAC
11367 + tristate "MediaTek STAR Ethernet MAC support"
11368 + select PHYLIB
11369 ++ select REGMAP_MMIO
11370 + help
11371 + This driver supports the ethernet MAC IP first used on
11372 + MediaTek MT85** SoCs.
11373 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
11374 +index 8a10285b0e10c..89edcb5fca4fb 100644
11375 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
11376 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
11377 +@@ -943,6 +943,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
11378 + bool clean_complete = true;
11379 + int done;
11380 +
11381 ++ if (!budget)
11382 ++ return 0;
11383 ++
11384 + if (priv->tx_ring_num[TX_XDP]) {
11385 + xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
11386 + if (xdp_tx_cq->xdp_busy) {
11387 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
11388 +index 9dff7b086c9fb..1f11379ad5b64 100644
11389 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
11390 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
11391 +@@ -350,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
11392 + .dma = tx_info->map0_dma,
11393 + };
11394 +
11395 +- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
11396 ++ if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
11397 + dma_unmap_page(priv->ddev, tx_info->map0_dma,
11398 + PAGE_SIZE, priv->dma_dir);
11399 + put_page(tx_info->page);
11400 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
11401 +index 7283443868f3c..13c87ab50b267 100644
11402 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
11403 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
11404 +@@ -212,8 +212,8 @@ static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
11405 +
11406 + {
11407 + u32 data_size;
11408 ++ int err = 0;
11409 + u32 offset;
11410 +- int err;
11411 +
11412 + for (offset = 0; offset < value_len; offset += data_size) {
11413 + data_size = value_len - offset;
11414 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
11415 +index 2d55b7c22c034..4e7cfa22b3d2f 100644
11416 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
11417 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
11418 +@@ -550,8 +550,9 @@ static int mlx5_pps_event(struct notifier_block *nb,
11419 + switch (clock->ptp_info.pin_config[pin].func) {
11420 + case PTP_PF_EXTTS:
11421 + ptp_event.index = pin;
11422 +- ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
11423 +- be64_to_cpu(eqe->data.pps.time_stamp));
11424 ++ ptp_event.timestamp =
11425 ++ mlx5_timecounter_cyc2time(clock,
11426 ++ be64_to_cpu(eqe->data.pps.time_stamp));
11427 + if (clock->pps_info.enabled) {
11428 + ptp_event.type = PTP_CLOCK_PPSUSR;
11429 + ptp_event.pps_times.ts_real =
11430 +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
11431 +index fe173ea894e2c..b1feef473b746 100644
11432 +--- a/drivers/net/ethernet/realtek/r8169_main.c
11433 ++++ b/drivers/net/ethernet/realtek/r8169_main.c
11434 +@@ -4675,7 +4675,7 @@ static int rtl8169_close(struct net_device *dev)
11435 +
11436 + phy_disconnect(tp->phydev);
11437 +
11438 +- pci_free_irq(pdev, 0, tp);
11439 ++ free_irq(pci_irq_vector(pdev, 0), tp);
11440 +
11441 + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
11442 + tp->RxPhyAddr);
11443 +@@ -4726,8 +4726,8 @@ static int rtl_open(struct net_device *dev)
11444 +
11445 + rtl_request_firmware(tp);
11446 +
11447 +- retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
11448 +- dev->name);
11449 ++ retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
11450 ++ IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
11451 + if (retval < 0)
11452 + goto err_release_fw_2;
11453 +
11454 +@@ -4759,7 +4759,7 @@ out:
11455 + return retval;
11456 +
11457 + err_free_irq:
11458 +- pci_free_irq(pdev, 0, tp);
11459 ++ free_irq(pci_irq_vector(pdev, 0), tp);
11460 + err_release_fw_2:
11461 + rtl_release_firmware(tp);
11462 + rtl8169_rx_clear(tp);
11463 +@@ -4871,6 +4871,10 @@ static int __maybe_unused rtl8169_resume(struct device *device)
11464 + if (netif_running(tp->dev))
11465 + __rtl8169_resume(tp);
11466 +
11467 ++ /* Reportedly at least Asus X453MA truncates packets otherwise */
11468 ++ if (tp->mac_version == RTL_GIGA_MAC_VER_37)
11469 ++ rtl_init_rxcfg(tp);
11470 ++
11471 + return 0;
11472 + }
11473 +
11474 +diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
11475 +index 0f366cc50b74c..7f8be61a37089 100644
11476 +--- a/drivers/net/ethernet/socionext/netsec.c
11477 ++++ b/drivers/net/ethernet/socionext/netsec.c
11478 +@@ -6,6 +6,7 @@
11479 + #include <linux/pm_runtime.h>
11480 + #include <linux/acpi.h>
11481 + #include <linux/of_mdio.h>
11482 ++#include <linux/of_net.h>
11483 + #include <linux/etherdevice.h>
11484 + #include <linux/interrupt.h>
11485 + #include <linux/io.h>
11486 +@@ -1836,6 +1837,14 @@ static const struct net_device_ops netsec_netdev_ops = {
11487 + static int netsec_of_probe(struct platform_device *pdev,
11488 + struct netsec_priv *priv, u32 *phy_addr)
11489 + {
11490 ++ int err;
11491 ++
11492 ++ err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface);
11493 ++ if (err) {
11494 ++ dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
11495 ++ return err;
11496 ++ }
11497 ++
11498 + priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
11499 + if (!priv->phy_np) {
11500 + dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
11501 +@@ -1862,6 +1871,14 @@ static int netsec_acpi_probe(struct platform_device *pdev,
11502 + if (!IS_ENABLED(CONFIG_ACPI))
11503 + return -ENODEV;
11504 +
11505 ++ /* ACPI systems are assumed to configure the PHY in firmware, so
11506 ++ * there is really no need to discover the PHY mode from the DSDT.
11507 ++ * Since firmware is known to exist in the field that configures the
11508 ++ * PHY correctly but passes the wrong mode string in the phy-mode
11509 ++ * device property, we have no choice but to ignore it.
11510 ++ */
11511 ++ priv->phy_interface = PHY_INTERFACE_MODE_NA;
11512 ++
11513 + ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
11514 + if (ret) {
11515 + dev_err(&pdev->dev,
11516 +@@ -1998,13 +2015,6 @@ static int netsec_probe(struct platform_device *pdev)
11517 + priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
11518 + NETIF_MSG_LINK | NETIF_MSG_PROBE;
11519 +
11520 +- priv->phy_interface = device_get_phy_mode(&pdev->dev);
11521 +- if ((int)priv->phy_interface < 0) {
11522 +- dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
11523 +- ret = -ENODEV;
11524 +- goto free_ndev;
11525 +- }
11526 +-
11527 + priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
11528 + resource_size(mmio_res));
11529 + if (!priv->ioaddr) {
11530 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
11531 +index 73465e5f5a417..d4be2559bb73d 100644
11532 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
11533 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
11534 +@@ -176,32 +176,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
11535 + }
11536 + }
11537 +
11538 +-/**
11539 +- * stmmac_stop_all_queues - Stop all queues
11540 +- * @priv: driver private structure
11541 +- */
11542 +-static void stmmac_stop_all_queues(struct stmmac_priv *priv)
11543 +-{
11544 +- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
11545 +- u32 queue;
11546 +-
11547 +- for (queue = 0; queue < tx_queues_cnt; queue++)
11548 +- netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
11549 +-}
11550 +-
11551 +-/**
11552 +- * stmmac_start_all_queues - Start all queues
11553 +- * @priv: driver private structure
11554 +- */
11555 +-static void stmmac_start_all_queues(struct stmmac_priv *priv)
11556 +-{
11557 +- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
11558 +- u32 queue;
11559 +-
11560 +- for (queue = 0; queue < tx_queues_cnt; queue++)
11561 +- netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
11562 +-}
11563 +-
11564 + static void stmmac_service_event_schedule(struct stmmac_priv *priv)
11565 + {
11566 + if (!test_bit(STMMAC_DOWN, &priv->state) &&
11567 +@@ -2736,6 +2710,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
11568 + stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
11569 + }
11570 +
11571 ++ /* Configure real RX and TX queues */
11572 ++ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
11573 ++ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
11574 ++
11575 + /* Start the ball rolling... */
11576 + stmmac_start_all_dma(priv);
11577 +
11578 +@@ -2862,7 +2840,7 @@ static int stmmac_open(struct net_device *dev)
11579 + }
11580 +
11581 + stmmac_enable_all_queues(priv);
11582 +- stmmac_start_all_queues(priv);
11583 ++ netif_tx_start_all_queues(priv->dev);
11584 +
11585 + return 0;
11586 +
11587 +@@ -2903,8 +2881,6 @@ static int stmmac_release(struct net_device *dev)
11588 + phylink_stop(priv->phylink);
11589 + phylink_disconnect_phy(priv->phylink);
11590 +
11591 +- stmmac_stop_all_queues(priv);
11592 +-
11593 + stmmac_disable_all_queues(priv);
11594 +
11595 + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
11596 +@@ -4819,10 +4795,6 @@ int stmmac_dvr_probe(struct device *device,
11597 +
11598 + stmmac_check_ether_addr(priv);
11599 +
11600 +- /* Configure real RX and TX queues */
11601 +- netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
11602 +- netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
11603 +-
11604 + ndev->netdev_ops = &stmmac_netdev_ops;
11605 +
11606 + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
11607 +@@ -5078,7 +5050,6 @@ int stmmac_suspend(struct device *dev)
11608 + mutex_lock(&priv->lock);
11609 +
11610 + netif_device_detach(ndev);
11611 +- stmmac_stop_all_queues(priv);
11612 +
11613 + stmmac_disable_all_queues(priv);
11614 +
11615 +@@ -5203,8 +5174,6 @@ int stmmac_resume(struct device *dev)
11616 +
11617 + stmmac_enable_all_queues(priv);
11618 +
11619 +- stmmac_start_all_queues(priv);
11620 +-
11621 + mutex_unlock(&priv->lock);
11622 +
11623 + if (!device_may_wakeup(priv->device)) {
11624 +diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
11625 +index 9e58e495d3731..bb46741fbe47e 100644
11626 +--- a/drivers/net/ipa/ipa_endpoint.c
11627 ++++ b/drivers/net/ipa/ipa_endpoint.c
11628 +@@ -1447,6 +1447,9 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
11629 +
11630 + void ipa_endpoint_suspend(struct ipa *ipa)
11631 + {
11632 ++ if (!ipa->setup_complete)
11633 ++ return;
11634 ++
11635 + if (ipa->modem_netdev)
11636 + ipa_modem_suspend(ipa->modem_netdev);
11637 +
11638 +@@ -1458,6 +1461,9 @@ void ipa_endpoint_suspend(struct ipa *ipa)
11639 +
11640 + void ipa_endpoint_resume(struct ipa *ipa)
11641 + {
11642 ++ if (!ipa->setup_complete)
11643 ++ return;
11644 ++
11645 + ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
11646 + ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
11647 +
11648 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
11649 +index 07c42c0719f5b..5ca1356b8656f 100644
11650 +--- a/drivers/net/usb/qmi_wwan.c
11651 ++++ b/drivers/net/usb/qmi_wwan.c
11652 +@@ -1375,6 +1375,7 @@ static const struct usb_device_id products[] = {
11653 + {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
11654 + {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
11655 + {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
11656 ++ {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
11657 +
11658 + /* 4. Gobi 1000 devices */
11659 + {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
11660 +diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
11661 +index 9b00708676cf7..1bdd3df0867a5 100644
11662 +--- a/drivers/net/wan/hdlc.c
11663 ++++ b/drivers/net/wan/hdlc.c
11664 +@@ -46,7 +46,15 @@ static struct hdlc_proto *first_proto;
11665 + static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
11666 + struct packet_type *p, struct net_device *orig_dev)
11667 + {
11668 +- struct hdlc_device *hdlc = dev_to_hdlc(dev);
11669 ++ struct hdlc_device *hdlc;
11670 ++
11671 ++ /* First make sure "dev" is an HDLC device */
11672 ++ if (!(dev->priv_flags & IFF_WAN_HDLC)) {
11673 ++ kfree_skb(skb);
11674 ++ return NET_RX_SUCCESS;
11675 ++ }
11676 ++
11677 ++ hdlc = dev_to_hdlc(dev);
11678 +
11679 + if (!net_eq(dev_net(dev), &init_net)) {
11680 + kfree_skb(skb);
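The hdlc_rcv() fix above is a type-confusion guard: the packet handler fires for frames on any device, so dev_to_hdlc() must not be applied until the flags confirm the device really is HDLC. A self-contained sketch of checking a type flag before interpreting a private pointer, using toy structs and an illustrative flag bit:

	#include <stdio.h>

	#define IFF_WAN_HDLC_BIT (1 << 0)	/* illustrative flag bit */

	/* Toy net_device with a flags word and an opaque private area. */
	struct net_device {
		unsigned priv_flags;
		void *priv;
	};

	struct hdlc_device {
		const char *proto_name;
	};

	/*
	 * Like the patched hdlc_rcv(): refuse to interpret dev->priv as an
	 * hdlc_device unless the flags say the device really is HDLC.
	 */
	static int hdlc_rcv(struct net_device *dev)
	{
		struct hdlc_device *hdlc;

		if (!(dev->priv_flags & IFF_WAN_HDLC_BIT)) {
			puts("not an HDLC device: drop frame");
			return 0;
		}

		hdlc = dev->priv;
		printf("HDLC frame handled by %s protocol\n", hdlc->proto_name);
		return 0;
	}

	int main(void)
	{
		struct hdlc_device h = { .proto_name = "raw" };
		struct net_device hdlc_dev = { IFF_WAN_HDLC_BIT, &h };
		struct net_device eth_dev = { 0, NULL };

		hdlc_rcv(&eth_dev);	/* previously a bogus cast, now dropped */
		hdlc_rcv(&hdlc_dev);
		return 0;
	}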
11681 +diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
11682 +index 08e0a46501dec..c70a518b8b478 100644
11683 +--- a/drivers/net/wan/hdlc_raw_eth.c
11684 ++++ b/drivers/net/wan/hdlc_raw_eth.c
11685 +@@ -99,6 +99,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
11686 + old_qlen = dev->tx_queue_len;
11687 + ether_setup(dev);
11688 + dev->tx_queue_len = old_qlen;
11689 ++ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
11690 + eth_hw_addr_random(dev);
11691 + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
11692 + netif_dormant_off(dev);
11693 +diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
11694 +index 294fbc1e89ab8..e6e0284e47837 100644
11695 +--- a/drivers/net/wireless/ath/ath10k/ce.c
11696 ++++ b/drivers/net/wireless/ath/ath10k/ce.c
11697 +@@ -1555,7 +1555,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
11698 + ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
11699 + if (ret) {
11700 + dma_free_coherent(ar->dev,
11701 +- (nentries * sizeof(struct ce_desc_64) +
11702 ++ (nentries * sizeof(struct ce_desc) +
11703 + CE_DESC_RING_ALIGN),
11704 + src_ring->base_addr_owner_space_unaligned,
11705 + base_addr);
11706 +diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
11707 +index d787cbead56ab..215ade6faf328 100644
11708 +--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
11709 ++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
11710 +@@ -142,6 +142,14 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
11711 + BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
11712 +
11713 + idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
11714 ++
11715 ++ if (idx < 0 || idx >= htt->rx_ring.size) {
11716 ++ ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
11717 ++ idx &= htt->rx_ring.size_mask;
11718 ++ ret = -ENOMEM;
11719 ++ goto fail;
11720 ++ }
11721 ++
11722 + while (num > 0) {
11723 + skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
11724 + if (!skb) {
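The htt_rx hunk above distrusts a ring index that firmware writes into shared memory: out-of-range values are reported, masked back into range so the ring state stays sane, and the fill operation fails instead of writing out of bounds. A minimal sketch of that validate-then-mask defensive check, with invented sizes and names:

	#include <stdio.h>
	#include <stdint.h>

	#define RING_SIZE 8			/* power of two, as in the driver */
	#define RING_MASK (RING_SIZE - 1)

	/* Validate a device-supplied index; mask it into range on failure. */
	static int validate_ring_index(int32_t idx, int32_t *sane_idx)
	{
		if (idx < 0 || idx >= RING_SIZE) {
			fprintf(stderr, "bad ring index %d from device\n", idx);
			*sane_idx = idx & RING_MASK;
			return -1;	/* caller aborts the fill */
		}
		*sane_idx = idx;
		return 0;
	}

	int main(void)
	{
		int32_t idx;

		if (validate_ring_index(5, &idx) == 0)
			printf("index %d ok\n", idx);
		if (validate_ring_index(1000, &idx) != 0)
			printf("recovered index %d, fill aborted\n", idx);
		return 0;
	}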
11725 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
11726 +index 919d15584d4a2..77daca67a8e14 100644
11727 +--- a/drivers/net/wireless/ath/ath10k/mac.c
11728 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
11729 +@@ -7283,7 +7283,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
11730 + struct ieee80211_channel *channel)
11731 + {
11732 + int ret;
11733 +- enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
11734 ++ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
11735 +
11736 + lockdep_assert_held(&ar->conf_mutex);
11737 +
11738 +diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
11739 +index 30092841ac464..a0314c1c84653 100644
11740 +--- a/drivers/net/wireless/ath/ath11k/ahb.c
11741 ++++ b/drivers/net/wireless/ath/ath11k/ahb.c
11742 +@@ -981,12 +981,16 @@ err_core_free:
11743 + static int ath11k_ahb_remove(struct platform_device *pdev)
11744 + {
11745 + struct ath11k_base *ab = platform_get_drvdata(pdev);
11746 ++ unsigned long left;
11747 +
11748 + reinit_completion(&ab->driver_recovery);
11749 +
11750 +- if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags))
11751 +- wait_for_completion_timeout(&ab->driver_recovery,
11752 +- ATH11K_AHB_RECOVERY_TIMEOUT);
11753 ++ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
11754 ++ left = wait_for_completion_timeout(&ab->driver_recovery,
11755 ++ ATH11K_AHB_RECOVERY_TIMEOUT);
11756 ++ if (!left)
11757 ++ ath11k_warn(ab, "failed to receive recovery response completion\n");
11758 ++ }
11759 +
11760 + set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
11761 + cancel_work_sync(&ab->restart_work);
11762 +diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
11763 +index 2836a0f197ab0..fc5be7e8c043e 100644
11764 +--- a/drivers/net/wireless/ath/ath11k/mac.c
11765 ++++ b/drivers/net/wireless/ath/ath11k/mac.c
11766 +@@ -5824,7 +5824,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
11767 + ret = ath11k_mac_setup_channels_rates(ar,
11768 + cap->supported_bands);
11769 + if (ret)
11770 +- goto err_free;
11771 ++ goto err;
11772 +
11773 + ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
11774 + ath11k_mac_setup_he_cap(ar, cap);
11775 +@@ -5938,7 +5938,9 @@ static int __ath11k_mac_register(struct ath11k *ar)
11776 + err_free:
11777 + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
11778 + kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
11779 ++ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
11780 +
11781 ++err:
11782 + SET_IEEE80211_DEV(ar->hw, NULL);
11783 + return ret;
11784 + }
11785 +diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
11786 +index c00a99ad8dbc1..497cff7e64cc5 100644
11787 +--- a/drivers/net/wireless/ath/ath11k/qmi.c
11788 ++++ b/drivers/net/wireless/ath/ath11k/qmi.c
11789 +@@ -2419,6 +2419,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab)
11790 + ATH11K_QMI_WLFW_SERVICE_INS_ID_V01);
11791 + if (ret < 0) {
11792 + ath11k_warn(ab, "failed to add qmi lookup\n");
11793 ++ destroy_workqueue(ab->qmi.event_wq);
11794 + return ret;
11795 + }
11796 +
11797 +diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
11798 +index 5e7ea838a9218..814131a0680a4 100644
11799 +--- a/drivers/net/wireless/ath/ath6kl/main.c
11800 ++++ b/drivers/net/wireless/ath/ath6kl/main.c
11801 +@@ -430,6 +430,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
11802 +
11803 + ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
11804 +
11805 ++ if (aid < 1 || aid > AP_MAX_NUM_STA)
11806 ++ return;
11807 ++
11808 + if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
11809 + struct ieee80211_mgmt *mgmt =
11810 + (struct ieee80211_mgmt *) assoc_info;
11811 +diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
11812 +index 6885d2ded53a8..3d5db84d64650 100644
11813 +--- a/drivers/net/wireless/ath/ath6kl/wmi.c
11814 ++++ b/drivers/net/wireless/ath/ath6kl/wmi.c
11815 +@@ -2645,6 +2645,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
11816 + return -EINVAL;
11817 + }
11818 +
11819 ++ if (tsid >= 16) {
11820 ++ ath6kl_err("invalid tsid: %d\n", tsid);
11821 ++ return -EINVAL;
11822 ++ }
11823 ++
11824 + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
11825 + if (!skb)
11826 + return -ENOMEM;
11827 +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
11828 +index 3f563e02d17da..2ed98aaed6fb5 100644
11829 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
11830 ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
11831 +@@ -449,10 +449,19 @@ static void hif_usb_stop(void *hif_handle)
11832 + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
11833 +
11834 + /* The pending URBs have to be canceled. */
11835 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
11836 + list_for_each_entry_safe(tx_buf, tx_buf_tmp,
11837 + &hif_dev->tx.tx_pending, list) {
11838 ++ usb_get_urb(tx_buf->urb);
11839 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
11840 + usb_kill_urb(tx_buf->urb);
11841 ++ list_del(&tx_buf->list);
11842 ++ usb_free_urb(tx_buf->urb);
11843 ++ kfree(tx_buf->buf);
11844 ++ kfree(tx_buf);
11845 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
11846 + }
11847 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
11848 +
11849 + usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
11850 + }
11851 +@@ -762,27 +771,37 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
11852 + struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
11853 + unsigned long flags;
11854 +
11855 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
11856 + list_for_each_entry_safe(tx_buf, tx_buf_tmp,
11857 + &hif_dev->tx.tx_buf, list) {
11858 ++ usb_get_urb(tx_buf->urb);
11859 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
11860 + usb_kill_urb(tx_buf->urb);
11861 + list_del(&tx_buf->list);
11862 + usb_free_urb(tx_buf->urb);
11863 + kfree(tx_buf->buf);
11864 + kfree(tx_buf);
11865 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
11866 + }
11867 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
11868 +
11869 + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
11870 + hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
11871 + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
11872 +
11873 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
11874 + list_for_each_entry_safe(tx_buf, tx_buf_tmp,
11875 + &hif_dev->tx.tx_pending, list) {
11876 ++ usb_get_urb(tx_buf->urb);
11877 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
11878 + usb_kill_urb(tx_buf->urb);
11879 + list_del(&tx_buf->list);
11880 + usb_free_urb(tx_buf->urb);
11881 + kfree(tx_buf->buf);
11882 + kfree(tx_buf);
11883 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
11884 + }
11885 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
11886 +
11887 + usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
11888 + }
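
Both ath9k hunks apply one rule: usb_kill_urb() can sleep, so it must not run
under the tx spinlock, and the URB has to be pinned with usb_get_urb() so it
cannot disappear in the window where the lock is dropped. Boiled down to a
generic rendition (not the driver's exact reference counting):

    spin_lock_irqsave(&dev->tx_lock, flags);
    while (!list_empty(&dev->tx_pending)) {
        struct tx_buf *buf = list_first_entry(&dev->tx_pending,
                                              struct tx_buf, list);

        usb_get_urb(buf->urb);          /* pin it across the unlock */
        spin_unlock_irqrestore(&dev->tx_lock, flags);

        usb_kill_urb(buf->urb);         /* sleeps; lock is dropped */
        usb_put_urb(buf->urb);          /* drop the pin again */

        spin_lock_irqsave(&dev->tx_lock, flags);
        list_del(&buf->list);
        usb_free_urb(buf->urb);         /* drop the list's reference */
        kfree(buf);
    }
    spin_unlock_irqrestore(&dev->tx_lock, flags);
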
11889 +diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
11890 +index d2e062eaf5614..510e61e97dbcb 100644
11891 +--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
11892 ++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
11893 +@@ -339,6 +339,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
11894 +
11895 + if (skb) {
11896 + htc_hdr = (struct htc_frame_hdr *) skb->data;
11897 ++ if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint))
11898 ++ goto ret;
11899 + endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
11900 + skb_pull(skb, sizeof(struct htc_frame_hdr));
11901 +
11902 +diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
11903 +index 702b689c06df3..f3ea629764fa8 100644
11904 +--- a/drivers/net/wireless/ath/wcn36xx/main.c
11905 ++++ b/drivers/net/wireless/ath/wcn36xx/main.c
11906 +@@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
11907 + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
11908 + .mcs = {
11909 + .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
11910 +- .rx_highest = cpu_to_le16(72),
11911 ++ .rx_highest = cpu_to_le16(150),
11912 + .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
11913 + }
11914 + }
11915 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
11916 +index c88655acc78c7..76b478f70b4bb 100644
11917 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
11918 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
11919 +@@ -483,7 +483,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
11920 + ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
11921 +
11922 + if (ret || !(*ifp) || !(*ifp)->ndev) {
11923 +- if (ret != -ENODATA && *ifp)
11924 ++ if (ret != -ENODATA && *ifp && (*ifp)->ndev)
11925 + (*ifp)->ndev->stats.rx_errors++;
11926 + brcmu_pkt_buf_free_skb(skb);
11927 + return -ENODATA;
11928 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
11929 +index 8bb4f1fa790e7..1bb270e782ff2 100644
11930 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
11931 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
11932 +@@ -1619,6 +1619,8 @@ fail:
11933 + BRCMF_TX_IOCTL_MAX_MSG_SIZE,
11934 + msgbuf->ioctbuf,
11935 + msgbuf->ioctbuf_handle);
11936 ++ if (msgbuf->txflow_wq)
11937 ++ destroy_workqueue(msgbuf->txflow_wq);
11938 + kfree(msgbuf);
11939 + }
11940 + return -ENOMEM;
11941 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
11942 +index 7ef36234a25dc..66797dc5e90d5 100644
11943 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
11944 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
11945 +@@ -5065,8 +5065,10 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
11946 + pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft;
11947 + pi->pi_fptr.detach = wlc_phy_detach_lcnphy;
11948 +
11949 +- if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
11950 ++ if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) {
11951 ++ kfree(pi->u.pi_lcnphy);
11952 + return false;
11953 ++ }
11954 +
11955 + if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
11956 + if (pi_lcn->lcnphy_tempsense_option == 3) {
11957 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
11958 +index 27116c7d3f4f8..48269a4cf8964 100644
11959 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
11960 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
11961 +@@ -947,9 +947,8 @@ static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
11962 + struct iwl_rx_packet *pkt = tp_data->fw_pkt;
11963 + struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
11964 +
11965 +- if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
11966 +- (pkt->hdr.cmd == wanted_hdr->cmd &&
11967 +- pkt->hdr.group_id == wanted_hdr->group_id))) {
11968 ++ if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
11969 ++ pkt->hdr.group_id == wanted_hdr->group_id)) {
11970 + struct iwl_rx_packet *fw_pkt =
11971 + kmemdup(pkt,
11972 + sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
11973 +@@ -1012,6 +1011,9 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
11974 + enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
11975 + int ret, i;
11976 +
11977 ++ if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
11978 ++ return;
11979 ++
11980 + IWL_DEBUG_FW(fwrt,
11981 + "WRT: Generating active triggers list, domain 0x%x\n",
11982 + fwrt->trans->dbg.domains_bitmap);
11983 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
11984 +index 77916231ff7d3..03b73003b0095 100644
11985 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
11986 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
11987 +@@ -3685,9 +3685,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
11988 + tail->apply_time_max_delay = cpu_to_le32(delay);
11989 +
11990 + IWL_DEBUG_TE(mvm,
11991 +- "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
11992 +- channel->hw_value, req_dur, duration, delay,
11993 +- dtim_interval);
11994 ++ "ROC: Requesting to remain on channel %u for %ums\n",
11995 ++ channel->hw_value, req_dur);
11996 ++ IWL_DEBUG_TE(mvm,
11997 ++ "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
11998 ++ duration, delay, dtim_interval);
11999 ++
12000 + /* Set the node address */
12001 + memcpy(tail->node_addr, vif->addr, ETH_ALEN);
12002 +
12003 +diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
12004 +index ff932627a46c1..2fb69a590bd8e 100644
12005 +--- a/drivers/net/wireless/marvell/mwifiex/scan.c
12006 ++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
12007 +@@ -1889,7 +1889,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
12008 + chan, CFG80211_BSS_FTYPE_UNKNOWN,
12009 + bssid, timestamp,
12010 + cap_info_bitmap, beacon_period,
12011 +- ie_buf, ie_len, rssi, GFP_KERNEL);
12012 ++ ie_buf, ie_len, rssi, GFP_ATOMIC);
12013 + if (bss) {
12014 + bss_priv = (struct mwifiex_bss_priv *)bss->priv;
12015 + bss_priv->band = band;
12016 +diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
12017 +index a042965962a2d..1b6bee5465288 100644
12018 +--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
12019 ++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
12020 +@@ -1976,6 +1976,8 @@ error:
12021 + kfree(card->mpa_rx.buf);
12022 + card->mpa_tx.buf_size = 0;
12023 + card->mpa_rx.buf_size = 0;
12024 ++ card->mpa_tx.buf = NULL;
12025 ++ card->mpa_rx.buf = NULL;
12026 + }
12027 +
12028 + return ret;
12029 +diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
12030 +index 6f3cfde4654cc..426e39d4ccf0f 100644
12031 +--- a/drivers/net/wireless/marvell/mwifiex/usb.c
12032 ++++ b/drivers/net/wireless/marvell/mwifiex/usb.c
12033 +@@ -1353,7 +1353,8 @@ static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter)
12034 + skb_dequeue(&port->tx_aggr.aggr_list)))
12035 + mwifiex_write_data_complete(adapter, skb_tmp,
12036 + 0, -1);
12037 +- del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
12038 ++ if (port->tx_aggr.timer_cnxt.hold_timer.function)
12039 ++ del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
12040 + port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
12041 + port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
12042 + }
12043 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
12044 +index 8fb8255650a7e..6969579e6b1dd 100644
12045 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
12046 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
12047 +@@ -2267,14 +2267,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
12048 + struct bss_info_bcn *bcn;
12049 + int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
12050 +
12051 +- rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
12052 +- if (IS_ERR(rskb))
12053 +- return PTR_ERR(rskb);
12054 +-
12055 +- tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
12056 +- bcn = (struct bss_info_bcn *)tlv;
12057 +- bcn->enable = en;
12058 +-
12059 + skb = ieee80211_beacon_get_template(hw, vif, &offs);
12060 + if (!skb)
12061 + return -EINVAL;
12062 +@@ -2285,6 +2277,16 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
12063 + return -EINVAL;
12064 + }
12065 +
12066 ++ rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
12067 ++ if (IS_ERR(rskb)) {
12068 ++ dev_kfree_skb(skb);
12069 ++ return PTR_ERR(rskb);
12070 ++ }
12071 ++
12072 ++ tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
12073 ++ bcn = (struct bss_info_bcn *)tlv;
12074 ++ bcn->enable = en;
12075 ++
12076 + if (mvif->band_idx) {
12077 + info = IEEE80211_SKB_CB(skb);
12078 + info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
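
The mt7915 change is an allocate-late fix: the firmware request buffer used
to be allocated before the beacon template was fetched, so the two
`return -EINVAL` paths leaked it. Now every input is fetched and validated
first, and the one remaining failure frees the skb it owns. The shape of it,
with invented helper names:

    skb = get_beacon_template(hw, vif);        /* inputs first */
    if (!skb)
        return -EINVAL;                /* nothing allocated: no leak */

    if (skb->len > MAX_BEACON_SIZE) {
        dev_kfree_skb(skb);
        return -EINVAL;
    }

    rskb = alloc_fw_request(dev, len);         /* allocate last */
    if (IS_ERR(rskb)) {
        dev_kfree_skb(skb);            /* unwind the one held object */
        return PTR_ERR(rskb);
    }
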
12079 +diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
12080 +index f40d8c3c3d9e5..f3ccbd2b10847 100644
12081 +--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
12082 ++++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
12083 +@@ -869,6 +869,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
12084 + default:
12085 + pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid,
12086 + vif->vifid, vif->wdev.iftype);
12087 ++ dev_kfree_skb(cmd_skb);
12088 + ret = -EINVAL;
12089 + goto out;
12090 + }
12091 +@@ -1924,6 +1925,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
12092 + break;
12093 + default:
12094 + pr_err("unsupported iftype %d\n", vif->wdev.iftype);
12095 ++ dev_kfree_skb(cmd_skb);
12096 + ret = -EINVAL;
12097 + goto out;
12098 + }
12099 +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
12100 +index 19efae462a242..5cd7ef3625c5e 100644
12101 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
12102 ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
12103 +@@ -5795,7 +5795,6 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
12104 + ret = usb_submit_urb(urb, GFP_KERNEL);
12105 + if (ret) {
12106 + usb_unanchor_urb(urb);
12107 +- usb_free_urb(urb);
12108 + goto error;
12109 + }
12110 +
12111 +@@ -5804,6 +5803,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
12112 + rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
12113 +
12114 + error:
12115 ++ usb_free_urb(urb);
12116 + return ret;
12117 + }
12118 +
12119 +@@ -6318,6 +6318,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
12120 + struct rtl8xxxu_priv *priv = hw->priv;
12121 + struct rtl8xxxu_rx_urb *rx_urb;
12122 + struct rtl8xxxu_tx_urb *tx_urb;
12123 ++ struct sk_buff *skb;
12124 + unsigned long flags;
12125 + int ret, i;
12126 +
12127 +@@ -6368,6 +6369,13 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
12128 + rx_urb->hw = hw;
12129 +
12130 + ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
12131 ++ if (ret) {
12132 ++ if (ret != -ENOMEM) {
12133 ++ skb = (struct sk_buff *)rx_urb->urb.context;
12134 ++ dev_kfree_skb(skb);
12135 ++ }
12136 ++ rtl8xxxu_queue_rx_urb(priv, rx_urb);
12137 ++ }
12138 + }
12139 +
12140 + schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
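
The first rtl8xxxu hunk moves usb_free_urb() behind the common `error:`
label: usb_submit_urb() takes its own reference on success, so the driver's
local reference can, and should, be dropped once on both the success and the
failure path, at the shared exit. Condensed, with the surrounding setup
names invented:

    urb = usb_alloc_urb(0, GFP_KERNEL);    /* we hold one reference */
    if (!urb)
        return -ENOMEM;

    usb_fill_int_urb(urb, udev, pipe, buf, len,
                     int_complete, priv, interval);

    ret = usb_submit_urb(urb, GFP_KERNEL); /* core takes its own ref */
    if (ret)
        usb_unanchor_urb(urb);

    /* ... post-submit register setup on success ... */

    usb_free_urb(urb);   /* drop our ref on success and failure alike */
    return ret;
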
12141 +diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
12142 +index 665d4bbdee6a0..6a881d0be9bf0 100644
12143 +--- a/drivers/net/wireless/realtek/rtw88/main.c
12144 ++++ b/drivers/net/wireless/realtek/rtw88/main.c
12145 +@@ -1465,6 +1465,9 @@ int rtw_core_init(struct rtw_dev *rtwdev)
12146 + ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW);
12147 + if (ret) {
12148 + rtw_warn(rtwdev, "no wow firmware loaded\n");
12149 ++ wait_for_completion(&rtwdev->fw.completion);
12150 ++ if (rtwdev->fw.firmware)
12151 ++ release_firmware(rtwdev->fw.firmware);
12152 + return ret;
12153 + }
12154 + }
12155 +@@ -1479,6 +1482,8 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
12156 + struct rtw_rsvd_page *rsvd_pkt, *tmp;
12157 + unsigned long flags;
12158 +
12159 ++ rtw_wait_firmware_completion(rtwdev);
12160 ++
12161 + if (fw->firmware)
12162 + release_firmware(fw->firmware);
12163 +
12164 +diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
12165 +index 3413973bc4750..7f1f5073b9f4d 100644
12166 +--- a/drivers/net/wireless/realtek/rtw88/pci.c
12167 ++++ b/drivers/net/wireless/realtek/rtw88/pci.c
12168 +@@ -1599,6 +1599,8 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
12169 +
12170 + if (chip->ops->shutdown)
12171 + chip->ops->shutdown(rtwdev);
12172 ++
12173 ++ pci_set_power_state(pdev, PCI_D3hot);
12174 + }
12175 + EXPORT_SYMBOL(rtw_pci_shutdown);
12176 +
12177 +diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
12178 +index 024c2bc275cbe..ca17aa9cf7dc7 100644
12179 +--- a/drivers/net/wireless/realtek/rtw88/pci.h
12180 ++++ b/drivers/net/wireless/realtek/rtw88/pci.h
12181 +@@ -9,8 +9,8 @@
12182 + #define RTK_BEQ_TX_DESC_NUM 256
12183 +
12184 + #define RTK_MAX_RX_DESC_NUM 512
12185 +-/* 8K + rx desc size */
12186 +-#define RTK_PCI_RX_BUF_SIZE (8192 + 24)
12187 ++/* 11K + rx desc size */
12188 ++#define RTK_PCI_RX_BUF_SIZE (11454 + 24)
12189 +
12190 + #define RTK_PCI_CTRL 0x300
12191 + #define BIT_RST_TRXDMA_INTF BIT(20)
12192 +diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
12193 +index 8d93f31597469..9687b376d221b 100644
12194 +--- a/drivers/net/wireless/realtek/rtw88/phy.c
12195 ++++ b/drivers/net/wireless/realtek/rtw88/phy.c
12196 +@@ -147,12 +147,13 @@ void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
12197 + {
12198 + struct rtw_chip_info *chip = rtwdev->chip;
12199 + struct rtw_hal *hal = &rtwdev->hal;
12200 +- const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
12201 + u32 addr, mask;
12202 + u8 path;
12203 +
12204 +- if (dig_cck)
12205 ++ if (chip->dig_cck) {
12206 ++ const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
12207 + rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1);
12208 ++ }
12209 +
12210 + for (path = 0; path < hal->rf_path_num; path++) {
12211 + addr = chip->dig[path].addr;
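
The rtw88 phy.c hunk fixes a classic C trap: the old code formed
`&chip->dig_cck[0]` first and tested the result afterwards, but deriving an
element address from a NULL pointer is already undefined behaviour, so a
compiler is entitled to assume the result is non-NULL and delete the check.
The fix tests the pointer member itself before touching element 0. In
miniature (types invented):

    /* Broken: if chip->dig_cck is NULL, this derivation is UB and
     * the following test may be optimized away. */
    const struct hw_reg *reg = &chip->dig_cck[0];
    if (reg)
        write_reg(reg->addr, val);

    /* Fixed: check the pointer before forming any element address. */
    if (chip->dig_cck) {
        const struct hw_reg *reg = &chip->dig_cck[0];
        write_reg(reg->addr, val);
    }
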
12212 +diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
12213 +index 88e1db65be02c..71428d8cbcfc5 100644
12214 +--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
12215 ++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
12216 +@@ -1203,6 +1203,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
12217 +
12218 + err_dma_mask:
12219 + pci_clear_master(pdev);
12220 ++ pci_release_regions(pdev);
12221 + err_pci_regions:
12222 + pci_disable_device(pdev);
12223 + err_pci_enable:
12224 +diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
12225 +index 423f9b8fbbcf5..fa561d455f7c8 100644
12226 +--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
12227 ++++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
12228 +@@ -1893,7 +1893,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
12229 + goto err_init_dev;
12230 + } else {
12231 + rc = -EINVAL;
12232 +- goto err_ndev;
12233 ++ goto err_init_pci;
12234 + }
12235 +
12236 + ndev_reset_unsafe_flags(ndev);
12237 +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
12238 +index 58b035cc67a01..75ed95a250fb5 100644
12239 +--- a/drivers/nvme/target/core.c
12240 ++++ b/drivers/nvme/target/core.c
12241 +@@ -1142,7 +1142,8 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
12242 + * in case a host died before it enabled the controller. Hence, simply
12243 + * reset the keep alive timer when the controller is enabled.
12244 + */
12245 +- mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
12246 ++ if (ctrl->kato)
12247 ++ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
12248 + }
12249 +
12250 + static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
12251 +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
12252 +index 927eb5f6003f0..4aca5b4a87d75 100644
12253 +--- a/drivers/nvmem/core.c
12254 ++++ b/drivers/nvmem/core.c
12255 +@@ -355,16 +355,14 @@ static void nvmem_cell_add(struct nvmem_cell *cell)
12256 + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
12257 + }
12258 +
12259 +-static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
12260 +- const struct nvmem_cell_info *info,
12261 +- struct nvmem_cell *cell)
12262 ++static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
12263 ++ const struct nvmem_cell_info *info,
12264 ++ struct nvmem_cell *cell)
12265 + {
12266 + cell->nvmem = nvmem;
12267 + cell->offset = info->offset;
12268 + cell->bytes = info->bytes;
12269 +- cell->name = kstrdup_const(info->name, GFP_KERNEL);
12270 +- if (!cell->name)
12271 +- return -ENOMEM;
12272 ++ cell->name = info->name;
12273 +
12274 + cell->bit_offset = info->bit_offset;
12275 + cell->nbits = info->nbits;
12276 +@@ -376,13 +374,30 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
12277 + if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
12278 + dev_err(&nvmem->dev,
12279 + "cell %s unaligned to nvmem stride %d\n",
12280 +- cell->name, nvmem->stride);
12281 ++ cell->name ?: "<unknown>", nvmem->stride);
12282 + return -EINVAL;
12283 + }
12284 +
12285 + return 0;
12286 + }
12287 +
12288 ++static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
12289 ++ const struct nvmem_cell_info *info,
12290 ++ struct nvmem_cell *cell)
12291 ++{
12292 ++ int err;
12293 ++
12294 ++ err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
12295 ++ if (err)
12296 ++ return err;
12297 ++
12298 ++ cell->name = kstrdup_const(info->name, GFP_KERNEL);
12299 ++ if (!cell->name)
12300 ++ return -ENOMEM;
12301 ++
12302 ++ return 0;
12303 ++}
12304 ++
12305 + /**
12306 + * nvmem_add_cells() - Add cell information to an nvmem device
12307 + *
12308 +@@ -823,6 +838,7 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
12309 + {
12310 +
12311 + struct device_node *nvmem_np;
12312 ++ struct nvmem_device *nvmem;
12313 + int index = 0;
12314 +
12315 + if (id)
12316 +@@ -832,7 +848,9 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
12317 + if (!nvmem_np)
12318 + return ERR_PTR(-ENOENT);
12319 +
12320 +- return __nvmem_device_get(nvmem_np, device_match_of_node);
12321 ++ nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
12322 ++ of_node_put(nvmem_np);
12323 ++ return nvmem;
12324 + }
12325 + EXPORT_SYMBOL_GPL(of_nvmem_device_get);
12326 + #endif
12327 +@@ -1433,7 +1451,7 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
12328 + if (!nvmem)
12329 + return -EINVAL;
12330 +
12331 +- rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
12332 ++ rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
12333 + if (rc)
12334 + return rc;
12335 +
12336 +@@ -1463,7 +1481,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
12337 + if (!nvmem)
12338 + return -EINVAL;
12339 +
12340 +- rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
12341 ++ rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
12342 + if (rc)
12343 + return rc;
12344 +
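
The nvmem split separates borrowing from owning: the new
nvmem_cell_info_to_nvmem_cell_nodup() lets the short-lived, stack-local cells
in nvmem_device_cell_read()/_write() borrow info->name (the old
kstrdup_const() copy was never freed on those paths), while registered,
long-lived cells keep the wrapping variant that duplicates and owns the
string. The ownership split, as a sketch with illustrative names:

    /* Borrow: fine when the cell dies before the caller's info does. */
    static void cell_init_nodup(struct cell *c,
                                const struct cell_info *info)
    {
        c->name = info->name;
    }

    /* Own: required when the cell outlives the caller's info. */
    static int cell_init(struct cell *c, const struct cell_info *info)
    {
        cell_init_nodup(c, info);

        c->name = kstrdup_const(info->name, GFP_KERNEL);
        if (!c->name)
            return -ENOMEM;
        return 0;   /* teardown must kfree_const(c->name) */
    }
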
12345 +diff --git a/drivers/opp/core.c b/drivers/opp/core.c
12346 +index 91dcad982d362..11d192fb2e813 100644
12347 +--- a/drivers/opp/core.c
12348 ++++ b/drivers/opp/core.c
12349 +@@ -1918,6 +1918,9 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
12350 + {
12351 + int index;
12352 +
12353 ++ if (!opp_table->genpd_virt_devs)
12354 ++ return;
12355 ++
12356 + for (index = 0; index < opp_table->required_opp_count; index++) {
12357 + if (!opp_table->genpd_virt_devs[index])
12358 + continue;
12359 +@@ -1964,6 +1967,9 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
12360 + if (!opp_table)
12361 + return ERR_PTR(-ENOMEM);
12362 +
12363 ++ if (opp_table->genpd_virt_devs)
12364 ++ return opp_table;
12365 ++
12366 + /*
12367 + * If the genpd's OPP table isn't already initialized, parsing of the
12368 + * required-opps fail for dev. We should retry this after genpd's OPP
12369 +diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
12370 +index 5e5b8821bed8c..ce1c00ea5fdca 100644
12371 +--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
12372 ++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
12373 +@@ -505,7 +505,8 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
12374 + u32 reg;
12375 + int i;
12376 +
12377 +- hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
12378 ++ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
12379 ++ PCI_HEADER_TYPE_MASK;
12380 + if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
12381 + dev_err(pci->dev,
12382 + "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
12383 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
12384 +index 90ff291c24f09..d5f58684d962c 100644
12385 +--- a/drivers/pci/controller/pci-aardvark.c
12386 ++++ b/drivers/pci/controller/pci-aardvark.c
12387 +@@ -9,7 +9,7 @@
12388 + */
12389 +
12390 + #include <linux/delay.h>
12391 +-#include <linux/gpio.h>
12392 ++#include <linux/gpio/consumer.h>
12393 + #include <linux/interrupt.h>
12394 + #include <linux/irq.h>
12395 + #include <linux/irqdomain.h>
12396 +@@ -608,7 +608,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
12397 + * Initialize the configuration space of the PCI-to-PCI bridge
12398 + * associated with the given PCIe interface.
12399 + */
12400 +-static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
12401 ++static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
12402 + {
12403 + struct pci_bridge_emul *bridge = &pcie->bridge;
12404 +
12405 +@@ -634,8 +634,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
12406 + bridge->data = pcie;
12407 + bridge->ops = &advk_pci_bridge_emul_ops;
12408 +
12409 +- pci_bridge_emul_init(bridge, 0);
12410 +-
12411 ++ return pci_bridge_emul_init(bridge, 0);
12412 + }
12413 +
12414 + static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
12415 +@@ -1169,7 +1168,11 @@ static int advk_pcie_probe(struct platform_device *pdev)
12416 +
12417 + advk_pcie_setup_hw(pcie);
12418 +
12419 +- advk_sw_pci_bridge_init(pcie);
12420 ++ ret = advk_sw_pci_bridge_init(pcie);
12421 ++ if (ret) {
12422 ++ dev_err(dev, "Failed to register emulated root PCI bridge\n");
12423 ++ return ret;
12424 ++ }
12425 +
12426 + ret = advk_pcie_init_irq_domain(pcie);
12427 + if (ret) {
12428 +diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
12429 +index bf40ff09c99d6..95c04b0ffeb16 100644
12430 +--- a/drivers/pci/controller/pci-hyperv.c
12431 ++++ b/drivers/pci/controller/pci-hyperv.c
12432 +@@ -1275,11 +1275,25 @@ static void hv_irq_unmask(struct irq_data *data)
12433 + exit_unlock:
12434 + spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
12435 +
12436 +- if (res) {
12437 ++ /*
12438 ++ * During hibernation, when a CPU is offlined, the kernel tries
12439 ++ * to move the interrupt to the remaining CPUs that haven't
12440 ++ * been offlined yet. In this case, the below hv_do_hypercall()
12441 ++ * always fails since the vmbus channel has been closed:
12442 ++ * refer to cpu_disable_common() -> fixup_irqs() ->
12443 ++ * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
12444 ++ *
12445 ++ * Suppress the error message for hibernation because the failure
12446 ++ * during hibernation does not matter (at this time all the devices
12447 ++ * have been frozen). Note: the correct affinity info is still updated
12448 ++ * into the irqdata data structure in migrate_one_irq() ->
12449 ++ * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
12450 ++ * resumes, hv_pci_restore_msi_state() is able to correctly restore
12451 ++ * the interrupt with the correct affinity.
12452 ++ */
12453 ++ if (res && hbus->state != hv_pcibus_removing)
12454 + dev_err(&hbus->hdev->device,
12455 + "%s() failed: %#llx", __func__, res);
12456 +- return;
12457 +- }
12458 +
12459 + pci_msi_unmask_irq(data);
12460 + }
12461 +@@ -3368,6 +3382,34 @@ static int hv_pci_suspend(struct hv_device *hdev)
12462 + return 0;
12463 + }
12464 +
12465 ++static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
12466 ++{
12467 ++ struct msi_desc *entry;
12468 ++ struct irq_data *irq_data;
12469 ++
12470 ++ for_each_pci_msi_entry(entry, pdev) {
12471 ++ irq_data = irq_get_irq_data(entry->irq);
12472 ++ if (WARN_ON_ONCE(!irq_data))
12473 ++ return -EINVAL;
12474 ++
12475 ++ hv_compose_msi_msg(irq_data, &entry->msg);
12476 ++ }
12477 ++
12478 ++ return 0;
12479 ++}
12480 ++
12481 ++/*
12482 ++ * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
12483 ++ * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
12484 ++ * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
12485 ++ * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
12486 ++ * Table entries.
12487 ++ */
12488 ++static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
12489 ++{
12490 ++ pci_walk_bus(hbus->pci_bus, hv_pci_restore_msi_msg, NULL);
12491 ++}
12492 ++
12493 + static int hv_pci_resume(struct hv_device *hdev)
12494 + {
12495 + struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
12496 +@@ -3401,6 +3443,8 @@ static int hv_pci_resume(struct hv_device *hdev)
12497 +
12498 + prepopulate_bars(hbus);
12499 +
12500 ++ hv_pci_restore_msi_state(hbus);
12501 ++
12502 + hbus->state = hv_pcibus_installed;
12503 + return 0;
12504 + out:
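
On resume, plain MMIO writes to MSI registers are not trapped by Hyper-V, so
pci_restore_msi_state() alone is not enough; the new code walks every device
on the bus and re-composes each MSI message so the hypervisor rebuilds its
interrupt-remapping entries. pci_walk_bus() is the stock per-device-callback
helper; a minimal sketch of its use (callback names invented):

    static int visit_one(struct pci_dev *pdev, void *arg)
    {
        int *count = arg;

        (*count)++;          /* per-device work goes here */
        return 0;            /* non-zero would stop the walk early */
    }

    static void visit_all(struct pci_bus *bus)
    {
        int count = 0;

        pci_walk_bus(bus, visit_one, &count);
        pr_info("visited %d devices\n", count);
    }
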
12505 +diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
12506 +index 3176ad3ab0e52..908475d27e0e7 100644
12507 +--- a/drivers/pci/controller/pcie-iproc-msi.c
12508 ++++ b/drivers/pci/controller/pcie-iproc-msi.c
12509 +@@ -209,15 +209,20 @@ static int iproc_msi_irq_set_affinity(struct irq_data *data,
12510 + struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
12511 + int target_cpu = cpumask_first(mask);
12512 + int curr_cpu;
12513 ++ int ret;
12514 +
12515 + curr_cpu = hwirq_to_cpu(msi, data->hwirq);
12516 + if (curr_cpu == target_cpu)
12517 +- return IRQ_SET_MASK_OK_DONE;
12518 ++ ret = IRQ_SET_MASK_OK_DONE;
12519 ++ else {
12520 ++ /* steer MSI to the target CPU */
12521 ++ data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
12522 ++ ret = IRQ_SET_MASK_OK;
12523 ++ }
12524 +
12525 +- /* steer MSI to the target CPU */
12526 +- data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
12527 ++ irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
12528 +
12529 +- return IRQ_SET_MASK_OK;
12530 ++ return ret;
12531 + }
12532 +
12533 + static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
12534 +diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
12535 +index b37e08c4f9d1a..4afd4ee4f7f04 100644
12536 +--- a/drivers/pci/iov.c
12537 ++++ b/drivers/pci/iov.c
12538 +@@ -180,6 +180,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
12539 + virtfn->device = iov->vf_device;
12540 + virtfn->is_virtfn = 1;
12541 + virtfn->physfn = pci_dev_get(dev);
12542 ++ virtfn->no_command_memory = 1;
12543 +
12544 + if (id == 0)
12545 + pci_read_vf_config_common(virtfn);
12546 +diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
12547 +index aac9823b0c6bb..e116815fa8092 100644
12548 +--- a/drivers/perf/thunderx2_pmu.c
12549 ++++ b/drivers/perf/thunderx2_pmu.c
12550 +@@ -805,14 +805,17 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
12551 + list_for_each_entry(rentry, &list, node) {
12552 + if (resource_type(rentry->res) == IORESOURCE_MEM) {
12553 + res = *rentry->res;
12554 ++ rentry = NULL;
12555 + break;
12556 + }
12557 + }
12558 ++ acpi_dev_free_resource_list(&list);
12559 +
12560 +- if (!rentry->res)
12561 ++ if (rentry) {
12562 ++ dev_err(dev, "PMU type %d: Fail to find resource\n", type);
12563 + return NULL;
12564 ++ }
12565 +
12566 +- acpi_dev_free_resource_list(&list);
12567 + base = devm_ioremap_resource(dev, &res);
12568 + if (IS_ERR(base)) {
12569 + dev_err(dev, "PMU type %d: Fail to map resource\n", type);
12570 +diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
12571 +index edac28cd25ddc..633cf07ba6723 100644
12572 +--- a/drivers/perf/xgene_pmu.c
12573 ++++ b/drivers/perf/xgene_pmu.c
12574 +@@ -1453,17 +1453,6 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
12575 + }
12576 +
12577 + #if defined(CONFIG_ACPI)
12578 +-static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
12579 +-{
12580 +- struct resource *res = data;
12581 +-
12582 +- if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
12583 +- acpi_dev_resource_memory(ares, res);
12584 +-
12585 +- /* Always tell the ACPI core to skip this resource */
12586 +- return 1;
12587 +-}
12588 +-
12589 + static struct
12590 + xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
12591 + struct acpi_device *adev, u32 type)
12592 +@@ -1475,6 +1464,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
12593 + struct hw_pmu_info *inf;
12594 + void __iomem *dev_csr;
12595 + struct resource res;
12596 ++ struct resource_entry *rentry;
12597 + int enable_bit;
12598 + int rc;
12599 +
12600 +@@ -1483,11 +1473,23 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
12601 + return NULL;
12602 +
12603 + INIT_LIST_HEAD(&resource_list);
12604 +- rc = acpi_dev_get_resources(adev, &resource_list,
12605 +- acpi_pmu_dev_add_resource, &res);
12606 ++ rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
12607 ++ if (rc <= 0) {
12608 ++ dev_err(dev, "PMU type %d: No resources found\n", type);
12609 ++ return NULL;
12610 ++ }
12611 ++
12612 ++ list_for_each_entry(rentry, &resource_list, node) {
12613 ++ if (resource_type(rentry->res) == IORESOURCE_MEM) {
12614 ++ res = *rentry->res;
12615 ++ rentry = NULL;
12616 ++ break;
12617 ++ }
12618 ++ }
12619 + acpi_dev_free_resource_list(&resource_list);
12620 +- if (rc < 0) {
12621 +- dev_err(dev, "PMU type %d: No resource address found\n", type);
12622 ++
12623 ++ if (rentry) {
12624 ++ dev_err(dev, "PMU type %d: No memory resource found\n", type);
12625 + return NULL;
12626 + }
12627 +
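
The thunderx2 and xgene hunks are the same fix twice: copy the first memory
resource out of acpi_dev_get_resources()'s list, free the whole list, and
only then use the copy; the old flow could dereference a past-the-end
`rentry` when nothing matched, and leaked the list on its early return.
Reusing `rentry` as a found/not-found sentinel (NULLed on a match) keeps the
flow flat. Kernel-style sketch:

    struct resource res;
    struct resource_entry *rentry;
    LIST_HEAD(list);
    int rc;

    rc = acpi_dev_get_resources(adev, &list, NULL, NULL);
    if (rc <= 0)
        return NULL;

    list_for_each_entry(rentry, &list, node) {
        if (resource_type(rentry->res) == IORESOURCE_MEM) {
            res = *rentry->res;  /* copy while the entry lives */
            rentry = NULL;       /* sentinel: found one */
            break;
        }
    }
    acpi_dev_free_resource_list(&list);  /* entries gone from here */

    if (rentry)                          /* no break: nothing found */
        return NULL;
    /* only the copied 'res' may be used past this point */
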
12628 +diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
12629 +index b625a657171e6..11e27136032b9 100644
12630 +--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
12631 ++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
12632 +@@ -515,7 +515,7 @@ int aspeed_pin_config_set(struct pinctrl_dev *pctldev, unsigned int offset,
12633 + val = pmap->val << __ffs(pconf->mask);
12634 +
12635 + rc = regmap_update_bits(pdata->scu, pconf->reg,
12636 +- pmap->mask, val);
12637 ++ pconf->mask, val);
12638 +
12639 + if (rc < 0)
12640 + return rc;
12641 +diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
12642 +index dcf7df797af75..0ed14de0134cf 100644
12643 +--- a/drivers/pinctrl/bcm/Kconfig
12644 ++++ b/drivers/pinctrl/bcm/Kconfig
12645 +@@ -23,6 +23,7 @@ config PINCTRL_BCM2835
12646 + select PINMUX
12647 + select PINCONF
12648 + select GENERIC_PINCONF
12649 ++ select GPIOLIB
12650 + select GPIOLIB_IRQCHIP
12651 + default ARCH_BCM2835 || ARCH_BRCMSTB
12652 + help
12653 +diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
12654 +index c6fe7d64c9137..c7448be64d073 100644
12655 +--- a/drivers/pinctrl/devicetree.c
12656 ++++ b/drivers/pinctrl/devicetree.c
12657 +@@ -129,9 +129,8 @@ static int dt_to_map_one_config(struct pinctrl *p,
12658 + if (!np_pctldev || of_node_is_root(np_pctldev)) {
12659 + of_node_put(np_pctldev);
12660 + ret = driver_deferred_probe_check_state(p->dev);
12661 +- /* keep deferring if modules are enabled unless we've timed out */
12662 +- if (IS_ENABLED(CONFIG_MODULES) && !allow_default &&
12663 +- (ret == -ENODEV))
12664 ++ /* keep deferring if modules are enabled */
12665 ++ if (IS_ENABLED(CONFIG_MODULES) && !allow_default && ret < 0)
12666 + ret = -EPROBE_DEFER;
12667 + return ret;
12668 + }
12669 +diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
12670 +index 151931b593f6e..235a141182bf6 100644
12671 +--- a/drivers/pinctrl/pinctrl-mcp23s08.c
12672 ++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
12673 +@@ -87,7 +87,7 @@ const struct regmap_config mcp23x08_regmap = {
12674 + };
12675 + EXPORT_SYMBOL_GPL(mcp23x08_regmap);
12676 +
12677 +-static const struct reg_default mcp23x16_defaults[] = {
12678 ++static const struct reg_default mcp23x17_defaults[] = {
12679 + {.reg = MCP_IODIR << 1, .def = 0xffff},
12680 + {.reg = MCP_IPOL << 1, .def = 0x0000},
12681 + {.reg = MCP_GPINTEN << 1, .def = 0x0000},
12682 +@@ -98,23 +98,23 @@ static const struct reg_default mcp23x16_defaults[] = {
12683 + {.reg = MCP_OLAT << 1, .def = 0x0000},
12684 + };
12685 +
12686 +-static const struct regmap_range mcp23x16_volatile_range = {
12687 ++static const struct regmap_range mcp23x17_volatile_range = {
12688 + .range_min = MCP_INTF << 1,
12689 + .range_max = MCP_GPIO << 1,
12690 + };
12691 +
12692 +-static const struct regmap_access_table mcp23x16_volatile_table = {
12693 +- .yes_ranges = &mcp23x16_volatile_range,
12694 ++static const struct regmap_access_table mcp23x17_volatile_table = {
12695 ++ .yes_ranges = &mcp23x17_volatile_range,
12696 + .n_yes_ranges = 1,
12697 + };
12698 +
12699 +-static const struct regmap_range mcp23x16_precious_range = {
12700 +- .range_min = MCP_GPIO << 1,
12701 ++static const struct regmap_range mcp23x17_precious_range = {
12702 ++ .range_min = MCP_INTCAP << 1,
12703 + .range_max = MCP_GPIO << 1,
12704 + };
12705 +
12706 +-static const struct regmap_access_table mcp23x16_precious_table = {
12707 +- .yes_ranges = &mcp23x16_precious_range,
12708 ++static const struct regmap_access_table mcp23x17_precious_table = {
12709 ++ .yes_ranges = &mcp23x17_precious_range,
12710 + .n_yes_ranges = 1,
12711 + };
12712 +
12713 +@@ -124,10 +124,10 @@ const struct regmap_config mcp23x17_regmap = {
12714 +
12715 + .reg_stride = 2,
12716 + .max_register = MCP_OLAT << 1,
12717 +- .volatile_table = &mcp23x16_volatile_table,
12718 +- .precious_table = &mcp23x16_precious_table,
12719 +- .reg_defaults = mcp23x16_defaults,
12720 +- .num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults),
12721 ++ .volatile_table = &mcp23x17_volatile_table,
12722 ++ .precious_table = &mcp23x17_precious_table,
12723 ++ .reg_defaults = mcp23x17_defaults,
12724 ++ .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
12725 + .cache_type = REGCACHE_FLAT,
12726 + .val_format_endian = REGMAP_ENDIAN_LITTLE,
12727 + };
12728 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
12729 +index c322f30a20648..22283ba797cd0 100644
12730 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c
12731 ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
12732 +@@ -1060,12 +1060,10 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
12733 + * when TLMM is powered on. To allow that, enable the GPIO
12734 + * summary line to be wakeup capable at GIC.
12735 + */
12736 +- if (d->parent_data)
12737 +- irq_chip_set_wake_parent(d, on);
12738 +-
12739 +- irq_set_irq_wake(pctrl->irq, on);
12740 ++ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
12741 ++ return irq_chip_set_wake_parent(d, on);
12742 +
12743 +- return 0;
12744 ++ return irq_set_irq_wake(pctrl->irq, on);
12745 + }
12746 +
12747 + static int msm_gpio_irq_reqres(struct irq_data *d)
12748 +@@ -1226,6 +1224,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
12749 + pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
12750 + pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity;
12751 + pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
12752 ++ pctrl->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND |
12753 ++ IRQCHIP_SET_TYPE_MASKED;
12754 +
12755 + np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
12756 + if (np) {
12757 +diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
12758 +index b59180bff5a3e..ef61298c30bdd 100644
12759 +--- a/drivers/platform/chrome/cros_ec_lightbar.c
12760 ++++ b/drivers/platform/chrome/cros_ec_lightbar.c
12761 +@@ -116,6 +116,8 @@ static int get_lightbar_version(struct cros_ec_dev *ec,
12762 +
12763 + param = (struct ec_params_lightbar *)msg->data;
12764 + param->cmd = LIGHTBAR_CMD_VERSION;
12765 ++ msg->outsize = sizeof(param->cmd);
12766 ++ msg->result = sizeof(resp->version);
12767 + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
12768 + if (ret < 0) {
12769 + ret = 0;
12770 +diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
12771 +index c27548fd386ac..0d2ed6d1f9c79 100644
12772 +--- a/drivers/platform/x86/mlx-platform.c
12773 ++++ b/drivers/platform/x86/mlx-platform.c
12774 +@@ -319,15 +319,6 @@ static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
12775 + },
12776 + };
12777 +
12778 +-static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = {
12779 +- {
12780 +- I2C_BOARD_INFO("24c32", 0x51),
12781 +- },
12782 +- {
12783 +- I2C_BOARD_INFO("24c32", 0x50),
12784 +- },
12785 +-};
12786 +-
12787 + static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
12788 + {
12789 + I2C_BOARD_INFO("dps460", 0x59),
12790 +@@ -752,15 +743,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = {
12791 + .label = "psu1",
12792 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
12793 + .mask = BIT(0),
12794 +- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0],
12795 +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
12796 ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
12797 + },
12798 + {
12799 + .label = "psu2",
12800 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
12801 + .mask = BIT(1),
12802 +- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1],
12803 +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
12804 ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
12805 + },
12806 + };
12807 +
12808 +diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
12809 +index 599a0f66a3845..a34d95ed70b20 100644
12810 +--- a/drivers/pwm/pwm-img.c
12811 ++++ b/drivers/pwm/pwm-img.c
12812 +@@ -277,6 +277,8 @@ static int img_pwm_probe(struct platform_device *pdev)
12813 + return PTR_ERR(pwm->pwm_clk);
12814 + }
12815 +
12816 ++ platform_set_drvdata(pdev, pwm);
12817 ++
12818 + pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
12819 + pm_runtime_use_autosuspend(&pdev->dev);
12820 + pm_runtime_enable(&pdev->dev);
12821 +@@ -313,7 +315,6 @@ static int img_pwm_probe(struct platform_device *pdev)
12822 + goto err_suspend;
12823 + }
12824 +
12825 +- platform_set_drvdata(pdev, pwm);
12826 + return 0;
12827 +
12828 + err_suspend:
12829 +diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
12830 +index 9d965ffe66d1e..da9bc3d10104a 100644
12831 +--- a/drivers/pwm/pwm-lpss.c
12832 ++++ b/drivers/pwm/pwm-lpss.c
12833 +@@ -93,10 +93,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
12834 + * The equation is:
12835 + * base_unit = round(base_unit_range * freq / c)
12836 + */
12837 +- base_unit_range = BIT(lpwm->info->base_unit_bits) - 1;
12838 ++ base_unit_range = BIT(lpwm->info->base_unit_bits);
12839 + freq *= base_unit_range;
12840 +
12841 + base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
12842 ++ /* base_unit must not be 0 and we also want to avoid overflowing it */
12843 ++ base_unit = clamp_val(base_unit, 1, base_unit_range - 1);
12844 +
12845 + on_time_div = 255ULL * duty_ns;
12846 + do_div(on_time_div, period_ns);
12847 +@@ -104,8 +106,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
12848 +
12849 + orig_ctrl = ctrl = pwm_lpss_read(pwm);
12850 + ctrl &= ~PWM_ON_TIME_DIV_MASK;
12851 +- ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT);
12852 +- base_unit &= base_unit_range;
12853 ++ ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT);
12854 + ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
12855 + ctrl |= on_time_div;
12856 +
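
The pwm-lpss change is range discipline on the base-unit field: with n
base_unit_bits the register holds 1 .. 2^n - 1, so after rounding, the value
must be clamped into that window (0 would freeze the output, and 2^n or more
would overflow into neighbouring bits). A self-contained check of the
arithmetic, with an invented-but-plausible 19.2 MHz input clock:

    #include <stdio.h>
    #include <stdint.h>

    #define BASE_UNIT_BITS 22

    static uint64_t div_round_closest(uint64_t n, uint64_t d)
    {
        return (n + d / 2) / d;
    }

    int main(void)
    {
        uint64_t range = 1ULL << BASE_UNIT_BITS;
        uint64_t clk = 19200000;               /* 19.2 MHz */
        uint64_t freq = 1;                     /* ask for 1 Hz */
        uint64_t base_unit = div_round_closest(freq * range, clk);

        /* clamp to [1, range - 1], as the hunk does; the rounded
         * value here is 0 and would hang the PWM without it */
        if (base_unit < 1)
            base_unit = 1;
        else if (base_unit > range - 1)
            base_unit = range - 1;

        printf("base_unit = %llu\n", (unsigned long long)base_unit);
        return 0;
    }
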
12857 +diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
12858 +index eb8c9cb645a6c..098e94335cb5b 100644
12859 +--- a/drivers/pwm/pwm-rockchip.c
12860 ++++ b/drivers/pwm/pwm-rockchip.c
12861 +@@ -288,6 +288,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
12862 + const struct of_device_id *id;
12863 + struct rockchip_pwm_chip *pc;
12864 + struct resource *r;
12865 ++ u32 enable_conf, ctrl;
12866 + int ret, count;
12867 +
12868 + id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev);
12869 +@@ -362,7 +363,9 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
12870 + }
12871 +
12872 + /* Keep the PWM clk enabled if the PWM appears to be up and running. */
12873 +- if (!pwm_is_enabled(pc->chip.pwms))
12874 ++ enable_conf = pc->data->enable_conf;
12875 ++ ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
12876 ++ if ((ctrl & enable_conf) != enable_conf)
12877 + clk_disable(pc->clk);
12878 +
12879 + return 0;
12880 +diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
12881 +index 451608e960a18..152946e033d17 100644
12882 +--- a/drivers/rapidio/devices/rio_mport_cdev.c
12883 ++++ b/drivers/rapidio/devices/rio_mport_cdev.c
12884 +@@ -871,15 +871,16 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
12885 + rmcd_error("pin_user_pages_fast err=%ld",
12886 + pinned);
12887 + nr_pages = 0;
12888 +- } else
12889 ++ } else {
12890 + rmcd_error("pinned %ld out of %ld pages",
12891 + pinned, nr_pages);
12892 ++ /*
12893 ++ * Set nr_pages up to mean "how many pages to unpin" in
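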
12894 ++ * the error handler:
12895 ++ */
12896 ++ nr_pages = pinned;
12897 ++ }
12898 + ret = -EFAULT;
12899 +- /*
12900 +- * Set nr_pages up to mean "how many pages to unpin, in
12901 +- * the error handler:
12902 +- */
12903 +- nr_pages = pinned;
12904 + goto err_pg;
12905 + }
12906 +
12907 +@@ -1679,6 +1680,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
12908 + struct rio_dev *rdev;
12909 + struct rio_switch *rswitch = NULL;
12910 + struct rio_mport *mport;
12911 ++ struct device *dev;
12912 + size_t size;
12913 + u32 rval;
12914 + u32 swpinfo = 0;
12915 +@@ -1693,8 +1695,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
12916 + rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
12917 + dev_info.comptag, dev_info.destid, dev_info.hopcount);
12918 +
12919 +- if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
12920 ++ dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
12921 ++ if (dev) {
12922 + rmcd_debug(RDEV, "device %s already exists", dev_info.name);
12923 ++ put_device(dev);
12924 + return -EEXIST;
12925 + }
12926 +
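
bus_find_device_by_name() hands back its match with the reference count
already raised; the rio_mport fix keeps the returned pointer so the
duplicate-name path can put_device() it rather than leak a reference. The
general pattern (bus type and message invented):

    struct device *dev;

    dev = bus_find_device_by_name(&my_bus_type, NULL, name);
    if (dev) {
        pr_debug("%s already exists\n", name);
        put_device(dev);   /* the lookup took a ref; give it back */
        return -EEXIST;
    }
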
12927 +diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
12928 +index 569d9ad2c5942..6939aa5b3dc7f 100644
12929 +--- a/drivers/ras/cec.c
12930 ++++ b/drivers/ras/cec.c
12931 +@@ -553,20 +553,20 @@ static struct notifier_block cec_nb = {
12932 + .priority = MCE_PRIO_CEC,
12933 + };
12934 +
12935 +-static void __init cec_init(void)
12936 ++static int __init cec_init(void)
12937 + {
12938 + if (ce_arr.disabled)
12939 +- return;
12940 ++ return -ENODEV;
12941 +
12942 + ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL);
12943 + if (!ce_arr.array) {
12944 + pr_err("Error allocating CE array page!\n");
12945 +- return;
12946 ++ return -ENOMEM;
12947 + }
12948 +
12949 + if (create_debugfs_nodes()) {
12950 + free_page((unsigned long)ce_arr.array);
12951 +- return;
12952 ++ return -ENOMEM;
12953 + }
12954 +
12955 + INIT_DELAYED_WORK(&cec_work, cec_work_fn);
12956 +@@ -575,6 +575,7 @@ static void __init cec_init(void)
12957 + mce_register_decode_chain(&cec_nb);
12958 +
12959 + pr_info("Correctable Errors collector initialized.\n");
12960 ++ return 0;
12961 + }
12962 + late_initcall(cec_init);
12963 +
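
cec_init() changes from void to int because initcall functions are expected
to report status: late_initcall() registers an initcall_t, which is
`int (*)(void)`, and a negative return is recorded by the init core as a
failure. Minimal form (the probe helper is invented):

    static int __init my_feature_init(void)
    {
        if (!my_feature_present())
            return -ENODEV;   /* failure now visible to the core */

        return 0;
    }
    late_initcall(my_feature_init);
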
12964 +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
12965 +index be8c709a74883..25e601bf9383e 100644
12966 +--- a/drivers/regulator/core.c
12967 ++++ b/drivers/regulator/core.c
12968 +@@ -5187,15 +5187,20 @@ regulator_register(const struct regulator_desc *regulator_desc,
12969 + else if (regulator_desc->supply_name)
12970 + rdev->supply_name = regulator_desc->supply_name;
12971 +
12972 +- /*
12973 +- * Attempt to resolve the regulator supply, if specified,
12974 +- * but don't return an error if we fail because we will try
12975 +- * to resolve it again later as more regulators are added.
12976 +- */
12977 +- if (regulator_resolve_supply(rdev))
12978 +- rdev_dbg(rdev, "unable to resolve supply\n");
12979 +-
12980 + ret = set_machine_constraints(rdev, constraints);
12981 ++ if (ret == -EPROBE_DEFER) {
12982 ++ /* Regulator might be in bypass mode and so needs its supply
12983 ++ * to set the constraints */
12984 ++ /* FIXME: this currently triggers a chicken-and-egg problem
12985 ++ * when creating -SUPPLY symlink in sysfs to a regulator
12986 ++ * that is just being created */
12987 ++ ret = regulator_resolve_supply(rdev);
12988 ++ if (!ret)
12989 ++ ret = set_machine_constraints(rdev, constraints);
12990 ++ else
12991 ++ rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
12992 ++ ERR_PTR(ret));
12993 ++ }
12994 + if (ret < 0)
12995 + goto wash;
12996 +
12997 +diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
12998 +index 3d3d87210ef2c..58d1d7e571d66 100644
12999 +--- a/drivers/remoteproc/mtk_scp_ipi.c
13000 ++++ b/drivers/remoteproc/mtk_scp_ipi.c
13001 +@@ -30,10 +30,8 @@ int scp_ipi_register(struct mtk_scp *scp,
13002 + scp_ipi_handler_t handler,
13003 + void *priv)
13004 + {
13005 +- if (!scp) {
13006 +- dev_err(scp->dev, "scp device is not ready\n");
13007 ++ if (!scp)
13008 + return -EPROBE_DEFER;
13009 +- }
13010 +
13011 + if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL))
13012 + return -EINVAL;
13013 +diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
13014 +index 83f2b8804ee98..96a17ec291401 100644
13015 +--- a/drivers/rpmsg/mtk_rpmsg.c
13016 ++++ b/drivers/rpmsg/mtk_rpmsg.c
13017 +@@ -200,7 +200,6 @@ static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
13018 + struct rpmsg_device *rpdev;
13019 + struct mtk_rpmsg_device *mdev;
13020 + struct platform_device *pdev = mtk_subdev->pdev;
13021 +- int ret;
13022 +
13023 + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
13024 + if (!mdev)
13025 +@@ -219,13 +218,7 @@ static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
13026 + rpdev->dev.parent = &pdev->dev;
13027 + rpdev->dev.release = mtk_rpmsg_release_device;
13028 +
13029 +- ret = rpmsg_register_device(rpdev);
13030 +- if (ret) {
13031 +- kfree(mdev);
13032 +- return ret;
13033 +- }
13034 +-
13035 +- return 0;
13036 ++ return rpmsg_register_device(rpdev);
13037 + }
13038 +
13039 + static void mtk_register_device_work_function(struct work_struct *register_work)
13040 +diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
13041 +index 4abbeea782fa4..19903de6268db 100644
13042 +--- a/drivers/rpmsg/qcom_smd.c
13043 ++++ b/drivers/rpmsg/qcom_smd.c
13044 +@@ -1338,7 +1338,7 @@ static int qcom_smd_parse_edge(struct device *dev,
13045 + ret = of_property_read_u32(node, key, &edge->edge_id);
13046 + if (ret) {
13047 + dev_err(dev, "edge missing %s property\n", key);
13048 +- return -EINVAL;
13049 ++ goto put_node;
13050 + }
13051 +
13052 + edge->remote_pid = QCOM_SMEM_HOST_ANY;
13053 +@@ -1349,32 +1349,37 @@ static int qcom_smd_parse_edge(struct device *dev,
13054 + edge->mbox_client.knows_txdone = true;
13055 + edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
13056 + if (IS_ERR(edge->mbox_chan)) {
13057 +- if (PTR_ERR(edge->mbox_chan) != -ENODEV)
13058 +- return PTR_ERR(edge->mbox_chan);
13059 ++ if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
13060 ++ ret = PTR_ERR(edge->mbox_chan);
13061 ++ goto put_node;
13062 ++ }
13063 +
13064 + edge->mbox_chan = NULL;
13065 +
13066 + syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
13067 + if (!syscon_np) {
13068 + dev_err(dev, "no qcom,ipc node\n");
13069 +- return -ENODEV;
13070 ++ ret = -ENODEV;
13071 ++ goto put_node;
13072 + }
13073 +
13074 + edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
13075 +- if (IS_ERR(edge->ipc_regmap))
13076 +- return PTR_ERR(edge->ipc_regmap);
13077 ++ if (IS_ERR(edge->ipc_regmap)) {
13078 ++ ret = PTR_ERR(edge->ipc_regmap);
13079 ++ goto put_node;
13080 ++ }
13081 +
13082 + key = "qcom,ipc";
13083 + ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
13084 + if (ret < 0) {
13085 + dev_err(dev, "no offset in %s\n", key);
13086 +- return -EINVAL;
13087 ++ goto put_node;
13088 + }
13089 +
13090 + ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
13091 + if (ret < 0) {
13092 + dev_err(dev, "no bit in %s\n", key);
13093 +- return -EINVAL;
13094 ++ goto put_node;
13095 + }
13096 + }
13097 +
13098 +@@ -1385,7 +1390,8 @@ static int qcom_smd_parse_edge(struct device *dev,
13099 + irq = irq_of_parse_and_map(node, 0);
13100 + if (irq < 0) {
13101 + dev_err(dev, "required smd interrupt missing\n");
13102 +- return -EINVAL;
13103 ++ ret = irq;
13104 ++ goto put_node;
13105 + }
13106 +
13107 + ret = devm_request_irq(dev, irq,
13108 +@@ -1393,12 +1399,18 @@ static int qcom_smd_parse_edge(struct device *dev,
13109 + node->name, edge);
13110 + if (ret) {
13111 + dev_err(dev, "failed to request smd irq\n");
13112 +- return ret;
13113 ++ goto put_node;
13114 + }
13115 +
13116 + edge->irq = irq;
13117 +
13118 + return 0;
13119 ++
13120 ++put_node:
13121 ++ of_node_put(node);
13122 ++ edge->of_node = NULL;
13123 ++
13124 ++ return ret;
13125 + }
13126 +
13127 + /*
13128 +diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
13129 +index 49702942bb086..70b198423deba 100644
13130 +--- a/drivers/rtc/rtc-ds1307.c
13131 ++++ b/drivers/rtc/rtc-ds1307.c
13132 +@@ -352,6 +352,10 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
13133 + regmap_update_bits(ds1307->regmap, DS1340_REG_FLAG,
13134 + DS1340_BIT_OSF, 0);
13135 + break;
13136 ++ case ds_1388:
13137 ++ regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG,
13138 ++ DS1388_BIT_OSF, 0);
13139 ++ break;
13140 + case mcp794xx:
13141 + /*
13142 + * these bits were cleared when preparing the date/time
13143 +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
13144 +index 51ea56b73a97d..4e30047d76c46 100644
13145 +--- a/drivers/s390/net/qeth_core.h
13146 ++++ b/drivers/s390/net/qeth_core.h
13147 +@@ -680,6 +680,11 @@ struct qeth_card_blkt {
13148 + int inter_packet_jumbo;
13149 + };
13150 +
13151 ++enum qeth_pnso_mode {
13152 ++ QETH_PNSO_NONE,
13153 ++ QETH_PNSO_BRIDGEPORT,
13154 ++};
13155 ++
13156 + #define QETH_BROADCAST_WITH_ECHO 0x01
13157 + #define QETH_BROADCAST_WITHOUT_ECHO 0x02
13158 + struct qeth_card_info {
13159 +@@ -696,6 +701,7 @@ struct qeth_card_info {
13160 + /* no bitfield, we take a pointer on these two: */
13161 + u8 has_lp2lp_cso_v6;
13162 + u8 has_lp2lp_cso_v4;
13163 ++ enum qeth_pnso_mode pnso_mode;
13164 + enum qeth_card_types type;
13165 + enum qeth_link_types link_type;
13166 + int broadcast_capable;
13167 +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
13168 +index b4e06aeb6dc1c..7c6f6a09b99e4 100644
13169 +--- a/drivers/s390/net/qeth_l2_main.c
13170 ++++ b/drivers/s390/net/qeth_l2_main.c
13171 +@@ -273,6 +273,17 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
13172 + return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
13173 + }
13174 +
13175 ++static void qeth_l2_set_pnso_mode(struct qeth_card *card,
13176 ++ enum qeth_pnso_mode mode)
13177 ++{
13178 ++ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
13179 ++ WRITE_ONCE(card->info.pnso_mode, mode);
13180 ++ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
13181 ++
13182 ++ if (mode == QETH_PNSO_NONE)
13183 ++ drain_workqueue(card->event_wq);
13184 ++}
13185 ++
13186 + static void qeth_l2_stop_card(struct qeth_card *card)
13187 + {
13188 + QETH_CARD_TEXT(card, 2, "stopcard");
13189 +@@ -291,7 +302,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
13190 +
13191 + qeth_qdio_clear_card(card, 0);
13192 + qeth_clear_working_pool_list(card);
13193 +- flush_workqueue(card->event_wq);
13194 ++ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
13195 + qeth_flush_local_addrs(card);
13196 + card->info.promisc_mode = 0;
13197 + }
13198 +@@ -1111,12 +1122,6 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
13199 + NULL
13200 + };
13201 +
13202 +- /* Role should not change by itself, but if it did, */
13203 +- /* information from the hardware is authoritative. */
13204 +- mutex_lock(&data->card->sbp_lock);
13205 +- data->card->options.sbp.role = entry->role;
13206 +- mutex_unlock(&data->card->sbp_lock);
13207 +-
13208 + snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
13209 + snprintf(env_role, sizeof(env_role), "ROLE=%s",
13210 + (entry->role == QETH_SBP_ROLE_NONE) ? "none" :
13211 +@@ -1165,19 +1170,34 @@ static void qeth_bridge_state_change(struct qeth_card *card,
13212 + }
13213 +
13214 + struct qeth_addr_change_data {
13215 +- struct work_struct worker;
13216 ++ struct delayed_work dwork;
13217 + struct qeth_card *card;
13218 + struct qeth_ipacmd_addr_change ac_event;
13219 + };
13220 +
13221 + static void qeth_addr_change_event_worker(struct work_struct *work)
13222 + {
13223 +- struct qeth_addr_change_data *data =
13224 +- container_of(work, struct qeth_addr_change_data, worker);
13225 ++ struct delayed_work *dwork = to_delayed_work(work);
13226 ++ struct qeth_addr_change_data *data;
13227 ++ struct qeth_card *card;
13228 + int i;
13229 +
13230 ++ data = container_of(dwork, struct qeth_addr_change_data, dwork);
13231 ++ card = data->card;
13232 ++
13233 + QETH_CARD_TEXT(data->card, 4, "adrchgew");
13234 ++
13235 ++ if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
13236 ++ goto free;
13237 ++
13238 + if (data->ac_event.lost_event_mask) {
13239 ++ /* Potential re-config in progress, try again later: */
13240 ++ if (!mutex_trylock(&card->sbp_lock)) {
13241 ++ queue_delayed_work(card->event_wq, dwork,
13242 ++ msecs_to_jiffies(100));
13243 ++ return;
13244 ++ }
13245 ++
13246 + dev_info(&data->card->gdev->dev,
13247 + "Address change notification stopped on %s (%s)\n",
13248 + data->card->dev->name,
13249 +@@ -1186,8 +1206,9 @@ static void qeth_addr_change_event_worker(struct work_struct *work)
13250 + : (data->ac_event.lost_event_mask == 0x02)
13251 + ? "Bridge port state change"
13252 + : "Unknown reason");
13253 +- mutex_lock(&data->card->sbp_lock);
13254 ++
13255 + data->card->options.sbp.hostnotification = 0;
13256 ++ card->info.pnso_mode = QETH_PNSO_NONE;
13257 + mutex_unlock(&data->card->sbp_lock);
13258 + qeth_bridge_emit_host_event(data->card, anev_abort,
13259 + 0, NULL, NULL);
13260 +@@ -1201,6 +1222,8 @@ static void qeth_addr_change_event_worker(struct work_struct *work)
13261 + &entry->token,
13262 + &entry->addr_lnid);
13263 + }
13264 ++
13265 ++free:
13266 + kfree(data);
13267 + }
13268 +
13269 +@@ -1212,6 +1235,9 @@ static void qeth_addr_change_event(struct qeth_card *card,
13270 + struct qeth_addr_change_data *data;
13271 + int extrasize;
13272 +
13273 ++ if (card->info.pnso_mode == QETH_PNSO_NONE)
13274 ++ return;
13275 ++
13276 + QETH_CARD_TEXT(card, 4, "adrchgev");
13277 + if (cmd->hdr.return_code != 0x0000) {
13278 + if (cmd->hdr.return_code == 0x0010) {
13279 +@@ -1231,11 +1257,11 @@ static void qeth_addr_change_event(struct qeth_card *card,
13280 + QETH_CARD_TEXT(card, 2, "ACNalloc");
13281 + return;
13282 + }
13283 +- INIT_WORK(&data->worker, qeth_addr_change_event_worker);
13284 ++ INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
13285 + data->card = card;
13286 + memcpy(&data->ac_event, hostevs,
13287 + sizeof(struct qeth_ipacmd_addr_change) + extrasize);
13288 +- queue_work(card->event_wq, &data->worker);
13289 ++ queue_delayed_work(card->event_wq, &data->dwork, 0);
13290 + }
13291 +
13292 + /* SETBRIDGEPORT support; sending commands */
13293 +@@ -1556,9 +1582,14 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
13294 +
13295 + if (enable) {
13296 + qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
13297 ++ qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
13298 + rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card);
13299 +- } else
13300 ++ if (rc)
13301 ++ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
13302 ++ } else {
13303 + rc = qeth_l2_pnso(card, 0, NULL, NULL);
13304 ++ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
13305 ++ }
13306 + return rc;
13307 + }
13308 +
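
Above, the qeth address-change handling becomes delayed work: when sbp_lock is contended because a re-configuration may be in flight, the worker re-queues itself 100ms later instead of blocking the event workqueue, and the new pnso_mode gate drops events once notifications are stopped. A compilable pthreads sketch of just the trylock-and-requeue part; illustrative only, the kernel uses a delayed workqueue item, not a polling loop:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 when the event was handled, 1 when it should be retried later. */
static int addr_change_worker(void)
{
	if (pthread_mutex_trylock(&cfg_lock) != 0)
		return 1;	/* potential re-config in progress: back off, don't block */

	puts("processing address-change event under cfg_lock");
	pthread_mutex_unlock(&cfg_lock);
	return 0;
}

int main(void)
{
	/* crude stand-in for queue_delayed_work(wq, dwork, msecs_to_jiffies(100)) */
	while (addr_change_worker())
		usleep(100 * 1000);
	return 0;
}
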
13309 +diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
13310 +index 86bcae992f725..4695d25e54f24 100644
13311 +--- a/drivers/s390/net/qeth_l2_sys.c
13312 ++++ b/drivers/s390/net/qeth_l2_sys.c
13313 +@@ -157,6 +157,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
13314 + rc = -EBUSY;
13315 + else if (qeth_card_hw_is_reachable(card)) {
13316 + rc = qeth_bridgeport_an_set(card, enable);
13317 ++ /* sbp_lock ensures ordering vs notifications-stopped events */
13318 + if (!rc)
13319 + card->options.sbp.hostnotification = enable;
13320 + } else
13321 +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
13322 +index 9b81cfbbc5c53..239e04c03cf90 100644
13323 +--- a/drivers/scsi/be2iscsi/be_main.c
13324 ++++ b/drivers/scsi/be2iscsi/be_main.c
13325 +@@ -3020,6 +3020,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
13326 + goto create_eq_error;
13327 + }
13328 +
13329 ++ mem->dma = paddr;
13330 + mem->va = eq_vaddress;
13331 + ret = be_fill_queue(eq, phba->params.num_eq_entries,
13332 + sizeof(struct be_eq_entry), eq_vaddress);
13333 +@@ -3029,7 +3030,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
13334 + goto create_eq_error;
13335 + }
13336 +
13337 +- mem->dma = paddr;
13338 + ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
13339 + BEISCSI_EQ_DELAY_DEF);
13340 + if (ret) {
13341 +@@ -3086,6 +3086,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
13342 + goto create_cq_error;
13343 + }
13344 +
13345 ++ mem->dma = paddr;
13346 + ret = be_fill_queue(cq, phba->params.num_cq_entries,
13347 + sizeof(struct sol_cqe), cq_vaddress);
13348 + if (ret) {
13349 +@@ -3095,7 +3096,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
13350 + goto create_cq_error;
13351 + }
13352 +
13353 +- mem->dma = paddr;
13354 + ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
13355 + false, 0);
13356 + if (ret) {
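
Both be2iscsi hunks move the mem->dma assignment ahead of be_fill_queue(): the shared error label frees the buffer through mem, so with the assignment after the call a fill failure left the handle unset and the allocation leaked. A small sketch of recording ownership before the fallible step; malloc()/free() stand in for the DMA pool and all names are illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct queue_mem {
	void *va;			/* NULL means "nothing to free" */
};

static int fill_queue(int should_fail)
{
	return should_fail ? -ENOMEM : 0;
}

static int create_queue(int should_fail)
{
	struct queue_mem mem = { 0 };
	int ret;

	mem.va = malloc(64);		/* ownership recorded BEFORE the call that can fail */
	if (!mem.va)
		return -ENOMEM;

	ret = fill_queue(should_fail);
	if (ret)
		goto create_error;

	free(mem.va);			/* demo only: real code hands the queue to hardware */
	return 0;

create_error:
	free(mem.va);			/* sees a valid handle even when fill_queue() failed */
	return ret;
}

int main(void)
{
	printf("%d %d\n", create_queue(0), create_queue(1));
	return 0;
}
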
13357 +diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
13358 +index bc5d84f87d8fc..440ef32be048f 100644
13359 +--- a/drivers/scsi/bfa/bfad.c
13360 ++++ b/drivers/scsi/bfa/bfad.c
13361 +@@ -749,6 +749,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
13362 +
13363 + if (bfad->pci_bar0_kva == NULL) {
13364 + printk(KERN_ERR "Fail to map bar0\n");
13365 ++ rc = -ENODEV;
13366 + goto out_release_region;
13367 + }
13368 +
13369 +diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
13370 +index 950f9cdf0577f..5d0f42031d121 100644
13371 +--- a/drivers/scsi/csiostor/csio_hw.c
13372 ++++ b/drivers/scsi/csiostor/csio_hw.c
13373 +@@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
13374 + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
13375 + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
13376 + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
13377 +- ret = EINVAL;
13378 ++ ret = -EINVAL;
13379 + goto bye;
13380 + }
13381 +
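
The csiostor fix above replaces ret = EINVAL with ret = -EINVAL: a positive value looks like success to callers that test ret < 0, so the error was silently swallowed; kernel convention is negative errno. The same one-character bug is fixed below in qla_nvme.c and qla_target.c. A two-function illustration:

#include <errno.h>
#include <stdio.h>

static int check_fw(int bad)
{
	if (bad)
		return -EINVAL;		/* negative errno, so "ret < 0" catches it */
	return 0;
}

int main(void)
{
	printf("error seen: %d\n", check_fw(1) < 0);	/* prints 1 */
	return 0;
}
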
13382 +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
13383 +index 635f6f9cffc40..ef91f3d01f989 100644
13384 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c
13385 ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
13386 +@@ -4928,6 +4928,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
13387 + if (IS_ERR(vhost->work_thread)) {
13388 + dev_err(dev, "Couldn't create kernel thread: %ld\n",
13389 + PTR_ERR(vhost->work_thread));
13390 ++ rc = PTR_ERR(vhost->work_thread);
13391 + goto free_host_mem;
13392 + }
13393 +
13394 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
13395 +index a85c9672c6ea3..a67749c8f4ab3 100644
13396 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
13397 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
13398 +@@ -1808,18 +1808,22 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
13399 + /* TMs are on msix_index == 0 */
13400 + if (reply_q->msix_index == 0)
13401 + continue;
13402 ++ synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
13403 + if (reply_q->irq_poll_scheduled) {
13404 + /* Calling irq_poll_disable will wait for any pending
13405 + * callbacks to have completed.
13406 + */
13407 + irq_poll_disable(&reply_q->irqpoll);
13408 + irq_poll_enable(&reply_q->irqpoll);
13409 +- reply_q->irq_poll_scheduled = false;
13410 +- reply_q->irq_line_enable = true;
13411 +- enable_irq(reply_q->os_irq);
13412 +- continue;
13413 ++ /* check how the scheduled poll has ended,
13414 ++ * clean up only if necessary
13415 ++ */
13416 ++ if (reply_q->irq_poll_scheduled) {
13417 ++ reply_q->irq_poll_scheduled = false;
13418 ++ reply_q->irq_line_enable = true;
13419 ++ enable_irq(reply_q->os_irq);
13420 ++ }
13421 + }
13422 +- synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
13423 + }
13424 + }
13425 +
13426 +diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
13427 +index 8906aceda4c43..0354898d7cac1 100644
13428 +--- a/drivers/scsi/mvumi.c
13429 ++++ b/drivers/scsi/mvumi.c
13430 +@@ -2425,6 +2425,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
13431 + if (IS_ERR(mhba->dm_thread)) {
13432 + dev_err(&mhba->pdev->dev,
13433 + "failed to create device scan thread\n");
13434 ++ ret = PTR_ERR(mhba->dm_thread);
13435 + mutex_unlock(&mhba->sas_discovery_mutex);
13436 + goto fail_create_thread;
13437 + }
13438 +diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
13439 +index 51cfab9d1afdc..ed3054fffa344 100644
13440 +--- a/drivers/scsi/qedf/qedf_main.c
13441 ++++ b/drivers/scsi/qedf/qedf_main.c
13442 +@@ -704,7 +704,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
13443 + rdata = fcport->rdata;
13444 + if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
13445 + QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
13446 +- rc = 1;
13447 ++ rc = SUCCESS;
13448 + goto out;
13449 + }
13450 +
13451 +diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
13452 +index 946cebc4c9322..90aa64604ad78 100644
13453 +--- a/drivers/scsi/qedi/qedi_fw.c
13454 ++++ b/drivers/scsi/qedi/qedi_fw.c
13455 +@@ -59,6 +59,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
13456 + "Freeing tid=0x%x for cid=0x%x\n",
13457 + cmd->task_id, qedi_conn->iscsi_conn_id);
13458 +
13459 ++ spin_lock(&qedi_conn->list_lock);
13460 + if (likely(cmd->io_cmd_in_list)) {
13461 + cmd->io_cmd_in_list = false;
13462 + list_del_init(&cmd->io_cmd);
13463 +@@ -69,6 +70,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
13464 + cmd->task_id, qedi_conn->iscsi_conn_id,
13465 + &cmd->io_cmd);
13466 + }
13467 ++ spin_unlock(&qedi_conn->list_lock);
13468 +
13469 + cmd->state = RESPONSE_RECEIVED;
13470 + qedi_clear_task_idx(qedi, cmd->task_id);
13471 +@@ -122,6 +124,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
13472 + "Freeing tid=0x%x for cid=0x%x\n",
13473 + cmd->task_id, qedi_conn->iscsi_conn_id);
13474 +
13475 ++ spin_lock(&qedi_conn->list_lock);
13476 + if (likely(cmd->io_cmd_in_list)) {
13477 + cmd->io_cmd_in_list = false;
13478 + list_del_init(&cmd->io_cmd);
13479 +@@ -132,6 +135,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
13480 + cmd->task_id, qedi_conn->iscsi_conn_id,
13481 + &cmd->io_cmd);
13482 + }
13483 ++ spin_unlock(&qedi_conn->list_lock);
13484 +
13485 + cmd->state = RESPONSE_RECEIVED;
13486 + qedi_clear_task_idx(qedi, cmd->task_id);
13487 +@@ -222,11 +226,13 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
13488 +
13489 + tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
13490 +
13491 ++ spin_lock(&qedi_conn->list_lock);
13492 + if (likely(qedi_cmd->io_cmd_in_list)) {
13493 + qedi_cmd->io_cmd_in_list = false;
13494 + list_del_init(&qedi_cmd->io_cmd);
13495 + qedi_conn->active_cmd_count--;
13496 + }
13497 ++ spin_unlock(&qedi_conn->list_lock);
13498 +
13499 + if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
13500 + ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
13501 +@@ -288,11 +294,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
13502 + ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
13503 + qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
13504 +
13505 ++ spin_lock(&qedi_conn->list_lock);
13506 + if (likely(cmd->io_cmd_in_list)) {
13507 + cmd->io_cmd_in_list = false;
13508 + list_del_init(&cmd->io_cmd);
13509 + qedi_conn->active_cmd_count--;
13510 + }
13511 ++ spin_unlock(&qedi_conn->list_lock);
13512 +
13513 + memset(task_ctx, '\0', sizeof(*task_ctx));
13514 +
13515 +@@ -817,8 +825,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
13516 + qedi_clear_task_idx(qedi_conn->qedi, rtid);
13517 +
13518 + spin_lock(&qedi_conn->list_lock);
13519 +- list_del_init(&dbg_cmd->io_cmd);
13520 +- qedi_conn->active_cmd_count--;
13521 ++ if (likely(dbg_cmd->io_cmd_in_list)) {
13522 ++ dbg_cmd->io_cmd_in_list = false;
13523 ++ list_del_init(&dbg_cmd->io_cmd);
13524 ++ qedi_conn->active_cmd_count--;
13525 ++ }
13526 + spin_unlock(&qedi_conn->list_lock);
13527 + qedi_cmd->state = CLEANUP_RECV;
13528 + wake_up_interruptible(&qedi_conn->wait_queue);
13529 +@@ -1236,6 +1247,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
13530 + qedi_conn->cmd_cleanup_req++;
13531 + qedi_iscsi_cleanup_task(ctask, true);
13532 +
13533 ++ cmd->io_cmd_in_list = false;
13534 + list_del_init(&cmd->io_cmd);
13535 + qedi_conn->active_cmd_count--;
13536 + QEDI_WARN(&qedi->dbg_ctx,
13537 +@@ -1447,8 +1459,11 @@ ldel_exit:
13538 + spin_unlock_bh(&qedi_conn->tmf_work_lock);
13539 +
13540 + spin_lock(&qedi_conn->list_lock);
13541 +- list_del_init(&cmd->io_cmd);
13542 +- qedi_conn->active_cmd_count--;
13543 ++ if (likely(cmd->io_cmd_in_list)) {
13544 ++ cmd->io_cmd_in_list = false;
13545 ++ list_del_init(&cmd->io_cmd);
13546 ++ qedi_conn->active_cmd_count--;
13547 ++ }
13548 + spin_unlock(&qedi_conn->list_lock);
13549 +
13550 + clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
13551 +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
13552 +index 425e665ec08b2..6e92625df4b7c 100644
13553 +--- a/drivers/scsi/qedi/qedi_iscsi.c
13554 ++++ b/drivers/scsi/qedi/qedi_iscsi.c
13555 +@@ -975,11 +975,13 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
13556 + {
13557 + struct qedi_cmd *cmd, *cmd_tmp;
13558 +
13559 ++ spin_lock(&qedi_conn->list_lock);
13560 + list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
13561 + io_cmd) {
13562 + list_del_init(&cmd->io_cmd);
13563 + qedi_conn->active_cmd_count--;
13564 + }
13565 ++ spin_unlock(&qedi_conn->list_lock);
13566 + }
13567 +
13568 + static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
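
The qedi hunks above put every io_cmd list manipulation under list_lock and gate removal on the io_cmd_in_list flag, so two paths racing to remove the same command can no longer corrupt the list or underflow active_cmd_count. A compilable sketch of flag-gated removal under a lock; a singly-linked list and a pthread mutex stand in for the kernel list and spinlock:

#include <pthread.h>
#include <stdio.h>

struct cmd {
	int in_list;			/* membership flag, guarded by list_lock */
	struct cmd *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd *active_head;
static int active_count;

static void cmd_del(struct cmd *c)
{
	pthread_mutex_lock(&list_lock);
	if (c->in_list) {		/* only the first remover does any work */
		c->in_list = 0;
		for (struct cmd **p = &active_head; *p; p = &(*p)->next) {
			if (*p == c) {
				*p = c->next;	/* stands in for list_del_init() */
				break;
			}
		}
		active_count--;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct cmd c = { .in_list = 1, .next = NULL };

	active_head = &c;
	active_count = 1;
	cmd_del(&c);
	cmd_del(&c);			/* double removal is now a harmless no-op */
	printf("count=%d\n", active_count);
	return 0;
}
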
13569 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
13570 +index 81a307695cc91..569fa4b28e4e2 100644
13571 +--- a/drivers/scsi/qedi/qedi_main.c
13572 ++++ b/drivers/scsi/qedi/qedi_main.c
13573 +@@ -1127,6 +1127,15 @@ static void qedi_schedule_recovery_handler(void *dev)
13574 + schedule_delayed_work(&qedi->recovery_work, 0);
13575 + }
13576 +
13577 ++static void qedi_set_conn_recovery(struct iscsi_cls_session *cls_session)
13578 ++{
13579 ++ struct iscsi_session *session = cls_session->dd_data;
13580 ++ struct iscsi_conn *conn = session->leadconn;
13581 ++ struct qedi_conn *qedi_conn = conn->dd_data;
13582 ++
13583 ++ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
13584 ++}
13585 ++
13586 + static void qedi_link_update(void *dev, struct qed_link_output *link)
13587 + {
13588 + struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
13589 +@@ -1138,6 +1147,7 @@ static void qedi_link_update(void *dev, struct qed_link_output *link)
13590 + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
13591 + "Link Down event.\n");
13592 + atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
13593 ++ iscsi_host_for_each_session(qedi->shost, qedi_set_conn_recovery);
13594 + }
13595 + }
13596 +
13597 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
13598 +index 2861c636dd651..f17ab22ad0e4a 100644
13599 +--- a/drivers/scsi/qla2xxx/qla_init.c
13600 ++++ b/drivers/scsi/qla2xxx/qla_init.c
13601 +@@ -63,6 +63,16 @@ void qla2x00_sp_free(srb_t *sp)
13602 + qla2x00_rel_sp(sp);
13603 + }
13604 +
13605 ++void qla2xxx_rel_done_warning(srb_t *sp, int res)
13606 ++{
13607 ++ WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
13608 ++}
13609 ++
13610 ++void qla2xxx_rel_free_warning(srb_t *sp)
13611 ++{
13612 ++ WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
13613 ++}
13614 ++
13615 + /* Asynchronous Login/Logout Routines -------------------------------------- */
13616 +
13617 + unsigned long
13618 +diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
13619 +index 1fb6ccac07ccd..26d9c78d4c52c 100644
13620 +--- a/drivers/scsi/qla2xxx/qla_inline.h
13621 ++++ b/drivers/scsi/qla2xxx/qla_inline.h
13622 +@@ -207,10 +207,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
13623 + return sp;
13624 + }
13625 +
13626 ++void qla2xxx_rel_done_warning(srb_t *sp, int res);
13627 ++void qla2xxx_rel_free_warning(srb_t *sp);
13628 ++
13629 + static inline void
13630 + qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
13631 + {
13632 + sp->qpair = NULL;
13633 ++ sp->done = qla2xxx_rel_done_warning;
13634 ++ sp->free = qla2xxx_rel_free_warning;
13635 + mempool_free(sp, qpair->srb_mempool);
13636 + QLA_QPAIR_MARK_NOT_BUSY(qpair);
13637 + }
13638 +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
13639 +index fdb2ce7acb912..9f5d3aa1d8745 100644
13640 +--- a/drivers/scsi/qla2xxx/qla_mbx.c
13641 ++++ b/drivers/scsi/qla2xxx/qla_mbx.c
13642 +@@ -4908,7 +4908,7 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
13643 + "Done %s.\n", __func__);
13644 + }
13645 +
13646 +- dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
13647 ++ dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
13648 + els_cmd_map, els_cmd_map_dma);
13649 +
13650 + return rval;
13651 +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
13652 +index 262dfd7635a48..7b14fd1cb0309 100644
13653 +--- a/drivers/scsi/qla2xxx/qla_nvme.c
13654 ++++ b/drivers/scsi/qla2xxx/qla_nvme.c
13655 +@@ -683,7 +683,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
13656 + struct nvme_fc_port_template *tmpl;
13657 + struct qla_hw_data *ha;
13658 + struct nvme_fc_port_info pinfo;
13659 +- int ret = EINVAL;
13660 ++ int ret = -EINVAL;
13661 +
13662 + if (!IS_ENABLED(CONFIG_NVME_FC))
13663 + return ret;
13664 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
13665 +index 90289162dbd4c..a034e9caa2997 100644
13666 +--- a/drivers/scsi/qla2xxx/qla_target.c
13667 ++++ b/drivers/scsi/qla2xxx/qla_target.c
13668 +@@ -5668,7 +5668,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
13669 + /* found existing exchange */
13670 + qpair->retry_term_cnt++;
13671 + if (qpair->retry_term_cnt >= 5) {
13672 +- rc = EIO;
13673 ++ rc = -EIO;
13674 + qpair->retry_term_cnt = 0;
13675 + ql_log(ql_log_warn, vha, 0xffff,
13676 + "Unable to send ABTS Respond. Dumping firmware.\n");
13677 +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
13678 +index 5dc697ce8b5dd..4a6b15dc36aaf 100644
13679 +--- a/drivers/scsi/qla4xxx/ql4_os.c
13680 ++++ b/drivers/scsi/qla4xxx/ql4_os.c
13681 +@@ -1220,7 +1220,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
13682 + le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
13683 + exit_host_stats:
13684 + if (ql_iscsi_stats)
13685 +- dma_free_coherent(&ha->pdev->dev, host_stats_size,
13686 ++ dma_free_coherent(&ha->pdev->dev, stats_size,
13687 + ql_iscsi_stats, iscsi_stats_dma);
13688 +
13689 + ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
13690 +diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
13691 +index 1129fe7a27edd..ee069a8b442a7 100644
13692 +--- a/drivers/scsi/smartpqi/smartpqi.h
13693 ++++ b/drivers/scsi/smartpqi/smartpqi.h
13694 +@@ -359,7 +359,7 @@ struct pqi_event_response {
13695 + struct pqi_iu_header header;
13696 + u8 event_type;
13697 + u8 reserved2 : 7;
13698 +- u8 request_acknowlege : 1;
13699 ++ u8 request_acknowledge : 1;
13700 + __le16 event_id;
13701 + __le32 additional_event_id;
13702 + union {
13703 +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
13704 +index cd157f11eb222..10afbaaa4a82f 100644
13705 +--- a/drivers/scsi/smartpqi/smartpqi_init.c
13706 ++++ b/drivers/scsi/smartpqi/smartpqi_init.c
13707 +@@ -542,8 +542,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
13708 + put_unaligned_be16(cdb_length, &cdb[7]);
13709 + break;
13710 + default:
13711 +- dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
13712 +- cmd);
13713 ++ dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
13714 + break;
13715 + }
13716 +
13717 +@@ -2462,7 +2461,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
13718 + offload_to_mirror =
13719 + (offload_to_mirror >= layout_map_count - 1) ?
13720 + 0 : offload_to_mirror + 1;
13721 +- WARN_ON(offload_to_mirror >= layout_map_count);
13722 + device->offload_to_mirror = offload_to_mirror;
13723 + /*
13724 + * Avoid direct use of device->offload_to_mirror within this
13725 +@@ -2915,10 +2913,14 @@ static int pqi_interpret_task_management_response(
13726 + return rc;
13727 + }
13728 +
13729 +-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
13730 +- struct pqi_queue_group *queue_group)
13731 ++static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
13732 ++{
13733 ++ pqi_take_ctrl_offline(ctrl_info);
13734 ++}
13735 ++
13736 ++static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
13737 + {
13738 +- unsigned int num_responses;
13739 ++ int num_responses;
13740 + pqi_index_t oq_pi;
13741 + pqi_index_t oq_ci;
13742 + struct pqi_io_request *io_request;
13743 +@@ -2930,6 +2932,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
13744 +
13745 + while (1) {
13746 + oq_pi = readl(queue_group->oq_pi);
13747 ++ if (oq_pi >= ctrl_info->num_elements_per_oq) {
13748 ++ pqi_invalid_response(ctrl_info);
13749 ++ dev_err(&ctrl_info->pci_dev->dev,
13750 ++ "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
13751 ++ oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
13752 ++ return -1;
13753 ++ }
13754 + if (oq_pi == oq_ci)
13755 + break;
13756 +
13757 +@@ -2938,10 +2947,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
13758 + (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
13759 +
13760 + request_id = get_unaligned_le16(&response->request_id);
13761 +- WARN_ON(request_id >= ctrl_info->max_io_slots);
13762 ++ if (request_id >= ctrl_info->max_io_slots) {
13763 ++ pqi_invalid_response(ctrl_info);
13764 ++ dev_err(&ctrl_info->pci_dev->dev,
13765 ++ "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
13766 ++ request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
13767 ++ return -1;
13768 ++ }
13769 +
13770 + io_request = &ctrl_info->io_request_pool[request_id];
13771 +- WARN_ON(atomic_read(&io_request->refcount) == 0);
13772 ++ if (atomic_read(&io_request->refcount) == 0) {
13773 ++ pqi_invalid_response(ctrl_info);
13774 ++ dev_err(&ctrl_info->pci_dev->dev,
13775 ++ "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
13776 ++ request_id, oq_pi, oq_ci);
13777 ++ return -1;
13778 ++ }
13779 +
13780 + switch (response->header.iu_type) {
13781 + case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
13782 +@@ -2971,24 +2992,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
13783 + io_request->error_info = ctrl_info->error_buffer +
13784 + (get_unaligned_le16(&response->error_index) *
13785 + PQI_ERROR_BUFFER_ELEMENT_LENGTH);
13786 +- pqi_process_io_error(response->header.iu_type,
13787 +- io_request);
13788 ++ pqi_process_io_error(response->header.iu_type, io_request);
13789 + break;
13790 + default:
13791 ++ pqi_invalid_response(ctrl_info);
13792 + dev_err(&ctrl_info->pci_dev->dev,
13793 +- "unexpected IU type: 0x%x\n",
13794 +- response->header.iu_type);
13795 +- break;
13796 ++ "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
13797 ++ response->header.iu_type, oq_pi, oq_ci);
13798 ++ return -1;
13799 + }
13800 +
13801 +- io_request->io_complete_callback(io_request,
13802 +- io_request->context);
13803 ++ io_request->io_complete_callback(io_request, io_request->context);
13804 +
13805 + /*
13806 + * Note that the I/O request structure CANNOT BE TOUCHED after
13807 + * returning from the I/O completion callback!
13808 + */
13809 +-
13810 + oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
13811 + }
13812 +
13813 +@@ -3301,9 +3320,9 @@ static void pqi_ofa_capture_event_payload(struct pqi_event *event,
13814 + }
13815 + }
13816 +
13817 +-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
13818 ++static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
13819 + {
13820 +- unsigned int num_events;
13821 ++ int num_events;
13822 + pqi_index_t oq_pi;
13823 + pqi_index_t oq_ci;
13824 + struct pqi_event_queue *event_queue;
13825 +@@ -3317,26 +3336,31 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
13826 +
13827 + while (1) {
13828 + oq_pi = readl(event_queue->oq_pi);
13829 ++ if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
13830 ++ pqi_invalid_response(ctrl_info);
13831 ++ dev_err(&ctrl_info->pci_dev->dev,
13832 ++ "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
13833 ++ oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
13834 ++ return -1;
13835 ++ }
13836 ++
13837 + if (oq_pi == oq_ci)
13838 + break;
13839 +
13840 + num_events++;
13841 +- response = event_queue->oq_element_array +
13842 +- (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
13843 ++ response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
13844 +
13845 + event_index =
13846 + pqi_event_type_to_event_index(response->event_type);
13847 +
13848 +- if (event_index >= 0) {
13849 +- if (response->request_acknowlege) {
13850 +- event = &ctrl_info->events[event_index];
13851 +- event->pending = true;
13852 +- event->event_type = response->event_type;
13853 +- event->event_id = response->event_id;
13854 +- event->additional_event_id =
13855 +- response->additional_event_id;
13856 ++ if (event_index >= 0 && response->request_acknowledge) {
13857 ++ event = &ctrl_info->events[event_index];
13858 ++ event->pending = true;
13859 ++ event->event_type = response->event_type;
13860 ++ event->event_id = response->event_id;
13861 ++ event->additional_event_id = response->additional_event_id;
13862 ++ if (event->event_type == PQI_EVENT_TYPE_OFA)
13863 + pqi_ofa_capture_event_payload(event, response);
13864 +- }
13865 + }
13866 +
13867 + oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
13868 +@@ -3451,7 +3475,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
13869 + {
13870 + struct pqi_ctrl_info *ctrl_info;
13871 + struct pqi_queue_group *queue_group;
13872 +- unsigned int num_responses_handled;
13873 ++ int num_io_responses_handled;
13874 ++ int num_events_handled;
13875 +
13876 + queue_group = data;
13877 + ctrl_info = queue_group->ctrl_info;
13878 +@@ -3459,17 +3484,25 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
13879 + if (!pqi_is_valid_irq(ctrl_info))
13880 + return IRQ_NONE;
13881 +
13882 +- num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
13883 ++ num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
13884 ++ if (num_io_responses_handled < 0)
13885 ++ goto out;
13886 +
13887 +- if (irq == ctrl_info->event_irq)
13888 +- num_responses_handled += pqi_process_event_intr(ctrl_info);
13889 ++ if (irq == ctrl_info->event_irq) {
13890 ++ num_events_handled = pqi_process_event_intr(ctrl_info);
13891 ++ if (num_events_handled < 0)
13892 ++ goto out;
13893 ++ } else {
13894 ++ num_events_handled = 0;
13895 ++ }
13896 +
13897 +- if (num_responses_handled)
13898 ++ if (num_io_responses_handled + num_events_handled > 0)
13899 + atomic_inc(&ctrl_info->num_interrupts);
13900 +
13901 + pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
13902 + pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
13903 +
13904 ++out:
13905 + return IRQ_HANDLED;
13906 + }
13907 +
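
The smartpqi rework above stops trusting indices written by the controller: the producer index and the response's request ID are range-checked before they index host arrays, and an out-of-range value now takes the controller offline and aborts processing rather than tripping a WARN_ON and carrying on. A minimal sketch of validating device-supplied indices; constants and names are illustrative:

#include <stdio.h>

#define NUM_ELEMENTS	16	/* queue depth the host configured */
#define MAX_SLOTS	32	/* size of the host's request table */

static int ctrl_offline;

static void invalid_response(void)	/* plays the role of pqi_take_ctrl_offline() */
{
	ctrl_offline = 1;
}

/* Returns responses consumed, or -1 if the device handed back garbage. */
static int process_response(unsigned int oq_pi, unsigned int request_id)
{
	if (oq_pi >= NUM_ELEMENTS) {	/* never index with an unchecked device value */
		invalid_response();
		fprintf(stderr, "producer index %u out of range (0-%u)\n",
			oq_pi, NUM_ELEMENTS - 1);
		return -1;
	}
	if (request_id >= MAX_SLOTS) {
		invalid_response();
		fprintf(stderr, "request ID %u out of range (0-%u)\n",
			request_id, MAX_SLOTS - 1);
		return -1;
	}
	return 1;
}

int main(void)
{
	printf("%d %d offline=%d\n", process_response(3, 7),
	       process_response(99, 7), ctrl_offline);
	return 0;
}
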
13908 +diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
13909 +index d56ce8d97d4e8..7ad127f213977 100644
13910 +--- a/drivers/scsi/ufs/ufs-mediatek.c
13911 ++++ b/drivers/scsi/ufs/ufs-mediatek.c
13912 +@@ -585,13 +585,7 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
13913 +
13914 + static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
13915 + {
13916 +- struct ufs_dev_info *dev_info = &hba->dev_info;
13917 +- u16 mid = dev_info->wmanufacturerid;
13918 +-
13919 + ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
13920 +-
13921 +- if (mid == UFS_VENDOR_SAMSUNG)
13922 +- hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
13923 + }
13924 +
13925 + /**
13926 +diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
13927 +index 2e6ddb5cdfc23..7da27eed1fe7b 100644
13928 +--- a/drivers/scsi/ufs/ufs-qcom.c
13929 ++++ b/drivers/scsi/ufs/ufs-qcom.c
13930 +@@ -1604,9 +1604,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
13931 + */
13932 + }
13933 + mask <<= offset;
13934 +-
13935 +- pm_runtime_get_sync(host->hba->dev);
13936 +- ufshcd_hold(host->hba, false);
13937 + ufshcd_rmwl(host->hba, TEST_BUS_SEL,
13938 + (u32)host->testbus.select_major << 19,
13939 + REG_UFS_CFG1);
13940 +@@ -1619,8 +1616,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
13941 + * committed before returning.
13942 + */
13943 + mb();
13944 +- ufshcd_release(host->hba);
13945 +- pm_runtime_put_sync(host->hba->dev);
13946 +
13947 + return 0;
13948 + }
13949 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
13950 +index 8bc8e4e62c045..e5f75b2e07e2c 100644
13951 +--- a/drivers/scsi/ufs/ufshcd.c
13952 ++++ b/drivers/scsi/ufs/ufshcd.c
13953 +@@ -484,6 +484,9 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
13954 +
13955 + prdt_length = le16_to_cpu(
13956 + lrbp->utr_descriptor_ptr->prd_table_length);
13957 ++ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
13958 ++ prdt_length /= sizeof(struct ufshcd_sg_entry);
13959 ++
13960 + dev_err(hba->dev,
13961 + "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
13962 + tag, prdt_length,
13963 +diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
13964 +index ae1e248a8fb8a..1d2bc181da050 100644
13965 +--- a/drivers/slimbus/core.c
13966 ++++ b/drivers/slimbus/core.c
13967 +@@ -301,8 +301,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
13968 + {
13969 + /* Remove all clients */
13970 + device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
13971 +- /* Enter Clock Pause */
13972 +- slim_ctrl_clk_pause(ctrl, false, 0);
13973 + ida_simple_remove(&ctrl_ida, ctrl->id);
13974 +
13975 + return 0;
13976 +@@ -326,8 +324,8 @@ void slim_report_absent(struct slim_device *sbdev)
13977 + mutex_lock(&ctrl->lock);
13978 + sbdev->is_laddr_valid = false;
13979 + mutex_unlock(&ctrl->lock);
13980 +-
13981 +- ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
13982 ++ if (!ctrl->get_laddr)
13983 ++ ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
13984 + slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
13985 + }
13986 + EXPORT_SYMBOL_GPL(slim_report_absent);
13987 +diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
13988 +index 743ee7b4e63f2..218aefc3531cd 100644
13989 +--- a/drivers/slimbus/qcom-ngd-ctrl.c
13990 ++++ b/drivers/slimbus/qcom-ngd-ctrl.c
13991 +@@ -1277,9 +1277,13 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
13992 + {
13993 + struct qcom_slim_ngd_qmi *qmi =
13994 + container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
13995 ++ struct qcom_slim_ngd_ctrl *ctrl =
13996 ++ container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
13997 +
13998 + qmi->svc_info.sq_node = 0;
13999 + qmi->svc_info.sq_port = 0;
14000 ++
14001 ++ qcom_slim_ngd_enable(ctrl, false);
14002 + }
14003 +
14004 + static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
14005 +diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
14006 +index f4fb527d83018..c5dd026fe889f 100644
14007 +--- a/drivers/soc/fsl/qbman/bman.c
14008 ++++ b/drivers/soc/fsl/qbman/bman.c
14009 +@@ -660,7 +660,7 @@ int bm_shutdown_pool(u32 bpid)
14010 + }
14011 + done:
14012 + put_affine_portal();
14013 +- return 0;
14014 ++ return err;
14015 + }
14016 +
14017 + struct gen_pool *bm_bpalloc;
14018 +diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
14019 +index 87ee9f767b7af..d8ace96832bac 100644
14020 +--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
14021 ++++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
14022 +@@ -213,15 +213,16 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
14023 + }
14024 + EXPORT_SYMBOL(cmdq_pkt_write_mask);
14025 +
14026 +-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
14027 ++int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
14028 + {
14029 + struct cmdq_instruction inst = { {0} };
14030 ++ u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;
14031 +
14032 + if (event >= CMDQ_MAX_EVENT)
14033 + return -EINVAL;
14034 +
14035 + inst.op = CMDQ_CODE_WFE;
14036 +- inst.value = CMDQ_WFE_OPTION;
14037 ++ inst.value = CMDQ_WFE_OPTION | clear_option;
14038 + inst.event = event;
14039 +
14040 + return cmdq_pkt_append_command(pkt, inst);
14041 +diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
14042 +index 1f35b097c6356..7abfc8c4fdc72 100644
14043 +--- a/drivers/soc/qcom/apr.c
14044 ++++ b/drivers/soc/qcom/apr.c
14045 +@@ -328,7 +328,7 @@ static int of_apr_add_pd_lookups(struct device *dev)
14046 +
14047 + pds = pdr_add_lookup(apr->pdr, service_name, service_path);
14048 + if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
14049 +- dev_err(dev, "pdr add lookup failed: %d\n", ret);
14050 ++ dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
14051 + return PTR_ERR(pds);
14052 + }
14053 + }
14054 +diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
14055 +index 15b5002e4127b..ab9ae8cdfa54c 100644
14056 +--- a/drivers/soc/qcom/pdr_internal.h
14057 ++++ b/drivers/soc/qcom/pdr_internal.h
14058 +@@ -185,7 +185,7 @@ struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
14059 + .data_type = QMI_STRUCT,
14060 + .elem_len = SERVREG_DOMAIN_LIST_LENGTH,
14061 + .elem_size = sizeof(struct servreg_location_entry),
14062 +- .array_type = NO_ARRAY,
14063 ++ .array_type = VAR_LEN_ARRAY,
14064 + .tlv_type = 0x12,
14065 + .offset = offsetof(struct servreg_get_domain_list_resp,
14066 + domain_list),
14067 +diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
14068 +index 31ff49fcd078b..c556623dae024 100644
14069 +--- a/drivers/soc/xilinx/zynqmp_power.c
14070 ++++ b/drivers/soc/xilinx/zynqmp_power.c
14071 +@@ -205,7 +205,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
14072 + rx_chan = mbox_request_channel_byname(client, "rx");
14073 + if (IS_ERR(rx_chan)) {
14074 + dev_err(&pdev->dev, "Failed to request rx channel\n");
14075 +- return IS_ERR(rx_chan);
14076 ++ return PTR_ERR(rx_chan);
14077 + }
14078 + } else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) {
14079 + irq = platform_get_irq(pdev, 0);
14080 +diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
14081 +index 2ea73809ca345..271839a8add0e 100644
14082 +--- a/drivers/spi/spi-dw-pci.c
14083 ++++ b/drivers/spi/spi-dw-pci.c
14084 +@@ -127,18 +127,16 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
14085 + if (desc->setup) {
14086 + ret = desc->setup(dws);
14087 + if (ret)
14088 +- return ret;
14089 ++ goto err_free_irq_vectors;
14090 + }
14091 + } else {
14092 +- pci_free_irq_vectors(pdev);
14093 +- return -ENODEV;
14094 ++ ret = -ENODEV;
14095 ++ goto err_free_irq_vectors;
14096 + }
14097 +
14098 + ret = dw_spi_add_host(&pdev->dev, dws);
14099 +- if (ret) {
14100 +- pci_free_irq_vectors(pdev);
14101 +- return ret;
14102 +- }
14103 ++ if (ret)
14104 ++ goto err_free_irq_vectors;
14105 +
14106 + /* PCI hook and SPI hook use the same drv data */
14107 + pci_set_drvdata(pdev, dws);
14108 +@@ -152,6 +150,10 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
14109 + pm_runtime_allow(&pdev->dev);
14110 +
14111 + return 0;
14112 ++
14113 ++err_free_irq_vectors:
14114 ++ pci_free_irq_vectors(pdev);
14115 ++ return ret;
14116 + }
14117 +
14118 + static void spi_pci_remove(struct pci_dev *pdev)
14119 +diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
14120 +index 37a3e0f8e7526..a702e9d7d68c0 100644
14121 +--- a/drivers/spi/spi-fsi.c
14122 ++++ b/drivers/spi/spi-fsi.c
14123 +@@ -24,11 +24,16 @@
14124 +
14125 + #define SPI_FSI_BASE 0x70000
14126 + #define SPI_FSI_INIT_TIMEOUT_MS 1000
14127 +-#define SPI_FSI_MAX_TRANSFER_SIZE 2048
14128 ++#define SPI_FSI_MAX_XFR_SIZE 2048
14129 ++#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED 32
14130 +
14131 + #define SPI_FSI_ERROR 0x0
14132 + #define SPI_FSI_COUNTER_CFG 0x1
14133 + #define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32)
14134 ++#define SPI_FSI_COUNTER_CFG_N2_RX BIT_ULL(8)
14135 ++#define SPI_FSI_COUNTER_CFG_N2_TX BIT_ULL(9)
14136 ++#define SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
14137 ++#define SPI_FSI_COUNTER_CFG_N2_RELOAD BIT_ULL(11)
14138 + #define SPI_FSI_CFG1 0x2
14139 + #define SPI_FSI_CLOCK_CFG 0x3
14140 + #define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
14141 +@@ -61,7 +66,7 @@
14142 + #define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62)
14143 + #define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63)
14144 + #define SPI_FSI_STATUS_ANY_ERROR \
14145 +- (SPI_FSI_STATUS_ERROR | SPI_FSI_STATUS_TDR_UNDERRUN | \
14146 ++ (SPI_FSI_STATUS_ERROR | \
14147 + SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
14148 + SPI_FSI_STATUS_RDR_OVERRUN)
14149 + #define SPI_FSI_PORT_CTRL 0x9
14150 +@@ -70,6 +75,8 @@ struct fsi_spi {
14151 + struct device *dev; /* SPI controller device */
14152 + struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
14153 + u32 base;
14154 ++ size_t max_xfr_size;
14155 ++ bool restricted;
14156 + };
14157 +
14158 + struct fsi_spi_sequence {
14159 +@@ -205,8 +212,12 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
14160 + if (rc)
14161 + return rc;
14162 +
14163 +- return fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
14164 +- SPI_FSI_CLOCK_CFG_RESET2);
14165 ++ rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
14166 ++ SPI_FSI_CLOCK_CFG_RESET2);
14167 ++ if (rc)
14168 ++ return rc;
14169 ++
14170 ++ return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
14171 + }
14172 +
14173 + static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
14174 +@@ -214,8 +225,8 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
14175 + /*
14176 + * Add the next byte of instruction to the 8-byte sequence register.
14177 + * Then decrement the counter so that the next instruction will go in
14178 +- * the right place. Return the number of "slots" left in the sequence
14179 +- * register.
14180 ++ * the right place. Return the index of the slot we just filled in the
14181 ++ * sequence register.
14182 + */
14183 + seq->data |= (u64)val << seq->bit;
14184 + seq->bit -= 8;
14185 +@@ -233,40 +244,71 @@ static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
14186 + struct fsi_spi_sequence *seq,
14187 + struct spi_transfer *transfer)
14188 + {
14189 ++ bool docfg = false;
14190 + int loops;
14191 + int idx;
14192 + int rc;
14193 ++ u8 val = 0;
14194 + u8 len = min(transfer->len, 8U);
14195 + u8 rem = transfer->len % len;
14196 ++ u64 cfg = 0ULL;
14197 +
14198 + loops = transfer->len / len;
14199 +
14200 + if (transfer->tx_buf) {
14201 +- idx = fsi_spi_sequence_add(seq,
14202 +- SPI_FSI_SEQUENCE_SHIFT_OUT(len));
14203 ++ val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
14204 ++ idx = fsi_spi_sequence_add(seq, val);
14205 ++
14206 + if (rem)
14207 + rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
14208 + } else if (transfer->rx_buf) {
14209 +- idx = fsi_spi_sequence_add(seq,
14210 +- SPI_FSI_SEQUENCE_SHIFT_IN(len));
14211 ++ val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
14212 ++ idx = fsi_spi_sequence_add(seq, val);
14213 ++
14214 + if (rem)
14215 + rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
14216 + } else {
14217 + return -EINVAL;
14218 + }
14219 +
14220 ++ if (ctx->restricted) {
14221 ++ const int eidx = rem ? 5 : 6;
14222 ++
14223 ++ while (loops > 1 && idx <= eidx) {
14224 ++ idx = fsi_spi_sequence_add(seq, val);
14225 ++ loops--;
14226 ++ docfg = true;
14227 ++ }
14228 ++
14229 ++ if (loops > 1) {
14230 ++ dev_warn(ctx->dev, "No sequencer slots; aborting.\n");
14231 ++ return -EINVAL;
14232 ++ }
14233 ++ }
14234 ++
14235 + if (loops > 1) {
14236 + fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
14237 ++ docfg = true;
14238 ++ }
14239 +
14240 +- if (rem)
14241 +- fsi_spi_sequence_add(seq, rem);
14242 ++ if (docfg) {
14243 ++ cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
14244 ++ if (transfer->rx_buf)
14245 ++ cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
14246 ++ SPI_FSI_COUNTER_CFG_N2_TX |
14247 ++ SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
14248 ++ SPI_FSI_COUNTER_CFG_N2_RELOAD;
14249 +
14250 +- rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG,
14251 +- SPI_FSI_COUNTER_CFG_LOOPS(loops - 1));
14252 ++ rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
14253 + if (rc)
14254 + return rc;
14255 ++ } else {
14256 ++ fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
14257 + }
14258 +
14259 ++ if (rem)
14260 ++ fsi_spi_sequence_add(seq, rem);
14261 ++
14262 + return 0;
14263 + }
14264 +
14265 +@@ -275,6 +317,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
14266 + {
14267 + int rc = 0;
14268 + u64 status = 0ULL;
14269 ++ u64 cfg = 0ULL;
14270 +
14271 + if (transfer->tx_buf) {
14272 + int nb;
14273 +@@ -312,6 +355,16 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
14274 + u64 in = 0ULL;
14275 + u8 *rx = transfer->rx_buf;
14276 +
14277 ++ rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
14278 ++ if (rc)
14279 ++ return rc;
14280 ++
14281 ++ if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
14282 ++ rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
14283 ++ if (rc)
14284 ++ return rc;
14285 ++ }
14286 ++
14287 + while (transfer->len > recv) {
14288 + do {
14289 + rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
14290 +@@ -350,7 +403,7 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
14291 + u64 status = 0ULL;
14292 + u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
14293 + SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
14294 +- FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 4);
14295 ++ FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);
14296 +
14297 + end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS);
14298 + do {
14299 +@@ -407,7 +460,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
14300 +
14301 + /* Sequencer must do shift out (tx) first. */
14302 + if (!transfer->tx_buf ||
14303 +- transfer->len > SPI_FSI_MAX_TRANSFER_SIZE) {
14304 ++ transfer->len > (ctx->max_xfr_size + 8)) {
14305 + rc = -EINVAL;
14306 + goto error;
14307 + }
14308 +@@ -431,7 +484,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
14309 +
14310 + /* Sequencer can only do shift in (rx) after tx. */
14311 + if (next->rx_buf) {
14312 +- if (next->len > SPI_FSI_MAX_TRANSFER_SIZE) {
14313 ++ if (next->len > ctx->max_xfr_size) {
14314 + rc = -EINVAL;
14315 + goto error;
14316 + }
14317 +@@ -476,7 +529,9 @@ error:
14318 +
14319 + static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
14320 + {
14321 +- return SPI_FSI_MAX_TRANSFER_SIZE;
14322 ++ struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
14323 ++
14324 ++ return ctx->max_xfr_size;
14325 + }
14326 +
14327 + static int fsi_spi_probe(struct device *dev)
14328 +@@ -524,6 +579,14 @@ static int fsi_spi_probe(struct device *dev)
14329 + ctx->fsi = fsi;
14330 + ctx->base = base + SPI_FSI_BASE;
14331 +
14332 ++ if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
14333 ++ ctx->restricted = true;
14334 ++ ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
14335 ++ } else {
14336 ++ ctx->restricted = false;
14337 ++ ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
14338 ++ }
14339 ++
14340 + rc = devm_spi_register_controller(dev, ctlr);
14341 + if (rc)
14342 + spi_controller_put(ctlr);
14343 +diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
14344 +index e9e256718ef4a..10d8a722b0833 100644
14345 +--- a/drivers/spi/spi-omap2-mcspi.c
14346 ++++ b/drivers/spi/spi-omap2-mcspi.c
14347 +@@ -24,7 +24,6 @@
14348 + #include <linux/of.h>
14349 + #include <linux/of_device.h>
14350 + #include <linux/gcd.h>
14351 +-#include <linux/iopoll.h>
14352 +
14353 + #include <linux/spi/spi.h>
14354 + #include <linux/gpio.h>
14355 +@@ -349,9 +348,19 @@ disable_fifo:
14356 +
14357 + static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
14358 + {
14359 +- u32 val;
14360 +-
14361 +- return readl_poll_timeout(reg, val, val & bit, 1, MSEC_PER_SEC);
14362 ++ unsigned long timeout;
14363 ++
14364 ++ timeout = jiffies + msecs_to_jiffies(1000);
14365 ++ while (!(readl_relaxed(reg) & bit)) {
14366 ++ if (time_after(jiffies, timeout)) {
14367 ++ if (!(readl_relaxed(reg) & bit))
14368 ++ return -ETIMEDOUT;
14369 ++ else
14370 ++ return 0;
14371 ++ }
14372 ++ cpu_relax();
14373 ++ }
14374 ++ return 0;
14375 + }
14376 +
14377 + static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
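
The omap2-mcspi change above open-codes the register poll so that, once the deadline passes, the bit is sampled one last time before -ETIMEDOUT is declared; that closes the window where the bit becomes set while the task is scheduled out right as the timeout expires. A userspace sketch of the re-check-after-deadline idea; clock_gettime() and a plain variable stand in for jiffies and the MMIO register:

#include <errno.h>
#include <stdio.h>
#include <time.h>

static volatile unsigned int fake_reg;	/* stands in for readl_relaxed(reg) */

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int wait_for_bit(unsigned int bit, long timeout_ms)
{
	long deadline = now_ms() + timeout_ms;

	while (!(fake_reg & bit)) {
		if (now_ms() > deadline) {
			if (!(fake_reg & bit))	/* final sample after the deadline */
				return -ETIMEDOUT;
			return 0;		/* bit raced in as time ran out: success */
		}
	}
	return 0;
}

int main(void)
{
	fake_reg = 0x1;
	printf("set=%d ", wait_for_bit(0x1, 10));
	fake_reg = 0x0;
	printf("timeout=%d\n", wait_for_bit(0x2, 10));
	return 0;
}
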
14378 +diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
14379 +index cf67ea60dc0ed..6587a7dc3f5ba 100644
14380 +--- a/drivers/spi/spi-s3c64xx.c
14381 ++++ b/drivers/spi/spi-s3c64xx.c
14382 +@@ -122,6 +122,7 @@
14383 +
14384 + struct s3c64xx_spi_dma_data {
14385 + struct dma_chan *ch;
14386 ++ dma_cookie_t cookie;
14387 + enum dma_transfer_direction direction;
14388 + };
14389 +
14390 +@@ -264,12 +265,13 @@ static void s3c64xx_spi_dmacb(void *data)
14391 + spin_unlock_irqrestore(&sdd->lock, flags);
14392 + }
14393 +
14394 +-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
14395 ++static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
14396 + struct sg_table *sgt)
14397 + {
14398 + struct s3c64xx_spi_driver_data *sdd;
14399 + struct dma_slave_config config;
14400 + struct dma_async_tx_descriptor *desc;
14401 ++ int ret;
14402 +
14403 + memset(&config, 0, sizeof(config));
14404 +
14405 +@@ -293,12 +295,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
14406 +
14407 + desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
14408 + dma->direction, DMA_PREP_INTERRUPT);
14409 ++ if (!desc) {
14410 ++ dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
14411 ++ dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
14412 ++ return -ENOMEM;
14413 ++ }
14414 +
14415 + desc->callback = s3c64xx_spi_dmacb;
14416 + desc->callback_param = dma;
14417 +
14418 +- dmaengine_submit(desc);
14419 ++ dma->cookie = dmaengine_submit(desc);
14420 ++ ret = dma_submit_error(dma->cookie);
14421 ++ if (ret) {
14422 ++ dev_err(&sdd->pdev->dev, "DMA submission failed");
14423 ++ return -EIO;
14424 ++ }
14425 ++
14426 + dma_async_issue_pending(dma->ch);
14427 ++ return 0;
14428 + }
14429 +
14430 + static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
14431 +@@ -348,11 +362,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
14432 + return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
14433 + }
14434 +
14435 +-static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
14436 ++static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
14437 + struct spi_transfer *xfer, int dma_mode)
14438 + {
14439 + void __iomem *regs = sdd->regs;
14440 + u32 modecfg, chcfg;
14441 ++ int ret = 0;
14442 +
14443 + modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
14444 + modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
14445 +@@ -378,7 +393,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
14446 + chcfg |= S3C64XX_SPI_CH_TXCH_ON;
14447 + if (dma_mode) {
14448 + modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
14449 +- prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
14450 ++ ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
14451 + } else {
14452 + switch (sdd->cur_bpw) {
14453 + case 32:
14454 +@@ -410,12 +425,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
14455 + writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
14456 + | S3C64XX_SPI_PACKET_CNT_EN,
14457 + regs + S3C64XX_SPI_PACKET_CNT);
14458 +- prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
14459 ++ ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
14460 + }
14461 + }
14462 +
14463 ++ if (ret)
14464 ++ return ret;
14465 ++
14466 + writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
14467 + writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
14468 ++
14469 ++ return 0;
14470 + }
14471 +
14472 + static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
14473 +@@ -548,9 +568,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
14474 + return 0;
14475 + }
14476 +
14477 +-static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
14478 ++static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
14479 + {
14480 + void __iomem *regs = sdd->regs;
14481 ++ int ret;
14482 + u32 val;
14483 +
14484 + /* Disable Clock */
14485 +@@ -598,7 +619,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
14486 +
14487 + if (sdd->port_conf->clk_from_cmu) {
14488 + /* The src_clk clock is divided internally by 2 */
14489 +- clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
14490 ++ ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
14491 ++ if (ret)
14492 ++ return ret;
14493 + } else {
14494 + /* Configure Clock */
14495 + val = readl(regs + S3C64XX_SPI_CLK_CFG);
14496 +@@ -612,6 +635,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
14497 + val |= S3C64XX_SPI_ENCLK_ENABLE;
14498 + writel(val, regs + S3C64XX_SPI_CLK_CFG);
14499 + }
14500 ++
14501 ++ return 0;
14502 + }
14503 +
14504 + #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
14505 +@@ -654,7 +679,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
14506 + sdd->cur_bpw = bpw;
14507 + sdd->cur_speed = speed;
14508 + sdd->cur_mode = spi->mode;
14509 +- s3c64xx_spi_config(sdd);
14510 ++ status = s3c64xx_spi_config(sdd);
14511 ++ if (status)
14512 ++ return status;
14513 + }
14514 +
14515 + if (!is_polling(sdd) && (xfer->len > fifo_len) &&
14516 +@@ -678,13 +705,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
14517 + sdd->state &= ~RXBUSY;
14518 + sdd->state &= ~TXBUSY;
14519 +
14520 +- s3c64xx_enable_datapath(sdd, xfer, use_dma);
14521 +-
14522 + /* Start the signals */
14523 + s3c64xx_spi_set_cs(spi, true);
14524 +
14525 ++ status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
14526 ++
14527 + spin_unlock_irqrestore(&sdd->lock, flags);
14528 +
14529 ++ if (status) {
14530 ++ dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
14531 ++ break;
14532 ++ }
14533 ++
14534 + if (use_dma)
14535 + status = s3c64xx_wait_for_dma(sdd, xfer);
14536 + else
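
The s3c64xx hunks above make DMA setup failures observable: dmaengine_prep_slave_sg() can return NULL, and dmaengine_submit() returns a cookie that must be passed through dma_submit_error() before the channel is kicked; previously both results were ignored. A small sketch of the check-the-cookie pattern; cookie_t, engine_submit() and submit_error() are illustrative stand-ins:

#include <errno.h>
#include <stdio.h>

typedef int cookie_t;			/* negative cookies signal errors */

static int submit_error(cookie_t c)	/* plays the role of dma_submit_error() */
{
	return c < 0 ? c : 0;
}

static cookie_t engine_submit(int should_fail)	/* plays the role of dmaengine_submit() */
{
	static cookie_t next = 1;

	return should_fail ? -EIO : next++;
}

static int prepare_and_submit(int should_fail)
{
	cookie_t cookie = engine_submit(should_fail);

	if (submit_error(cookie)) {	/* previously the result was dropped on the floor */
		fprintf(stderr, "DMA submission failed\n");
		return -EIO;
	}
	/* only now is it safe to kick the channel (dma_async_issue_pending) */
	return 0;
}

int main(void)
{
	printf("%d %d\n", prepare_and_submit(0), prepare_and_submit(1));
	return 0;
}
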
14537 +diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
14538 +index 03929b9d3a8bc..d0725bc8b48a4 100644
14539 +--- a/drivers/staging/emxx_udc/emxx_udc.c
14540 ++++ b/drivers/staging/emxx_udc/emxx_udc.c
14541 +@@ -2593,7 +2593,7 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
14542 +
14543 + if (req->unaligned) {
14544 + if (!ep->virt_buf)
14545 +- ep->virt_buf = dma_alloc_coherent(NULL, PAGE_SIZE,
14546 ++ ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
14547 + &ep->phys_buf,
14548 + GFP_ATOMIC | GFP_DMA);
14549 + if (ep->epnum > 0) {
14550 +@@ -3148,7 +3148,7 @@ static int nbu2ss_drv_remove(struct platform_device *pdev)
14551 + for (i = 0; i < NUM_ENDPOINTS; i++) {
14552 + ep = &udc->ep[i];
14553 + if (ep->virt_buf)
14554 +- dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf,
14555 ++ dma_free_coherent(udc->dev, PAGE_SIZE, (void *)ep->virt_buf,
14556 + ep->phys_buf);
14557 + }
14558 +
14559 +diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
14560 +index 54434c2dbaf90..8473e14370747 100644
14561 +--- a/drivers/staging/media/atomisp/pci/sh_css.c
14562 ++++ b/drivers/staging/media/atomisp/pci/sh_css.c
14563 +@@ -9521,7 +9521,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
14564 + if (err)
14565 + {
14566 + IA_CSS_LEAVE_ERR(err);
14567 +- return err;
14568 ++ goto ERR;
14569 + }
14570 + #endif
14571 + for (i = 0; i < num_pipes; i++)
14572 +diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
14573 +index d561f125085a7..d72ebbd17a692 100644
14574 +--- a/drivers/staging/media/hantro/hantro_h264.c
14575 ++++ b/drivers/staging/media/hantro/hantro_h264.c
14576 +@@ -327,7 +327,7 @@ dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
14577 + */
14578 + dst_buf = hantro_get_dst_buf(ctx);
14579 + buf = &dst_buf->vb2_buf;
14580 +- dma_addr = vb2_dma_contig_plane_dma_addr(buf, 0);
14581 ++ dma_addr = hantro_get_dec_buf_addr(ctx, buf);
14582 + }
14583 +
14584 + return dma_addr;
14585 +diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c
14586 +index 44062ffceaea7..6d2a8f2a8f0bb 100644
14587 +--- a/drivers/staging/media/hantro/hantro_postproc.c
14588 ++++ b/drivers/staging/media/hantro/hantro_postproc.c
14589 +@@ -118,7 +118,9 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx)
14590 + unsigned int num_buffers = cap_queue->num_buffers;
14591 + unsigned int i, buf_size;
14592 +
14593 +- buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage;
14594 ++ buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage +
14595 ++ hantro_h264_mv_size(ctx->dst_fmt.width,
14596 ++ ctx->dst_fmt.height);
14597 +
14598 + for (i = 0; i < num_buffers; ++i) {
14599 + struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
14600 +diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
14601 +index fbd53d7c097cd..e9d6bd9e9332a 100644
14602 +--- a/drivers/staging/media/ipu3/ipu3-css-params.c
14603 ++++ b/drivers/staging/media/ipu3/ipu3-css-params.c
14604 +@@ -159,7 +159,7 @@ imgu_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
14605 +
14606 + memset(&cfg->scaler_coeffs_chroma, 0,
14607 + sizeof(cfg->scaler_coeffs_chroma));
14608 +- memset(&cfg->scaler_coeffs_luma, 0, sizeof(*cfg->scaler_coeffs_luma));
14609 ++ memset(&cfg->scaler_coeffs_luma, 0, sizeof(cfg->scaler_coeffs_luma));
14610 + do {
14611 + phase_step_correction++;
14612 +
14613 +diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
14614 +index 7c4df6d48c43d..4df9476ef2a9b 100644
14615 +--- a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
14616 ++++ b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
14617 +@@ -16,6 +16,7 @@
14618 + */
14619 +
14620 + #include <linux/clk.h>
14621 ++#include <linux/delay.h>
14622 + #include <linux/io.h>
14623 + #include <linux/mfd/syscon.h>
14624 + #include <linux/module.h>
14625 +diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
14626 +index 195d963c4fbb4..b6fee7230ce05 100644
14627 +--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
14628 ++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
14629 +@@ -597,7 +597,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
14630 +
14631 + prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
14632 + sizeof(struct ieee80211_rxb *),
14633 +- GFP_KERNEL);
14634 ++ GFP_ATOMIC);
14635 + if (!prxbIndicateArray)
14636 + return;
14637 +
14638 +diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
14639 +index 0e959ebc38b56..a9fb5165b33d9 100644
14640 +--- a/drivers/staging/wfx/data_rx.c
14641 ++++ b/drivers/staging/wfx/data_rx.c
14642 +@@ -80,7 +80,7 @@ void wfx_rx_cb(struct wfx_vif *wvif,
14643 + goto drop;
14644 +
14645 + if (arg->status == HIF_STATUS_RX_FAIL_MIC)
14646 +- hdr->flag |= RX_FLAG_MMIC_ERROR;
14647 ++ hdr->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_IV_STRIPPED;
14648 + else if (arg->status)
14649 + goto drop;
14650 +
14651 +diff --git a/drivers/staging/wilc1000/mon.c b/drivers/staging/wilc1000/mon.c
14652 +index 60331417bd983..66f1c870f4f69 100644
14653 +--- a/drivers/staging/wilc1000/mon.c
14654 ++++ b/drivers/staging/wilc1000/mon.c
14655 +@@ -236,11 +236,10 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
14656 +
14657 + if (register_netdevice(wl->monitor_dev)) {
14658 + netdev_err(real_dev, "register_netdevice failed\n");
14659 ++ free_netdev(wl->monitor_dev);
14660 + return NULL;
14661 + }
14662 + priv = netdev_priv(wl->monitor_dev);
14663 +- if (!priv)
14664 +- return NULL;
14665 +
14666 + priv->real_ndev = real_dev;
14667 +
14668 +diff --git a/drivers/staging/wilc1000/sdio.c b/drivers/staging/wilc1000/sdio.c
14669 +index 36eb589263bfd..b14e4ed6134fc 100644
14670 +--- a/drivers/staging/wilc1000/sdio.c
14671 ++++ b/drivers/staging/wilc1000/sdio.c
14672 +@@ -151,9 +151,10 @@ static int wilc_sdio_probe(struct sdio_func *func,
14673 + wilc->dev = &func->dev;
14674 +
14675 + wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc");
14676 +- if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
14677 ++ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
14678 ++ kfree(sdio_priv);
14679 + return -EPROBE_DEFER;
14680 +- else if (!IS_ERR(wilc->rtc_clk))
14681 ++ } else if (!IS_ERR(wilc->rtc_clk))
14682 + clk_prepare_enable(wilc->rtc_clk);
14683 +
14684 + dev_info(&func->dev, "Driver Initializing success\n");
14685 +diff --git a/drivers/staging/wilc1000/spi.c b/drivers/staging/wilc1000/spi.c
14686 +index 3f19e3f38a397..a18dac0aa6b67 100644
14687 +--- a/drivers/staging/wilc1000/spi.c
14688 ++++ b/drivers/staging/wilc1000/spi.c
14689 +@@ -112,9 +112,10 @@ static int wilc_bus_probe(struct spi_device *spi)
14690 + wilc->dev_irq_num = spi->irq;
14691 +
14692 + wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk");
14693 +- if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
14694 ++ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
14695 ++ kfree(spi_priv);
14696 + return -EPROBE_DEFER;
14697 +- else if (!IS_ERR(wilc->rtc_clk))
14698 ++ } else if (!IS_ERR(wilc->rtc_clk))
14699 + clk_prepare_enable(wilc->rtc_clk);
14700 +
14701 + return 0;
14702 +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
14703 +index 0209bc23e631e..13a280c780c39 100644
14704 +--- a/drivers/target/target_core_user.c
14705 ++++ b/drivers/target/target_core_user.c
14706 +@@ -669,7 +669,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
14707 + void *from, *to = NULL;
14708 + size_t copy_bytes, to_offset, offset;
14709 + struct scatterlist *sg;
14710 +- struct page *page;
14711 ++ struct page *page = NULL;
14712 +
14713 + for_each_sg(data_sg, sg, data_nents, i) {
14714 + int sg_remaining = sg->length;
14715 +diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
14716 +index d1b27b0522a3c..8d60e0ff67b4d 100644
14717 +--- a/drivers/tty/hvc/Kconfig
14718 ++++ b/drivers/tty/hvc/Kconfig
14719 +@@ -81,6 +81,7 @@ config HVC_DCC
14720 + bool "ARM JTAG DCC console"
14721 + depends on ARM || ARM64
14722 + select HVC_DRIVER
14723 ++ select SERIAL_CORE_CONSOLE
14724 + help
14725 + This console uses the JTAG DCC on ARM to create a console under the HVC
14726 + driver. This console is used through a JTAG only on ARM. If you don't have
14727 +diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
14728 +index 55105ac38f89b..509d1042825a1 100644
14729 +--- a/drivers/tty/hvc/hvcs.c
14730 ++++ b/drivers/tty/hvc/hvcs.c
14731 +@@ -1216,13 +1216,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
14732 +
14733 + tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
14734 +
14735 +- /*
14736 +- * This line is important because it tells hvcs_open that this
14737 +- * device needs to be re-configured the next time hvcs_open is
14738 +- * called.
14739 +- */
14740 +- tty->driver_data = NULL;
14741 +-
14742 + free_irq(irq, hvcsd);
14743 + return;
14744 + } else if (hvcsd->port.count < 0) {
14745 +@@ -1237,6 +1230,13 @@ static void hvcs_cleanup(struct tty_struct * tty)
14746 + {
14747 + struct hvcs_struct *hvcsd = tty->driver_data;
14748 +
14749 ++ /*
14750 ++ * This line is important because it tells hvcs_open that this
14751 ++ * device needs to be re-configured the next time hvcs_open is
14752 ++ * called.
14753 ++ */
14754 ++ tty->driver_data = NULL;
14755 ++
14756 + tty_port_put(&hvcsd->port);
14757 + }
14758 +
14759 +diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c
14760 +index cf20616340a1a..fe569f6294a24 100644
14761 +--- a/drivers/tty/ipwireless/network.c
14762 ++++ b/drivers/tty/ipwireless/network.c
14763 +@@ -117,7 +117,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
14764 + skb->len,
14765 + notify_packet_sent,
14766 + network);
14767 +- if (ret == -1) {
14768 ++ if (ret < 0) {
14769 + skb_pull(skb, 2);
14770 + return 0;
14771 + }
14772 +@@ -134,7 +134,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
14773 + notify_packet_sent,
14774 + network);
14775 + kfree(buf);
14776 +- if (ret == -1)
14777 ++ if (ret < 0)
14778 + return 0;
14779 + }
14780 + kfree_skb(skb);
14781 +diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
14782 +index fad3401e604d9..23584769fc292 100644
14783 +--- a/drivers/tty/ipwireless/tty.c
14784 ++++ b/drivers/tty/ipwireless/tty.c
14785 +@@ -218,7 +218,7 @@ static int ipw_write(struct tty_struct *linux_tty,
14786 + ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS,
14787 + buf, count,
14788 + ipw_write_packet_sent_callback, tty);
14789 +- if (ret == -1) {
14790 ++ if (ret < 0) {
14791 + mutex_unlock(&tty->ipw_tty_mutex);
14792 + return 0;
14793 + }
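
Note on the two ipwireless hunks above: ipwireless_send_packet() can fail with any negative errno value, not only -1, so testing ret == -1 silently misses most failures; ret < 0 catches them all. In miniature (hypothetical sender, userspace C):

    #include <errno.h>
    #include <stdio.h>

    /* hypothetical sender that fails with -ENOMEM rather than -1 */
    static int send_packet(void)
    {
        return -ENOMEM;   /* -12 */
    }

    int main(void)
    {
        int ret = send_packet();

        if (ret == -1)
            printf("== -1: error missed\n");        /* never reached */
        if (ret < 0)
            printf("<  0: error caught (%d)\n", ret);
        return 0;
    }
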
14794 +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
14795 +index 00099a8439d21..c6a1d8c4e6894 100644
14796 +--- a/drivers/tty/pty.c
14797 ++++ b/drivers/tty/pty.c
14798 +@@ -120,10 +120,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
14799 + spin_lock_irqsave(&to->port->lock, flags);
14800 + /* Stuff the data into the input queue of the other end */
14801 + c = tty_insert_flip_string(to->port, buf, c);
14802 ++ spin_unlock_irqrestore(&to->port->lock, flags);
14803 + /* And shovel */
14804 + if (c)
14805 + tty_flip_buffer_push(to->port);
14806 +- spin_unlock_irqrestore(&to->port->lock, flags);
14807 + }
14808 + return c;
14809 + }
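
Note on the pty.c hunk above: only the insertion into the flip buffer needs the port spinlock; tty_flip_buffer_push() is moved outside the locked region so the buffer work it triggers cannot contend with (or deadlock against) the same lock. The shape of the fix, as a userspace sketch (pthread mutex standing in for the spinlock; all names hypothetical):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
    static char queue[256];
    static int  queued;

    /* stand-in for tty_insert_flip_string(): needs the port lock */
    static int queue_bytes(const char *buf, int n)
    {
        if (n > (int)(sizeof(queue) - queued))
            n = (int)(sizeof(queue) - queued);
        memcpy(queue + queued, buf, n);
        queued += n;
        return n;
    }

    /* stand-in for tty_flip_buffer_push(): must run unlocked */
    static void push_to_reader(void)
    {
        printf("pushed %d byte(s)\n", queued);
        queued = 0;
    }

    static int write_bytes(const char *buf, int n)
    {
        pthread_mutex_lock(&port_lock);
        n = queue_bytes(buf, n);           /* only this needs the lock */
        pthread_mutex_unlock(&port_lock);  /* drop it before pushing */
        if (n)
            push_to_reader();
        return n;
    }

    int main(void)
    {
        write_bytes("hello", 5);
        return 0;
    }
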
14810 +diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
14811 +index 780908d435577..896b9c77117d3 100644
14812 +--- a/drivers/tty/serial/Kconfig
14813 ++++ b/drivers/tty/serial/Kconfig
14814 +@@ -8,6 +8,7 @@ menu "Serial drivers"
14815 +
14816 + config SERIAL_EARLYCON
14817 + bool
14818 ++ depends on SERIAL_CORE
14819 + help
14820 + Support for early consoles with the earlycon parameter. This enables
14821 + the console before standard serial driver is probed. The console is
14822 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
14823 +index 90298c4030421..f8ba7690efe31 100644
14824 +--- a/drivers/tty/serial/fsl_lpuart.c
14825 ++++ b/drivers/tty/serial/fsl_lpuart.c
14826 +@@ -649,26 +649,24 @@ static int lpuart32_poll_init(struct uart_port *port)
14827 + spin_lock_irqsave(&sport->port.lock, flags);
14828 +
14829 + /* Disable Rx & Tx */
14830 +- lpuart32_write(&sport->port, UARTCTRL, 0);
14831 ++ lpuart32_write(&sport->port, 0, UARTCTRL);
14832 +
14833 + temp = lpuart32_read(&sport->port, UARTFIFO);
14834 +
14835 + /* Enable Rx and Tx FIFO */
14836 +- lpuart32_write(&sport->port, UARTFIFO,
14837 +- temp | UARTFIFO_RXFE | UARTFIFO_TXFE);
14838 ++ lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
14839 +
14840 + /* flush Tx and Rx FIFO */
14841 +- lpuart32_write(&sport->port, UARTFIFO,
14842 +- UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH);
14843 ++ lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
14844 +
14845 + /* explicitly clear RDRF */
14846 + if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
14847 + lpuart32_read(&sport->port, UARTDATA);
14848 +- lpuart32_write(&sport->port, UARTFIFO, UARTFIFO_RXUF);
14849 ++ lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO);
14850 + }
14851 +
14852 + /* Enable Rx and Tx */
14853 +- lpuart32_write(&sport->port, UARTCTRL, UARTCTRL_RE | UARTCTRL_TE);
14854 ++ lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
14855 + spin_unlock_irqrestore(&sport->port.lock, flags);
14856 +
14857 + return 0;
14858 +@@ -677,12 +675,12 @@ static int lpuart32_poll_init(struct uart_port *port)
14859 + static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c)
14860 + {
14861 + lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
14862 +- lpuart32_write(port, UARTDATA, c);
14863 ++ lpuart32_write(port, c, UARTDATA);
14864 + }
14865 +
14866 + static int lpuart32_poll_get_char(struct uart_port *port)
14867 + {
14868 +- if (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF))
14869 ++ if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF))
14870 + return NO_POLL_CHAR;
14871 +
14872 + return lpuart32_read(port, UARTDATA);
14873 +diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
14874 +index 37ae7fc5f8dd8..7bac485b49ba9 100644
14875 +--- a/drivers/usb/cdns3/gadget.c
14876 ++++ b/drivers/usb/cdns3/gadget.c
14877 +@@ -2988,12 +2988,12 @@ void cdns3_gadget_exit(struct cdns3 *cdns)
14878 +
14879 + priv_dev = cdns->gadget_dev;
14880 +
14881 +- devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
14882 +
14883 + pm_runtime_mark_last_busy(cdns->dev);
14884 + pm_runtime_put_autosuspend(cdns->dev);
14885 +
14886 + usb_del_gadget_udc(&priv_dev->gadget);
14887 ++ devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
14888 +
14889 + cdns3_free_all_eps(priv_dev);
14890 +
14891 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
14892 +index 7499ba118665a..808722b8294a4 100644
14893 +--- a/drivers/usb/class/cdc-acm.c
14894 ++++ b/drivers/usb/class/cdc-acm.c
14895 +@@ -1243,9 +1243,21 @@ static int acm_probe(struct usb_interface *intf,
14896 + }
14897 + }
14898 + } else {
14899 ++ int class = -1;
14900 ++
14901 + data_intf_num = union_header->bSlaveInterface0;
14902 + control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
14903 + data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
14904 ++
14905 ++ if (control_interface)
14906 ++ class = control_interface->cur_altsetting->desc.bInterfaceClass;
14907 ++
14908 ++ if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) {
14909 ++ dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n");
14910 ++ combined_interfaces = 1;
14911 ++ control_interface = data_interface = intf;
14912 ++ goto look_for_collapsed_interface;
14913 ++ }
14914 + }
14915 +
14916 + if (!control_interface || !data_interface) {
14917 +@@ -1900,6 +1912,17 @@ static const struct usb_device_id acm_ids[] = {
14918 + .driver_info = IGNORE_DEVICE,
14919 + },
14920 +
14921 ++ /* Exclude ETAS ES58x */
14922 ++ { USB_DEVICE(0x108c, 0x0159), /* ES581.4 */
14923 ++ .driver_info = IGNORE_DEVICE,
14924 ++ },
14925 ++ { USB_DEVICE(0x108c, 0x0168), /* ES582.1 */
14926 ++ .driver_info = IGNORE_DEVICE,
14927 ++ },
14928 ++ { USB_DEVICE(0x108c, 0x0169), /* ES584.1 */
14929 ++ .driver_info = IGNORE_DEVICE,
14930 ++ },
14931 ++
14932 + { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
14933 + .driver_info = SEND_ZERO_PACKET,
14934 + },
14935 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
14936 +index e3db6fbeadef8..0c7a0adfd1e1f 100644
14937 +--- a/drivers/usb/class/cdc-wdm.c
14938 ++++ b/drivers/usb/class/cdc-wdm.c
14939 +@@ -58,6 +58,9 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
14940 +
14941 + #define WDM_MAX 16
14942 +
14943 ++/* we cannot wait forever at flush() */
14944 ++#define WDM_FLUSH_TIMEOUT (30 * HZ)
14945 ++
14946 + /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
14947 + #define WDM_DEFAULT_BUFSIZE 256
14948 +
14949 +@@ -151,7 +154,7 @@ static void wdm_out_callback(struct urb *urb)
14950 + kfree(desc->outbuf);
14951 + desc->outbuf = NULL;
14952 + clear_bit(WDM_IN_USE, &desc->flags);
14953 +- wake_up(&desc->wait);
14954 ++ wake_up_all(&desc->wait);
14955 + }
14956 +
14957 + static void wdm_in_callback(struct urb *urb)
14958 +@@ -393,6 +396,9 @@ static ssize_t wdm_write
14959 + if (test_bit(WDM_RESETTING, &desc->flags))
14960 + r = -EIO;
14961 +
14962 ++ if (test_bit(WDM_DISCONNECTING, &desc->flags))
14963 ++ r = -ENODEV;
14964 ++
14965 + if (r < 0) {
14966 + rv = r;
14967 + goto out_free_mem_pm;
14968 +@@ -424,6 +430,7 @@ static ssize_t wdm_write
14969 + if (rv < 0) {
14970 + desc->outbuf = NULL;
14971 + clear_bit(WDM_IN_USE, &desc->flags);
14972 ++ wake_up_all(&desc->wait); /* for wdm_wait_for_response() */
14973 + dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
14974 + rv = usb_translate_errors(rv);
14975 + goto out_free_mem_pm;
14976 +@@ -583,28 +590,58 @@ err:
14977 + return rv;
14978 + }
14979 +
14980 +-static int wdm_flush(struct file *file, fl_owner_t id)
14981 ++static int wdm_wait_for_response(struct file *file, long timeout)
14982 + {
14983 + struct wdm_device *desc = file->private_data;
14984 ++ long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */
14985 ++
14986 ++ /*
14987 ++ * Needs both flags. We cannot make do with one, because resetting it
14988 ++ * would cause a race with write(), yet we need to signal a disconnect.
14989 ++ */
14990 ++ rv = wait_event_interruptible_timeout(desc->wait,
14991 ++ !test_bit(WDM_IN_USE, &desc->flags) ||
14992 ++ test_bit(WDM_DISCONNECTING, &desc->flags),
14993 ++ timeout);
14994 +
14995 +- wait_event(desc->wait,
14996 +- /*
14997 +- * needs both flags. We cannot do with one
14998 +- * because resetting it would cause a race
14999 +- * with write() yet we need to signal
15000 +- * a disconnect
15001 +- */
15002 +- !test_bit(WDM_IN_USE, &desc->flags) ||
15003 +- test_bit(WDM_DISCONNECTING, &desc->flags));
15004 +-
15005 +- /* cannot dereference desc->intf if WDM_DISCONNECTING */
15006 ++ /*
15007 ++ * Try to report the correct error; this is best effort.
15008 ++ * We are inevitably racing with the hardware.
15009 ++ */
15010 + if (test_bit(WDM_DISCONNECTING, &desc->flags))
15011 + return -ENODEV;
15012 +- if (desc->werr < 0)
15013 +- dev_err(&desc->intf->dev, "Error in flush path: %d\n",
15014 +- desc->werr);
15015 ++ if (!rv)
15016 ++ return -EIO;
15017 ++ if (rv < 0)
15018 ++ return -EINTR;
15019 ++
15020 ++ spin_lock_irq(&desc->iuspin);
15021 ++ rv = desc->werr;
15022 ++ desc->werr = 0;
15023 ++ spin_unlock_irq(&desc->iuspin);
15024 ++
15025 ++ return usb_translate_errors(rv);
15026 ++
15027 ++}
15028 ++
15029 ++/*
15030 ++ * You need to send a signal when you react to malicious or defective hardware.
15032 ++ * Also, don't abort when fsync() returns -EINVAL, since older kernels
15033 ++ * which do not implement wdm_flush() will return -EINVAL.
15033 ++ */
15034 ++static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
15035 ++{
15036 ++ return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT);
15037 ++}
15038 +
15039 +- return usb_translate_errors(desc->werr);
15040 ++/*
15041 ++ * Same as wdm_fsync(), except it uses a finite timeout in order to react to
15042 ++ * malicious or defective hardware which ceased communication after close() was
15043 ++ * implicitly called due to process termination.
15044 ++ */
15045 ++static int wdm_flush(struct file *file, fl_owner_t id)
15046 ++{
15047 ++ return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT);
15048 + }
15049 +
15050 + static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
15051 +@@ -729,6 +766,7 @@ static const struct file_operations wdm_fops = {
15052 + .owner = THIS_MODULE,
15053 + .read = wdm_read,
15054 + .write = wdm_write,
15055 ++ .fsync = wdm_fsync,
15056 + .open = wdm_open,
15057 + .flush = wdm_flush,
15058 + .release = wdm_release,
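
Note on the cdc-wdm rework above: flush() and fsync() now share one wait helper, differing only in the timeout (WDM_FLUSH_TIMEOUT versus MAX_SCHEDULE_TIMEOUT), and the helper decodes the three-way result of wait_event_interruptible_timeout(): positive means the condition became true, zero means the wait timed out, negative means a signal arrived. A compact userspace mirror of that mapping (wrapper name hypothetical):

    #include <errno.h>
    #include <stdio.h>

    /*
     * Mirrors the wait_event_interruptible_timeout() convention:
     *   rv > 0  -> condition became true before the timeout
     *   rv == 0 -> timed out (hardware stopped responding)
     *   rv < 0  -> interrupted by a signal
     */
    static int map_wait_result(long rv)
    {
        if (rv == 0)
            return -EIO;
        if (rv < 0)
            return -EINTR;
        return 0;
    }

    int main(void)
    {
        printf("timeout   -> %d\n", map_wait_result(0));   /* -EIO   */
        printf("signal    -> %d\n", map_wait_result(-1));  /* -EINTR */
        printf("completed -> %d\n", map_wait_result(30));  /* 0      */
        return 0;
    }
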
15059 +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
15060 +index da923ec176122..31ca5abb4c12a 100644
15061 +--- a/drivers/usb/core/urb.c
15062 ++++ b/drivers/usb/core/urb.c
15063 +@@ -772,11 +772,12 @@ void usb_block_urb(struct urb *urb)
15064 + EXPORT_SYMBOL_GPL(usb_block_urb);
15065 +
15066 + /**
15067 +- * usb_kill_anchored_urbs - cancel transfer requests en masse
15068 ++ * usb_kill_anchored_urbs - kill all URBs associated with an anchor
15069 + * @anchor: anchor the requests are bound to
15070 + *
15071 +- * this allows all outstanding URBs to be killed starting
15072 +- * from the back of the queue
15073 ++ * This kills all outstanding URBs starting from the back of the queue,
15074 ++ * with the guarantee that no completer callbacks will take place from the
15075 ++ * anchor after this function returns.
15076 + *
15077 + * This routine should not be called by a driver after its disconnect
15078 + * method has returned.
15079 +@@ -784,20 +785,26 @@ EXPORT_SYMBOL_GPL(usb_block_urb);
15080 + void usb_kill_anchored_urbs(struct usb_anchor *anchor)
15081 + {
15082 + struct urb *victim;
15083 ++ int surely_empty;
15084 +
15085 +- spin_lock_irq(&anchor->lock);
15086 +- while (!list_empty(&anchor->urb_list)) {
15087 +- victim = list_entry(anchor->urb_list.prev, struct urb,
15088 +- anchor_list);
15089 +- /* we must make sure the URB isn't freed before we kill it*/
15090 +- usb_get_urb(victim);
15091 +- spin_unlock_irq(&anchor->lock);
15092 +- /* this will unanchor the URB */
15093 +- usb_kill_urb(victim);
15094 +- usb_put_urb(victim);
15095 ++ do {
15096 + spin_lock_irq(&anchor->lock);
15097 +- }
15098 +- spin_unlock_irq(&anchor->lock);
15099 ++ while (!list_empty(&anchor->urb_list)) {
15100 ++ victim = list_entry(anchor->urb_list.prev,
15101 ++ struct urb, anchor_list);
15102 ++ /* make sure the URB isn't freed before we kill it */
15103 ++ usb_get_urb(victim);
15104 ++ spin_unlock_irq(&anchor->lock);
15105 ++ /* this will unanchor the URB */
15106 ++ usb_kill_urb(victim);
15107 ++ usb_put_urb(victim);
15108 ++ spin_lock_irq(&anchor->lock);
15109 ++ }
15110 ++ surely_empty = usb_anchor_check_wakeup(anchor);
15111 ++
15112 ++ spin_unlock_irq(&anchor->lock);
15113 ++ cpu_relax();
15114 ++ } while (!surely_empty);
15115 + }
15116 + EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
15117 +
15118 +@@ -816,21 +823,27 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
15119 + void usb_poison_anchored_urbs(struct usb_anchor *anchor)
15120 + {
15121 + struct urb *victim;
15122 ++ int surely_empty;
15123 +
15124 +- spin_lock_irq(&anchor->lock);
15125 +- anchor->poisoned = 1;
15126 +- while (!list_empty(&anchor->urb_list)) {
15127 +- victim = list_entry(anchor->urb_list.prev, struct urb,
15128 +- anchor_list);
15129 +- /* we must make sure the URB isn't freed before we kill it*/
15130 +- usb_get_urb(victim);
15131 +- spin_unlock_irq(&anchor->lock);
15132 +- /* this will unanchor the URB */
15133 +- usb_poison_urb(victim);
15134 +- usb_put_urb(victim);
15135 ++ do {
15136 + spin_lock_irq(&anchor->lock);
15137 +- }
15138 +- spin_unlock_irq(&anchor->lock);
15139 ++ anchor->poisoned = 1;
15140 ++ while (!list_empty(&anchor->urb_list)) {
15141 ++ victim = list_entry(anchor->urb_list.prev,
15142 ++ struct urb, anchor_list);
15143 ++ /* make sure the URB isn't freed before we kill it */
15144 ++ usb_get_urb(victim);
15145 ++ spin_unlock_irq(&anchor->lock);
15146 ++ /* this will unanchor the URB */
15147 ++ usb_poison_urb(victim);
15148 ++ usb_put_urb(victim);
15149 ++ spin_lock_irq(&anchor->lock);
15150 ++ }
15151 ++ surely_empty = usb_anchor_check_wakeup(anchor);
15152 ++
15153 ++ spin_unlock_irq(&anchor->lock);
15154 ++ cpu_relax();
15155 ++ } while (!surely_empty);
15156 + }
15157 + EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
15158 +
15159 +@@ -970,14 +983,20 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
15160 + {
15161 + struct urb *victim;
15162 + unsigned long flags;
15163 ++ int surely_empty;
15164 ++
15165 ++ do {
15166 ++ spin_lock_irqsave(&anchor->lock, flags);
15167 ++ while (!list_empty(&anchor->urb_list)) {
15168 ++ victim = list_entry(anchor->urb_list.prev,
15169 ++ struct urb, anchor_list);
15170 ++ __usb_unanchor_urb(victim, anchor);
15171 ++ }
15172 ++ surely_empty = usb_anchor_check_wakeup(anchor);
15173 +
15174 +- spin_lock_irqsave(&anchor->lock, flags);
15175 +- while (!list_empty(&anchor->urb_list)) {
15176 +- victim = list_entry(anchor->urb_list.prev, struct urb,
15177 +- anchor_list);
15178 +- __usb_unanchor_urb(victim, anchor);
15179 +- }
15180 +- spin_unlock_irqrestore(&anchor->lock, flags);
15181 ++ spin_unlock_irqrestore(&anchor->lock, flags);
15182 ++ cpu_relax();
15183 ++ } while (!surely_empty);
15184 + }
15185 +
15186 + EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
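
Note on the three urb.c hunks above: each drain loop now re-checks the anchor under the lock and repeats, because a new URB can be anchored (or a wakeup left pending) in the window where the lock is dropped around usb_kill_urb()/usb_poison_urb(). The pattern in miniature (pthread mutex and a plain list as stand-ins; in this demo nothing refills the list, so it finishes in one pass):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    /* stand-in for usb_kill_urb(): may sleep, so runs unlocked */
    static void kill_item(struct node *n)
    {
        free(n);
    }

    static void drain_all(void)
    {
        int surely_empty;

        do {
            pthread_mutex_lock(&lock);
            while (head) {
                struct node *victim = head;

                head = victim->next;
                pthread_mutex_unlock(&lock);
                kill_item(victim);      /* lock dropped: list may change */
                pthread_mutex_lock(&lock);
            }
            /* re-check under the lock before declaring victory */
            surely_empty = (head == NULL);
            pthread_mutex_unlock(&lock);
        } while (!surely_empty);
    }

    int main(void)
    {
        head = calloc(1, sizeof(*head));
        drain_all();
        return 0;
    }
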
15187 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
15188 +index 7faf5f8c056d4..642926f9670e6 100644
15189 +--- a/drivers/usb/dwc2/gadget.c
15190 ++++ b/drivers/usb/dwc2/gadget.c
15191 +@@ -712,8 +712,11 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
15192 + */
15193 + static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
15194 + {
15195 ++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
15196 + int is_isoc = hs_ep->isochronous;
15197 + unsigned int maxsize;
15198 ++ u32 mps = hs_ep->ep.maxpacket;
15199 ++ int dir_in = hs_ep->dir_in;
15200 +
15201 + if (is_isoc)
15202 + maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
15203 +@@ -722,6 +725,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
15204 + else
15205 + maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
15206 +
15207 ++ /* Interrupt OUT EP with mps not multiple of 4 */
15208 ++ if (hs_ep->index)
15209 ++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
15210 ++ maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
15211 ++
15212 + return maxsize;
15213 + }
15214 +
15215 +@@ -737,11 +745,14 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
15216 + * Isochronous - descriptor rx/tx bytes bitfield limit,
15217 + * Control In/Bulk/Interrupt - multiple of mps. This will allow to not
15218 + * have concatenations from various descriptors within one packet.
15219 ++ * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds
15220 ++ * to a single descriptor.
15221 + *
15222 + * Selects corresponding mask for RX/TX bytes as well.
15223 + */
15224 + static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
15225 + {
15226 ++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
15227 + u32 mps = hs_ep->ep.maxpacket;
15228 + int dir_in = hs_ep->dir_in;
15229 + u32 desc_size = 0;
15230 +@@ -765,6 +776,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
15231 + desc_size -= desc_size % mps;
15232 + }
15233 +
15234 ++ /* Interrupt OUT EP with mps not multiple of 4 */
15235 ++ if (hs_ep->index)
15236 ++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
15237 ++ desc_size = mps;
15238 ++ *mask = DEV_DMA_NBYTES_MASK;
15239 ++ }
15240 ++
15241 + return desc_size;
15242 + }
15243 +
15244 +@@ -1123,13 +1141,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
15245 + length += (mps - (length % mps));
15246 + }
15247 +
15248 +- /*
15249 +- * If more data to send, adjust DMA for EP0 out data stage.
15250 +- * ureq->dma stays unchanged, hence increment it by already
15251 +- * passed passed data count before starting new transaction.
15252 +- */
15253 +- if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
15254 +- continuing)
15255 ++ if (continuing)
15256 + offset = ureq->actual;
15257 +
15258 + /* Fill DDMA chain entries */
15259 +@@ -2320,22 +2332,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
15260 + */
15261 + static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
15262 + {
15263 ++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
15264 + struct dwc2_hsotg *hsotg = hs_ep->parent;
15265 + unsigned int bytes_rem = 0;
15266 ++ unsigned int bytes_rem_correction = 0;
15267 + struct dwc2_dma_desc *desc = hs_ep->desc_list;
15268 + int i;
15269 + u32 status;
15270 ++ u32 mps = hs_ep->ep.maxpacket;
15271 ++ int dir_in = hs_ep->dir_in;
15272 +
15273 + if (!desc)
15274 + return -EINVAL;
15275 +
15276 ++ /* Interrupt OUT EP with mps not multiple of 4 */
15277 ++ if (hs_ep->index)
15278 ++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
15279 ++ bytes_rem_correction = 4 - (mps % 4);
15280 ++
15281 + for (i = 0; i < hs_ep->desc_count; ++i) {
15282 + status = desc->status;
15283 + bytes_rem += status & DEV_DMA_NBYTES_MASK;
15284 ++ bytes_rem -= bytes_rem_correction;
15285 +
15286 + if (status & DEV_DMA_STS_MASK)
15287 + dev_err(hsotg->dev, "descriptor %d closed with %x\n",
15288 + i, status & DEV_DMA_STS_MASK);
15289 ++
15290 ++ if (status & DEV_DMA_L)
15291 ++ break;
15292 ++
15293 + desc++;
15294 + }
15295 +
15296 +diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
15297 +index ce736d67c7c34..fd73ddd8eb753 100644
15298 +--- a/drivers/usb/dwc2/params.c
15299 ++++ b/drivers/usb/dwc2/params.c
15300 +@@ -860,7 +860,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
15301 + int dwc2_init_params(struct dwc2_hsotg *hsotg)
15302 + {
15303 + const struct of_device_id *match;
15304 +- void (*set_params)(void *data);
15305 ++ void (*set_params)(struct dwc2_hsotg *data);
15306 +
15307 + dwc2_set_default_params(hsotg);
15308 + dwc2_get_device_properties(hsotg);
15309 +diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
15310 +index db9fd4bd1a38c..b28e90e0b685d 100644
15311 +--- a/drivers/usb/dwc2/platform.c
15312 ++++ b/drivers/usb/dwc2/platform.c
15313 +@@ -584,12 +584,16 @@ static int dwc2_driver_probe(struct platform_device *dev)
15314 + if (retval) {
15315 + hsotg->gadget.udc = NULL;
15316 + dwc2_hsotg_remove(hsotg);
15317 +- goto error_init;
15318 ++ goto error_debugfs;
15319 + }
15320 + }
15321 + #endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
15322 + return 0;
15323 +
15324 ++error_debugfs:
15325 ++ dwc2_debugfs_exit(hsotg);
15326 ++ if (hsotg->hcd_enabled)
15327 ++ dwc2_hcd_remove(hsotg);
15328 + error_init:
15329 + if (hsotg->params.activate_stm_id_vb_detection)
15330 + regulator_disable(hsotg->usb33d);
15331 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
15332 +index 25c686a752b0f..928a85b0d1cdd 100644
15333 +--- a/drivers/usb/dwc3/core.c
15334 ++++ b/drivers/usb/dwc3/core.c
15335 +@@ -119,6 +119,7 @@ static void __dwc3_set_mode(struct work_struct *work)
15336 + struct dwc3 *dwc = work_to_dwc(work);
15337 + unsigned long flags;
15338 + int ret;
15339 ++ u32 reg;
15340 +
15341 + if (dwc->dr_mode != USB_DR_MODE_OTG)
15342 + return;
15343 +@@ -172,6 +173,11 @@ static void __dwc3_set_mode(struct work_struct *work)
15344 + otg_set_vbus(dwc->usb2_phy->otg, true);
15345 + phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
15346 + phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
15347 ++ if (dwc->dis_split_quirk) {
15348 ++ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
15349 ++ reg |= DWC3_GUCTL3_SPLITDISABLE;
15350 ++ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
15351 ++ }
15352 + }
15353 + break;
15354 + case DWC3_GCTL_PRTCAP_DEVICE:
15355 +@@ -930,13 +936,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
15356 + */
15357 + dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
15358 +
15359 +- /* Handle USB2.0-only core configuration */
15360 +- if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
15361 +- DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
15362 +- if (dwc->maximum_speed == USB_SPEED_SUPER)
15363 +- dwc->maximum_speed = USB_SPEED_HIGH;
15364 +- }
15365 +-
15366 + ret = dwc3_phy_setup(dwc);
15367 + if (ret)
15368 + goto err0;
15369 +@@ -1357,6 +1356,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
15370 + dwc->dis_metastability_quirk = device_property_read_bool(dev,
15371 + "snps,dis_metastability_quirk");
15372 +
15373 ++ dwc->dis_split_quirk = device_property_read_bool(dev,
15374 ++ "snps,dis-split-quirk");
15375 ++
15376 + dwc->lpm_nyet_threshold = lpm_nyet_threshold;
15377 + dwc->tx_de_emphasis = tx_de_emphasis;
15378 +
15379 +@@ -1382,6 +1384,8 @@ bool dwc3_has_imod(struct dwc3 *dwc)
15380 + static void dwc3_check_params(struct dwc3 *dwc)
15381 + {
15382 + struct device *dev = dwc->dev;
15383 ++ unsigned int hwparam_gen =
15384 ++ DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
15385 +
15386 + /* Check for proper value of imod_interval */
15387 + if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
15388 +@@ -1413,17 +1417,23 @@ static void dwc3_check_params(struct dwc3 *dwc)
15389 + dwc->maximum_speed);
15390 + /* fall through */
15391 + case USB_SPEED_UNKNOWN:
15392 +- /* default to superspeed */
15393 +- dwc->maximum_speed = USB_SPEED_SUPER;
15394 +-
15395 +- /*
15396 +- * default to superspeed plus if we are capable.
15397 +- */
15398 +- if ((DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) &&
15399 +- (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
15400 +- DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
15401 ++ switch (hwparam_gen) {
15402 ++ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
15403 + dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
15404 +-
15405 ++ break;
15406 ++ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
15407 ++ if (DWC3_IP_IS(DWC32))
15408 ++ dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
15409 ++ else
15410 ++ dwc->maximum_speed = USB_SPEED_SUPER;
15411 ++ break;
15412 ++ case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
15413 ++ dwc->maximum_speed = USB_SPEED_HIGH;
15414 ++ break;
15415 ++ default:
15416 ++ dwc->maximum_speed = USB_SPEED_SUPER;
15417 ++ break;
15418 ++ }
15419 + break;
15420 + }
15421 + }
15422 +@@ -1866,10 +1876,26 @@ static int dwc3_resume(struct device *dev)
15423 +
15424 + return 0;
15425 + }
15426 ++
15427 ++static void dwc3_complete(struct device *dev)
15428 ++{
15429 ++ struct dwc3 *dwc = dev_get_drvdata(dev);
15430 ++ u32 reg;
15431 ++
15432 ++ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
15433 ++ dwc->dis_split_quirk) {
15434 ++ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
15435 ++ reg |= DWC3_GUCTL3_SPLITDISABLE;
15436 ++ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
15437 ++ }
15438 ++}
15439 ++#else
15440 ++#define dwc3_complete NULL
15441 + #endif /* CONFIG_PM_SLEEP */
15442 +
15443 + static const struct dev_pm_ops dwc3_dev_pm_ops = {
15444 + SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
15445 ++ .complete = dwc3_complete,
15446 + SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
15447 + dwc3_runtime_idle)
15448 + };
15449 +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
15450 +index 013f42a2b5dcc..af5533b097133 100644
15451 +--- a/drivers/usb/dwc3/core.h
15452 ++++ b/drivers/usb/dwc3/core.h
15453 +@@ -138,6 +138,7 @@
15454 + #define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10))
15455 +
15456 + #define DWC3_GHWPARAMS8 0xc600
15457 ++#define DWC3_GUCTL3 0xc60c
15458 + #define DWC3_GFLADJ 0xc630
15459 +
15460 + /* Device Registers */
15461 +@@ -380,6 +381,9 @@
15462 + /* Global User Control Register 2 */
15463 + #define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
15464 +
15465 ++/* Global User Control Register 3 */
15466 ++#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
15467 ++
15468 + /* Device Configuration Register */
15469 + #define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
15470 + #define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
15471 +@@ -1052,6 +1056,7 @@ struct dwc3_scratchpad_array {
15472 + * 2 - No de-emphasis
15473 + * 3 - Reserved
15474 + * @dis_metastability_quirk: set to disable metastability quirk.
15475 ++ * @dis_split_quirk: set to disable split boundary.
15476 + * @imod_interval: set the interrupt moderation interval in 250ns
15477 + * increments or 0 to disable.
15478 + */
15479 +@@ -1245,6 +1250,8 @@ struct dwc3 {
15480 +
15481 + unsigned dis_metastability_quirk:1;
15482 +
15483 ++ unsigned dis_split_quirk:1;
15484 ++
15485 + u16 imod_interval;
15486 + };
15487 +
15488 +diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
15489 +index 8852fbfdead4e..336253ff55749 100644
15490 +--- a/drivers/usb/dwc3/dwc3-of-simple.c
15491 ++++ b/drivers/usb/dwc3/dwc3-of-simple.c
15492 +@@ -176,6 +176,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
15493 + { .compatible = "cavium,octeon-7130-usb-uctl" },
15494 + { .compatible = "sprd,sc9860-dwc3" },
15495 + { .compatible = "allwinner,sun50i-h6-dwc3" },
15496 ++ { .compatible = "hisilicon,hi3670-dwc3" },
15497 + { /* Sentinel */ }
15498 + };
15499 + MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
15500 +diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
15501 +index 1f638759a9533..92a7c3a839454 100644
15502 +--- a/drivers/usb/gadget/function/f_ncm.c
15503 ++++ b/drivers/usb/gadget/function/f_ncm.c
15504 +@@ -85,8 +85,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
15505 + /* peak (theoretical) bulk transfer rate in bits-per-second */
15506 + static inline unsigned ncm_bitrate(struct usb_gadget *g)
15507 + {
15508 +- if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
15509 +- return 13 * 1024 * 8 * 1000 * 8;
15510 ++ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
15511 ++ return 4250000000U;
15512 ++ else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
15513 ++ return 3750000000U;
15514 + else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
15515 + return 13 * 512 * 8 * 1000 * 8;
15516 + else
15517 +@@ -1534,7 +1536,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
15518 + fs_ncm_notify_desc.bEndpointAddress;
15519 +
15520 + status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
15521 +- ncm_ss_function, NULL);
15522 ++ ncm_ss_function, ncm_ss_function);
15523 + if (status)
15524 + goto fail;
15525 +
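
Note on the ncm_bitrate() hunk above: the high-speed branch that is kept reads 13 * 512 * 8 * 1000 * 8, i.e. 13 packets per 125 us microframe x 512 bytes x 8 bits x 8000 microframes/s = 425,984,000 bit/s. The old SuperSpeed branch reused that formula with 1024-byte packets (about 852 Mbit/s), far below what a 5 Gbit/s link can carry, so the patch replaces it with explicit constants: 3,750,000,000 bit/s for SuperSpeed and 4,250,000,000 bit/s for SuperSpeed Plus.
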
15526 +diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
15527 +index 9c7ed2539ff77..8ed1295d7e350 100644
15528 +--- a/drivers/usb/gadget/function/f_printer.c
15529 ++++ b/drivers/usb/gadget/function/f_printer.c
15530 +@@ -31,6 +31,7 @@
15531 + #include <linux/types.h>
15532 + #include <linux/ctype.h>
15533 + #include <linux/cdev.h>
15534 ++#include <linux/kref.h>
15535 +
15536 + #include <asm/byteorder.h>
15537 + #include <linux/io.h>
15538 +@@ -64,7 +65,7 @@ struct printer_dev {
15539 + struct usb_gadget *gadget;
15540 + s8 interface;
15541 + struct usb_ep *in_ep, *out_ep;
15542 +-
15543 ++ struct kref kref;
15544 + struct list_head rx_reqs; /* List of free RX structs */
15545 + struct list_head rx_reqs_active; /* List of Active RX xfers */
15546 + struct list_head rx_buffers; /* List of completed xfers */
15547 +@@ -218,6 +219,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
15548 +
15549 + /*-------------------------------------------------------------------------*/
15550 +
15551 ++static void printer_dev_free(struct kref *kref)
15552 ++{
15553 ++ struct printer_dev *dev = container_of(kref, struct printer_dev, kref);
15554 ++
15555 ++ kfree(dev);
15556 ++}
15557 ++
15558 + static struct usb_request *
15559 + printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags)
15560 + {
15561 +@@ -348,6 +356,7 @@ printer_open(struct inode *inode, struct file *fd)
15562 +
15563 + spin_unlock_irqrestore(&dev->lock, flags);
15564 +
15565 ++ kref_get(&dev->kref);
15566 + DBG(dev, "printer_open returned %x\n", ret);
15567 + return ret;
15568 + }
15569 +@@ -365,6 +374,7 @@ printer_close(struct inode *inode, struct file *fd)
15570 + dev->printer_status &= ~PRINTER_SELECTED;
15571 + spin_unlock_irqrestore(&dev->lock, flags);
15572 +
15573 ++ kref_put(&dev->kref, printer_dev_free);
15574 + DBG(dev, "printer_close\n");
15575 +
15576 + return 0;
15577 +@@ -1350,7 +1360,8 @@ static void gprinter_free(struct usb_function *f)
15578 + struct f_printer_opts *opts;
15579 +
15580 + opts = container_of(f->fi, struct f_printer_opts, func_inst);
15581 +- kfree(dev);
15582 ++
15583 ++ kref_put(&dev->kref, printer_dev_free);
15584 + mutex_lock(&opts->lock);
15585 + --opts->refcnt;
15586 + mutex_unlock(&opts->lock);
15587 +@@ -1419,6 +1430,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi)
15588 + return ERR_PTR(-ENOMEM);
15589 + }
15590 +
15591 ++ kref_init(&dev->kref);
15592 + ++opts->refcnt;
15593 + dev->minor = opts->minor;
15594 + dev->pnp_string = opts->pnp_string;
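
Note on the f_printer.c hunks above: this is a use-after-free fix. The device struct used to be kfree()d in gprinter_free() even while a userspace file handle was still open. With the kref, gprinter_alloc() holds one reference, printer_open() takes another, and printer_close()/gprinter_free() each drop one; the memory is released only when the last reference goes away. The mechanism reduced to a userspace sketch (C11 atomics standing in for the kernel's kref; all names hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct printer_dev {
        atomic_int ref;
    };

    static void dev_free(struct printer_dev *dev)
    {
        printf("freeing device\n");
        free(dev);
    }

    static void dev_get(struct printer_dev *dev)
    {
        atomic_fetch_add(&dev->ref, 1);
    }

    static void dev_put(struct printer_dev *dev)
    {
        if (atomic_fetch_sub(&dev->ref, 1) == 1)
            dev_free(dev);   /* last reference dropped */
    }

    int main(void)
    {
        struct printer_dev *dev = malloc(sizeof(*dev));

        atomic_init(&dev->ref, 1);  /* creator's ref, like kref_init() */
        dev_get(dev);               /* open(): like kref_get() */
        dev_put(dev);               /* close(): not freed yet */
        dev_put(dev);               /* free path: count hits 0, freed */
        return 0;
    }
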
15595 +diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
15596 +index fbe96ef1ac7a4..891e9f7f40d59 100644
15597 +--- a/drivers/usb/gadget/function/u_ether.c
15598 ++++ b/drivers/usb/gadget/function/u_ether.c
15599 +@@ -93,7 +93,7 @@ struct eth_dev {
15600 + static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
15601 + {
15602 + if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
15603 +- gadget->speed == USB_SPEED_SUPER))
15604 ++ gadget->speed >= USB_SPEED_SUPER))
15605 + return qmult * DEFAULT_QLEN;
15606 + else
15607 + return DEFAULT_QLEN;
15608 +diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
15609 +index 3cfc6e2eba71a..e0e3cb2f6f3bc 100644
15610 +--- a/drivers/usb/gadget/function/u_serial.c
15611 ++++ b/drivers/usb/gadget/function/u_serial.c
15612 +@@ -1391,6 +1391,7 @@ void gserial_disconnect(struct gserial *gser)
15613 + if (port->port.tty)
15614 + tty_hangup(port->port.tty);
15615 + }
15616 ++ port->suspended = false;
15617 + spin_unlock_irqrestore(&port->port_lock, flags);
15618 +
15619 + /* disable endpoints, aborting down any active I/O */
15620 +diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
15621 +index 54501814dc3fd..aebe11829baa6 100644
15622 +--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
15623 ++++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
15624 +@@ -26,6 +26,7 @@
15625 + #include <linux/seq_file.h>
15626 + #include <linux/slab.h>
15627 + #include <linux/timer.h>
15628 ++#include <linux/usb.h>
15629 + #include <linux/usb/ch9.h>
15630 + #include <linux/usb/gadget.h>
15631 + #include <linux/workqueue.h>
15632 +diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
15633 +index 4de91653a2c7b..5eb62240c7f87 100644
15634 +--- a/drivers/usb/host/ohci-hcd.c
15635 ++++ b/drivers/usb/host/ohci-hcd.c
15636 +@@ -673,20 +673,24 @@ retry:
15637 +
15638 + /* handle root hub init quirks ... */
15639 + val = roothub_a (ohci);
15640 +- val &= ~(RH_A_PSM | RH_A_OCPM);
15641 ++ /* Configure for per-port over-current protection by default */
15642 ++ val &= ~RH_A_NOCP;
15643 ++ val |= RH_A_OCPM;
15644 + if (ohci->flags & OHCI_QUIRK_SUPERIO) {
15645 +- /* NSC 87560 and maybe others */
15646 ++ /* NSC 87560 and maybe others.
15647 ++ * Ganged power switching, no over-current protection.
15648 ++ */
15649 + val |= RH_A_NOCP;
15650 +- val &= ~(RH_A_POTPGT | RH_A_NPS);
15651 +- ohci_writel (ohci, val, &ohci->regs->roothub.a);
15652 ++ val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM);
15653 + } else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
15654 + (ohci->flags & OHCI_QUIRK_HUB_POWER)) {
15655 + /* hub power always on; required for AMD-756 and some
15656 +- * Mac platforms. ganged overcurrent reporting, if any.
15657 ++ * Mac platforms.
15658 + */
15659 + val |= RH_A_NPS;
15660 +- ohci_writel (ohci, val, &ohci->regs->roothub.a);
15661 + }
15662 ++ ohci_writel(ohci, val, &ohci->regs->roothub.a);
15663 ++
15664 + ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
15665 + ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
15666 + &ohci->regs->roothub.b);
15667 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
15668 +index 113ab5d3cbfe5..f665da34a8f73 100644
15669 +--- a/drivers/usb/host/xhci.c
15670 ++++ b/drivers/usb/host/xhci.c
15671 +@@ -1915,8 +1915,6 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
15672 + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
15673 + trace_xhci_add_endpoint(ep_ctx);
15674 +
15675 +- xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
15676 +-
15677 + xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
15678 + (unsigned int) ep->desc.bEndpointAddress,
15679 + udev->slot_id,
15680 +@@ -2949,6 +2947,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
15681 + xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
15682 + virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
15683 + virt_dev->eps[i].new_ring = NULL;
15684 ++ xhci_debugfs_create_endpoint(xhci, virt_dev, i);
15685 + }
15686 + command_cleanup:
15687 + kfree(command->completion);
15688 +diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
15689 +index d98843feddce0..5076d0155bc3f 100644
15690 +--- a/drivers/vfio/pci/vfio_pci_config.c
15691 ++++ b/drivers/vfio/pci/vfio_pci_config.c
15692 +@@ -406,7 +406,7 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
15693 + * PF SR-IOV capability, there's therefore no need to trigger
15694 + * faults based on the virtual value.
15695 + */
15696 +- return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
15697 ++ return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY);
15698 + }
15699 +
15700 + /*
15701 +@@ -520,8 +520,8 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
15702 +
15703 + count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
15704 +
15705 +- /* Mask in virtual memory enable for SR-IOV devices */
15706 +- if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) {
15707 ++ /* Mask in virtual memory enable */
15708 ++ if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) {
15709 + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
15710 + u32 tmp_val = le32_to_cpu(*val);
15711 +
15712 +@@ -589,9 +589,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
15713 + * shows it disabled (phys_mem/io, then the device has
15714 + * undergone some kind of backdoor reset and needs to be
15715 + * restored before we allow it to enable the bars.
15716 +- * SR-IOV devices will trigger this, but we catch them later
15717 ++	 * SR-IOV devices will trigger this - catch the mem enable case
15718 ++	 * now; the io enable case will be caught later
15719 + */
15720 +- if ((new_mem && virt_mem && !phys_mem) ||
15721 ++ if ((new_mem && virt_mem && !phys_mem &&
15722 ++ !pdev->no_command_memory) ||
15723 + (new_io && virt_io && !phys_io) ||
15724 + vfio_need_bar_restore(vdev))
15725 + vfio_bar_restore(vdev);
15726 +@@ -1734,12 +1736,14 @@ int vfio_config_init(struct vfio_pci_device *vdev)
15727 + vconfig[PCI_INTERRUPT_PIN]);
15728 +
15729 + vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
15730 +-
15731 ++ }
15732 ++ if (pdev->no_command_memory) {
15733 + /*
15734 +- * VFs do no implement the memory enable bit of the COMMAND
15735 +- * register therefore we'll not have it set in our initial
15736 +- * copy of config space after pci_enable_device(). For
15737 +- * consistency with PFs, set the virtual enable bit here.
15738 ++ * VFs and devices that set pdev->no_command_memory do not
15739 ++ * implement the memory enable bit of the COMMAND register
15740 ++ * therefore we'll not have it set in our initial copy of
15741 ++ * config space after pci_enable_device(). For consistency
15742 ++ * with PFs, set the virtual enable bit here.
15743 + */
15744 + *(__le16 *)&vconfig[PCI_COMMAND] |=
15745 + cpu_to_le16(PCI_COMMAND_MEMORY);
15746 +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
15747 +index 1d9fb25929459..869dce5f134dd 100644
15748 +--- a/drivers/vfio/pci/vfio_pci_intrs.c
15749 ++++ b/drivers/vfio/pci/vfio_pci_intrs.c
15750 +@@ -352,11 +352,13 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
15751 + vdev->ctx[vector].producer.token = trigger;
15752 + vdev->ctx[vector].producer.irq = irq;
15753 + ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
15754 +- if (unlikely(ret))
15755 ++ if (unlikely(ret)) {
15756 + dev_info(&pdev->dev,
15757 + "irq bypass producer (token %p) registration fails: %d\n",
15758 + vdev->ctx[vector].producer.token, ret);
15759 +
15760 ++ vdev->ctx[vector].producer.token = NULL;
15761 ++ }
15762 + vdev->ctx[vector].trigger = trigger;
15763 +
15764 + return 0;
15765 +diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
15766 +index 580099afeaffa..fbff5c4743c5e 100644
15767 +--- a/drivers/vfio/vfio.c
15768 ++++ b/drivers/vfio/vfio.c
15769 +@@ -1948,8 +1948,10 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
15770 + if (!group)
15771 + return -ENODEV;
15772 +
15773 +- if (group->dev_counter > 1)
15774 +- return -EINVAL;
15775 ++ if (group->dev_counter > 1) {
15776 ++ ret = -EINVAL;
15777 ++ goto err_pin_pages;
15778 ++ }
15779 +
15780 + ret = vfio_group_add_container_user(group);
15781 + if (ret)
15782 +@@ -2050,6 +2052,9 @@ int vfio_group_pin_pages(struct vfio_group *group,
15783 + if (!group || !user_iova_pfn || !phys_pfn || !npage)
15784 + return -EINVAL;
15785 +
15786 ++ if (group->dev_counter > 1)
15787 ++ return -EINVAL;
15788 ++
15789 + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
15790 + return -E2BIG;
15791 +
15792 +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
15793 +index f48f0db908a46..00d3cf12e92c3 100644
15794 +--- a/drivers/vfio/vfio_iommu_type1.c
15795 ++++ b/drivers/vfio/vfio_iommu_type1.c
15796 +@@ -693,7 +693,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
15797 +
15798 + ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
15799 + if (ret) {
15800 +- vfio_unpin_page_external(dma, iova, do_accounting);
15801 ++ if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
15802 ++ vfio_lock_acct(dma, -1, true);
15803 + goto pin_unwind;
15804 + }
15805 +
15806 +@@ -2899,7 +2900,8 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
15807 + * size
15808 + */
15809 + bitmap_set(dma->bitmap, offset >> pgshift,
15810 +- *copied >> pgshift);
15811 ++ ((offset + *copied - 1) >> pgshift) -
15812 ++ (offset >> pgshift) + 1);
15813 + }
15814 + } else
15815 + *copied = copy_from_user(data, (void __user *)vaddr,
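
Note on the vfio_iommu_type1.c bitmap hunk above: the dirty-page count must be the number of pages the copy touches, not the byte count shifted down. With 4 KiB pages (pgshift = 12), offset = 4095 and *copied = 2, the bytes land in pages 0 and 1, yet the old expression marks 2 >> 12 = 0 pages; the new one yields ((4095 + 2 - 1) >> 12) - (4095 >> 12) + 1 = 2. A two-line check (values hypothetical):

    #include <stdio.h>

    int main(void)
    {
        unsigned long offset = 4095, copied = 2, pgshift = 12;
        unsigned long old_pages = copied >> pgshift;
        unsigned long new_pages = ((offset + copied - 1) >> pgshift)
                                  - (offset >> pgshift) + 1;

        printf("old: %lu page(s), new: %lu page(s)\n",
               old_pages, new_pages);   /* old: 0, new: 2 */
        return 0;
    }
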
15816 +diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
15817 +index 2355f00f57732..1f6301375fd33 100644
15818 +--- a/drivers/video/backlight/sky81452-backlight.c
15819 ++++ b/drivers/video/backlight/sky81452-backlight.c
15820 +@@ -196,6 +196,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
15821 + num_entry);
15822 + if (ret < 0) {
15823 + dev_err(dev, "led-sources node is invalid.\n");
15824 ++ of_node_put(np);
15825 + return ERR_PTR(-EINVAL);
15826 + }
15827 +
15828 +diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
15829 +index e116a3f9ad566..687bd2c0d5040 100644
15830 +--- a/drivers/video/fbdev/aty/radeon_base.c
15831 ++++ b/drivers/video/fbdev/aty/radeon_base.c
15832 +@@ -2311,7 +2311,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
15833 +
15834 + ret = radeon_kick_out_firmware_fb(pdev);
15835 + if (ret)
15836 +- return ret;
15837 ++ goto err_release_fb;
15838 +
15839 + /* request the mem regions */
15840 + ret = pci_request_region(pdev, 0, "radeonfb framebuffer");
15841 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
15842 +index da7c88ffaa6a8..1136b569ccb7c 100644
15843 +--- a/drivers/video/fbdev/core/fbmem.c
15844 ++++ b/drivers/video/fbdev/core/fbmem.c
15845 +@@ -1006,6 +1006,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
15846 + return 0;
15847 + }
15848 +
15849 ++ /* bitfill_aligned() assumes that it's at least 8x8 */
15850 ++ if (var->xres < 8 || var->yres < 8)
15851 ++ return -EINVAL;
15852 ++
15853 + ret = info->fbops->fb_check_var(var, info);
15854 +
15855 + if (ret)
15856 +diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
15857 +index dfe3eb769638b..fde27feae5d0c 100644
15858 +--- a/drivers/video/fbdev/sis/init.c
15859 ++++ b/drivers/video/fbdev/sis/init.c
15860 +@@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
15861 +
15862 + i = 0;
15863 +
15864 ++ if (SiS_Pr->ChipType == SIS_730)
15865 ++ queuedata = &FQBQData730[0];
15866 ++ else
15867 ++ queuedata = &FQBQData[0];
15868 ++
15869 + if(ModeNo > 0x13) {
15870 +
15871 + /* Get VCLK */
15872 +@@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
15873 + /* Get half colordepth */
15874 + colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)];
15875 +
15876 +- if(SiS_Pr->ChipType == SIS_730) {
15877 +- queuedata = &FQBQData730[0];
15878 +- } else {
15879 +- queuedata = &FQBQData[0];
15880 +- }
15881 +-
15882 + do {
15883 + templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth;
15884 +
15885 +diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
15886 +index 578d3541e3d6f..1e8a38a7967d8 100644
15887 +--- a/drivers/video/fbdev/vga16fb.c
15888 ++++ b/drivers/video/fbdev/vga16fb.c
15889 +@@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info)
15890 + }
15891 +
15892 + static void vga16fb_clock_chip(struct vga16fb_par *par,
15893 +- unsigned int pixclock,
15894 ++ unsigned int *pixclock,
15895 + const struct fb_info *info,
15896 + int mul, int div)
15897 + {
15898 +@@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
15899 + { 0 /* bad */, 0x00, 0x00}};
15900 + int err;
15901 +
15902 +- pixclock = (pixclock * mul) / div;
15903 ++ *pixclock = (*pixclock * mul) / div;
15904 + best = vgaclocks;
15905 +- err = pixclock - best->pixclock;
15906 ++ err = *pixclock - best->pixclock;
15907 + if (err < 0) err = -err;
15908 + for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) {
15909 + int tmp;
15910 +
15911 +- tmp = pixclock - ptr->pixclock;
15912 ++ tmp = *pixclock - ptr->pixclock;
15913 + if (tmp < 0) tmp = -tmp;
15914 + if (tmp < err) {
15915 + err = tmp;
15916 +@@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
15917 + }
15918 + par->misc |= best->misc;
15919 + par->clkdiv = best->seq_clock_mode;
15920 +- pixclock = (best->pixclock * div) / mul;
15921 ++ *pixclock = (best->pixclock * div) / mul;
15922 + }
15923 +
15924 + #define FAIL(X) return -EINVAL
15925 +@@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var,
15926 +
15927 + if (mode & MODE_8BPP)
15928 + /* pixel clock == vga clock / 2 */
15929 +- vga16fb_clock_chip(par, var->pixclock, info, 1, 2);
15930 ++ vga16fb_clock_chip(par, &var->pixclock, info, 1, 2);
15931 + else
15932 + /* pixel clock == vga clock */
15933 +- vga16fb_clock_chip(par, var->pixclock, info, 1, 1);
15934 ++ vga16fb_clock_chip(par, &var->pixclock, info, 1, 1);
15935 +
15936 + var->red.offset = var->green.offset = var->blue.offset =
15937 + var->transp.offset = 0;
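
Note on the vga16fb.c hunks above: vga16fb_clock_chip() rounds the requested pixel clock to the nearest achievable VGA clock, but it used to write the result into its own by-value copy of pixclock, so var->pixclock in the caller never saw the rounding. Passing a pointer makes the update stick. The failure mode in isolation (numbers hypothetical):

    #include <stdio.h>

    static void round_by_value(unsigned int pixclock)
    {
        pixclock = 39722;   /* update lost when the function returns */
    }

    static void round_by_pointer(unsigned int *pixclock)
    {
        *pixclock = 39722;  /* update visible to the caller */
    }

    int main(void)
    {
        unsigned int clk = 40000;

        round_by_value(clk);
        printf("by value  : %u\n", clk);   /* still 40000 */

        round_by_pointer(&clk);
        printf("by pointer: %u\n", clk);   /* now 39722 */
        return 0;
    }
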
15938 +diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
15939 +index 1b0b11b55d2a0..46ee0a0998b6f 100644
15940 +--- a/drivers/virt/fsl_hypervisor.c
15941 ++++ b/drivers/virt/fsl_hypervisor.c
15942 +@@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
15943 +
15944 + unsigned int i;
15945 + long ret = 0;
15946 +- int num_pinned; /* return value from get_user_pages() */
15947 ++ int num_pinned = 0; /* return value from get_user_pages_fast() */
15948 + phys_addr_t remote_paddr; /* The next address in the remote buffer */
15949 + uint32_t count; /* The number of bytes left to copy */
15950 +
15951 +@@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
15952 + return -EINVAL;
15953 +
15954 + /*
15955 +- * The array of pages returned by get_user_pages() covers only
15956 ++ * The array of pages returned by get_user_pages_fast() covers only
15957 + * page-aligned memory. Since the user buffer is probably not
15958 + * page-aligned, we need to handle the discrepancy.
15959 + *
15960 +@@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
15961 +
15962 + /*
15963 + * 'pages' is an array of struct page pointers that's initialized by
15964 +- * get_user_pages().
15965 ++ * get_user_pages_fast().
15966 + */
15967 + pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
15968 + if (!pages) {
15969 +@@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
15970 + if (!sg_list_unaligned) {
15971 + pr_debug("fsl-hv: could not allocate S/G list\n");
15972 + ret = -ENOMEM;
15973 +- goto exit;
15974 ++ goto free_pages;
15975 + }
15976 + sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
15977 +
15978 +@@ -250,7 +250,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
15979 + num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
15980 +
15981 + if (num_pinned != num_pages) {
15982 +- /* get_user_pages() failed */
15983 + pr_debug("fsl-hv: could not lock source buffer\n");
15984 + ret = (num_pinned < 0) ? num_pinned : -EFAULT;
15985 + goto exit;
15986 +@@ -292,13 +291,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
15987 + virt_to_phys(sg_list), num_pages);
15988 +
15989 + exit:
15990 +- if (pages) {
15991 +- for (i = 0; i < num_pages; i++)
15992 +- if (pages[i])
15993 +- put_page(pages[i]);
15994 ++ if (pages && (num_pinned > 0)) {
15995 ++ for (i = 0; i < num_pinned; i++)
15996 ++ put_page(pages[i]);
15997 + }
15998 +
15999 + kfree(sg_list_unaligned);
16000 ++free_pages:
16001 + kfree(pages);
16002 +
16003 + if (!ret)
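
Note on the fsl_hypervisor.c hunks above: two cleanup rules are being enforced. First, put_page() is only valid for pages that were actually pinned, so the unwind now iterates over num_pinned (which get_user_pages_fast() may leave smaller than num_pages, or negative on error) instead of num_pages. Second, the new free_pages label lets the S/G-list allocation failure path free the pages array without running the pinning unwind. A generic userspace sketch of that goto-unwind shape (names hypothetical):

    #include <stdlib.h>

    static int setup(void)
    {
        int ret = 0;
        void **pages;
        void *sg_list;

        pages = calloc(16, sizeof(*pages));
        if (!pages)
            return -1;

        sg_list = malloc(64);
        if (!sg_list) {
            ret = -1;
            goto free_pages;   /* release only what was acquired */
        }

        /* ... use pages and sg_list ... */

        free(sg_list);
    free_pages:
        free(pages);
        return ret;
    }

    int main(void)
    {
        return setup() ? 1 : 0;
    }
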
16004 +diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
16005 +index 87eaf357ae01f..adf015aa4126f 100644
16006 +--- a/drivers/watchdog/sp5100_tco.h
16007 ++++ b/drivers/watchdog/sp5100_tco.h
16008 +@@ -70,7 +70,7 @@
16009 + #define EFCH_PM_DECODEEN_WDT_TMREN BIT(7)
16010 +
16011 +
16012 +-#define EFCH_PM_DECODEEN3 0x00
16013 ++#define EFCH_PM_DECODEEN3 0x03
16014 + #define EFCH_PM_DECODEEN_SECOND_RES GENMASK(1, 0)
16015 + #define EFCH_PM_WATCHDOG_DISABLE ((u8)GENMASK(3, 2))
16016 +
16017 +diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
16018 +index b535f5fa279b9..c2065615fd6ca 100644
16019 +--- a/drivers/watchdog/watchdog_dev.c
16020 ++++ b/drivers/watchdog/watchdog_dev.c
16021 +@@ -991,8 +991,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
16022 + wd_data->wdd = wdd;
16023 + wdd->wd_data = wd_data;
16024 +
16025 +- if (IS_ERR_OR_NULL(watchdog_kworker))
16026 ++ if (IS_ERR_OR_NULL(watchdog_kworker)) {
16027 ++ kfree(wd_data);
16028 + return -ENODEV;
16029 ++ }
16030 +
16031 + device_initialize(&wd_data->dev);
16032 + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
16033 +@@ -1018,7 +1020,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
16034 + pr_err("%s: a legacy watchdog module is probably present.\n",
16035 + wdd->info->identity);
16036 + old_wd_data = NULL;
16037 +- kfree(wd_data);
16038 ++ put_device(&wd_data->dev);
16039 + return err;
16040 + }
16041 + }
16042 +diff --git a/fs/afs/cell.c b/fs/afs/cell.c
16043 +index 5b79cdceefa0f..bc7ed46aaca9f 100644
16044 +--- a/fs/afs/cell.c
16045 ++++ b/fs/afs/cell.c
16046 +@@ -19,7 +19,8 @@ static unsigned __read_mostly afs_cell_gc_delay = 10;
16047 + static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
16048 + static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
16049 +
16050 +-static void afs_manage_cell(struct work_struct *);
16051 ++static void afs_queue_cell_manager(struct afs_net *);
16052 ++static void afs_manage_cell_work(struct work_struct *);
16053 +
16054 + static void afs_dec_cells_outstanding(struct afs_net *net)
16055 + {
16056 +@@ -37,19 +38,21 @@ static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
16057 + atomic_inc(&net->cells_outstanding);
16058 + if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
16059 + afs_dec_cells_outstanding(net);
16060 ++ } else {
16061 ++ afs_queue_cell_manager(net);
16062 + }
16063 + }
16064 +
16065 + /*
16066 +- * Look up and get an activation reference on a cell record under RCU
16067 +- * conditions. The caller must hold the RCU read lock.
16068 ++ * Look up and get an activation reference on a cell record. The caller must
16069 ++ * hold net->cells_lock at least read-locked.
16070 + */
16071 +-struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
16072 +- const char *name, unsigned int namesz)
16073 ++static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
16074 ++ const char *name, unsigned int namesz)
16075 + {
16076 + struct afs_cell *cell = NULL;
16077 + struct rb_node *p;
16078 +- int n, seq = 0, ret = 0;
16079 ++ int n;
16080 +
16081 + _enter("%*.*s", namesz, namesz, name);
16082 +
16083 +@@ -58,61 +61,47 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
16084 + if (namesz > AFS_MAXCELLNAME)
16085 + return ERR_PTR(-ENAMETOOLONG);
16086 +
16087 +- do {
16088 +- /* Unfortunately, rbtree walking doesn't give reliable results
16089 +- * under just the RCU read lock, so we have to check for
16090 +- * changes.
16091 +- */
16092 +- if (cell)
16093 +- afs_put_cell(net, cell);
16094 +- cell = NULL;
16095 +- ret = -ENOENT;
16096 +-
16097 +- read_seqbegin_or_lock(&net->cells_lock, &seq);
16098 +-
16099 +- if (!name) {
16100 +- cell = rcu_dereference_raw(net->ws_cell);
16101 +- if (cell) {
16102 +- afs_get_cell(cell);
16103 +- ret = 0;
16104 +- break;
16105 +- }
16106 +- ret = -EDESTADDRREQ;
16107 +- continue;
16108 +- }
16109 ++ if (!name) {
16110 ++ cell = net->ws_cell;
16111 ++ if (!cell)
16112 ++ return ERR_PTR(-EDESTADDRREQ);
16113 ++ goto found;
16114 ++ }
16115 +
16116 +- p = rcu_dereference_raw(net->cells.rb_node);
16117 +- while (p) {
16118 +- cell = rb_entry(p, struct afs_cell, net_node);
16119 +-
16120 +- n = strncasecmp(cell->name, name,
16121 +- min_t(size_t, cell->name_len, namesz));
16122 +- if (n == 0)
16123 +- n = cell->name_len - namesz;
16124 +- if (n < 0) {
16125 +- p = rcu_dereference_raw(p->rb_left);
16126 +- } else if (n > 0) {
16127 +- p = rcu_dereference_raw(p->rb_right);
16128 +- } else {
16129 +- if (atomic_inc_not_zero(&cell->usage)) {
16130 +- ret = 0;
16131 +- break;
16132 +- }
16133 +- /* We want to repeat the search, this time with
16134 +- * the lock properly locked.
16135 +- */
16136 +- }
16137 +- cell = NULL;
16138 +- }
16139 ++ p = net->cells.rb_node;
16140 ++ while (p) {
16141 ++ cell = rb_entry(p, struct afs_cell, net_node);
16142 ++
16143 ++ n = strncasecmp(cell->name, name,
16144 ++ min_t(size_t, cell->name_len, namesz));
16145 ++ if (n == 0)
16146 ++ n = cell->name_len - namesz;
16147 ++ if (n < 0)
16148 ++ p = p->rb_left;
16149 ++ else if (n > 0)
16150 ++ p = p->rb_right;
16151 ++ else
16152 ++ goto found;
16153 ++ }
16154 +
16155 +- } while (need_seqretry(&net->cells_lock, seq));
16156 ++ return ERR_PTR(-ENOENT);
16157 +
16158 +- done_seqretry(&net->cells_lock, seq);
16159 ++found:
16160 ++ return afs_use_cell(cell);
16161 ++}
16162 +
16163 +- if (ret != 0 && cell)
16164 +- afs_put_cell(net, cell);
16165 ++/*
16166 ++ * Look up and get an activation reference on a cell record.
16167 ++ */
16168 ++struct afs_cell *afs_find_cell(struct afs_net *net,
16169 ++ const char *name, unsigned int namesz)
16170 ++{
16171 ++ struct afs_cell *cell;
16172 +
16173 +- return ret == 0 ? cell : ERR_PTR(ret);
16174 ++ down_read(&net->cells_lock);
16175 ++ cell = afs_find_cell_locked(net, name, namesz);
16176 ++ up_read(&net->cells_lock);
16177 ++ return cell;
16178 + }
16179 +
16180 + /*
16181 +@@ -166,8 +155,9 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
16182 + cell->name[i] = tolower(name[i]);
16183 + cell->name[i] = 0;
16184 +
16185 +- atomic_set(&cell->usage, 2);
16186 +- INIT_WORK(&cell->manager, afs_manage_cell);
16187 ++ atomic_set(&cell->ref, 1);
16188 ++ atomic_set(&cell->active, 0);
16189 ++ INIT_WORK(&cell->manager, afs_manage_cell_work);
16190 + cell->volumes = RB_ROOT;
16191 + INIT_HLIST_HEAD(&cell->proc_volumes);
16192 + seqlock_init(&cell->volume_lock);
16193 +@@ -206,6 +196,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
16194 + cell->dns_source = vllist->source;
16195 + cell->dns_status = vllist->status;
16196 + smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
16197 ++ atomic_inc(&net->cells_outstanding);
16198 +
16199 + _leave(" = %p", cell);
16200 + return cell;
16201 +@@ -245,9 +236,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
16202 + _enter("%s,%s", name, vllist);
16203 +
16204 + if (!excl) {
16205 +- rcu_read_lock();
16206 +- cell = afs_lookup_cell_rcu(net, name, namesz);
16207 +- rcu_read_unlock();
16208 ++ cell = afs_find_cell(net, name, namesz);
16209 + if (!IS_ERR(cell))
16210 + goto wait_for_cell;
16211 + }
16212 +@@ -268,7 +257,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
16213 + /* Find the insertion point and check to see if someone else added a
16214 + * cell whilst we were allocating.
16215 + */
16216 +- write_seqlock(&net->cells_lock);
16217 ++ down_write(&net->cells_lock);
16218 +
16219 + pp = &net->cells.rb_node;
16220 + parent = NULL;
16221 +@@ -290,23 +279,23 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
16222 +
16223 + cell = candidate;
16224 + candidate = NULL;
16225 ++ atomic_set(&cell->active, 2);
16226 + rb_link_node_rcu(&cell->net_node, parent, pp);
16227 + rb_insert_color(&cell->net_node, &net->cells);
16228 +- atomic_inc(&net->cells_outstanding);
16229 +- write_sequnlock(&net->cells_lock);
16230 ++ up_write(&net->cells_lock);
16231 +
16232 +- queue_work(afs_wq, &cell->manager);
16233 ++ afs_queue_cell(cell);
16234 +
16235 + wait_for_cell:
16236 + _debug("wait_for_cell");
16237 + wait_var_event(&cell->state,
16238 + ({
16239 + state = smp_load_acquire(&cell->state); /* vs error */
16240 +- state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED;
16241 ++ state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
16242 + }));
16243 +
16244 + /* Check the state obtained from the wait check. */
16245 +- if (state == AFS_CELL_FAILED) {
16246 ++ if (state == AFS_CELL_REMOVED) {
16247 + ret = cell->error;
16248 + goto error;
16249 + }
16250 +@@ -320,16 +309,17 @@ cell_already_exists:
16251 + if (excl) {
16252 + ret = -EEXIST;
16253 + } else {
16254 +- afs_get_cell(cursor);
16255 ++ afs_use_cell(cursor);
16256 + ret = 0;
16257 + }
16258 +- write_sequnlock(&net->cells_lock);
16259 +- kfree(candidate);
16260 ++ up_write(&net->cells_lock);
16261 ++ if (candidate)
16262 ++ afs_put_cell(candidate);
16263 + if (ret == 0)
16264 + goto wait_for_cell;
16265 + goto error_noput;
16266 + error:
16267 +- afs_put_cell(net, cell);
16268 ++ afs_unuse_cell(net, cell);
16269 + error_noput:
16270 + _leave(" = %d [error]", ret);
16271 + return ERR_PTR(ret);
16272 +@@ -374,15 +364,15 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
16273 + }
16274 +
16275 + if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
16276 +- afs_get_cell(new_root);
16277 ++ afs_use_cell(new_root);
16278 +
16279 + /* install the new cell */
16280 +- write_seqlock(&net->cells_lock);
16281 +- old_root = rcu_access_pointer(net->ws_cell);
16282 +- rcu_assign_pointer(net->ws_cell, new_root);
16283 +- write_sequnlock(&net->cells_lock);
16284 ++ down_write(&net->cells_lock);
16285 ++ old_root = net->ws_cell;
16286 ++ net->ws_cell = new_root;
16287 ++ up_write(&net->cells_lock);
16288 +
16289 +- afs_put_cell(net, old_root);
16290 ++ afs_unuse_cell(net, old_root);
16291 + _leave(" = 0");
16292 + return 0;
16293 + }
16294 +@@ -488,18 +478,21 @@ out_wake:
16295 + static void afs_cell_destroy(struct rcu_head *rcu)
16296 + {
16297 + struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
16298 ++ struct afs_net *net = cell->net;
16299 ++ int u;
16300 +
16301 + _enter("%p{%s}", cell, cell->name);
16302 +
16303 +- ASSERTCMP(atomic_read(&cell->usage), ==, 0);
16304 ++ u = atomic_read(&cell->ref);
16305 ++ ASSERTCMP(u, ==, 0);
16306 +
16307 +- afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
16308 +- afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
16309 +- afs_put_cell(cell->net, cell->alias_of);
16310 ++ afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
16311 ++ afs_unuse_cell(net, cell->alias_of);
16312 + key_put(cell->anonymous_key);
16313 + kfree(cell->name);
16314 + kfree(cell);
16315 +
16316 ++ afs_dec_cells_outstanding(net);
16317 + _leave(" [destroyed]");
16318 + }
16319 +
16320 +@@ -534,16 +527,50 @@ void afs_cells_timer(struct timer_list *timer)
16321 + */
16322 + struct afs_cell *afs_get_cell(struct afs_cell *cell)
16323 + {
16324 +- atomic_inc(&cell->usage);
16325 ++ if (atomic_read(&cell->ref) <= 0)
16326 ++ BUG();
16327 ++
16328 ++ atomic_inc(&cell->ref);
16329 + return cell;
16330 + }
16331 +
16332 + /*
16333 + * Drop a reference on a cell record.
16334 + */
16335 +-void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
16336 ++void afs_put_cell(struct afs_cell *cell)
16337 ++{
16338 ++ if (cell) {
16339 ++ unsigned int u, a;
16340 ++
16341 ++ u = atomic_dec_return(&cell->ref);
16342 ++ if (u == 0) {
16343 ++ a = atomic_read(&cell->active);
16344 ++ WARN(a != 0, "Cell active count %u > 0\n", a);
16345 ++ call_rcu(&cell->rcu, afs_cell_destroy);
16346 ++ }
16347 ++ }
16348 ++}
16349 ++
16350 ++/*
16351 ++ * Note a cell becoming more active.
16352 ++ */
16353 ++struct afs_cell *afs_use_cell(struct afs_cell *cell)
16354 ++{
16355 ++ if (atomic_read(&cell->ref) <= 0)
16356 ++ BUG();
16357 ++
16358 ++ atomic_inc(&cell->active);
16359 ++ return cell;
16360 ++}
16361 ++
16362 ++/*
16363 ++ * Record a cell becoming less active. When the active counter reaches 1, it
16364 ++ * is scheduled for destruction, but may get reactivated.
16365 ++ */
16366 ++void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell)
16367 + {
16368 + time64_t now, expire_delay;
16369 ++ int a;
16370 +
16371 + if (!cell)
16372 + return;
16373 +@@ -556,11 +583,21 @@ void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
16374 + if (cell->vl_servers->nr_servers)
16375 + expire_delay = afs_cell_gc_delay;
16376 +
16377 +- if (atomic_dec_return(&cell->usage) > 1)
16378 +- return;
16379 ++ a = atomic_dec_return(&cell->active);
16380 ++ WARN_ON(a == 0);
16381 ++ if (a == 1)
16382 ++ /* 'cell' may now be garbage collected. */
16383 ++ afs_set_cell_timer(net, expire_delay);
16384 ++}
16385 +
16386 +- /* 'cell' may now be garbage collected. */
16387 +- afs_set_cell_timer(net, expire_delay);
16388 ++/*
16389 ++ * Queue a cell for management, giving the workqueue a ref to hold.
16390 ++ */
16391 ++void afs_queue_cell(struct afs_cell *cell)
16392 ++{
16393 ++ afs_get_cell(cell);
16394 ++ if (!queue_work(afs_wq, &cell->manager))
16395 ++ afs_put_cell(cell);
16396 + }
16397 +
16398 + /*
16399 +@@ -660,12 +697,10 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
16400 + * Manage a cell record, initialising and destroying it, maintaining its DNS
16401 + * records.
16402 + */
16403 +-static void afs_manage_cell(struct work_struct *work)
16404 ++static void afs_manage_cell(struct afs_cell *cell)
16405 + {
16406 +- struct afs_cell *cell = container_of(work, struct afs_cell, manager);
16407 + struct afs_net *net = cell->net;
16408 +- bool deleted;
16409 +- int ret, usage;
16410 ++ int ret, active;
16411 +
16412 + _enter("%s", cell->name);
16413 +
16414 +@@ -674,14 +709,17 @@ again:
16415 + switch (cell->state) {
16416 + case AFS_CELL_INACTIVE:
16417 + case AFS_CELL_FAILED:
16418 +- write_seqlock(&net->cells_lock);
16419 +- usage = 1;
16420 +- deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
16421 +- if (deleted)
16422 ++ down_write(&net->cells_lock);
16423 ++ active = 1;
16424 ++ if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
16425 + rb_erase(&cell->net_node, &net->cells);
16426 +- write_sequnlock(&net->cells_lock);
16427 +- if (deleted)
16428 ++ smp_store_release(&cell->state, AFS_CELL_REMOVED);
16429 ++ }
16430 ++ up_write(&net->cells_lock);
16431 ++ if (cell->state == AFS_CELL_REMOVED) {
16432 ++ wake_up_var(&cell->state);
16433 + goto final_destruction;
16434 ++ }
16435 + if (cell->state == AFS_CELL_FAILED)
16436 + goto done;
16437 + smp_store_release(&cell->state, AFS_CELL_UNSET);
16438 +@@ -703,7 +741,7 @@ again:
16439 + goto again;
16440 +
16441 + case AFS_CELL_ACTIVE:
16442 +- if (atomic_read(&cell->usage) > 1) {
16443 ++ if (atomic_read(&cell->active) > 1) {
16444 + if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
16445 + ret = afs_update_cell(cell);
16446 + if (ret < 0)
16447 +@@ -716,13 +754,16 @@ again:
16448 + goto again;
16449 +
16450 + case AFS_CELL_DEACTIVATING:
16451 +- if (atomic_read(&cell->usage) > 1)
16452 ++ if (atomic_read(&cell->active) > 1)
16453 + goto reverse_deactivation;
16454 + afs_deactivate_cell(net, cell);
16455 + smp_store_release(&cell->state, AFS_CELL_INACTIVE);
16456 + wake_up_var(&cell->state);
16457 + goto again;
16458 +
16459 ++ case AFS_CELL_REMOVED:
16460 ++ goto done;
16461 ++
16462 + default:
16463 + break;
16464 + }
16465 +@@ -748,9 +789,18 @@ done:
16466 + return;
16467 +
16468 + final_destruction:
16469 +- call_rcu(&cell->rcu, afs_cell_destroy);
16470 +- afs_dec_cells_outstanding(net);
16471 +- _leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
16472 ++ /* The root volume is pinning the cell */
16473 ++ afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
16474 ++ cell->root_volume = NULL;
16475 ++ afs_put_cell(cell);
16476 ++}
16477 ++
16478 ++static void afs_manage_cell_work(struct work_struct *work)
16479 ++{
16480 ++ struct afs_cell *cell = container_of(work, struct afs_cell, manager);
16481 ++
16482 ++ afs_manage_cell(cell);
16483 ++ afs_put_cell(cell);
16484 + }
16485 +
16486 + /*
16487 +@@ -779,26 +829,25 @@ void afs_manage_cells(struct work_struct *work)
16488 + * lack of use and cells whose DNS results have expired and dispatch
16489 + * their managers.
16490 + */
16491 +- read_seqlock_excl(&net->cells_lock);
16492 ++ down_read(&net->cells_lock);
16493 +
16494 + for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
16495 + struct afs_cell *cell =
16496 + rb_entry(cursor, struct afs_cell, net_node);
16497 +- unsigned usage;
16498 ++ unsigned active;
16499 + bool sched_cell = false;
16500 +
16501 +- usage = atomic_read(&cell->usage);
16502 +- _debug("manage %s %u", cell->name, usage);
16503 ++ active = atomic_read(&cell->active);
16504 ++ _debug("manage %s %u %u", cell->name, atomic_read(&cell->ref), active);
16505 +
16506 +- ASSERTCMP(usage, >=, 1);
16507 ++ ASSERTCMP(active, >=, 1);
16508 +
16509 + if (purging) {
16510 + if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
16511 +- usage = atomic_dec_return(&cell->usage);
16512 +- ASSERTCMP(usage, ==, 1);
16513 ++ atomic_dec(&cell->active);
16514 + }
16515 +
16516 +- if (usage == 1) {
16517 ++ if (active == 1) {
16518 + struct afs_vlserver_list *vllist;
16519 + time64_t expire_at = cell->last_inactive;
16520 +
16521 +@@ -821,10 +870,10 @@ void afs_manage_cells(struct work_struct *work)
16522 + }
16523 +
16524 + if (sched_cell)
16525 +- queue_work(afs_wq, &cell->manager);
16526 ++ afs_queue_cell(cell);
16527 + }
16528 +
16529 +- read_sequnlock_excl(&net->cells_lock);
16530 ++ up_read(&net->cells_lock);
16531 +
16532 + /* Update the timer on the way out. We have to pass an increment on
16533 + * cells_outstanding in the namespace that we are in to the timer or
16534 +@@ -854,11 +903,11 @@ void afs_cell_purge(struct afs_net *net)
16535 +
16536 + _enter("");
16537 +
16538 +- write_seqlock(&net->cells_lock);
16539 +- ws = rcu_access_pointer(net->ws_cell);
16540 +- RCU_INIT_POINTER(net->ws_cell, NULL);
16541 +- write_sequnlock(&net->cells_lock);
16542 +- afs_put_cell(net, ws);
16543 ++ down_write(&net->cells_lock);
16544 ++ ws = net->ws_cell;
16545 ++ net->ws_cell = NULL;
16546 ++ up_write(&net->cells_lock);
16547 ++ afs_unuse_cell(net, ws);
16548 +
16549 + _debug("del timer");
16550 + if (del_timer_sync(&net->cells_timer))
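The fs/afs/cell.c rework above replaces the single 'usage' counter with two counters: 'ref' pins the memory and 'active' records whether anyone still needs the cell managed, with the cells_lock seqlock becoming a plain rw_semaphore now that lookups take a real reference. A simplified, single-threaded userspace sketch of the two-counter idea (C11 stdatomic stands in for the kernel's atomic_t; none of this is the afs code itself):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cell {
    atomic_int ref;     /* struct lifetime */
    atomic_int active;  /* activity, drives garbage collection */
};

static struct cell *cell_get(struct cell *c)
{
    atomic_fetch_add(&c->ref, 1);
    return c;
}

static struct cell *cell_use(struct cell *c)
{
    atomic_fetch_add(&c->active, 1);
    return c;
}

static void cell_put(struct cell *c)
{
    if (atomic_fetch_sub(&c->ref, 1) == 1) {
        puts("last ref gone: free the memory");
        free(c);
    }
}

static void cell_unuse(struct cell *c)
{
    if (atomic_fetch_sub(&c->active, 1) == 2)
        puts("active fell to 1: GC candidate, memory still pinned");
}

int main(void)
{
    struct cell *c = malloc(sizeof(*c));
    if (!c)
        return 1;
    atomic_init(&c->ref, 1);
    atomic_init(&c->active, 1);

    cell_use(cell_get(c));  /* a user takes both counts */
    cell_unuse(c);          /* done using: becomes a GC candidate */
    cell_put(c);            /* drop the user's ref */
    cell_put(c);            /* drop the original ref: freed */
    return 0;
}

The payoff shows in afs_manage_cell(): the decision to erase a cell only has to race against 'active', while 'ref' guarantees the structure stays valid for whoever is still looking at it.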
16551 +diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
16552 +index 7b784af604fd9..da32797dd4257 100644
16553 +--- a/fs/afs/dynroot.c
16554 ++++ b/fs/afs/dynroot.c
16555 +@@ -123,9 +123,9 @@ static int afs_probe_cell_name(struct dentry *dentry)
16556 + len--;
16557 + }
16558 +
16559 +- cell = afs_lookup_cell_rcu(net, name, len);
16560 ++ cell = afs_find_cell(net, name, len);
16561 + if (!IS_ERR(cell)) {
16562 +- afs_put_cell(net, cell);
16563 ++ afs_unuse_cell(net, cell);
16564 + return 0;
16565 + }
16566 +
16567 +@@ -179,7 +179,6 @@ static struct dentry *afs_lookup_atcell(struct dentry *dentry)
16568 + struct afs_cell *cell;
16569 + struct afs_net *net = afs_d2net(dentry);
16570 + struct dentry *ret;
16571 +- unsigned int seq = 0;
16572 + char *name;
16573 + int len;
16574 +
16575 +@@ -191,17 +190,13 @@ static struct dentry *afs_lookup_atcell(struct dentry *dentry)
16576 + if (!name)
16577 + goto out_p;
16578 +
16579 +- rcu_read_lock();
16580 +- do {
16581 +- read_seqbegin_or_lock(&net->cells_lock, &seq);
16582 +- cell = rcu_dereference_raw(net->ws_cell);
16583 +- if (cell) {
16584 +- len = cell->name_len;
16585 +- memcpy(name, cell->name, len + 1);
16586 +- }
16587 +- } while (need_seqretry(&net->cells_lock, seq));
16588 +- done_seqretry(&net->cells_lock, seq);
16589 +- rcu_read_unlock();
16590 ++ down_read(&net->cells_lock);
16591 ++ cell = net->ws_cell;
16592 ++ if (cell) {
16593 ++ len = cell->name_len;
16594 ++ memcpy(name, cell->name, len + 1);
16595 ++ }
16596 ++ up_read(&net->cells_lock);
16597 +
16598 + ret = ERR_PTR(-ENOENT);
16599 + if (!cell)
16600 +diff --git a/fs/afs/internal.h b/fs/afs/internal.h
16601 +index e1ebead2e505a..7689f4535ef9c 100644
16602 +--- a/fs/afs/internal.h
16603 ++++ b/fs/afs/internal.h
16604 +@@ -263,11 +263,11 @@ struct afs_net {
16605 +
16606 + /* Cell database */
16607 + struct rb_root cells;
16608 +- struct afs_cell __rcu *ws_cell;
16609 ++ struct afs_cell *ws_cell;
16610 + struct work_struct cells_manager;
16611 + struct timer_list cells_timer;
16612 + atomic_t cells_outstanding;
16613 +- seqlock_t cells_lock;
16614 ++ struct rw_semaphore cells_lock;
16615 + struct mutex cells_alias_lock;
16616 +
16617 + struct mutex proc_cells_lock;
16618 +@@ -326,6 +326,7 @@ enum afs_cell_state {
16619 + AFS_CELL_DEACTIVATING,
16620 + AFS_CELL_INACTIVE,
16621 + AFS_CELL_FAILED,
16622 ++ AFS_CELL_REMOVED,
16623 + };
16624 +
16625 + /*
16626 +@@ -363,7 +364,8 @@ struct afs_cell {
16627 + #endif
16628 + time64_t dns_expiry; /* Time AFSDB/SRV record expires */
16629 + time64_t last_inactive; /* Time of last drop of usage count */
16630 +- atomic_t usage;
16631 ++ atomic_t ref; /* Struct refcount */
16632 ++ atomic_t active; /* Active usage counter */
16633 + unsigned long flags;
16634 + #define AFS_CELL_FL_NO_GC 0 /* The cell was added manually, don't auto-gc */
16635 + #define AFS_CELL_FL_DO_LOOKUP 1 /* DNS lookup requested */
16636 +@@ -915,11 +917,14 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
16637 + * cell.c
16638 + */
16639 + extern int afs_cell_init(struct afs_net *, const char *);
16640 +-extern struct afs_cell *afs_lookup_cell_rcu(struct afs_net *, const char *, unsigned);
16641 ++extern struct afs_cell *afs_find_cell(struct afs_net *, const char *, unsigned);
16642 + extern struct afs_cell *afs_lookup_cell(struct afs_net *, const char *, unsigned,
16643 + const char *, bool);
16644 ++extern struct afs_cell *afs_use_cell(struct afs_cell *);
16645 ++extern void afs_unuse_cell(struct afs_net *, struct afs_cell *);
16646 + extern struct afs_cell *afs_get_cell(struct afs_cell *);
16647 +-extern void afs_put_cell(struct afs_net *, struct afs_cell *);
16648 ++extern void afs_put_cell(struct afs_cell *);
16649 ++extern void afs_queue_cell(struct afs_cell *);
16650 + extern void afs_manage_cells(struct work_struct *);
16651 + extern void afs_cells_timer(struct timer_list *);
16652 + extern void __net_exit afs_cell_purge(struct afs_net *);
16653 +diff --git a/fs/afs/main.c b/fs/afs/main.c
16654 +index 31b472f7c734c..accdd8970e7c0 100644
16655 +--- a/fs/afs/main.c
16656 ++++ b/fs/afs/main.c
16657 +@@ -78,7 +78,7 @@ static int __net_init afs_net_init(struct net *net_ns)
16658 + mutex_init(&net->socket_mutex);
16659 +
16660 + net->cells = RB_ROOT;
16661 +- seqlock_init(&net->cells_lock);
16662 ++ init_rwsem(&net->cells_lock);
16663 + INIT_WORK(&net->cells_manager, afs_manage_cells);
16664 + timer_setup(&net->cells_timer, afs_cells_timer, 0);
16665 +
16666 +diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
16667 +index 79bc5f1338edf..c69a0282960cc 100644
16668 +--- a/fs/afs/mntpt.c
16669 ++++ b/fs/afs/mntpt.c
16670 +@@ -88,7 +88,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
16671 + ctx->force = true;
16672 + }
16673 + if (ctx->cell) {
16674 +- afs_put_cell(ctx->net, ctx->cell);
16675 ++ afs_unuse_cell(ctx->net, ctx->cell);
16676 + ctx->cell = NULL;
16677 + }
16678 + if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
16679 +@@ -124,7 +124,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
16680 + char *buf;
16681 +
16682 + if (src_as->cell)
16683 +- ctx->cell = afs_get_cell(src_as->cell);
16684 ++ ctx->cell = afs_use_cell(src_as->cell);
16685 +
16686 + if (size < 2 || size > PAGE_SIZE - 1)
16687 + return -EINVAL;
16688 +diff --git a/fs/afs/proc.c b/fs/afs/proc.c
16689 +index e817fc740ba01..855d7358933b4 100644
16690 +--- a/fs/afs/proc.c
16691 ++++ b/fs/afs/proc.c
16692 +@@ -38,7 +38,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
16693 +
16694 + if (v == SEQ_START_TOKEN) {
16695 + /* display header on line 1 */
16696 +- seq_puts(m, "USE TTL SV ST NAME\n");
16697 ++ seq_puts(m, "USE ACT TTL SV ST NAME\n");
16698 + return 0;
16699 + }
16700 +
16701 +@@ -46,10 +46,11 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
16702 + vllist = rcu_dereference(cell->vl_servers);
16703 +
16704 + /* display one cell per line on subsequent lines */
16705 +- seq_printf(m, "%3u %6lld %2u %2u %s\n",
16706 +- atomic_read(&cell->usage),
16707 ++ seq_printf(m, "%3u %3u %6lld %2u %2u %s\n",
16708 ++ atomic_read(&cell->ref),
16709 ++ atomic_read(&cell->active),
16710 + cell->dns_expiry - ktime_get_real_seconds(),
16711 +- vllist->nr_servers,
16712 ++ vllist ? vllist->nr_servers : 0,
16713 + cell->state,
16714 + cell->name);
16715 + return 0;
16716 +@@ -128,7 +129,7 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
16717 + }
16718 +
16719 + if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
16720 +- afs_put_cell(net, cell);
16721 ++ afs_unuse_cell(net, cell);
16722 + } else {
16723 + goto inval;
16724 + }
16725 +@@ -154,13 +155,11 @@ static int afs_proc_rootcell_show(struct seq_file *m, void *v)
16726 + struct afs_net *net;
16727 +
16728 + net = afs_seq2net_single(m);
16729 +- if (rcu_access_pointer(net->ws_cell)) {
16730 +- rcu_read_lock();
16731 +- cell = rcu_dereference(net->ws_cell);
16732 +- if (cell)
16733 +- seq_printf(m, "%s\n", cell->name);
16734 +- rcu_read_unlock();
16735 +- }
16736 ++ down_read(&net->cells_lock);
16737 ++ cell = net->ws_cell;
16738 ++ if (cell)
16739 ++ seq_printf(m, "%s\n", cell->name);
16740 ++ up_read(&net->cells_lock);
16741 + return 0;
16742 + }
16743 +
16744 +diff --git a/fs/afs/super.c b/fs/afs/super.c
16745 +index b552357b1d137..e72c223f831d2 100644
16746 +--- a/fs/afs/super.c
16747 ++++ b/fs/afs/super.c
16748 +@@ -294,7 +294,7 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param)
16749 + cellnamesz, cellnamesz, cellname ?: "");
16750 + return PTR_ERR(cell);
16751 + }
16752 +- afs_put_cell(ctx->net, ctx->cell);
16753 ++ afs_unuse_cell(ctx->net, ctx->cell);
16754 + ctx->cell = cell;
16755 + }
16756 +
16757 +@@ -389,8 +389,8 @@ static int afs_validate_fc(struct fs_context *fc)
16758 + _debug("switch to alias");
16759 + key_put(ctx->key);
16760 + ctx->key = NULL;
16761 +- cell = afs_get_cell(ctx->cell->alias_of);
16762 +- afs_put_cell(ctx->net, ctx->cell);
16763 ++ cell = afs_use_cell(ctx->cell->alias_of);
16764 ++ afs_unuse_cell(ctx->net, ctx->cell);
16765 + ctx->cell = cell;
16766 + goto reget_key;
16767 + }
16768 +@@ -508,7 +508,7 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
16769 + if (ctx->dyn_root) {
16770 + as->dyn_root = true;
16771 + } else {
16772 +- as->cell = afs_get_cell(ctx->cell);
16773 ++ as->cell = afs_use_cell(ctx->cell);
16774 + as->volume = afs_get_volume(ctx->volume,
16775 + afs_volume_trace_get_alloc_sbi);
16776 + }
16777 +@@ -521,7 +521,7 @@ static void afs_destroy_sbi(struct afs_super_info *as)
16778 + if (as) {
16779 + struct afs_net *net = afs_net(as->net_ns);
16780 + afs_put_volume(net, as->volume, afs_volume_trace_put_destroy_sbi);
16781 +- afs_put_cell(net, as->cell);
16782 ++ afs_unuse_cell(net, as->cell);
16783 + put_net(as->net_ns);
16784 + kfree(as);
16785 + }
16786 +@@ -607,7 +607,7 @@ static void afs_free_fc(struct fs_context *fc)
16787 +
16788 + afs_destroy_sbi(fc->s_fs_info);
16789 + afs_put_volume(ctx->net, ctx->volume, afs_volume_trace_put_free_fc);
16790 +- afs_put_cell(ctx->net, ctx->cell);
16791 ++ afs_unuse_cell(ctx->net, ctx->cell);
16792 + key_put(ctx->key);
16793 + kfree(ctx);
16794 + }
16795 +@@ -634,9 +634,7 @@ static int afs_init_fs_context(struct fs_context *fc)
16796 + ctx->net = afs_net(fc->net_ns);
16797 +
16798 + /* Default to the workstation cell. */
16799 +- rcu_read_lock();
16800 +- cell = afs_lookup_cell_rcu(ctx->net, NULL, 0);
16801 +- rcu_read_unlock();
16802 ++ cell = afs_find_cell(ctx->net, NULL, 0);
16803 + if (IS_ERR(cell))
16804 + cell = NULL;
16805 + ctx->cell = cell;
16806 +diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
16807 +index 5082ef04e99c5..ddb4cb67d0fd9 100644
16808 +--- a/fs/afs/vl_alias.c
16809 ++++ b/fs/afs/vl_alias.c
16810 +@@ -177,7 +177,7 @@ static int afs_compare_cell_roots(struct afs_cell *cell)
16811 +
16812 + is_alias:
16813 + rcu_read_unlock();
16814 +- cell->alias_of = afs_get_cell(p);
16815 ++ cell->alias_of = afs_use_cell(p);
16816 + return 1;
16817 + }
16818 +
16819 +@@ -247,18 +247,18 @@ static int afs_query_for_alias(struct afs_cell *cell, struct key *key)
16820 + continue;
16821 + if (p->root_volume)
16822 + continue; /* Ignore cells that have a root.cell volume. */
16823 +- afs_get_cell(p);
16824 ++ afs_use_cell(p);
16825 + mutex_unlock(&cell->net->proc_cells_lock);
16826 +
16827 + if (afs_query_for_alias_one(cell, key, p) != 0)
16828 + goto is_alias;
16829 +
16830 + if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) {
16831 +- afs_put_cell(cell->net, p);
16832 ++ afs_unuse_cell(cell->net, p);
16833 + return -ERESTARTSYS;
16834 + }
16835 +
16836 +- afs_put_cell(cell->net, p);
16837 ++ afs_unuse_cell(cell->net, p);
16838 + }
16839 +
16840 + mutex_unlock(&cell->net->proc_cells_lock);
16841 +diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
16842 +index f405ca8b240a5..750bd1579f212 100644
16843 +--- a/fs/afs/vl_rotate.c
16844 ++++ b/fs/afs/vl_rotate.c
16845 +@@ -45,7 +45,7 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
16846 + cell->dns_expiry <= ktime_get_real_seconds()) {
16847 + dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
16848 + set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
16849 +- queue_work(afs_wq, &cell->manager);
16850 ++ afs_queue_cell(cell);
16851 +
16852 + if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
16853 + if (wait_var_event_interruptible(
16854 +diff --git a/fs/afs/volume.c b/fs/afs/volume.c
16855 +index 9bc0509e3634c..a838030e95634 100644
16856 +--- a/fs/afs/volume.c
16857 ++++ b/fs/afs/volume.c
16858 +@@ -106,7 +106,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
16859 + return volume;
16860 +
16861 + error_1:
16862 +- afs_put_cell(params->net, volume->cell);
16863 ++ afs_put_cell(volume->cell);
16864 + kfree(volume);
16865 + error_0:
16866 + return ERR_PTR(ret);
16867 +@@ -228,7 +228,7 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
16868 +
16869 + afs_remove_volume_from_cell(volume);
16870 + afs_put_serverlist(net, rcu_access_pointer(volume->servers));
16871 +- afs_put_cell(net, volume->cell);
16872 ++ afs_put_cell(volume->cell);
16873 + trace_afs_volume(volume->vid, atomic_read(&volume->usage),
16874 + afs_volume_trace_free);
16875 + kfree_rcu(volume, rcu);
16876 +diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
16877 +index 8bbb734f3f514..49384d55a908f 100644
16878 +--- a/fs/btrfs/extent-io-tree.h
16879 ++++ b/fs/btrfs/extent-io-tree.h
16880 +@@ -48,6 +48,7 @@ enum {
16881 + IO_TREE_INODE_FILE_EXTENT,
16882 + IO_TREE_LOG_CSUM_RANGE,
16883 + IO_TREE_SELFTEST,
16884 ++ IO_TREE_DEVICE_ALLOC_STATE,
16885 + };
16886 +
16887 + struct extent_io_tree {
16888 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
16889 +index 79e9a80bd37a0..f9d8bd3099488 100644
16890 +--- a/fs/btrfs/volumes.c
16891 ++++ b/fs/btrfs/volumes.c
16892 +@@ -406,7 +406,7 @@ void __exit btrfs_cleanup_fs_uuids(void)
16893 + * Returned struct is not linked onto any lists and must be destroyed using
16894 + * btrfs_free_device.
16895 + */
16896 +-static struct btrfs_device *__alloc_device(void)
16897 ++static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
16898 + {
16899 + struct btrfs_device *dev;
16900 +
16901 +@@ -433,7 +433,8 @@ static struct btrfs_device *__alloc_device(void)
16902 + btrfs_device_data_ordered_init(dev);
16903 + INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
16904 + INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
16905 +- extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL);
16906 ++ extent_io_tree_init(fs_info, &dev->alloc_state,
16907 ++ IO_TREE_DEVICE_ALLOC_STATE, NULL);
16908 +
16909 + return dev;
16910 + }
16911 +@@ -6545,7 +6546,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
16912 + if (WARN_ON(!devid && !fs_info))
16913 + return ERR_PTR(-EINVAL);
16914 +
16915 +- dev = __alloc_device();
16916 ++ dev = __alloc_device(fs_info);
16917 + if (IS_ERR(dev))
16918 + return dev;
16919 +
16920 +diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
16921 +index 689162e2e1755..3150c19cdc2fb 100644
16922 +--- a/fs/cifs/asn1.c
16923 ++++ b/fs/cifs/asn1.c
16924 +@@ -530,8 +530,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
16925 + return 0;
16926 + } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
16927 + || (tag != ASN1_EOC)) {
16928 +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
16929 +- cls, con, tag, end, *end);
16930 ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
16931 ++ cls, con, tag, end);
16932 + return 0;
16933 + }
16934 +
16935 +@@ -541,8 +541,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
16936 + return 0;
16937 + } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
16938 + || (tag != ASN1_SEQ)) {
16939 +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
16940 +- cls, con, tag, end, *end);
16941 ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 1\n",
16942 ++ cls, con, tag, end);
16943 + return 0;
16944 + }
16945 +
16946 +@@ -552,8 +552,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
16947 + return 0;
16948 + } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
16949 + || (tag != ASN1_EOC)) {
16950 +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
16951 +- cls, con, tag, end, *end);
16952 ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
16953 ++ cls, con, tag, end);
16954 + return 0;
16955 + }
16956 +
16957 +@@ -564,8 +564,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
16958 + return 0;
16959 + } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
16960 + || (tag != ASN1_SEQ)) {
16961 +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
16962 +- cls, con, tag, end, *end);
16963 ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d sequence_end = %p exit 1\n",
16964 ++ cls, con, tag, sequence_end);
16965 + return 0;
16966 + }
16967 +
16968 +diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
16969 +index 6025d7fc7bbfd..d0658891b0a6d 100644
16970 +--- a/fs/cifs/cifsacl.c
16971 ++++ b/fs/cifs/cifsacl.c
16972 +@@ -338,7 +338,7 @@ invalidate_key:
16973 + goto out_key_put;
16974 + }
16975 +
16976 +-static int
16977 ++int
16978 + sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
16979 + struct cifs_fattr *fattr, uint sidtype)
16980 + {
16981 +@@ -359,7 +359,8 @@ sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
16982 + return -EIO;
16983 + }
16984 +
16985 +- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) {
16986 ++ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) ||
16987 ++ (cifs_sb_master_tcon(cifs_sb)->posix_extensions)) {
16988 + uint32_t unix_id;
16989 + bool is_group;
16990 +
16991 +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
16992 +index 7a836ec0438e8..f4751cb391238 100644
16993 +--- a/fs/cifs/cifsproto.h
16994 ++++ b/fs/cifs/cifsproto.h
16995 +@@ -208,6 +208,8 @@ extern int cifs_set_file_info(struct inode *inode, struct iattr *attrs,
16996 + extern int cifs_rename_pending_delete(const char *full_path,
16997 + struct dentry *dentry,
16998 + const unsigned int xid);
16999 ++extern int sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
17000 ++ struct cifs_fattr *fattr, uint sidtype);
17001 + extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
17002 + struct cifs_fattr *fattr, struct inode *inode,
17003 + bool get_mode_from_special_sid,
17004 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
17005 +index a61abde09ffe1..f4ecc13b02c0a 100644
17006 +--- a/fs/cifs/connect.c
17007 ++++ b/fs/cifs/connect.c
17008 +@@ -3594,7 +3594,10 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
17009 + */
17010 + tcon->retry = volume_info->retry;
17011 + tcon->nocase = volume_info->nocase;
17012 +- tcon->nohandlecache = volume_info->nohandlecache;
17013 ++ if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
17014 ++ tcon->nohandlecache = volume_info->nohandlecache;
17015 ++ else
17016 ++ tcon->nohandlecache = 1;
17017 + tcon->nodelete = volume_info->nodelete;
17018 + tcon->local_lease = volume_info->local_lease;
17019 + INIT_LIST_HEAD(&tcon->pending_opens);
17020 +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
17021 +index 6df0922e7e304..709fb53e9fee1 100644
17022 +--- a/fs/cifs/readdir.c
17023 ++++ b/fs/cifs/readdir.c
17024 +@@ -267,9 +267,8 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
17025 + if (reparse_file_needs_reval(fattr))
17026 + fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
17027 +
17028 +- /* TODO map SIDs */
17029 +- fattr->cf_uid = cifs_sb->mnt_uid;
17030 +- fattr->cf_gid = cifs_sb->mnt_gid;
17031 ++ sid_to_id(cifs_sb, &parsed.owner, fattr, SIDOWNER);
17032 ++ sid_to_id(cifs_sb, &parsed.group, fattr, SIDGROUP);
17033 + }
17034 +
17035 + static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info)
17036 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
17037 +index d44df8f95bcd4..09e1cd320ee56 100644
17038 +--- a/fs/cifs/smb2ops.c
17039 ++++ b/fs/cifs/smb2ops.c
17040 +@@ -3072,7 +3072,12 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
17041 + oparms.tcon = tcon;
17042 + oparms.desired_access = READ_CONTROL;
17043 + oparms.disposition = FILE_OPEN;
17044 +- oparms.create_options = cifs_create_options(cifs_sb, 0);
17045 ++ /*
17046 ++ * When querying an ACL, even if the file is a symlink we want to open
17047 ++ * the source not the target, and so the protocol requires that the
17048 ++ * client specify this flag when opening a reparse point
17049 ++ */
17050 ++ oparms.create_options = cifs_create_options(cifs_sb, 0) | OPEN_REPARSE_POINT;
17051 + oparms.fid = &fid;
17052 + oparms.reconnect = false;
17053 +
17054 +@@ -3924,7 +3929,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
17055 + if (rc) {
17056 + cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
17057 + enc ? "en" : "de");
17058 +- return 0;
17059 ++ return rc;
17060 + }
17061 +
17062 + rc = smb3_crypto_aead_allocate(server);
17063 +@@ -4103,7 +4108,8 @@ smb3_is_transform_hdr(void *buf)
17064 + static int
17065 + decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
17066 + unsigned int buf_data_size, struct page **pages,
17067 +- unsigned int npages, unsigned int page_data_size)
17068 ++ unsigned int npages, unsigned int page_data_size,
17069 ++ bool is_offloaded)
17070 + {
17071 + struct kvec iov[2];
17072 + struct smb_rqst rqst = {NULL};
17073 +@@ -4129,7 +4135,8 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
17074 +
17075 + memmove(buf, iov[1].iov_base, buf_data_size);
17076 +
17077 +- server->total_read = buf_data_size + page_data_size;
17078 ++ if (!is_offloaded)
17079 ++ server->total_read = buf_data_size + page_data_size;
17080 +
17081 + return rc;
17082 + }
17083 +@@ -4342,7 +4349,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
17084 + struct mid_q_entry *mid;
17085 +
17086 + rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
17087 +- dw->ppages, dw->npages, dw->len);
17088 ++ dw->ppages, dw->npages, dw->len, true);
17089 + if (rc) {
17090 + cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
17091 + goto free_pages;
17092 +@@ -4448,7 +4455,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
17093 +
17094 + non_offloaded_decrypt:
17095 + rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
17096 +- pages, npages, len);
17097 ++ pages, npages, len, false);
17098 + if (rc)
17099 + goto free_pages;
17100 +
17101 +@@ -4504,7 +4511,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
17102 + server->total_read += length;
17103 +
17104 + buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
17105 +- length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
17106 ++ length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
17107 + if (length)
17108 + return length;
17109 +
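The new is_offloaded parameter keeps the offloaded decrypt worker from updating server->total_read, which belongs to the main receive path; only the non-offloaded caller passes false and does the accounting. A small sketch of the same split, with invented names:

#include <stdio.h>

struct server { unsigned long total_read; };

static int decrypt(struct server *srv, unsigned long len, int is_offloaded)
{
    /* ... decryption itself elided ... */
    if (!is_offloaded)
        srv->total_read += len;   /* only the owning path accounts it */
    return 0;
}

int main(void)
{
    struct server srv = { 0 };

    decrypt(&srv, 100, 0);   /* main receive path: counted */
    decrypt(&srv, 100, 1);   /* offload worker: must not touch it */
    printf("total_read = %lu\n", srv.total_read);   /* prints 100 */
    return 0;
}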
17110 +diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
17111 +index d23ff162c78bc..0b32c64eb4053 100644
17112 +--- a/fs/crypto/policy.c
17113 ++++ b/fs/crypto/policy.c
17114 +@@ -178,10 +178,15 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy,
17115 + 32, 32))
17116 + return false;
17117 +
17118 ++ /*
17119 ++ * IV_INO_LBLK_32 hashes the inode number, so in principle it can
17120 ++ * support any ino_bits. However, currently the inode number is gotten
17121 ++ * from inode::i_ino which is 'unsigned long'. So for now the
17122 ++ * implementation limit is 32 bits.
17123 ++ */
17124 + if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
17125 +- /* This uses hashed inode numbers, so ino_bits doesn't matter. */
17126 + !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32",
17127 +- INT_MAX, 32))
17128 ++ 32, 32))
17129 + return false;
17130 +
17131 + if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) {
17132 +diff --git a/fs/d_path.c b/fs/d_path.c
17133 +index 0f1fc1743302f..a69e2cd36e6e3 100644
17134 +--- a/fs/d_path.c
17135 ++++ b/fs/d_path.c
17136 +@@ -102,6 +102,8 @@ restart:
17137 +
17138 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
17139 + struct mount *parent = READ_ONCE(mnt->mnt_parent);
17140 ++ struct mnt_namespace *mnt_ns;
17141 ++
17142 + /* Escaped? */
17143 + if (dentry != vfsmnt->mnt_root) {
17144 + bptr = *buffer;
17145 +@@ -116,7 +118,9 @@ restart:
17146 + vfsmnt = &mnt->mnt;
17147 + continue;
17148 + }
17149 +- if (is_mounted(vfsmnt) && !is_anon_ns(mnt->mnt_ns))
17150 ++ mnt_ns = READ_ONCE(mnt->mnt_ns);
17151 ++ /* open-coded is_mounted() to use local mnt_ns */
17152 ++ if (!IS_ERR_OR_NULL(mnt_ns) && !is_anon_ns(mnt_ns))
17153 + error = 1; // absolute root
17154 + else
17155 + error = 2; // detached or not attached yet
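The fix reads mnt->mnt_ns exactly once with READ_ONCE() and then open-codes is_mounted() against that local snapshot, so the validity test and the is_anon_ns() call cannot observe two different values under a concurrent umount. The general shape, as a compilable sketch (C11 atomics stand in for READ_ONCE; struct ns and classify() are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ns { bool anonymous; };

static _Atomic(struct ns *) shared_ns;

static int classify(void)
{
    struct ns *snap = atomic_load(&shared_ns);   /* one snapshot */

    if (!snap)
        return 2;                        /* detached or not attached yet */
    return snap->anonymous ? 2 : 1;      /* 1 == absolute root */
}

int main(void)
{
    struct ns mounted = { .anonymous = false };

    atomic_store(&shared_ns, &mounted);
    printf("classification: %d\n", classify());
    return 0;
}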
17156 +diff --git a/fs/dlm/config.c b/fs/dlm/config.c
17157 +index 3b21082e1b550..3b1012a3c4396 100644
17158 +--- a/fs/dlm/config.c
17159 ++++ b/fs/dlm/config.c
17160 +@@ -216,6 +216,7 @@ struct dlm_space {
17161 + struct list_head members;
17162 + struct mutex members_lock;
17163 + int members_count;
17164 ++ struct dlm_nodes *nds;
17165 + };
17166 +
17167 + struct dlm_comms {
17168 +@@ -424,6 +425,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
17169 + INIT_LIST_HEAD(&sp->members);
17170 + mutex_init(&sp->members_lock);
17171 + sp->members_count = 0;
17172 ++ sp->nds = nds;
17173 + return &sp->group;
17174 +
17175 + fail:
17176 +@@ -445,6 +447,7 @@ static void drop_space(struct config_group *g, struct config_item *i)
17177 + static void release_space(struct config_item *i)
17178 + {
17179 + struct dlm_space *sp = config_item_to_space(i);
17180 ++ kfree(sp->nds);
17181 + kfree(sp);
17182 + }
17183 +
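make_space() allocates a dlm_nodes group but previously dropped the only pointer to it on return; the fix stashes it in the new sp->nds field so release_space() can free it. The shape of the fix in miniature, with the structures trimmed down:

#include <stdlib.h>

struct nodes { int unused; };

struct space {
    struct nodes *nds;      /* kept so the release path can free it */
};

static struct space *make_space(void)
{
    struct space *sp = calloc(1, sizeof(*sp));
    struct nodes *nds = calloc(1, sizeof(*nds));

    if (!sp || !nds) {
        free(sp);
        free(nds);
        return NULL;
    }
    sp->nds = nds;          /* the line the patch adds */
    return sp;
}

static void release_space(struct space *sp)
{
    free(sp->nds);          /* the matching free the patch adds */
    free(sp);
}

int main(void)
{
    struct space *sp = make_space();

    if (sp)
        release_space(sp);
    return 0;
}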
17184 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
17185 +index ff46defc65683..dc943e714d142 100644
17186 +--- a/fs/ext4/ext4.h
17187 ++++ b/fs/ext4/ext4.h
17188 +@@ -466,7 +466,7 @@ struct flex_groups {
17189 +
17190 + /* Flags which are mutually exclusive to DAX */
17191 + #define EXT4_DAX_MUT_EXCL (EXT4_VERITY_FL | EXT4_ENCRYPT_FL |\
17192 +- EXT4_JOURNAL_DATA_FL)
17193 ++ EXT4_JOURNAL_DATA_FL | EXT4_INLINE_DATA_FL)
17194 +
17195 + /* Mask out flags that are inappropriate for the given type of inode. */
17196 + static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
17197 +diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
17198 +index dbccf46f17709..37347ba868b70 100644
17199 +--- a/fs/ext4/fsmap.c
17200 ++++ b/fs/ext4/fsmap.c
17201 +@@ -108,6 +108,9 @@ static int ext4_getfsmap_helper(struct super_block *sb,
17202 +
17203 + /* Are we just counting mappings? */
17204 + if (info->gfi_head->fmh_count == 0) {
17205 ++ if (info->gfi_head->fmh_entries == UINT_MAX)
17206 ++ return EXT4_QUERY_RANGE_ABORT;
17207 ++
17208 + if (rec_fsblk > info->gfi_next_fsblk)
17209 + info->gfi_head->fmh_entries++;
17210 +
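When the caller only asks for a count (fmh_count == 0), fmh_entries is incremented without bound, so the fix aborts the query once the unsigned counter is saturated rather than letting it wrap. The guard in isolation:

#include <limits.h>
#include <stdio.h>

/* Count one more mapping, refusing to wrap the unsigned counter. */
static int count_one(unsigned int *entries)
{
    if (*entries == UINT_MAX)
        return -1;          /* abort the query instead of wrapping */
    (*entries)++;
    return 0;
}

int main(void)
{
    unsigned int entries = UINT_MAX - 1;

    printf("%d\n", count_one(&entries));   /* 0: counted */
    printf("%d\n", count_one(&entries));   /* -1: would wrap */
    return 0;
}

The same guard appears again further down in xfs_getfsmap_helper(), where it returns -ECANCELED.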
17211 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
17212 +index e88eff999bd15..79d32ea606aa1 100644
17213 +--- a/fs/ext4/mballoc.c
17214 ++++ b/fs/ext4/mballoc.c
17215 +@@ -4037,7 +4037,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
17216 + struct ext4_buddy e4b;
17217 + int err;
17218 + int busy = 0;
17219 +- int free = 0;
17220 ++ int free, free_total = 0;
17221 +
17222 + mb_debug(sb, "discard preallocation for group %u\n", group);
17223 + if (list_empty(&grp->bb_prealloc_list))
17224 +@@ -4065,8 +4065,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
17225 +
17226 + INIT_LIST_HEAD(&list);
17227 + repeat:
17228 ++ free = 0;
17229 + ext4_lock_group(sb, group);
17230 +- this_cpu_inc(discard_pa_seq);
17231 + list_for_each_entry_safe(pa, tmp,
17232 + &grp->bb_prealloc_list, pa_group_list) {
17233 + spin_lock(&pa->pa_lock);
17234 +@@ -4083,6 +4083,9 @@ repeat:
17235 + /* seems this one can be freed ... */
17236 + ext4_mb_mark_pa_deleted(sb, pa);
17237 +
17238 ++ if (!free)
17239 ++ this_cpu_inc(discard_pa_seq);
17240 ++
17241 + /* we can trust pa_free ... */
17242 + free += pa->pa_free;
17243 +
17244 +@@ -4092,22 +4095,6 @@ repeat:
17245 + list_add(&pa->u.pa_tmp_list, &list);
17246 + }
17247 +
17248 +- /* if we still need more blocks and some PAs were used, try again */
17249 +- if (free < needed && busy) {
17250 +- busy = 0;
17251 +- ext4_unlock_group(sb, group);
17252 +- cond_resched();
17253 +- goto repeat;
17254 +- }
17255 +-
17256 +- /* found anything to free? */
17257 +- if (list_empty(&list)) {
17258 +- BUG_ON(free != 0);
17259 +- mb_debug(sb, "Someone else may have freed PA for this group %u\n",
17260 +- group);
17261 +- goto out;
17262 +- }
17263 +-
17264 + /* now free all selected PAs */
17265 + list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
17266 +
17267 +@@ -4125,14 +4112,22 @@ repeat:
17268 + call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
17269 + }
17270 +
17271 +-out:
17272 ++ free_total += free;
17273 ++
17274 ++ /* if we still need more blocks and some PAs were used, try again */
17275 ++ if (free_total < needed && busy) {
17276 ++ ext4_unlock_group(sb, group);
17277 ++ cond_resched();
17278 ++ busy = 0;
17279 ++ goto repeat;
17280 ++ }
17281 + ext4_unlock_group(sb, group);
17282 + ext4_mb_unload_buddy(&e4b);
17283 + put_bh(bitmap_bh);
17284 + out_dbg:
17285 + mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
17286 +- free, group, grp->bb_free);
17287 +- return free;
17288 ++ free_total, group, grp->bb_free);
17289 ++ return free_total;
17290 + }
17291 +
17292 + /*
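The reworked discard loop frees whatever it selected on each pass, accumulates free_total, and only then decides whether a rescan is worthwhile, instead of retrying before anything was freed; moving the discard_pa_seq increment under '!free' also means it only bumps when a pass will actually discard something. The loop shape in miniature, with made-up per-pass yields:

#include <stdio.h>

int main(void)
{
    const int needed = 10;
    const int pass_yield[] = { 4, 3, 5 };   /* what each scan pass frees */
    int free_total = 0, pass = 0;
    int busy = 1;   /* would be recomputed from skipped-busy PAs per pass */

    do {
        int freed = pass_yield[pass++];     /* scan + free this pass */
        free_total += freed;
    } while (free_total < needed && busy && pass < 3);

    printf("freed %d blocks in %d passes\n", free_total, pass);
    return 0;
}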
17293 +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
17294 +index 44582a4db513e..1e014535c2530 100644
17295 +--- a/fs/f2fs/inode.c
17296 ++++ b/fs/f2fs/inode.c
17297 +@@ -287,6 +287,13 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
17298 + return false;
17299 + }
17300 +
17301 ++ if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
17302 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
17303 ++ f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
17304 ++ __func__, inode->i_ino);
17305 ++ return false;
17306 ++ }
17307 ++
17308 + if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
17309 + fi->i_flags & F2FS_COMPR_FL &&
17310 + F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
17311 +diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
17312 +index c5e32ceb94827..e186d3af61368 100644
17313 +--- a/fs/f2fs/sysfs.c
17314 ++++ b/fs/f2fs/sysfs.c
17315 +@@ -964,4 +964,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
17316 + }
17317 + kobject_del(&sbi->s_kobj);
17318 + kobject_put(&sbi->s_kobj);
17319 ++ wait_for_completion(&sbi->s_kobj_unregister);
17320 + }
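kobject_put() only drops the reference; the release that signals s_kobj_unregister may run later, so unregister must block until it has fired before the containing sbi can be freed. A pthread sketch of that wait (a mutex/condvar pair stands in for the kernel's completion):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int released;

static void *async_release(void *arg)       /* the kobject release path */
{
    (void)arg;
    pthread_mutex_lock(&lock);
    released = 1;                           /* like complete(&...) */
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, async_release, NULL);

    /* like wait_for_completion(&sbi->s_kobj_unregister) */
    pthread_mutex_lock(&lock);
    while (!released)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    puts("safe to free the containing object now");
    return 0;
}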
17321 +diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
17322 +index bcfc288dba3fb..b115e7d47fcec 100644
17323 +--- a/fs/iomap/buffered-io.c
17324 ++++ b/fs/iomap/buffered-io.c
17325 +@@ -49,16 +49,8 @@ iomap_page_create(struct inode *inode, struct page *page)
17326 + if (iop || i_blocksize(inode) == PAGE_SIZE)
17327 + return iop;
17328 +
17329 +- iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
17330 +- atomic_set(&iop->read_count, 0);
17331 +- atomic_set(&iop->write_count, 0);
17332 ++ iop = kzalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
17333 + spin_lock_init(&iop->uptodate_lock);
17334 +- bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
17335 +-
17336 +- /*
17337 +- * migrate_page_move_mapping() assumes that pages with private data have
17338 +- * their count elevated by 1.
17339 +- */
17340 + attach_page_private(page, iop);
17341 + return iop;
17342 + }
17343 +@@ -574,10 +566,10 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
17344 + loff_t block_start = pos & ~(block_size - 1);
17345 + loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
17346 + unsigned from = offset_in_page(pos), to = from + len, poff, plen;
17347 +- int status;
17348 +
17349 + if (PageUptodate(page))
17350 + return 0;
17351 ++ ClearPageError(page);
17352 +
17353 + do {
17354 + iomap_adjust_read_range(inode, iop, &block_start,
17355 +@@ -594,14 +586,13 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
17356 + if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
17357 + return -EIO;
17358 + zero_user_segments(page, poff, from, to, poff + plen);
17359 +- iomap_set_range_uptodate(page, poff, plen);
17360 +- continue;
17361 ++ } else {
17362 ++ int status = iomap_read_page_sync(block_start, page,
17363 ++ poff, plen, srcmap);
17364 ++ if (status)
17365 ++ return status;
17366 + }
17367 +-
17368 +- status = iomap_read_page_sync(block_start, page, poff, plen,
17369 +- srcmap);
17370 +- if (status)
17371 +- return status;
17372 ++ iomap_set_range_uptodate(page, poff, plen);
17373 + } while ((block_start += plen) < block_end);
17374 +
17375 + return 0;
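Two independent simplifications land here: iomap_page_create() swaps kmalloc plus three explicit zeroing calls for one kzalloc, and __iomap_write_begin() hoists iomap_set_range_uptodate() out so the zero-fill and read-sync branches share it. The allocation half has a direct userspace analogue; the field names below are loosely modelled on the iomap_page:

#include <stdlib.h>
#include <string.h>

struct iop_like {
    int read_count;
    int write_count;
    unsigned long uptodate[4];
};

int main(void)
{
    /* Old style: allocate, then zero every field by hand. */
    struct iop_like *a = malloc(sizeof(*a));
    if (!a)
        return 1;
    memset(a, 0, sizeof(*a));

    /* New style: one zeroing allocation (calloc here, kzalloc there). */
    struct iop_like *b = calloc(1, sizeof(*b));
    if (!b) {
        free(a);
        return 1;
    }

    free(a);
    free(b);
    return 0;
}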
17376 +diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
17377 +index ec7b78e6fecaf..28d656b15300b 100644
17378 +--- a/fs/iomap/direct-io.c
17379 ++++ b/fs/iomap/direct-io.c
17380 +@@ -387,6 +387,16 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
17381 + return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
17382 + case IOMAP_INLINE:
17383 + return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
17384 ++ case IOMAP_DELALLOC:
17385 ++ /*
17386 ++ * DIO is not serialised against mmap() access at all, and so
17387 ++ * if the page_mkwrite occurs between the writeback and the
17388 ++ * iomap_apply() call in the DIO path, then it will see the
17389 ++ * DELALLOC block that the page-mkwrite allocated.
17390 ++ */
17391 ++ pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
17392 ++ dio->iocb->ki_filp, current->comm);
17393 ++ return -EIO;
17394 + default:
17395 + WARN_ON_ONCE(1);
17396 + return -EIO;
17397 +diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
17398 +index ccc88be88d6ae..a30b4bcb95a2c 100644
17399 +--- a/fs/nfs/fs_context.c
17400 ++++ b/fs/nfs/fs_context.c
17401 +@@ -94,6 +94,7 @@ enum {
17402 + static const struct constant_table nfs_param_enums_local_lock[] = {
17403 + { "all", Opt_local_lock_all },
17404 + { "flock", Opt_local_lock_flock },
17405 ++ { "posix", Opt_local_lock_posix },
17406 + { "none", Opt_local_lock_none },
17407 + {}
17408 + };
17409 +diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
17410 +index d4359a1df3d5e..84933a0af49b6 100644
17411 +--- a/fs/ntfs/inode.c
17412 ++++ b/fs/ntfs/inode.c
17413 +@@ -1809,6 +1809,12 @@ int ntfs_read_inode_mount(struct inode *vi)
17414 + brelse(bh);
17415 + }
17416 +
17417 ++ if (le32_to_cpu(m->bytes_allocated) != vol->mft_record_size) {
17418 ++ ntfs_error(sb, "Incorrect mft record size %u in superblock, should be %u.",
17419 ++ le32_to_cpu(m->bytes_allocated), vol->mft_record_size);
17420 ++ goto err_out;
17421 ++ }
17422 ++
17423 + /* Apply the mst fixups. */
17424 + if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) {
17425 + /* FIXME: Try to use the $MFTMirr now. */
17426 +diff --git a/fs/proc/base.c b/fs/proc/base.c
17427 +index d86c0afc8a859..297ff606ae0f6 100644
17428 +--- a/fs/proc/base.c
17429 ++++ b/fs/proc/base.c
17430 +@@ -1046,7 +1046,6 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
17431 +
17432 + static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
17433 + {
17434 +- static DEFINE_MUTEX(oom_adj_mutex);
17435 + struct mm_struct *mm = NULL;
17436 + struct task_struct *task;
17437 + int err = 0;
17438 +@@ -1086,7 +1085,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
17439 + struct task_struct *p = find_lock_task_mm(task);
17440 +
17441 + if (p) {
17442 +- if (atomic_read(&p->mm->mm_users) > 1) {
17443 ++ if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) {
17444 + mm = p->mm;
17445 + mmgrab(mm);
17446 + }
17447 +diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
17448 +index 58fc2a7c7fd19..e69a2bfdd81c0 100644
17449 +--- a/fs/quota/quota_v2.c
17450 ++++ b/fs/quota/quota_v2.c
17451 +@@ -282,6 +282,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
17452 + d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
17453 + d->dqb_btime = cpu_to_le64(m->dqb_btime);
17454 + d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
17455 ++ d->dqb_pad = 0;
17456 + if (qtree_entry_unused(info, dp))
17457 + d->dqb_itime = cpu_to_le64(1);
17458 + }
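v2r1_mem2diskdqb() fills a structure that goes straight to disk, so the previously untouched dqb_pad field carried whatever stale bytes were in the buffer; the fix zeroes it explicitly. A minimal illustration of why reserved fields in on-disk records need that:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct on_disk {
    uint64_t value;
    uint32_t id;
    uint32_t pad;       /* reserved: must be zeroed before writing */
};

static void fill(struct on_disk *d, uint64_t value, uint32_t id)
{
    d->value = value;
    d->id = id;
    d->pad = 0;         /* the one-line fix in the hunk above */
}

int main(void)
{
    struct on_disk d;

    memset(&d, 0xAA, sizeof(d));    /* simulate stale buffer contents */
    fill(&d, 42, 7);
    printf("pad = %u\n", d.pad);    /* 0, not 0xAAAAAAAA */
    return 0;
}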
17459 +diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
17460 +index 4146954549560..355523f4a4bf3 100644
17461 +--- a/fs/ramfs/file-nommu.c
17462 ++++ b/fs/ramfs/file-nommu.c
17463 +@@ -224,7 +224,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
17464 + if (!pages)
17465 + goto out_free;
17466 +
17467 +- nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages);
17468 ++ nr = find_get_pages_contig(inode->i_mapping, pgoff, lpages, pages);
17469 + if (nr != lpages)
17470 + goto out_free_pages; /* leave if some pages were missing */
17471 +
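ramfs_nommu_get_unmapped_area() needs the backing pages at consecutive indices, but find_get_pages() skips over holes, so it could report nr == lpages even across a gap; find_get_pages_contig() stops at the first hole, which makes the nr != lpages check meaningful. The difference, sketched over a plain presence array:

#include <stdio.h>
#include <stddef.h>

/* Return how many consecutive entries are present starting at
 * 'start', stopping at the first hole (the _contig behaviour). */
static size_t lookup_contig(const int *present, size_t start, size_t want)
{
    size_t nr = 0;

    while (nr < want && present[start + nr])
        nr++;
    return nr;
}

int main(void)
{
    int present[] = { 1, 1, 0, 1, 1 };   /* a hole at index 2 */
    size_t nr = lookup_contig(present, 0, 5);

    if (nr != 5)
        printf("only %zu contiguous pages, bail out\n", nr);
    return 0;
}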
17472 +diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
17473 +index e43fed96704d8..c76d563dec0e1 100644
17474 +--- a/fs/reiserfs/inode.c
17475 ++++ b/fs/reiserfs/inode.c
17476 +@@ -2159,7 +2159,8 @@ out_end_trans:
17477 + out_inserted_sd:
17478 + clear_nlink(inode);
17479 + th->t_trans_id = 0; /* so the caller can't use this handle later */
17480 +- unlock_new_inode(inode); /* OK to do even if we hadn't locked it */
17481 ++ if (inode->i_state & I_NEW)
17482 ++ unlock_new_inode(inode);
17483 + iput(inode);
17484 + return err;
17485 + }
17486 +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
17487 +index a6bce5b1fb1dc..1b9c7a387dc71 100644
17488 +--- a/fs/reiserfs/super.c
17489 ++++ b/fs/reiserfs/super.c
17490 +@@ -1258,6 +1258,10 @@ static int reiserfs_parse_options(struct super_block *s,
17491 + "turned on.");
17492 + return 0;
17493 + }
17494 ++ if (qf_names[qtype] !=
17495 ++ REISERFS_SB(s)->s_qf_names[qtype])
17496 ++ kfree(qf_names[qtype]);
17497 ++ qf_names[qtype] = NULL;
17498 + if (*arg) { /* Some filename specified? */
17499 + if (REISERFS_SB(s)->s_qf_names[qtype]
17500 + && strcmp(REISERFS_SB(s)->s_qf_names[qtype],
17501 +@@ -1287,10 +1291,6 @@ static int reiserfs_parse_options(struct super_block *s,
17502 + else
17503 + *mount_options |= 1 << REISERFS_GRPQUOTA;
17504 + } else {
17505 +- if (qf_names[qtype] !=
17506 +- REISERFS_SB(s)->s_qf_names[qtype])
17507 +- kfree(qf_names[qtype]);
17508 +- qf_names[qtype] = NULL;
17509 + if (qtype == USRQUOTA)
17510 + *mount_options &= ~(1 << REISERFS_USRQUOTA);
17511 + else
17512 +diff --git a/fs/udf/inode.c b/fs/udf/inode.c
17513 +index adaba8e8b326e..566118417e562 100644
17514 +--- a/fs/udf/inode.c
17515 ++++ b/fs/udf/inode.c
17516 +@@ -139,21 +139,24 @@ void udf_evict_inode(struct inode *inode)
17517 + struct udf_inode_info *iinfo = UDF_I(inode);
17518 + int want_delete = 0;
17519 +
17520 +- if (!inode->i_nlink && !is_bad_inode(inode)) {
17521 +- want_delete = 1;
17522 +- udf_setsize(inode, 0);
17523 +- udf_update_inode(inode, IS_SYNC(inode));
17524 ++ if (!is_bad_inode(inode)) {
17525 ++ if (!inode->i_nlink) {
17526 ++ want_delete = 1;
17527 ++ udf_setsize(inode, 0);
17528 ++ udf_update_inode(inode, IS_SYNC(inode));
17529 ++ }
17530 ++ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
17531 ++ inode->i_size != iinfo->i_lenExtents) {
17532 ++ udf_warn(inode->i_sb,
17533 ++ "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
17534 ++ inode->i_ino, inode->i_mode,
17535 ++ (unsigned long long)inode->i_size,
17536 ++ (unsigned long long)iinfo->i_lenExtents);
17537 ++ }
17538 + }
17539 + truncate_inode_pages_final(&inode->i_data);
17540 + invalidate_inode_buffers(inode);
17541 + clear_inode(inode);
17542 +- if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
17543 +- inode->i_size != iinfo->i_lenExtents) {
17544 +- udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
17545 +- inode->i_ino, inode->i_mode,
17546 +- (unsigned long long)inode->i_size,
17547 +- (unsigned long long)iinfo->i_lenExtents);
17548 +- }
17549 + kfree(iinfo->i_ext.i_data);
17550 + iinfo->i_ext.i_data = NULL;
17551 + udf_clear_extent_cache(inode);
17552 +diff --git a/fs/udf/super.c b/fs/udf/super.c
17553 +index f747bf72edbe0..a6ce0ddb392c7 100644
17554 +--- a/fs/udf/super.c
17555 ++++ b/fs/udf/super.c
17556 +@@ -1353,6 +1353,12 @@ static int udf_load_sparable_map(struct super_block *sb,
17557 + (int)spm->numSparingTables);
17558 + return -EIO;
17559 + }
17560 ++ if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
17561 ++ udf_err(sb, "error loading logical volume descriptor: "
17562 ++ "Too big sparing table size (%u)\n",
17563 ++ le32_to_cpu(spm->sizeSparingTable));
17564 ++ return -EIO;
17565 ++ }
17566 +
17567 + for (i = 0; i < spm->numSparingTables; i++) {
17568 + loc = le32_to_cpu(spm->locSparingTable[i]);
17569 +diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
17570 +index 9498ced947be9..2a38576189307 100644
17571 +--- a/fs/xfs/libxfs/xfs_rtbitmap.c
17572 ++++ b/fs/xfs/libxfs/xfs_rtbitmap.c
17573 +@@ -1018,7 +1018,6 @@ xfs_rtalloc_query_range(
17574 + struct xfs_mount *mp = tp->t_mountp;
17575 + xfs_rtblock_t rtstart;
17576 + xfs_rtblock_t rtend;
17577 +- xfs_rtblock_t rem;
17578 + int is_free;
17579 + int error = 0;
17580 +
17581 +@@ -1027,13 +1026,12 @@ xfs_rtalloc_query_range(
17582 + if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
17583 + low_rec->ar_startext == high_rec->ar_startext)
17584 + return 0;
17585 +- if (high_rec->ar_startext > mp->m_sb.sb_rextents)
17586 +- high_rec->ar_startext = mp->m_sb.sb_rextents;
17587 ++ high_rec->ar_startext = min(high_rec->ar_startext,
17588 ++ mp->m_sb.sb_rextents - 1);
17589 +
17590 + /* Iterate the bitmap, looking for discrepancies. */
17591 + rtstart = low_rec->ar_startext;
17592 +- rem = high_rec->ar_startext - rtstart;
17593 +- while (rem) {
17594 ++ while (rtstart <= high_rec->ar_startext) {
17595 + /* Is the first block free? */
17596 + error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend,
17597 + &is_free);
17598 +@@ -1042,7 +1040,7 @@ xfs_rtalloc_query_range(
17599 +
17600 + /* How long does the extent go for? */
17601 + error = xfs_rtfind_forw(mp, tp, rtstart,
17602 +- high_rec->ar_startext - 1, &rtend);
17603 ++ high_rec->ar_startext, &rtend);
17604 + if (error)
17605 + break;
17606 +
17607 +@@ -1055,7 +1053,6 @@ xfs_rtalloc_query_range(
17608 + break;
17609 + }
17610 +
17611 +- rem -= rtend - rtstart + 1;
17612 + rtstart = rtend + 1;
17613 + }
17614 +
17615 +diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
17616 +index 04faa7310c4f0..8140bd870226a 100644
17617 +--- a/fs/xfs/xfs_buf_item_recover.c
17618 ++++ b/fs/xfs/xfs_buf_item_recover.c
17619 +@@ -721,6 +721,8 @@ xlog_recover_get_buf_lsn(
17620 + case XFS_ABTC_MAGIC:
17621 + case XFS_RMAP_CRC_MAGIC:
17622 + case XFS_REFC_CRC_MAGIC:
17623 ++ case XFS_FIBT_CRC_MAGIC:
17624 ++ case XFS_FIBT_MAGIC:
17625 + case XFS_IBT_CRC_MAGIC:
17626 + case XFS_IBT_MAGIC: {
17627 + struct xfs_btree_block *btb = blk;
17628 +diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
17629 +index 4d7385426149c..3ebc73ccc1337 100644
17630 +--- a/fs/xfs/xfs_file.c
17631 ++++ b/fs/xfs/xfs_file.c
17632 +@@ -1005,6 +1005,21 @@ xfs_file_fadvise(
17633 + return ret;
17634 + }
17635 +
17636 ++/* Does this file, inode, or mount want synchronous writes? */
17637 ++static inline bool xfs_file_sync_writes(struct file *filp)
17638 ++{
17639 ++ struct xfs_inode *ip = XFS_I(file_inode(filp));
17640 ++
17641 ++ if (ip->i_mount->m_flags & XFS_MOUNT_WSYNC)
17642 ++ return true;
17643 ++ if (filp->f_flags & (__O_SYNC | O_DSYNC))
17644 ++ return true;
17645 ++ if (IS_SYNC(file_inode(filp)))
17646 ++ return true;
17647 ++
17648 ++ return false;
17649 ++}
17650 ++
17651 + STATIC loff_t
17652 + xfs_file_remap_range(
17653 + struct file *file_in,
17654 +@@ -1062,7 +1077,7 @@ xfs_file_remap_range(
17655 + if (ret)
17656 + goto out_unlock;
17657 +
17658 +- if (mp->m_flags & XFS_MOUNT_WSYNC)
17659 ++ if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
17660 + xfs_log_force_inode(dest);
17661 + out_unlock:
17662 + xfs_reflink_remap_unlock(file_in, file_out);
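For context on the fs/xfs/xfs_file.c hunk above: the new xfs_file_sync_writes() helper folds the three sources of synchronous-write semantics (the wsync mount flag, O_SYNC/O_DSYNC open flags, and an S_SYNC inode) into one predicate, and the remap path now forces the log when either side of the remap wants sync writes, not just on wsync mounts. A minimal userspace sketch of the same predicate shape, with hypothetical flag names standing in for the kernel's:

    #include <stdbool.h>

    /* Hypothetical stand-ins for the kernel's mount/open/inode flags. */
    #define MNT_WSYNC (1u << 0)  /* filesystem mounted -o wsync     */
    #define FL_OSYNC  (1u << 1)  /* file opened O_SYNC or O_DSYNC   */
    #define IN_SYNC   (1u << 2)  /* inode itself marked synchronous */

    struct file_ctx {
        unsigned mount_flags, open_flags, inode_flags;
    };

    /* Mirrors xfs_file_sync_writes(): any one source of syncness wins. */
    static bool wants_sync_writes(const struct file_ctx *f)
    {
        return (f->mount_flags & MNT_WSYNC) ||
               (f->open_flags  & FL_OSYNC)  ||
               (f->inode_flags & IN_SYNC);
    }

    /* The remap path then does, in effect:
     *     if (wants_sync_writes(in) || wants_sync_writes(out))
     *             force_log(dest);
     */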
17663 +diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
17664 +index 4eebcec4aae6c..9ce5e7d5bf8f2 100644
17665 +--- a/fs/xfs/xfs_fsmap.c
17666 ++++ b/fs/xfs/xfs_fsmap.c
17667 +@@ -26,7 +26,7 @@
17668 + #include "xfs_rtalloc.h"
17669 +
17670 + /* Convert an xfs_fsmap to an fsmap. */
17671 +-void
17672 ++static void
17673 + xfs_fsmap_from_internal(
17674 + struct fsmap *dest,
17675 + struct xfs_fsmap *src)
17676 +@@ -155,8 +155,7 @@ xfs_fsmap_owner_from_rmap(
17677 + /* getfsmap query state */
17678 + struct xfs_getfsmap_info {
17679 + struct xfs_fsmap_head *head;
17680 +- xfs_fsmap_format_t formatter; /* formatting fn */
17681 +- void *format_arg; /* format buffer */
17682 ++ struct fsmap *fsmap_recs; /* mapping records */
17683 + struct xfs_buf *agf_bp; /* AGF, for refcount queries */
17684 + xfs_daddr_t next_daddr; /* next daddr we expect */
17685 + u64 missing_owner; /* owner of holes */
17686 +@@ -224,6 +223,20 @@ xfs_getfsmap_is_shared(
17687 + return 0;
17688 + }
17689 +
17690 ++static inline void
17691 ++xfs_getfsmap_format(
17692 ++ struct xfs_mount *mp,
17693 ++ struct xfs_fsmap *xfm,
17694 ++ struct xfs_getfsmap_info *info)
17695 ++{
17696 ++ struct fsmap *rec;
17697 ++
17698 ++ trace_xfs_getfsmap_mapping(mp, xfm);
17699 ++
17700 ++ rec = &info->fsmap_recs[info->head->fmh_entries++];
17701 ++ xfs_fsmap_from_internal(rec, xfm);
17702 ++}
17703 ++
17704 + /*
17705 + * Format a reverse mapping for getfsmap, having translated rm_startblock
17706 + * into the appropriate daddr units.
17707 +@@ -256,6 +269,9 @@ xfs_getfsmap_helper(
17708 +
17709 + /* Are we just counting mappings? */
17710 + if (info->head->fmh_count == 0) {
17711 ++ if (info->head->fmh_entries == UINT_MAX)
17712 ++ return -ECANCELED;
17713 ++
17714 + if (rec_daddr > info->next_daddr)
17715 + info->head->fmh_entries++;
17716 +
17717 +@@ -285,10 +301,7 @@ xfs_getfsmap_helper(
17718 + fmr.fmr_offset = 0;
17719 + fmr.fmr_length = rec_daddr - info->next_daddr;
17720 + fmr.fmr_flags = FMR_OF_SPECIAL_OWNER;
17721 +- error = info->formatter(&fmr, info->format_arg);
17722 +- if (error)
17723 +- return error;
17724 +- info->head->fmh_entries++;
17725 ++ xfs_getfsmap_format(mp, &fmr, info);
17726 + }
17727 +
17728 + if (info->last)
17729 +@@ -320,11 +333,8 @@ xfs_getfsmap_helper(
17730 + if (shared)
17731 + fmr.fmr_flags |= FMR_OF_SHARED;
17732 + }
17733 +- error = info->formatter(&fmr, info->format_arg);
17734 +- if (error)
17735 +- return error;
17736 +- info->head->fmh_entries++;
17737 +
17738 ++ xfs_getfsmap_format(mp, &fmr, info);
17739 + out:
17740 + rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
17741 + if (info->next_daddr < rec_daddr)
17742 +@@ -792,11 +802,11 @@ xfs_getfsmap_check_keys(
17743 + #endif /* CONFIG_XFS_RT */
17744 +
17745 + /*
17746 +- * Get filesystem's extents as described in head, and format for
17747 +- * output. Calls formatter to fill the user's buffer until all
17748 +- * extents are mapped, until the passed-in head->fmh_count slots have
17749 +- * been filled, or until the formatter short-circuits the loop, if it
17750 +- * is tracking filled-in extents on its own.
17751 ++ * Get filesystem's extents as described in head, and format for output. Fills
17752 ++ * in the supplied records array until there are no more reverse mappings to
17753 ++ * return or head.fmh_entries == head.fmh_count. In the second case, this
17754 ++ * function returns -ECANCELED to indicate that more records would have been
17755 ++ * returned.
17756 + *
17757 + * Key to Confusion
17758 + * ----------------
17759 +@@ -816,8 +826,7 @@ int
17760 + xfs_getfsmap(
17761 + struct xfs_mount *mp,
17762 + struct xfs_fsmap_head *head,
17763 +- xfs_fsmap_format_t formatter,
17764 +- void *arg)
17765 ++ struct fsmap *fsmap_recs)
17766 + {
17767 + struct xfs_trans *tp = NULL;
17768 + struct xfs_fsmap dkeys[2]; /* per-dev keys */
17769 +@@ -892,8 +901,7 @@ xfs_getfsmap(
17770 +
17771 + info.next_daddr = head->fmh_keys[0].fmr_physical +
17772 + head->fmh_keys[0].fmr_length;
17773 +- info.formatter = formatter;
17774 +- info.format_arg = arg;
17775 ++ info.fsmap_recs = fsmap_recs;
17776 + info.head = head;
17777 +
17778 + /*
17779 +diff --git a/fs/xfs/xfs_fsmap.h b/fs/xfs/xfs_fsmap.h
17780 +index c6c57739b8626..a0775788e7b13 100644
17781 +--- a/fs/xfs/xfs_fsmap.h
17782 ++++ b/fs/xfs/xfs_fsmap.h
17783 +@@ -27,13 +27,9 @@ struct xfs_fsmap_head {
17784 + struct xfs_fsmap fmh_keys[2]; /* low and high keys */
17785 + };
17786 +
17787 +-void xfs_fsmap_from_internal(struct fsmap *dest, struct xfs_fsmap *src);
17788 + void xfs_fsmap_to_internal(struct xfs_fsmap *dest, struct fsmap *src);
17789 +
17790 +-/* fsmap to userspace formatter - copy to user & advance pointer */
17791 +-typedef int (*xfs_fsmap_format_t)(struct xfs_fsmap *, void *);
17792 +-
17793 + int xfs_getfsmap(struct xfs_mount *mp, struct xfs_fsmap_head *head,
17794 +- xfs_fsmap_format_t formatter, void *arg);
17795 ++ struct fsmap *out_recs);
17796 +
17797 + #endif /* __XFS_FSMAP_H__ */
17798 +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
17799 +index a190212ca85d0..e2a8edcb367bb 100644
17800 +--- a/fs/xfs/xfs_ioctl.c
17801 ++++ b/fs/xfs/xfs_ioctl.c
17802 +@@ -1707,39 +1707,17 @@ out_free_buf:
17803 + return error;
17804 + }
17805 +
17806 +-struct getfsmap_info {
17807 +- struct xfs_mount *mp;
17808 +- struct fsmap_head __user *data;
17809 +- unsigned int idx;
17810 +- __u32 last_flags;
17811 +-};
17812 +-
17813 +-STATIC int
17814 +-xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
17815 +-{
17816 +- struct getfsmap_info *info = priv;
17817 +- struct fsmap fm;
17818 +-
17819 +- trace_xfs_getfsmap_mapping(info->mp, xfm);
17820 +-
17821 +- info->last_flags = xfm->fmr_flags;
17822 +- xfs_fsmap_from_internal(&fm, xfm);
17823 +- if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
17824 +- sizeof(struct fsmap)))
17825 +- return -EFAULT;
17826 +-
17827 +- return 0;
17828 +-}
17829 +-
17830 + STATIC int
17831 + xfs_ioc_getfsmap(
17832 + struct xfs_inode *ip,
17833 + struct fsmap_head __user *arg)
17834 + {
17835 +- struct getfsmap_info info = { NULL };
17836 + struct xfs_fsmap_head xhead = {0};
17837 + struct fsmap_head head;
17838 +- bool aborted = false;
17839 ++ struct fsmap *recs;
17840 ++ unsigned int count;
17841 ++ __u32 last_flags = 0;
17842 ++ bool done = false;
17843 + int error;
17844 +
17845 + if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
17846 +@@ -1751,38 +1729,112 @@ xfs_ioc_getfsmap(
17847 + sizeof(head.fmh_keys[1].fmr_reserved)))
17848 + return -EINVAL;
17849 +
17850 ++ /*
17851 ++ * Use an internal memory buffer so that we don't have to copy fsmap
17852 ++ * data to userspace while holding locks. Start by trying to allocate
17853 ++ * up to 128k for the buffer, but fall back to a single page if needed.
17854 ++ */
17855 ++ count = min_t(unsigned int, head.fmh_count,
17856 ++ 131072 / sizeof(struct fsmap));
17857 ++ recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
17858 ++ if (!recs) {
17859 ++ count = min_t(unsigned int, head.fmh_count,
17860 ++ PAGE_SIZE / sizeof(struct fsmap));
17861 ++ recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
17862 ++ if (!recs)
17863 ++ return -ENOMEM;
17864 ++ }
17865 ++
17866 + xhead.fmh_iflags = head.fmh_iflags;
17867 +- xhead.fmh_count = head.fmh_count;
17868 + xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
17869 + xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
17870 +
17871 + trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
17872 + trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
17873 +
17874 +- info.mp = ip->i_mount;
17875 +- info.data = arg;
17876 +- error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
17877 +- if (error == -ECANCELED) {
17878 +- error = 0;
17879 +- aborted = true;
17880 +- } else if (error)
17881 +- return error;
17882 ++ head.fmh_entries = 0;
17883 ++ do {
17884 ++ struct fsmap __user *user_recs;
17885 ++ struct fsmap *last_rec;
17886 ++
17887 ++ user_recs = &arg->fmh_recs[head.fmh_entries];
17888 ++ xhead.fmh_entries = 0;
17889 ++ xhead.fmh_count = min_t(unsigned int, count,
17890 ++ head.fmh_count - head.fmh_entries);
17891 ++
17892 ++ /* Run query, record how many entries we got. */
17893 ++ error = xfs_getfsmap(ip->i_mount, &xhead, recs);
17894 ++ switch (error) {
17895 ++ case 0:
17896 ++ /*
17897 ++ * There are no more records in the result set. Copy
17898 ++ * whatever we got to userspace and break out.
17899 ++ */
17900 ++ done = true;
17901 ++ break;
17902 ++ case -ECANCELED:
17903 ++ /*
17904 ++ * The internal memory buffer is full. Copy whatever
17905 ++ * records we got to userspace and go again if we have
17906 ++ * not yet filled the userspace buffer.
17907 ++ */
17908 ++ error = 0;
17909 ++ break;
17910 ++ default:
17911 ++ goto out_free;
17912 ++ }
17913 ++ head.fmh_entries += xhead.fmh_entries;
17914 ++ head.fmh_oflags = xhead.fmh_oflags;
17915 +
17916 +- /* If we didn't abort, set the "last" flag in the last fmx */
17917 +- if (!aborted && info.idx) {
17918 +- info.last_flags |= FMR_OF_LAST;
17919 +- if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
17920 +- &info.last_flags, sizeof(info.last_flags)))
17921 +- return -EFAULT;
17922 ++ /*
17923 ++ * If the caller wanted a record count or there aren't any
17924 ++ * new records to return, we're done.
17925 ++ */
17926 ++ if (head.fmh_count == 0 || xhead.fmh_entries == 0)
17927 ++ break;
17928 ++
17929 ++ /* Copy all the records we got out to userspace. */
17930 ++ if (copy_to_user(user_recs, recs,
17931 ++ xhead.fmh_entries * sizeof(struct fsmap))) {
17932 ++ error = -EFAULT;
17933 ++ goto out_free;
17934 ++ }
17935 ++
17936 ++ /* Remember the last record flags we copied to userspace. */
17937 ++ last_rec = &recs[xhead.fmh_entries - 1];
17938 ++ last_flags = last_rec->fmr_flags;
17939 ++
17940 ++ /* Set up the low key for the next iteration. */
17941 ++ xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
17942 ++ trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
17943 ++ } while (!done && head.fmh_entries < head.fmh_count);
17944 ++
17945 ++ /*
17946 ++ * If there are no more records in the query result set and we're not
17947 ++ * in counting mode, mark the last record returned with the LAST flag.
17948 ++ */
17949 ++ if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
17950 ++ struct fsmap __user *user_rec;
17951 ++
17952 ++ last_flags |= FMR_OF_LAST;
17953 ++ user_rec = &arg->fmh_recs[head.fmh_entries - 1];
17954 ++
17955 ++ if (copy_to_user(&user_rec->fmr_flags, &last_flags,
17956 ++ sizeof(last_flags))) {
17957 ++ error = -EFAULT;
17958 ++ goto out_free;
17959 ++ }
17960 + }
17961 +
17962 + /* copy back header */
17963 +- head.fmh_entries = xhead.fmh_entries;
17964 +- head.fmh_oflags = xhead.fmh_oflags;
17965 +- if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
17966 +- return -EFAULT;
17967 ++ if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
17968 ++ error = -EFAULT;
17969 ++ goto out_free;
17970 ++ }
17971 +
17972 +- return 0;
17973 ++out_free:
17974 ++ kmem_free(recs);
17975 ++ return error;
17976 + }
17977 +
17978 + STATIC int
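The rewritten xfs_ioc_getfsmap() above replaces the copy-one-record-at-a-time formatter callback with a bounce buffer: it kvzallocs up to 128 KiB (falling back to a single page), runs the query into that buffer in batches, copies each batch to userspace with no filesystem locks held, and re-seeds the query's low key from the last record so the next pass continues where the previous one stopped. A compressed userspace sketch of that batching loop, with a hypothetical query callback standing in for xfs_getfsmap() and memcpy() standing in for copy_to_user():

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    /* Hypothetical record and query types, for illustration only. */
    struct rec { unsigned long key; unsigned flags; };

    /* Fills up to 'cap' records starting at 'low' and returns the count;
     * sets *done when the result set is exhausted.  Stands in for
     * xfs_getfsmap() and its 0 / -ECANCELED convention. */
    typedef size_t (*query_fn)(unsigned long low, struct rec *buf,
                               size_t cap, bool *done);

    /* Drain a query through a small bounce buffer into 'out'; memcpy()
     * stands in for the copy_to_user() now done outside all locks. */
    static size_t drain(query_fn query, struct rec *out, size_t want)
    {
        struct rec buf[32];         /* bounce buffer, cf. the 128 KiB kvzalloc */
        unsigned long low = 0;
        size_t total = 0;
        bool done = false;

        while (!done && total < want) {
            size_t cap = want - total;

            if (cap > 32)
                cap = 32;
            size_t n = query(low, buf, cap, &done);
            if (n == 0)
                break;
            memcpy(out + total, buf, n * sizeof(*buf));
            total += n;
            low = buf[n - 1].key + 1;   /* re-seed the low key */
        }
        if (done && total > 0)
            out[total - 1].flags |= 1u; /* cf. FMR_OF_LAST on the final record */
        return total;
    }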
17979 +diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
17980 +index 6209e7b6b895b..86994d7f7cba3 100644
17981 +--- a/fs/xfs/xfs_rtalloc.c
17982 ++++ b/fs/xfs/xfs_rtalloc.c
17983 +@@ -247,6 +247,9 @@ xfs_rtallocate_extent_block(
17984 + end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
17985 + i <= end;
17986 + i++) {
17987 ++ /* Make sure we don't scan off the end of the rt volume. */
17988 ++ maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i;
17989 ++
17990 + /*
17991 + * See if there's a free extent of maxlen starting at i.
17992 + * If it's not so then next will contain the first non-free.
17993 +@@ -442,6 +445,14 @@ xfs_rtallocate_extent_near(
17994 + */
17995 + if (bno >= mp->m_sb.sb_rextents)
17996 + bno = mp->m_sb.sb_rextents - 1;
17997 ++
17998 ++ /* Make sure we don't run off the end of the rt volume. */
17999 ++ maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno;
18000 ++ if (maxlen < minlen) {
18001 ++ *rtblock = NULLRTBLOCK;
18002 ++ return 0;
18003 ++ }
18004 ++
18005 + /*
18006 + * Try the exact allocation first.
18007 + */
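Both fs/xfs/xfs_rtalloc.c hunks apply the same clamp so a scan starting inside the realtime volume can never run past its last extent: min(total, start + len) - start. A one-function sketch of the clamp; the near-allocation hunk additionally fails the allocation up front (NULLRTBLOCK) when the clamped length drops below the caller's minlen:

    /* Clamp a scan length so [start, start + len) stays inside a
     * device of 'total' units; same shape as the min() the hunks add. */
    static unsigned long clamp_scan_len(unsigned long total,
                                        unsigned long start,
                                        unsigned long len)
    {
        unsigned long end = start + len;

        if (end > total)
            end = total;
        return end - start;   /* == min(total, start + len) - start */
    }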
18008 +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
18009 +index ca08db4ffb5f7..ce3f5231aa698 100644
18010 +--- a/include/linux/bpf_verifier.h
18011 ++++ b/include/linux/bpf_verifier.h
18012 +@@ -358,6 +358,7 @@ struct bpf_subprog_info {
18013 + u32 start; /* insn idx of function entry point */
18014 + u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
18015 + u16 stack_depth; /* max. stack depth used by this function */
18016 ++ bool has_tail_call;
18017 + };
18018 +
18019 + /* single container for all structs
18020 +diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
18021 +index a4dc45fbec0a4..23bc366f6c3b3 100644
18022 +--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
18023 ++++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
18024 +@@ -27,8 +27,7 @@
18025 + * bit 16-27: update value
18026 + * bit 31: 1 - update, 0 - no update
18027 + */
18028 +-#define CMDQ_WFE_OPTION (CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \
18029 +- CMDQ_WFE_WAIT_VALUE)
18030 ++#define CMDQ_WFE_OPTION (CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE)
18031 +
18032 + /** cmdq event maximum */
18033 + #define CMDQ_MAX_EVENT 0x3ff
18034 +diff --git a/include/linux/oom.h b/include/linux/oom.h
18035 +index c696c265f0193..b9df34326772c 100644
18036 +--- a/include/linux/oom.h
18037 ++++ b/include/linux/oom.h
18038 +@@ -55,6 +55,7 @@ struct oom_control {
18039 + };
18040 +
18041 + extern struct mutex oom_lock;
18042 ++extern struct mutex oom_adj_mutex;
18043 +
18044 + static inline void set_current_oom_origin(void)
18045 + {
18046 +diff --git a/include/linux/overflow.h b/include/linux/overflow.h
18047 +index 93fcef105061b..ff3c48f0abc5b 100644
18048 +--- a/include/linux/overflow.h
18049 ++++ b/include/linux/overflow.h
18050 +@@ -3,6 +3,7 @@
18051 + #define __LINUX_OVERFLOW_H
18052 +
18053 + #include <linux/compiler.h>
18054 ++#include <linux/limits.h>
18055 +
18056 + /*
18057 + * In the fallback code below, we need to compute the minimum and
18058 +diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
18059 +index 8679ccd722e89..3468794f83d23 100644
18060 +--- a/include/linux/page_owner.h
18061 ++++ b/include/linux/page_owner.h
18062 +@@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops;
18063 + extern void __reset_page_owner(struct page *page, unsigned int order);
18064 + extern void __set_page_owner(struct page *page,
18065 + unsigned int order, gfp_t gfp_mask);
18066 +-extern void __split_page_owner(struct page *page, unsigned int order);
18067 ++extern void __split_page_owner(struct page *page, unsigned int nr);
18068 + extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
18069 + extern void __set_page_owner_migrate_reason(struct page *page, int reason);
18070 + extern void __dump_page_owner(struct page *page);
18071 +@@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page,
18072 + __set_page_owner(page, order, gfp_mask);
18073 + }
18074 +
18075 +-static inline void split_page_owner(struct page *page, unsigned int order)
18076 ++static inline void split_page_owner(struct page *page, unsigned int nr)
18077 + {
18078 + if (static_branch_unlikely(&page_owner_inited))
18079 +- __split_page_owner(page, order);
18080 ++ __split_page_owner(page, nr);
18081 + }
18082 + static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
18083 + {
18084 +diff --git a/include/linux/pci.h b/include/linux/pci.h
18085 +index 34c1c4f45288f..1bc3c020672fd 100644
18086 +--- a/include/linux/pci.h
18087 ++++ b/include/linux/pci.h
18088 +@@ -439,6 +439,7 @@ struct pci_dev {
18089 + unsigned int is_probed:1; /* Device probing in progress */
18090 + unsigned int link_active_reporting:1;/* Device capable of reporting link active */
18091 + unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
18092 ++ unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
18093 + pci_dev_flags_t dev_flags;
18094 + atomic_t enable_cnt; /* pci_enable_device has been called */
18095 +
18096 +diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
18097 +index f3eaf9ec00a1b..70078be166e3c 100644
18098 +--- a/include/linux/platform_data/dma-dw.h
18099 ++++ b/include/linux/platform_data/dma-dw.h
18100 +@@ -21,6 +21,7 @@
18101 + * @dst_id: dst request line
18102 + * @m_master: memory master for transfers on allocated channel
18103 + * @p_master: peripheral master for transfers on allocated channel
18104 ++ * @channels: mask of the channels permitted for allocation (zero value means any)
18105 + * @hs_polarity:set active low polarity of handshake interface
18106 + */
18107 + struct dw_dma_slave {
18108 +@@ -29,6 +30,7 @@ struct dw_dma_slave {
18109 + u8 dst_id;
18110 + u8 m_master;
18111 + u8 p_master;
18112 ++ u8 channels;
18113 + bool hs_polarity;
18114 + };
18115 +
18116 +diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
18117 +index ecdc6542070f1..dfd82eab29025 100644
18118 +--- a/include/linux/sched/coredump.h
18119 ++++ b/include/linux/sched/coredump.h
18120 +@@ -72,6 +72,7 @@ static inline int get_dumpable(struct mm_struct *mm)
18121 + #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
18122 + #define MMF_OOM_VICTIM 25 /* mm is the oom victim */
18123 + #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
18124 ++#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
18125 + #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
18126 +
18127 + #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
18128 +diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
18129 +index a74c1d5acdf3c..cb71dca985589 100644
18130 +--- a/include/linux/soc/mediatek/mtk-cmdq.h
18131 ++++ b/include/linux/soc/mediatek/mtk-cmdq.h
18132 +@@ -105,11 +105,12 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
18133 + /**
18134 + * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet
18135 + * @pkt: the CMDQ packet
18136 +- * @event: the desired event type to "wait and CLEAR"
18137 ++ * @event: the desired event type to wait
18138 ++ * @clear: clear event or not after event arrive
18139 + *
18140 + * Return: 0 for success; else the error code is returned
18141 + */
18142 +-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
18143 ++int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear);
18144 +
18145 + /**
18146 + * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet
18147 +diff --git a/include/net/ip.h b/include/net/ip.h
18148 +index 04ebe7bf54c6a..d61c26ab4ee84 100644
18149 +--- a/include/net/ip.h
18150 ++++ b/include/net/ip.h
18151 +@@ -439,12 +439,18 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
18152 + bool forwarding)
18153 + {
18154 + struct net *net = dev_net(dst->dev);
18155 ++ unsigned int mtu;
18156 +
18157 + if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
18158 + ip_mtu_locked(dst) ||
18159 + !forwarding)
18160 + return dst_mtu(dst);
18161 +
18162 ++ /* 'forwarding = true' case should always honour route mtu */
18163 ++ mtu = dst_metric_raw(dst, RTAX_MTU);
18164 ++ if (mtu)
18165 ++ return mtu;
18166 ++
18167 + return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
18168 + }
18169 +
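The include/net/ip.h change makes forwarded traffic honour an explicit per-route MTU metric before falling back to the device MTU. The resulting precedence, as a hypothetical helper where route_mtu == 0 means the route carries no RTAX_MTU metric:

    /* Route metric wins; otherwise fall back to the device MTU,
     * capped the way the kernel caps it at IP_MAX_MTU. */
    static unsigned int forward_mtu(unsigned int route_mtu,
                                    unsigned int dev_mtu,
                                    unsigned int hard_cap)
    {
        if (route_mtu)
            return route_mtu;
        return dev_mtu < hard_cap ? dev_mtu : hard_cap;
    }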
18170 +diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
18171 +index 0d3920896d502..716db4a0fed89 100644
18172 +--- a/include/net/netfilter/nf_log.h
18173 ++++ b/include/net/netfilter/nf_log.h
18174 +@@ -108,6 +108,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
18175 + unsigned int logflags);
18176 + void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
18177 + struct sock *sk);
18178 ++void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb);
18179 + void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
18180 + unsigned int hooknum, const struct sk_buff *skb,
18181 + const struct net_device *in,
18182 +diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
18183 +index e1057b255f69a..879fe8cff5819 100644
18184 +--- a/include/net/tc_act/tc_tunnel_key.h
18185 ++++ b/include/net/tc_act/tc_tunnel_key.h
18186 +@@ -56,7 +56,10 @@ static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a)
18187 + {
18188 + #ifdef CONFIG_NET_CLS_ACT
18189 + struct tcf_tunnel_key *t = to_tunnel_key(a);
18190 +- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
18191 ++ struct tcf_tunnel_key_params *params;
18192 ++
18193 ++ params = rcu_dereference_protected(t->params,
18194 ++ lockdep_is_held(&a->tcfa_lock));
18195 +
18196 + return &params->tcft_enc_metadata->u.tun_info;
18197 + #else
18198 +diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
18199 +index e3518fd6b95b1..9353910915d41 100644
18200 +--- a/include/rdma/ib_umem.h
18201 ++++ b/include/rdma/ib_umem.h
18202 +@@ -95,10 +95,11 @@ static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offs
18203 + size_t length) {
18204 + return -EINVAL;
18205 + }
18206 +-static inline int ib_umem_find_best_pgsz(struct ib_umem *umem,
18207 +- unsigned long pgsz_bitmap,
18208 +- unsigned long virt) {
18209 +- return -EINVAL;
18210 ++static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
18211 ++ unsigned long pgsz_bitmap,
18212 ++ unsigned long virt)
18213 ++{
18214 ++ return 0;
18215 + }
18216 +
18217 + #endif /* CONFIG_INFINIBAND_USER_MEM */
18218 +diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
18219 +index ef2f3986c4933..d7809f203715f 100644
18220 +--- a/include/rdma/ib_verbs.h
18221 ++++ b/include/rdma/ib_verbs.h
18222 +@@ -2465,7 +2465,7 @@ struct ib_device_ops {
18223 + int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
18224 + struct ib_udata *udata);
18225 + int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
18226 +- void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
18227 ++ int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
18228 + int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
18229 + struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
18230 + struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
18231 +@@ -3834,46 +3834,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
18232 + return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
18233 + }
18234 +
18235 +-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
18236 +- int nr_cqe, int comp_vector,
18237 +- enum ib_poll_context poll_ctx,
18238 +- const char *caller, struct ib_udata *udata);
18239 +-
18240 +-/**
18241 +- * ib_alloc_cq_user: Allocate kernel/user CQ
18242 +- * @dev: The IB device
18243 +- * @private: Private data attached to the CQE
18244 +- * @nr_cqe: Number of CQEs in the CQ
18245 +- * @comp_vector: Completion vector used for the IRQs
18246 +- * @poll_ctx: Context used for polling the CQ
18247 +- * @udata: Valid user data or NULL for kernel objects
18248 +- */
18249 +-static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
18250 +- void *private, int nr_cqe,
18251 +- int comp_vector,
18252 +- enum ib_poll_context poll_ctx,
18253 +- struct ib_udata *udata)
18254 +-{
18255 +- return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
18256 +- KBUILD_MODNAME, udata);
18257 +-}
18258 +-
18259 +-/**
18260 +- * ib_alloc_cq: Allocate kernel CQ
18261 +- * @dev: The IB device
18262 +- * @private: Private data attached to the CQE
18263 +- * @nr_cqe: Number of CQEs in the CQ
18264 +- * @comp_vector: Completion vector used for the IRQs
18265 +- * @poll_ctx: Context used for polling the CQ
18266 +- *
18267 +- * NOTE: for user cq use ib_alloc_cq_user with valid udata!
18268 +- */
18269 ++struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
18270 ++ int comp_vector, enum ib_poll_context poll_ctx,
18271 ++ const char *caller);
18272 + static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
18273 + int nr_cqe, int comp_vector,
18274 + enum ib_poll_context poll_ctx)
18275 + {
18276 +- return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
18277 +- NULL);
18278 ++ return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
18279 ++ KBUILD_MODNAME);
18280 + }
18281 +
18282 + struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
18283 +@@ -3895,26 +3864,7 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
18284 + KBUILD_MODNAME);
18285 + }
18286 +
18287 +-/**
18288 +- * ib_free_cq_user - Free kernel/user CQ
18289 +- * @cq: The CQ to free
18290 +- * @udata: Valid user data or NULL for kernel objects
18291 +- *
18292 +- * NOTE: This function shouldn't be called on shared CQs.
18293 +- */
18294 +-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
18295 +-
18296 +-/**
18297 +- * ib_free_cq - Free kernel CQ
18298 +- * @cq: The CQ to free
18299 +- *
18300 +- * NOTE: for user cq use ib_free_cq_user with valid udata!
18301 +- */
18302 +-static inline void ib_free_cq(struct ib_cq *cq)
18303 +-{
18304 +- ib_free_cq_user(cq, NULL);
18305 +-}
18306 +-
18307 ++void ib_free_cq(struct ib_cq *cq);
18308 + int ib_process_cq_direct(struct ib_cq *cq, int budget);
18309 +
18310 + /**
18311 +@@ -3972,7 +3922,9 @@ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
18312 + */
18313 + static inline void ib_destroy_cq(struct ib_cq *cq)
18314 + {
18315 +- ib_destroy_cq_user(cq, NULL);
18316 ++ int ret = ib_destroy_cq_user(cq, NULL);
18317 ++
18318 ++ WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
18319 + }
18320 +
18321 + /**
18322 +diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
18323 +index 731ac09ed2313..5b567b43e1b16 100644
18324 +--- a/include/scsi/scsi_common.h
18325 ++++ b/include/scsi/scsi_common.h
18326 +@@ -25,6 +25,13 @@ scsi_command_size(const unsigned char *cmnd)
18327 + scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
18328 + }
18329 +
18330 ++static inline unsigned char
18331 ++scsi_command_control(const unsigned char *cmnd)
18332 ++{
18333 ++ return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
18334 ++ cmnd[1] : cmnd[COMMAND_SIZE(cmnd[0]) - 1];
18335 ++}
18336 ++
18337 + /* Returns a human-readable name for the device */
18338 + extern const char *scsi_device_type(unsigned type);
18339 +
18340 +diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
18341 +index d16a4229209b2..a2becf13293a3 100644
18342 +--- a/include/sound/hda_codec.h
18343 ++++ b/include/sound/hda_codec.h
18344 +@@ -253,6 +253,7 @@ struct hda_codec {
18345 + unsigned int force_pin_prefix:1; /* Add location prefix */
18346 + unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
18347 + unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
18348 ++ unsigned int forced_resume:1; /* forced resume for jack */
18349 + unsigned int mst_no_extra_pcms:1; /* no backup PCMs for DP-MST */
18350 +
18351 + #ifdef CONFIG_PM
18352 +diff --git a/include/trace/events/target.h b/include/trace/events/target.h
18353 +index 77408edd29d2a..67fad2677ed55 100644
18354 +--- a/include/trace/events/target.h
18355 ++++ b/include/trace/events/target.h
18356 +@@ -141,6 +141,7 @@ TRACE_EVENT(target_sequencer_start,
18357 + __field( unsigned int, opcode )
18358 + __field( unsigned int, data_length )
18359 + __field( unsigned int, task_attribute )
18360 ++ __field( unsigned char, control )
18361 + __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
18362 + __string( initiator, cmd->se_sess->se_node_acl->initiatorname )
18363 + ),
18364 +@@ -151,6 +152,7 @@ TRACE_EVENT(target_sequencer_start,
18365 + __entry->opcode = cmd->t_task_cdb[0];
18366 + __entry->data_length = cmd->data_length;
18367 + __entry->task_attribute = cmd->sam_task_attr;
18368 ++ __entry->control = scsi_command_control(cmd->t_task_cdb);
18369 + memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
18370 + __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
18371 + ),
18372 +@@ -160,9 +162,7 @@ TRACE_EVENT(target_sequencer_start,
18373 + __entry->tag, show_opcode_name(__entry->opcode),
18374 + __entry->data_length, __print_hex(__entry->cdb, 16),
18375 + show_task_attribute_name(__entry->task_attribute),
18376 +- scsi_command_size(__entry->cdb) <= 16 ?
18377 +- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
18378 +- __entry->cdb[1]
18379 ++ __entry->control
18380 + )
18381 + );
18382 +
18383 +@@ -178,6 +178,7 @@ TRACE_EVENT(target_cmd_complete,
18384 + __field( unsigned int, opcode )
18385 + __field( unsigned int, data_length )
18386 + __field( unsigned int, task_attribute )
18387 ++ __field( unsigned char, control )
18388 + __field( unsigned char, scsi_status )
18389 + __field( unsigned char, sense_length )
18390 + __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
18391 +@@ -191,6 +192,7 @@ TRACE_EVENT(target_cmd_complete,
18392 + __entry->opcode = cmd->t_task_cdb[0];
18393 + __entry->data_length = cmd->data_length;
18394 + __entry->task_attribute = cmd->sam_task_attr;
18395 ++ __entry->control = scsi_command_control(cmd->t_task_cdb);
18396 + __entry->scsi_status = cmd->scsi_status;
18397 + __entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ?
18398 + min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0;
18399 +@@ -208,9 +210,7 @@ TRACE_EVENT(target_cmd_complete,
18400 + show_opcode_name(__entry->opcode),
18401 + __entry->data_length, __print_hex(__entry->cdb, 16),
18402 + show_task_attribute_name(__entry->task_attribute),
18403 +- scsi_command_size(__entry->cdb) <= 16 ?
18404 +- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
18405 +- __entry->cdb[1]
18406 ++ __entry->control
18407 + )
18408 + );
18409 +
18410 +diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
18411 +index f9701410d3b52..57a222014cd20 100644
18412 +--- a/include/uapi/linux/pci_regs.h
18413 ++++ b/include/uapi/linux/pci_regs.h
18414 +@@ -76,6 +76,7 @@
18415 + #define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
18416 + #define PCI_LATENCY_TIMER 0x0d /* 8 bits */
18417 + #define PCI_HEADER_TYPE 0x0e /* 8 bits */
18418 ++#define PCI_HEADER_TYPE_MASK 0x7f
18419 + #define PCI_HEADER_TYPE_NORMAL 0
18420 + #define PCI_HEADER_TYPE_BRIDGE 1
18421 + #define PCI_HEADER_TYPE_CARDBUS 2
18422 +diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
18423 +index 7b2d6fc9e6ed7..dc33e3051819d 100644
18424 +--- a/include/uapi/linux/perf_event.h
18425 ++++ b/include/uapi/linux/perf_event.h
18426 +@@ -1155,7 +1155,7 @@ union perf_mem_data_src {
18427 +
18428 + #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
18429 + /* 1 free */
18430 +-#define PERF_MEM_SNOOPX_SHIFT 37
18431 ++#define PERF_MEM_SNOOPX_SHIFT 38
18432 +
18433 + /* locked instruction */
18434 + #define PERF_MEM_LOCK_NA 0x01 /* not available */
18435 +diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
18436 +index b367430e611c7..3d897de890612 100644
18437 +--- a/kernel/bpf/percpu_freelist.c
18438 ++++ b/kernel/bpf/percpu_freelist.c
18439 +@@ -17,6 +17,8 @@ int pcpu_freelist_init(struct pcpu_freelist *s)
18440 + raw_spin_lock_init(&head->lock);
18441 + head->first = NULL;
18442 + }
18443 ++ raw_spin_lock_init(&s->extralist.lock);
18444 ++ s->extralist.first = NULL;
18445 + return 0;
18446 + }
18447 +
18448 +@@ -40,12 +42,50 @@ static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
18449 + raw_spin_unlock(&head->lock);
18450 + }
18451 +
18452 ++static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
18453 ++ struct pcpu_freelist_node *node)
18454 ++{
18455 ++ if (!raw_spin_trylock(&s->extralist.lock))
18456 ++ return false;
18457 ++
18458 ++ pcpu_freelist_push_node(&s->extralist, node);
18459 ++ raw_spin_unlock(&s->extralist.lock);
18460 ++ return true;
18461 ++}
18462 ++
18463 ++static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
18464 ++ struct pcpu_freelist_node *node)
18465 ++{
18466 ++ int cpu, orig_cpu;
18467 ++
18468 ++ orig_cpu = cpu = raw_smp_processor_id();
18469 ++ while (1) {
18470 ++ struct pcpu_freelist_head *head;
18471 ++
18472 ++ head = per_cpu_ptr(s->freelist, cpu);
18473 ++ if (raw_spin_trylock(&head->lock)) {
18474 ++ pcpu_freelist_push_node(head, node);
18475 ++ raw_spin_unlock(&head->lock);
18476 ++ return;
18477 ++ }
18478 ++ cpu = cpumask_next(cpu, cpu_possible_mask);
18479 ++ if (cpu >= nr_cpu_ids)
18480 ++ cpu = 0;
18481 ++
18482 ++ /* cannot lock any per cpu lock, try extralist */
18483 ++ if (cpu == orig_cpu &&
18484 ++ pcpu_freelist_try_push_extra(s, node))
18485 ++ return;
18486 ++ }
18487 ++}
18488 ++
18489 + void __pcpu_freelist_push(struct pcpu_freelist *s,
18490 + struct pcpu_freelist_node *node)
18491 + {
18492 +- struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
18493 +-
18494 +- ___pcpu_freelist_push(head, node);
18495 ++ if (in_nmi())
18496 ++ ___pcpu_freelist_push_nmi(s, node);
18497 ++ else
18498 ++ ___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
18499 + }
18500 +
18501 + void pcpu_freelist_push(struct pcpu_freelist *s,
18502 +@@ -81,7 +121,7 @@ again:
18503 + }
18504 + }
18505 +
18506 +-struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
18507 ++static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
18508 + {
18509 + struct pcpu_freelist_head *head;
18510 + struct pcpu_freelist_node *node;
18511 +@@ -102,8 +142,59 @@ struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
18512 + if (cpu >= nr_cpu_ids)
18513 + cpu = 0;
18514 + if (cpu == orig_cpu)
18515 +- return NULL;
18516 ++ break;
18517 ++ }
18518 ++
18519 ++ /* per cpu lists are all empty, try extralist */
18520 ++ raw_spin_lock(&s->extralist.lock);
18521 ++ node = s->extralist.first;
18522 ++ if (node)
18523 ++ s->extralist.first = node->next;
18524 ++ raw_spin_unlock(&s->extralist.lock);
18525 ++ return node;
18526 ++}
18527 ++
18528 ++static struct pcpu_freelist_node *
18529 ++___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
18530 ++{
18531 ++ struct pcpu_freelist_head *head;
18532 ++ struct pcpu_freelist_node *node;
18533 ++ int orig_cpu, cpu;
18534 ++
18535 ++ orig_cpu = cpu = raw_smp_processor_id();
18536 ++ while (1) {
18537 ++ head = per_cpu_ptr(s->freelist, cpu);
18538 ++ if (raw_spin_trylock(&head->lock)) {
18539 ++ node = head->first;
18540 ++ if (node) {
18541 ++ head->first = node->next;
18542 ++ raw_spin_unlock(&head->lock);
18543 ++ return node;
18544 ++ }
18545 ++ raw_spin_unlock(&head->lock);
18546 ++ }
18547 ++ cpu = cpumask_next(cpu, cpu_possible_mask);
18548 ++ if (cpu >= nr_cpu_ids)
18549 ++ cpu = 0;
18550 ++ if (cpu == orig_cpu)
18551 ++ break;
18552 + }
18553 ++
18554 ++ /* cannot pop from per cpu lists, try extralist */
18555 ++ if (!raw_spin_trylock(&s->extralist.lock))
18556 ++ return NULL;
18557 ++ node = s->extralist.first;
18558 ++ if (node)
18559 ++ s->extralist.first = node->next;
18560 ++ raw_spin_unlock(&s->extralist.lock);
18561 ++ return node;
18562 ++}
18563 ++
18564 ++struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
18565 ++{
18566 ++ if (in_nmi())
18567 ++ return ___pcpu_freelist_pop_nmi(s);
18568 ++ return ___pcpu_freelist_pop(s);
18569 + }
18570 +
18571 + struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
18572 +diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
18573 +index fbf8a8a289791..3c76553cfe571 100644
18574 +--- a/kernel/bpf/percpu_freelist.h
18575 ++++ b/kernel/bpf/percpu_freelist.h
18576 +@@ -13,6 +13,7 @@ struct pcpu_freelist_head {
18577 +
18578 + struct pcpu_freelist {
18579 + struct pcpu_freelist_head __percpu *freelist;
18580 ++ struct pcpu_freelist_head extralist;
18581 + };
18582 +
18583 + struct pcpu_freelist_node {
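The kernel/bpf/percpu_freelist.c change above exists because an NMI handler must never spin on a lock the interrupted context may already hold. The new _nmi push and pop paths therefore only ever trylock: they walk every per-CPU head once per pass, and when all of them are contended they fall back to the new shared extralist, which is itself only trylocked. A rough userspace rendering of that shape, using C11 atomics as stand-in trylocks (all names hypothetical; node linking elided to comments):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define NCPU 4

    struct head {
        atomic_int lock;   /* 0 = unlocked; stand-in for raw_spinlock_t */
        void *first;
    };

    static struct head pcpu[NCPU];  /* per-CPU heads */
    static struct head extra;       /* shared fallback, cf. s->extralist */

    static bool trylock(struct head *h)
    {
        return atomic_exchange(&h->lock, 1) == 0;
    }

    static void unlock(struct head *h)
    {
        atomic_store(&h->lock, 0);
    }

    /* Never spin on one lock: try each head once per pass, then the
     * fallback list; the shape of ___pcpu_freelist_push_nmi(). */
    static void push_nmi_safe(void *node, int this_cpu)
    {
        (void)node;
        for (;;) {
            for (int i = 0; i < NCPU; i++) {
                struct head *h = &pcpu[(this_cpu + i) % NCPU];

                if (trylock(h)) {
                    /* node->next = h->first; h->first = node; */
                    unlock(h);
                    return;
                }
            }
            if (trylock(&extra)) {      /* every head was contended */
                /* link node into the extralist instead */
                unlock(&extra);
                return;
            }
        }
    }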
18584 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
18585 +index 89b07db146763..12eb9e47d101c 100644
18586 +--- a/kernel/bpf/verifier.c
18587 ++++ b/kernel/bpf/verifier.c
18588 +@@ -1470,6 +1470,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
18589 + for (i = 0; i < insn_cnt; i++) {
18590 + u8 code = insn[i].code;
18591 +
18592 ++ if (code == (BPF_JMP | BPF_CALL) &&
18593 ++ insn[i].imm == BPF_FUNC_tail_call &&
18594 ++ insn[i].src_reg != BPF_PSEUDO_CALL)
18595 ++ subprog[cur_subprog].has_tail_call = true;
18596 + if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
18597 + goto next;
18598 + if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
18599 +@@ -2951,6 +2955,31 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
18600 + int ret_prog[MAX_CALL_FRAMES];
18601 +
18602 + process_func:
18603 ++ /* protect against potential stack overflow that might happen when
18604 ++ * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
18605 ++ * depth for such case down to 256 so that the worst case scenario
18606 ++ * would result in 8k stack size (32 which is tailcall limit * 256 =
18607 ++ * 8k).
18608 ++ *
18609 ++ * To get the idea what might happen, see an example:
18610 ++ * func1 -> sub rsp, 128
18611 ++ * subfunc1 -> sub rsp, 256
18612 ++ * tailcall1 -> add rsp, 256
18613 ++ * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
18614 ++ * subfunc2 -> sub rsp, 64
18615 ++ * subfunc22 -> sub rsp, 128
18616 ++ * tailcall2 -> add rsp, 128
18617 ++ * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
18618 ++ *
18619 ++ * tailcall will unwind the current stack frame but it will not get rid
18620 ++ * of caller's stack as shown on the example above.
18621 ++ */
18622 ++ if (idx && subprog[idx].has_tail_call && depth >= 256) {
18623 ++ verbose(env,
18624 ++ "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
18625 ++ depth);
18626 ++ return -EACCES;
18627 ++ }
18628 + /* round up to 32-bytes, since this is granularity
18629 + * of interpreter stack size
18630 + */
18631 +@@ -10862,6 +10891,11 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
18632 + }
18633 +
18634 + if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
18635 ++ if (tgt_prog) {
18636 ++ verbose(env, "can't modify return codes of BPF programs\n");
18637 ++ ret = -EINVAL;
18638 ++ goto out;
18639 ++ }
18640 + ret = check_attach_modify_return(prog, addr);
18641 + if (ret)
18642 + verbose(env, "%s() is not modifiable\n",
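The stack-depth cap in the kernel/bpf/verifier.c hunk is the arithmetic from its own comment made explicit: a tail call reuses the current frame but leaves every parent frame of earlier bpf2bpf calls in place, so with the tail-call limit of 32 and each parent chain capped at 256 bytes, the worst case is bounded by 32 * 256 B = 8192 B = 8 KiB of kernel stack, rather than 32 * 512 B = 16 KiB if the full per-program stack limit were permitted in every frame.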
18643 +diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
18644 +index 683a799618ade..bc827bd547c81 100644
18645 +--- a/kernel/debug/kdb/kdb_io.c
18646 ++++ b/kernel/debug/kdb/kdb_io.c
18647 +@@ -706,12 +706,16 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
18648 + size_avail = sizeof(kdb_buffer) - len;
18649 + goto kdb_print_out;
18650 + }
18651 +- if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
18652 ++ if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) {
18653 + /*
18654 + * This was a interactive search (using '/' at more
18655 +- * prompt) and it has completed. Clear the flag.
18656 ++ * prompt) and it has completed. Replace the \0 with
18657 ++ * its original value to ensure multi-line strings
18658 ++ * are handled properly, and return to normal mode.
18659 + */
18660 ++ *cphold = replaced_byte;
18661 + kdb_grepping_flag = 0;
18662 ++ }
18663 + /*
18664 + * at this point the string is a full line and
18665 + * should be printed, up to the null.
18666 +diff --git a/kernel/events/core.c b/kernel/events/core.c
18667 +index fd8cd00099dae..38eeb297255e4 100644
18668 +--- a/kernel/events/core.c
18669 ++++ b/kernel/events/core.c
18670 +@@ -5852,11 +5852,11 @@ static void perf_pmu_output_stop(struct perf_event *event);
18671 + static void perf_mmap_close(struct vm_area_struct *vma)
18672 + {
18673 + struct perf_event *event = vma->vm_file->private_data;
18674 +-
18675 + struct perf_buffer *rb = ring_buffer_get(event);
18676 + struct user_struct *mmap_user = rb->mmap_user;
18677 + int mmap_locked = rb->mmap_locked;
18678 + unsigned long size = perf_data_size(rb);
18679 ++ bool detach_rest = false;
18680 +
18681 + if (event->pmu->event_unmapped)
18682 + event->pmu->event_unmapped(event, vma->vm_mm);
18683 +@@ -5887,7 +5887,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
18684 + mutex_unlock(&event->mmap_mutex);
18685 + }
18686 +
18687 +- atomic_dec(&rb->mmap_count);
18688 ++ if (atomic_dec_and_test(&rb->mmap_count))
18689 ++ detach_rest = true;
18690 +
18691 + if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
18692 + goto out_put;
18693 +@@ -5896,7 +5897,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
18694 + mutex_unlock(&event->mmap_mutex);
18695 +
18696 + /* If there's still other mmap()s of this buffer, we're done. */
18697 +- if (atomic_read(&rb->mmap_count))
18698 ++ if (!detach_rest)
18699 + goto out_put;
18700 +
18701 + /*
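The kernel/events/core.c fix closes a window in perf_mmap_close(): deciding "was that the last mmap?" with a separate atomic_read() after the decrement can observe a concurrent new mapper and reach the wrong answer, so the decision is now latched at the decrement itself via atomic_dec_and_test(). The pattern in miniature, as a userspace sketch with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Racy: another user can attach between the decrement and the
     * read, so the read no longer tells us whether *we* dropped the
     * last reference. */
    static bool put_ref_racy(atomic_int *refs)
    {
        atomic_fetch_sub(refs, 1);
        return atomic_load(refs) == 0;
    }

    /* Fixed: decide at the decrement, as atomic_dec_and_test() does. */
    static bool put_ref(atomic_int *refs)
    {
        return atomic_fetch_sub(refs, 1) == 1;  /* we took it 1 -> 0 */
    }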
18702 +diff --git a/kernel/fork.c b/kernel/fork.c
18703 +index efc5493203ae0..0074bbe8c66f1 100644
18704 +--- a/kernel/fork.c
18705 ++++ b/kernel/fork.c
18706 +@@ -1830,6 +1830,25 @@ static __always_inline void delayed_free_task(struct task_struct *tsk)
18707 + free_task(tsk);
18708 + }
18709 +
18710 ++static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
18711 ++{
18712 ++ /* Skip if kernel thread */
18713 ++ if (!tsk->mm)
18714 ++ return;
18715 ++
18716 ++ /* Skip if spawning a thread or using vfork */
18717 ++ if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
18718 ++ return;
18719 ++
18720 ++ /* We need to synchronize with __set_oom_adj */
18721 ++ mutex_lock(&oom_adj_mutex);
18722 ++ set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
18723 ++ /* Update the values in case they were changed after copy_signal */
18724 ++ tsk->signal->oom_score_adj = current->signal->oom_score_adj;
18725 ++ tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
18726 ++ mutex_unlock(&oom_adj_mutex);
18727 ++}
18728 ++
18729 + /*
18730 + * This creates a new process as a copy of the old one,
18731 + * but does not actually start it yet.
18732 +@@ -2310,6 +2329,8 @@ static __latent_entropy struct task_struct *copy_process(
18733 + trace_task_newtask(p, clone_flags);
18734 + uprobe_copy_process(p, clone_flags);
18735 +
18736 ++ copy_oom_score_adj(clone_flags, p);
18737 ++
18738 + return p;
18739 +
18740 + bad_fork_cancel_cgroup:
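copy_oom_score_adj() above targets clones that share an mm across process boundaries: plain CLONE_VM without CLONE_THREAD or CLONE_VFORK. Such an mm gets MMF_MULTIPROCESS set, and the child re-reads oom_score_adj/oom_score_adj_min under the new oom_adj_mutex so a racing __set_oom_adj() cannot leave two processes disagreeing about the shared mm. The flag test in isolation, as a sketch using the uapi clone-flag values:

    /* Only a shared mm in a *separate* process qualifies; threads and
     * vfork children are skipped, exactly as in copy_oom_score_adj(). */
    #define CLONE_VM     0x00000100
    #define CLONE_VFORK  0x00004000
    #define CLONE_THREAD 0x00010000

    static int shares_mm_across_processes(unsigned long clone_flags)
    {
        return (clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK))
                == CLONE_VM;
    }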
18741 +diff --git a/kernel/module.c b/kernel/module.c
18742 +index 08c46084d8cca..991395d60f59c 100644
18743 +--- a/kernel/module.c
18744 ++++ b/kernel/module.c
18745 +@@ -91,8 +91,9 @@ EXPORT_SYMBOL_GPL(module_mutex);
18746 + static LIST_HEAD(modules);
18747 +
18748 + /* Work queue for freeing init sections in success case */
18749 +-static struct work_struct init_free_wq;
18750 +-static struct llist_head init_free_list;
18751 ++static void do_free_init(struct work_struct *w);
18752 ++static DECLARE_WORK(init_free_wq, do_free_init);
18753 ++static LLIST_HEAD(init_free_list);
18754 +
18755 + #ifdef CONFIG_MODULES_TREE_LOOKUP
18756 +
18757 +@@ -3551,14 +3552,6 @@ static void do_free_init(struct work_struct *w)
18758 + }
18759 + }
18760 +
18761 +-static int __init modules_wq_init(void)
18762 +-{
18763 +- INIT_WORK(&init_free_wq, do_free_init);
18764 +- init_llist_head(&init_free_list);
18765 +- return 0;
18766 +-}
18767 +-module_init(modules_wq_init);
18768 +-
18769 + /*
18770 + * This is where the real work happens.
18771 + *
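The kernel/module.c hunk swaps runtime initialization (INIT_WORK/init_llist_head from a module_init() initcall) for compile-time initialization (DECLARE_WORK/LLIST_HEAD), so the free-init machinery is valid even if a module finishes loading before that initcall would have run. The ordering point in a generic userspace form, illustrative only:

    #include <stdio.h>

    struct work { void (*fn)(void); };

    static void do_free(void) { puts("freeing init sections"); }

    /* Compile-time init: usable from the first instruction, like the
     * DECLARE_WORK()/LLIST_HEAD() the hunk introduces. */
    static struct work init_free_wq = { .fn = do_free };

    /* The removed pattern left the object zeroed until a late
     * initializer ran, a window in which queueing to it was a bug:
     *
     *     static struct work init_free_wq;           // not yet usable
     *     static void late_init(void) { init_free_wq.fn = do_free; }
     */

    int main(void)
    {
        init_free_wq.fn();   /* safe at any point after startup */
        return 0;
    }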
18772 +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
18773 +index 02ec716a49271..0e60e10ed66a3 100644
18774 +--- a/kernel/power/hibernate.c
18775 ++++ b/kernel/power/hibernate.c
18776 +@@ -851,17 +851,6 @@ static int software_resume(void)
18777 +
18778 + /* Check if the device is there */
18779 + swsusp_resume_device = name_to_dev_t(resume_file);
18780 +-
18781 +- /*
18782 +- * name_to_dev_t is ineffective to verify parition if resume_file is in
18783 +- * integer format. (e.g. major:minor)
18784 +- */
18785 +- if (isdigit(resume_file[0]) && resume_wait) {
18786 +- int partno;
18787 +- while (!get_gendisk(swsusp_resume_device, &partno))
18788 +- msleep(10);
18789 +- }
18790 +-
18791 + if (!swsusp_resume_device) {
18792 + /*
18793 + * Some device discovery might still be in progress; we need
18794 +diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
18795 +index efb792e13fca9..23ec68d8ff3aa 100644
18796 +--- a/kernel/rcu/rcutorture.c
18797 ++++ b/kernel/rcu/rcutorture.c
18798 +@@ -2154,9 +2154,20 @@ static int __init rcu_torture_fwd_prog_init(void)
18799 + return -ENOMEM;
18800 + spin_lock_init(&rfp->rcu_fwd_lock);
18801 + rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
18802 ++ rcu_fwds = rfp;
18803 + return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
18804 + }
18805 +
18806 ++static void rcu_torture_fwd_prog_cleanup(void)
18807 ++{
18808 ++ struct rcu_fwd *rfp;
18809 ++
18810 ++ torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
18811 ++ rfp = rcu_fwds;
18812 ++ rcu_fwds = NULL;
18813 ++ kfree(rfp);
18814 ++}
18815 ++
18816 + /* Callback function for RCU barrier testing. */
18817 + static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
18818 + {
18819 +@@ -2360,7 +2371,7 @@ rcu_torture_cleanup(void)
18820 +
18821 + show_rcu_gp_kthreads();
18822 + rcu_torture_barrier_cleanup();
18823 +- torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
18824 ++ rcu_torture_fwd_prog_cleanup();
18825 + torture_stop_kthread(rcu_torture_stall, stall_task);
18826 + torture_stop_kthread(rcu_torture_writer, writer_task);
18827 +
18828 +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
18829 +index 1e9e500ff7906..572a79b1a8510 100644
18830 +--- a/kernel/rcu/tree.c
18831 ++++ b/kernel/rcu/tree.c
18832 +@@ -1882,7 +1882,7 @@ static void rcu_gp_fqs_loop(void)
18833 + break;
18834 + /* If time for quiescent-state forcing, do it. */
18835 + if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
18836 +- (gf & RCU_GP_FLAG_FQS)) {
18837 ++ (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
18838 + trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
18839 + TPS("fqsstart"));
18840 + rcu_gp_fqs(first_gp_fqs);
18841 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
18842 +index f788cd61df212..1c68621743ac2 100644
18843 +--- a/kernel/sched/core.c
18844 ++++ b/kernel/sched/core.c
18845 +@@ -39,7 +39,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
18846 +
18847 + DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
18848 +
18849 +-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
18850 ++#ifdef CONFIG_SCHED_DEBUG
18851 + /*
18852 + * Debugging: various feature bits
18853 + *
18854 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
18855 +index 6b3b59cc51d6c..f3496556b6992 100644
18856 +--- a/kernel/sched/fair.c
18857 ++++ b/kernel/sched/fair.c
18858 +@@ -1550,7 +1550,7 @@ struct task_numa_env {
18859 +
18860 + static unsigned long cpu_load(struct rq *rq);
18861 + static unsigned long cpu_util(int cpu);
18862 +-static inline long adjust_numa_imbalance(int imbalance, int src_nr_running);
18863 ++static inline long adjust_numa_imbalance(int imbalance, int nr_running);
18864 +
18865 + static inline enum
18866 + numa_type numa_classify(unsigned int imbalance_pct,
18867 +@@ -1927,7 +1927,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
18868 + src_running = env->src_stats.nr_running - 1;
18869 + dst_running = env->dst_stats.nr_running + 1;
18870 + imbalance = max(0, dst_running - src_running);
18871 +- imbalance = adjust_numa_imbalance(imbalance, src_running);
18872 ++ imbalance = adjust_numa_imbalance(imbalance, dst_running);
18873 +
18874 + /* Use idle CPU if there is no imbalance */
18875 + if (!imbalance) {
18876 +@@ -6067,7 +6067,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
18877 + /*
18878 + * Scan the local SMT mask for idle CPUs.
18879 + */
18880 +-static int select_idle_smt(struct task_struct *p, int target)
18881 ++static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
18882 + {
18883 + int cpu;
18884 +
18885 +@@ -6075,7 +6075,8 @@ static int select_idle_smt(struct task_struct *p, int target)
18886 + return -1;
18887 +
18888 + for_each_cpu(cpu, cpu_smt_mask(target)) {
18889 +- if (!cpumask_test_cpu(cpu, p->cpus_ptr))
18890 ++ if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
18891 ++ !cpumask_test_cpu(cpu, sched_domain_span(sd)))
18892 + continue;
18893 + if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
18894 + return cpu;
18895 +@@ -6091,7 +6092,7 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s
18896 + return -1;
18897 + }
18898 +
18899 +-static inline int select_idle_smt(struct task_struct *p, int target)
18900 ++static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
18901 + {
18902 + return -1;
18903 + }
18904 +@@ -6266,7 +6267,7 @@ symmetric:
18905 + if ((unsigned)i < nr_cpumask_bits)
18906 + return i;
18907 +
18908 +- i = select_idle_smt(p, target);
18909 ++ i = select_idle_smt(p, sd, target);
18910 + if ((unsigned)i < nr_cpumask_bits)
18911 + return i;
18912 +
18913 +@@ -6586,7 +6587,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
18914 +
18915 + util = cpu_util_next(cpu, p, cpu);
18916 + cpu_cap = capacity_of(cpu);
18917 +- spare_cap = cpu_cap - util;
18918 ++ spare_cap = cpu_cap;
18919 ++ lsub_positive(&spare_cap, util);
18920 +
18921 + /*
18922 + * Skip CPUs that cannot satisfy the capacity request.
18923 +@@ -8943,7 +8945,7 @@ next_group:
18924 + }
18925 + }
18926 +
18927 +-static inline long adjust_numa_imbalance(int imbalance, int src_nr_running)
18928 ++static inline long adjust_numa_imbalance(int imbalance, int nr_running)
18929 + {
18930 + unsigned int imbalance_min;
18931 +
18932 +@@ -8952,7 +8954,7 @@ static inline long adjust_numa_imbalance(int imbalance, int src_nr_running)
18933 + * tasks that remain local when the source domain is almost idle.
18934 + */
18935 + imbalance_min = 2;
18936 +- if (src_nr_running <= imbalance_min)
18937 ++ if (nr_running <= imbalance_min)
18938 + return 0;
18939 +
18940 + return imbalance;
18941 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
18942 +index c82857e2e288a..0b1485ac19c4e 100644
18943 +--- a/kernel/sched/sched.h
18944 ++++ b/kernel/sched/sched.h
18945 +@@ -1600,7 +1600,7 @@ enum {
18946 +
18947 + #undef SCHED_FEAT
18948 +
18949 +-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
18950 ++#ifdef CONFIG_SCHED_DEBUG
18951 +
18952 + /*
18953 + * To support run-time toggling of sched features, all the translation units
18954 +@@ -1608,6 +1608,7 @@ enum {
18955 + */
18956 + extern const_debug unsigned int sysctl_sched_features;
18957 +
18958 ++#ifdef CONFIG_JUMP_LABEL
18959 + #define SCHED_FEAT(name, enabled) \
18960 + static __always_inline bool static_branch_##name(struct static_key *key) \
18961 + { \
18962 +@@ -1620,7 +1621,13 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
18963 + extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
18964 + #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
18965 +
18966 +-#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
18967 ++#else /* !CONFIG_JUMP_LABEL */
18968 ++
18969 ++#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
18970 ++
18971 ++#endif /* CONFIG_JUMP_LABEL */
18972 ++
18973 ++#else /* !SCHED_DEBUG */
18974 +
18975 + /*
18976 + * Each translation unit has its own copy of sysctl_sched_features to allow
18977 +@@ -1636,7 +1643,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
18978 +
18979 + #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
18980 +
18981 +-#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
18982 ++#endif /* SCHED_DEBUG */
18983 +
18984 + extern struct static_key_false sched_numa_balancing;
18985 + extern struct static_key_false sched_schedstats;
18986 +diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
18987 +index c6cca0d1d5840..c8892156db341 100644
18988 +--- a/kernel/trace/trace_events_synth.c
18989 ++++ b/kernel/trace/trace_events_synth.c
18990 +@@ -132,7 +132,7 @@ static int synth_field_string_size(char *type)
18991 + start += sizeof("char[") - 1;
18992 +
18993 + end = strchr(type, ']');
18994 +- if (!end || end < start)
18995 ++ if (!end || end < start || type + strlen(type) > end + 1)
18996 + return -EINVAL;
18997 +
18998 + len = end - start;
18999 +@@ -465,6 +465,7 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
19000 + struct synth_field *field;
19001 + const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
19002 + int len, ret = 0;
19003 ++ ssize_t size;
19004 +
19005 + if (field_type[0] == ';')
19006 + field_type++;
19007 +@@ -501,8 +502,14 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
19008 + if (field_type[0] == ';')
19009 + field_type++;
19010 + len = strlen(field_type) + 1;
19011 +- if (array)
19012 +- len += strlen(array);
19013 ++
19014 ++ if (array) {
19015 ++ int l = strlen(array);
19016 ++
19017 ++ if (l && array[l - 1] == ';')
19018 ++ l--;
19019 ++ len += l;
19020 ++ }
19021 + if (prefix)
19022 + len += strlen(prefix);
19023 +
19024 +@@ -520,11 +527,12 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
19025 + field->type[len - 1] = '\0';
19026 + }
19027 +
19028 +- field->size = synth_field_size(field->type);
19029 +- if (!field->size) {
19030 ++ size = synth_field_size(field->type);
19031 ++ if (size <= 0) {
19032 + ret = -EINVAL;
19033 + goto free;
19034 + }
19035 ++ field->size = size;
19036 +
19037 + if (synth_field_is_string(field->type))
19038 + field->is_string = true;
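The kernel/trace/trace_events_synth.c fixes harden the parsing of string fields: synth_field_string_size() now insists the ']' is the final character of the type, a trailing ';' on an array suffix no longer inflates the stored type length, and the size check goes through a signed ssize_t so a parse error (negative) is distinguishable from a zero size. The bracket check on its own, as a small sketch mirroring the exact expression the hunk adds:

    #include <stdbool.h>
    #include <string.h>

    /* Accept only "char[<size>]" with ']' as the final character,
     * rejecting trailing garbage such as "char[8]x". */
    static bool valid_char_array(const char *type)
    {
        const char *start, *end;

        if (strncmp(type, "char[", 5) != 0)
            return false;
        start = type + 5;
        end = strchr(type, ']');
        if (!end || end < start)
            return false;
        if (type + strlen(type) > end + 1)   /* the check the hunk adds */
            return false;
        return end > start;                  /* need at least one digit */
    }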
19039 +diff --git a/lib/crc32.c b/lib/crc32.c
19040 +index 4a20455d1f61e..bf60ef26a45c2 100644
19041 +--- a/lib/crc32.c
19042 ++++ b/lib/crc32.c
19043 +@@ -331,7 +331,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
19044 + return crc;
19045 + }
19046 +
19047 +-#if CRC_LE_BITS == 1
19048 ++#if CRC_BE_BITS == 1
19049 + u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
19050 + {
19051 + return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
19052 +diff --git a/lib/idr.c b/lib/idr.c
19053 +index c2cf2c52bbde5..4d2eef0259d2c 100644
19054 +--- a/lib/idr.c
19055 ++++ b/lib/idr.c
19056 +@@ -470,6 +470,7 @@ alloc:
19057 + goto retry;
19058 + nospc:
19059 + xas_unlock_irqrestore(&xas, flags);
19060 ++ kfree(alloc);
19061 + return -ENOSPC;
19062 + }
19063 + EXPORT_SYMBOL(ida_alloc_range);
19064 +diff --git a/mm/filemap.c b/mm/filemap.c
19065 +index 385759c4ce4be..6c3b879116212 100644
19066 +--- a/mm/filemap.c
19067 ++++ b/mm/filemap.c
19068 +@@ -826,10 +826,10 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
19069 + }
19070 + EXPORT_SYMBOL_GPL(replace_page_cache_page);
19071 +
19072 +-static int __add_to_page_cache_locked(struct page *page,
19073 +- struct address_space *mapping,
19074 +- pgoff_t offset, gfp_t gfp_mask,
19075 +- void **shadowp)
19076 ++noinline int __add_to_page_cache_locked(struct page *page,
19077 ++ struct address_space *mapping,
19078 ++ pgoff_t offset, gfp_t gfp_mask,
19079 ++ void **shadowp)
19080 + {
19081 + XA_STATE(xas, &mapping->i_pages, offset);
19082 + int huge = PageHuge(page);
19083 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
19084 +index 74300e337c3c7..358403422104b 100644
19085 +--- a/mm/huge_memory.c
19086 ++++ b/mm/huge_memory.c
19087 +@@ -2449,7 +2449,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
19088 +
19089 + ClearPageCompound(head);
19090 +
19091 +- split_page_owner(head, HPAGE_PMD_ORDER);
19092 ++ split_page_owner(head, HPAGE_PMD_NR);
19093 +
19094 + /* See comment in __split_huge_page_tail() */
19095 + if (PageAnon(head)) {
19096 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
19097 +index 13f559af1ab6a..6795bdf662566 100644
19098 +--- a/mm/memcontrol.c
19099 ++++ b/mm/memcontrol.c
19100 +@@ -5276,7 +5276,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
19101 + struct page *page = NULL;
19102 + swp_entry_t ent = pte_to_swp_entry(ptent);
19103 +
19104 +- if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
19105 ++ if (!(mc.flags & MOVE_ANON))
19106 + return NULL;
19107 +
19108 + /*
19109 +@@ -5295,6 +5295,9 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
19110 + return page;
19111 + }
19112 +
19113 ++ if (non_swap_entry(ent))
19114 ++ return NULL;
19115 ++
19116 + /*
19117 + * Because lookup_swap_cache() updates some statistics counter,
19118 + * we call find_get_page() with swapper_space directly.
19119 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
19120 +index 6e94962893ee8..67e5bb0900b37 100644
19121 +--- a/mm/oom_kill.c
19122 ++++ b/mm/oom_kill.c
19123 +@@ -64,6 +64,8 @@ int sysctl_oom_dump_tasks = 1;
19124 + * and mark_oom_victim
19125 + */
19126 + DEFINE_MUTEX(oom_lock);
19127 ++/* Serializes oom_score_adj and oom_score_adj_min updates */
19128 ++DEFINE_MUTEX(oom_adj_mutex);
19129 +
19130 + static inline bool is_memcg_oom(struct oom_control *oc)
19131 + {
19132 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
19133 +index 43f6d91f57156..8cc774340d490 100644
19134 +--- a/mm/page_alloc.c
19135 ++++ b/mm/page_alloc.c
19136 +@@ -3213,7 +3213,7 @@ void split_page(struct page *page, unsigned int order)
19137 +
19138 + for (i = 1; i < (1 << order); i++)
19139 + set_page_refcounted(page + i);
19140 +- split_page_owner(page, order);
19141 ++ split_page_owner(page, 1 << order);
19142 + }
19143 + EXPORT_SYMBOL_GPL(split_page);
19144 +
19145 +@@ -3487,7 +3487,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
19146 +
19147 + #endif /* CONFIG_FAIL_PAGE_ALLOC */
19148 +
19149 +-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
19150 ++noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
19151 + {
19152 + return __should_fail_alloc_page(gfp_mask, order);
19153 + }
19154 +diff --git a/mm/page_owner.c b/mm/page_owner.c
19155 +index 3604615094235..4ca3051a10358 100644
19156 +--- a/mm/page_owner.c
19157 ++++ b/mm/page_owner.c
19158 +@@ -204,7 +204,7 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
19159 + page_owner->last_migrate_reason = reason;
19160 + }
19161 +
19162 +-void __split_page_owner(struct page *page, unsigned int order)
19163 ++void __split_page_owner(struct page *page, unsigned int nr)
19164 + {
19165 + int i;
19166 + struct page_ext *page_ext = lookup_page_ext(page);
19167 +@@ -213,7 +213,7 @@ void __split_page_owner(struct page *page, unsigned int order)
19168 + if (unlikely(!page_ext))
19169 + return;
19170 +
19171 +- for (i = 0; i < (1 << order); i++) {
19172 ++ for (i = 0; i < nr; i++) {
19173 + page_owner = get_page_owner(page_ext);
19174 + page_owner->order = 0;
19175 + page_ext = page_ext_next(page_ext);
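[The hunks above change __split_page_owner() to take a page count instead of an order, so the THP path can pass HPAGE_PMD_NR directly while split_page() converts with 1 << order. A toy model of the interface change, user-space and illustrative only.]

        #include <stdio.h>

        static void split_owner(unsigned int nr)        /* was: unsigned int order */
        {
                unsigned int i;

                for (i = 0; i < nr; i++)
                        printf("reset owner of subpage %u\n", i);
        }

        int main(void)
        {
                unsigned int order = 2;

                split_owner(1u << order);       /* callers now convert order -> count */
                return 0;
        }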
19176 +diff --git a/mm/swapfile.c b/mm/swapfile.c
19177 +index 26707c5dc9fce..605294e4df684 100644
19178 +--- a/mm/swapfile.c
19179 ++++ b/mm/swapfile.c
19180 +@@ -3336,7 +3336,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
19181 + error = inode_drain_writes(inode);
19182 + if (error) {
19183 + inode->i_flags &= ~S_SWAPFILE;
19184 +- goto bad_swap_unlock_inode;
19185 ++ goto free_swap_address_space;
19186 + }
19187 +
19188 + mutex_lock(&swapon_mutex);
19189 +@@ -3361,6 +3361,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
19190 +
19191 + error = 0;
19192 + goto out;
19193 ++free_swap_address_space:
19194 ++ exit_swap_address_space(p->type);
19195 + bad_swap_unlock_inode:
19196 + inode_unlock(inode);
19197 + bad_swap:
19198 +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
19199 +index fc28dc201b936..131d29e902a30 100644
19200 +--- a/net/bluetooth/hci_core.c
19201 ++++ b/net/bluetooth/hci_core.c
19202 +@@ -3280,6 +3280,16 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
19203 + }
19204 + }
19205 +
19206 ++static void hci_suspend_clear_tasks(struct hci_dev *hdev)
19207 ++{
19208 ++ int i;
19209 ++
19210 ++ for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
19211 ++ clear_bit(i, hdev->suspend_tasks);
19212 ++
19213 ++ wake_up(&hdev->suspend_wait_q);
19214 ++}
19215 ++
19216 + static int hci_suspend_wait_event(struct hci_dev *hdev)
19217 + {
19218 + #define WAKE_COND \
19219 +@@ -3608,6 +3618,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
19220 +
19221 + cancel_work_sync(&hdev->power_on);
19222 +
19223 ++ hci_suspend_clear_tasks(hdev);
19224 + unregister_pm_notifier(&hdev->suspend_notifier);
19225 + cancel_work_sync(&hdev->suspend_prepare);
19226 +
19227 +diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
19228 +index c7fc28a465fdb..fa66e27b73635 100644
19229 +--- a/net/bluetooth/l2cap_sock.c
19230 ++++ b/net/bluetooth/l2cap_sock.c
19231 +@@ -1521,8 +1521,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
19232 +
19233 + parent = bt_sk(sk)->parent;
19234 +
19235 +- sock_set_flag(sk, SOCK_ZAPPED);
19236 +-
19237 + switch (chan->state) {
19238 + case BT_OPEN:
19239 + case BT_BOUND:
19240 +@@ -1549,8 +1547,11 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
19241 +
19242 + break;
19243 + }
19244 +-
19245 + release_sock(sk);
19246 ++
19247 ++ /* Only zap after cleanup to avoid a use-after-free race */
19248 ++ sock_set_flag(sk, SOCK_ZAPPED);
19249 ++
19250 + }
19251 +
19252 + static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
19253 +diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
19254 +index 12a4f4d936810..3fda71a8579d1 100644
19255 +--- a/net/bridge/netfilter/ebt_dnat.c
19256 ++++ b/net/bridge/netfilter/ebt_dnat.c
19257 +@@ -21,7 +21,7 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
19258 + {
19259 + const struct ebt_nat_info *info = par->targinfo;
19260 +
19261 +- if (skb_ensure_writable(skb, ETH_ALEN))
19262 ++ if (skb_ensure_writable(skb, 0))
19263 + return EBT_DROP;
19264 +
19265 + ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
19266 +diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
19267 +index 0cad62a4052b9..307790562b492 100644
19268 +--- a/net/bridge/netfilter/ebt_redirect.c
19269 ++++ b/net/bridge/netfilter/ebt_redirect.c
19270 +@@ -21,7 +21,7 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
19271 + {
19272 + const struct ebt_redirect_info *info = par->targinfo;
19273 +
19274 +- if (skb_ensure_writable(skb, ETH_ALEN))
19275 ++ if (skb_ensure_writable(skb, 0))
19276 + return EBT_DROP;
19277 +
19278 + if (xt_hooknum(par) != NF_BR_BROUTING)
19279 +diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
19280 +index 27443bf229a3b..7dfbcdfc30e5d 100644
19281 +--- a/net/bridge/netfilter/ebt_snat.c
19282 ++++ b/net/bridge/netfilter/ebt_snat.c
19283 +@@ -22,7 +22,7 @@ ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
19284 + {
19285 + const struct ebt_nat_info *info = par->targinfo;
19286 +
19287 +- if (skb_ensure_writable(skb, ETH_ALEN * 2))
19288 ++ if (skb_ensure_writable(skb, 0))
19289 + return EBT_DROP;
19290 +
19291 + ether_addr_copy(eth_hdr(skb)->h_source, info->mac);
19292 +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
19293 +index a8dd956b5e8e1..916fdf2464bc2 100644
19294 +--- a/net/can/j1939/transport.c
19295 ++++ b/net/can/j1939/transport.c
19296 +@@ -580,6 +580,7 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
19297 + skb->dev = priv->ndev;
19298 + can_skb_reserve(skb);
19299 + can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
19300 ++ can_skb_prv(skb)->skbcnt = 0;
19301 + /* reserve CAN header */
19302 + skb_reserve(skb, offsetof(struct can_frame, data));
19303 +
19304 +@@ -1487,6 +1488,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
19305 + skb->dev = priv->ndev;
19306 + can_skb_reserve(skb);
19307 + can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
19308 ++ can_skb_prv(skb)->skbcnt = 0;
19309 + skcb = j1939_skb_to_cb(skb);
19310 + memcpy(skcb, rel_skcb, sizeof(*skcb));
19311 +
19312 +diff --git a/net/core/filter.c b/net/core/filter.c
19313 +index 0261531d4fda6..3e4de9e461bd0 100644
19314 +--- a/net/core/filter.c
19315 ++++ b/net/core/filter.c
19316 +@@ -4323,7 +4323,8 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
19317 + cmpxchg(&sk->sk_pacing_status,
19318 + SK_PACING_NONE,
19319 + SK_PACING_NEEDED);
19320 +- sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
19321 ++ sk->sk_max_pacing_rate = (val == ~0U) ?
19322 ++ ~0UL : (unsigned int)val;
19323 + sk->sk_pacing_rate = min(sk->sk_pacing_rate,
19324 + sk->sk_max_pacing_rate);
19325 + break;
19326 +diff --git a/net/core/skmsg.c b/net/core/skmsg.c
19327 +index 6a32a1fd34f8c..053472c48354b 100644
19328 +--- a/net/core/skmsg.c
19329 ++++ b/net/core/skmsg.c
19330 +@@ -662,15 +662,16 @@ static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
19331 + {
19332 + int ret;
19333 +
19334 ++ /* strparser clones the skb before handing it to an upper layer,
19335 ++ * meaning we have the same data, but sk is NULL. We do want an
19336 ++ * sk pointer though when we run the BPF program. So we set it
19337 ++ * here and then NULL it to ensure we don't trigger a BUG_ON()
19338 ++ * in skb/sk operations later if kfree_skb is called with a
19339 ++ * valid skb->sk pointer and no destructor assigned.
19340 ++ */
19341 + skb->sk = psock->sk;
19342 + bpf_compute_data_end_sk_skb(skb);
19343 + ret = bpf_prog_run_pin_on_cpu(prog, skb);
19344 +- /* strparser clones the skb before handing it to a upper layer,
19345 +- * meaning skb_orphan has been called. We NULL sk on the way out
19346 +- * to ensure we don't trigger a BUG_ON() in skb/sk operations
19347 +- * later and because we are not charging the memory of this skb
19348 +- * to any socket yet.
19349 +- */
19350 + skb->sk = NULL;
19351 + return ret;
19352 + }
19353 +@@ -795,7 +796,6 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
19354 + }
19355 + prog = READ_ONCE(psock->progs.skb_verdict);
19356 + if (likely(prog)) {
19357 +- skb_orphan(skb);
19358 + tcp_skb_bpf_redirect_clear(skb);
19359 + ret = sk_psock_bpf_run(psock, prog, skb);
19360 + ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
19361 +diff --git a/net/core/sock.c b/net/core/sock.c
19362 +index 78f8736be9c50..25968369fe7f6 100644
19363 +--- a/net/core/sock.c
19364 ++++ b/net/core/sock.c
19365 +@@ -777,7 +777,6 @@ static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
19366 + } else {
19367 + sock_reset_flag(sk, SOCK_RCVTSTAMP);
19368 + sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
19369 +- sock_reset_flag(sk, SOCK_TSTAMP_NEW);
19370 + }
19371 + }
19372 +
19373 +@@ -1007,8 +1006,6 @@ set_sndbuf:
19374 + __sock_set_timestamps(sk, valbool, true, true);
19375 + break;
19376 + case SO_TIMESTAMPING_NEW:
19377 +- sock_set_flag(sk, SOCK_TSTAMP_NEW);
19378 +- /* fall through */
19379 + case SO_TIMESTAMPING_OLD:
19380 + if (val & ~SOF_TIMESTAMPING_MASK) {
19381 + ret = -EINVAL;
19382 +@@ -1037,16 +1034,14 @@ set_sndbuf:
19383 + }
19384 +
19385 + sk->sk_tsflags = val;
19386 ++ sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
19387 ++
19388 + if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
19389 + sock_enable_timestamp(sk,
19390 + SOCK_TIMESTAMPING_RX_SOFTWARE);
19391 +- else {
19392 +- if (optname == SO_TIMESTAMPING_NEW)
19393 +- sock_reset_flag(sk, SOCK_TSTAMP_NEW);
19394 +-
19395 ++ else
19396 + sock_disable_timestamp(sk,
19397 + (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
19398 +- }
19399 + break;
19400 +
19401 + case SO_RCVLOWAT:
19402 +@@ -1189,7 +1184,7 @@ set_sndbuf:
19403 +
19404 + case SO_MAX_PACING_RATE:
19405 + {
19406 +- unsigned long ulval = (val == ~0U) ? ~0UL : val;
19407 ++ unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
19408 +
19409 + if (sizeof(ulval) != sizeof(val) &&
19410 + optlen >= sizeof(ulval) &&
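[Both pacing-rate hunks above add an "(unsigned int)val" cast before widening to unsigned long. Without it, a negative 32-bit value sign-extends into a huge 64-bit rate. A standalone demonstration, not kernel code.]

        #include <stdio.h>

        int main(void)
        {
                int val = -2;                   /* not the ~0U "unlimited" sentinel */
                unsigned long bad = val;        /* sign-extends on 64-bit: 0xfffffffffffffffe */
                unsigned long good = (unsigned int)val; /* zero-extends: 0xfffffffe */

                printf("bad=%lx good=%lx\n", bad, good);
                return 0;
        }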
19411 +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
19412 +index e30515f898023..70a505a713a56 100644
19413 +--- a/net/ipv4/icmp.c
19414 ++++ b/net/ipv4/icmp.c
19415 +@@ -239,7 +239,7 @@ static struct {
19416 + /**
19417 + * icmp_global_allow - Are we allowed to send one more ICMP message ?
19418 + *
19419 +- * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
19420 ++ * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
19421 + * Returns false if we reached the limit and can not send another packet.
19422 + * Note: called with BH disabled
19423 + */
19424 +@@ -267,7 +267,10 @@ bool icmp_global_allow(void)
19425 + }
19426 + credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
19427 + if (credit) {
19428 +- credit--;
19429 ++ /* We want to use a credit of one on average, but need to randomize
19430 ++ * it for security reasons.
19431 ++ */
19432 ++ credit = max_t(int, credit - prandom_u32_max(3), 0);
19433 + rc = true;
19434 + }
19435 + WRITE_ONCE(icmp_global.credit, credit);
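[A toy model of the randomized limiter above: spending 0 to 2 credits at random keeps the average cost near one per message while making the bucket state harder for a remote observer to probe. prandom_u32_max(3) is approximated with rand() here; purely illustrative.]

        #include <stdio.h>
        #include <stdlib.h>
        #include <time.h>

        static unsigned int credit = 50;        /* stands in for icmp_global.credit */

        static int allow_one_message(void)
        {
                unsigned int spend;

                if (!credit)
                        return 0;
                spend = (unsigned int)(rand() % 3);     /* 0, 1 or 2; ~1 on average */
                credit = credit > spend ? credit - spend : 0;
                return 1;
        }

        int main(void)
        {
                int sent = 0;

                srand((unsigned int)time(NULL));
                while (allow_one_message())
                        sent++;
                printf("sent %d messages before running dry\n", sent);
                return 0;
        }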
19436 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
19437 +index 4e31f23e4117e..e70291748889b 100644
19438 +--- a/net/ipv4/ip_gre.c
19439 ++++ b/net/ipv4/ip_gre.c
19440 +@@ -625,9 +625,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
19441 + }
19442 +
19443 + if (dev->header_ops) {
19444 +- /* Need space for new headers */
19445 +- if (skb_cow_head(skb, dev->needed_headroom -
19446 +- (tunnel->hlen + sizeof(struct iphdr))))
19447 ++ if (skb_cow_head(skb, 0))
19448 + goto free_skb;
19449 +
19450 + tnl_params = (const struct iphdr *)skb->data;
19451 +@@ -748,7 +746,11 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
19452 + len = tunnel->tun_hlen - len;
19453 + tunnel->hlen = tunnel->hlen + len;
19454 +
19455 +- dev->needed_headroom = dev->needed_headroom + len;
19456 ++ if (dev->header_ops)
19457 ++ dev->hard_header_len += len;
19458 ++ else
19459 ++ dev->needed_headroom += len;
19460 ++
19461 + if (set_mtu)
19462 + dev->mtu = max_t(int, dev->mtu - len, 68);
19463 +
19464 +@@ -944,6 +946,7 @@ static void __gre_tunnel_init(struct net_device *dev)
19465 + tunnel->parms.iph.protocol = IPPROTO_GRE;
19466 +
19467 + tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
19468 ++ dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
19469 +
19470 + dev->features |= GRE_FEATURES;
19471 + dev->hw_features |= GRE_FEATURES;
19472 +@@ -987,10 +990,14 @@ static int ipgre_tunnel_init(struct net_device *dev)
19473 + return -EINVAL;
19474 + dev->flags = IFF_BROADCAST;
19475 + dev->header_ops = &ipgre_header_ops;
19476 ++ dev->hard_header_len = tunnel->hlen + sizeof(*iph);
19477 ++ dev->needed_headroom = 0;
19478 + }
19479 + #endif
19480 + } else if (!tunnel->collect_md) {
19481 + dev->header_ops = &ipgre_header_ops;
19482 ++ dev->hard_header_len = tunnel->hlen + sizeof(*iph);
19483 ++ dev->needed_headroom = 0;
19484 + }
19485 +
19486 + return ip_tunnel_init(dev);
19487 +diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
19488 +index 7a83f881efa9e..136030ad2e546 100644
19489 +--- a/net/ipv4/netfilter/nf_log_arp.c
19490 ++++ b/net/ipv4/netfilter/nf_log_arp.c
19491 +@@ -43,16 +43,31 @@ static void dump_arp_packet(struct nf_log_buf *m,
19492 + const struct nf_loginfo *info,
19493 + const struct sk_buff *skb, unsigned int nhoff)
19494 + {
19495 +- const struct arphdr *ah;
19496 +- struct arphdr _arph;
19497 + const struct arppayload *ap;
19498 + struct arppayload _arpp;
19499 ++ const struct arphdr *ah;
19500 ++ unsigned int logflags;
19501 ++ struct arphdr _arph;
19502 +
19503 + ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
19504 + if (ah == NULL) {
19505 + nf_log_buf_add(m, "TRUNCATED");
19506 + return;
19507 + }
19508 ++
19509 ++ if (info->type == NF_LOG_TYPE_LOG)
19510 ++ logflags = info->u.log.logflags;
19511 ++ else
19512 ++ logflags = NF_LOG_DEFAULT_MASK;
19513 ++
19514 ++ if (logflags & NF_LOG_MACDECODE) {
19515 ++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
19516 ++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
19517 ++ nf_log_dump_vlan(m, skb);
19518 ++ nf_log_buf_add(m, "MACPROTO=%04x ",
19519 ++ ntohs(eth_hdr(skb)->h_proto));
19520 ++ }
19521 ++
19522 + nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d",
19523 + ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));
19524 +
19525 +diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
19526 +index 0c72156130b68..d07583fac8f8c 100644
19527 +--- a/net/ipv4/netfilter/nf_log_ipv4.c
19528 ++++ b/net/ipv4/netfilter/nf_log_ipv4.c
19529 +@@ -284,8 +284,10 @@ static void dump_ipv4_mac_header(struct nf_log_buf *m,
19530 +
19531 + switch (dev->type) {
19532 + case ARPHRD_ETHER:
19533 +- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
19534 +- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
19535 ++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
19536 ++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
19537 ++ nf_log_dump_vlan(m, skb);
19538 ++ nf_log_buf_add(m, "MACPROTO=%04x ",
19539 + ntohs(eth_hdr(skb)->h_proto));
19540 + return;
19541 + default:
19542 +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
19543 +index 134e923822750..355c4499fa1b5 100644
19544 +--- a/net/ipv4/nexthop.c
19545 ++++ b/net/ipv4/nexthop.c
19546 +@@ -842,7 +842,7 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
19547 + remove_nh_grp_entry(net, nhge, nlinfo);
19548 +
19549 + /* make sure all see the newly published array before releasing rtnl */
19550 +- synchronize_rcu();
19551 ++ synchronize_net();
19552 + }
19553 +
19554 + static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
19555 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
19556 +index 37f1288894747..71a9b11b7126d 100644
19557 +--- a/net/ipv4/route.c
19558 ++++ b/net/ipv4/route.c
19559 +@@ -2764,10 +2764,12 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
19560 + if (IS_ERR(rt))
19561 + return rt;
19562 +
19563 +- if (flp4->flowi4_proto)
19564 ++ if (flp4->flowi4_proto) {
19565 ++ flp4->flowi4_oif = rt->dst.dev->ifindex;
19566 + rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
19567 + flowi4_to_flowi(flp4),
19568 + sk, 0);
19569 ++ }
19570 +
19571 + return rt;
19572 + }
19573 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
19574 +index 518f04355fbf3..02cc972edd0b0 100644
19575 +--- a/net/ipv4/tcp_input.c
19576 ++++ b/net/ipv4/tcp_input.c
19577 +@@ -5716,6 +5716,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
19578 + tcp_data_snd_check(sk);
19579 + if (!inet_csk_ack_scheduled(sk))
19580 + goto no_ack;
19581 ++ } else {
19582 ++ tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
19583 + }
19584 +
19585 + __tcp_ack_snd_check(sk, 0);
19586 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
19587 +index 3c32dcb5fd8e2..c0a0d41b6c37d 100644
19588 +--- a/net/ipv6/ip6_fib.c
19589 ++++ b/net/ipv6/ip6_fib.c
19590 +@@ -2617,8 +2617,10 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
19591 + iter->skip = *pos;
19592 +
19593 + if (iter->tbl) {
19594 ++ loff_t p = 0;
19595 ++
19596 + ipv6_route_seq_setup_walk(iter, net);
19597 +- return ipv6_route_seq_next(seq, NULL, pos);
19598 ++ return ipv6_route_seq_next(seq, NULL, &p);
19599 + } else {
19600 + return NULL;
19601 + }
19602 +diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
19603 +index da64550a57075..8210ff34ed9b7 100644
19604 +--- a/net/ipv6/netfilter/nf_log_ipv6.c
19605 ++++ b/net/ipv6/netfilter/nf_log_ipv6.c
19606 +@@ -297,9 +297,11 @@ static void dump_ipv6_mac_header(struct nf_log_buf *m,
19607 +
19608 + switch (dev->type) {
19609 + case ARPHRD_ETHER:
19610 +- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
19611 +- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
19612 +- ntohs(eth_hdr(skb)->h_proto));
19613 ++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
19614 ++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
19615 ++ nf_log_dump_vlan(m, skb);
19616 ++ nf_log_buf_add(m, "MACPROTO=%04x ",
19617 ++ ntohs(eth_hdr(skb)->h_proto));
19618 + return;
19619 + default:
19620 + break;
19621 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
19622 +index 1079a07e43e49..d74cfec685477 100644
19623 +--- a/net/mac80211/cfg.c
19624 ++++ b/net/mac80211/cfg.c
19625 +@@ -709,7 +709,8 @@ void sta_set_rate_info_tx(struct sta_info *sta,
19626 + u16 brate;
19627 +
19628 + sband = ieee80211_get_sband(sta->sdata);
19629 +- if (sband) {
19630 ++ WARN_ON_ONCE(sband && !sband->bitrates);
19631 ++ if (sband && sband->bitrates) {
19632 + brate = sband->bitrates[rate->idx].bitrate;
19633 + rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
19634 + }
19635 +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
19636 +index 05e966f1609e2..b93916c382cdb 100644
19637 +--- a/net/mac80211/sta_info.c
19638 ++++ b/net/mac80211/sta_info.c
19639 +@@ -2122,6 +2122,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
19640 + int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);
19641 +
19642 + sband = local->hw.wiphy->bands[band];
19643 ++
19644 ++ if (WARN_ON_ONCE(!sband->bitrates))
19645 ++ break;
19646 ++
19647 + brate = sband->bitrates[rate_idx].bitrate;
19648 + if (rinfo->bw == RATE_INFO_BW_5)
19649 + shift = 2;
19650 +diff --git a/net/mptcp/options.c b/net/mptcp/options.c
19651 +index 8f940be42f98a..430a9213a7bf9 100644
19652 +--- a/net/mptcp/options.c
19653 ++++ b/net/mptcp/options.c
19654 +@@ -296,6 +296,7 @@ void mptcp_get_options(const struct sk_buff *skb,
19655 + mp_opt->mp_capable = 0;
19656 + mp_opt->mp_join = 0;
19657 + mp_opt->add_addr = 0;
19658 ++ mp_opt->ahmac = 0;
19659 + mp_opt->rm_addr = 0;
19660 + mp_opt->dss = 0;
19661 +
19662 +@@ -517,7 +518,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
19663 + return ret;
19664 + }
19665 +
19666 +- if (subflow->use_64bit_ack) {
19667 ++ if (READ_ONCE(msk->use_64bit_ack)) {
19668 + ack_size = TCPOLEN_MPTCP_DSS_ACK64;
19669 + opts->ext_copy.data_ack = msk->ack_seq;
19670 + opts->ext_copy.ack64 = 1;
19671 +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
19672 +index c6eeaf3e8dcb7..4675a7bbebb15 100644
19673 +--- a/net/mptcp/protocol.h
19674 ++++ b/net/mptcp/protocol.h
19675 +@@ -199,6 +199,7 @@ struct mptcp_sock {
19676 + u32 token;
19677 + unsigned long flags;
19678 + bool can_ack;
19679 ++ bool use_64bit_ack; /* Set when we received a 64-bit DSN */
19680 + spinlock_t join_list_lock;
19681 + struct work_struct work;
19682 + struct list_head conn_list;
19683 +@@ -285,7 +286,6 @@ struct mptcp_subflow_context {
19684 + data_avail : 1,
19685 + rx_eof : 1,
19686 + data_fin_tx_enable : 1,
19687 +- use_64bit_ack : 1, /* Set when we received a 64-bit DSN */
19688 + can_ack : 1; /* only after processing the remote key */
19689 + u64 data_fin_tx_seq;
19690 + u32 remote_nonce;
19691 +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
19692 +index 3838a0b3a21ff..2e145b53b81f4 100644
19693 +--- a/net/mptcp/subflow.c
19694 ++++ b/net/mptcp/subflow.c
19695 +@@ -682,12 +682,11 @@ static enum mapping_status get_mapping_status(struct sock *ssk)
19696 + if (!mpext->dsn64) {
19697 + map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
19698 + mpext->data_seq);
19699 +- subflow->use_64bit_ack = 0;
19700 + pr_debug("expanded seq=%llu", subflow->map_seq);
19701 + } else {
19702 + map_seq = mpext->data_seq;
19703 +- subflow->use_64bit_ack = 1;
19704 + }
19705 ++ WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
19706 +
19707 + if (subflow->map_valid) {
19708 + /* Allow replacing only with an identical map */
19709 +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
19710 +index 412656c34f205..beeafa42aad76 100644
19711 +--- a/net/netfilter/ipvs/ip_vs_ctl.c
19712 ++++ b/net/netfilter/ipvs/ip_vs_ctl.c
19713 +@@ -2471,6 +2471,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
19714 + /* Set timeout values for (tcp tcpfin udp) */
19715 + ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg);
19716 + goto out_unlock;
19717 ++ } else if (!len) {
19718 ++ /* No more commands with len == 0 below */
19719 ++ ret = -EINVAL;
19720 ++ goto out_unlock;
19721 + }
19722 +
19723 + usvc_compat = (struct ip_vs_service_user *)arg;
19724 +@@ -2547,9 +2551,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
19725 + break;
19726 + case IP_VS_SO_SET_DELDEST:
19727 + ret = ip_vs_del_dest(svc, &udest);
19728 +- break;
19729 +- default:
19730 +- ret = -EINVAL;
19731 + }
19732 +
19733 + out_unlock:
19734 +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
19735 +index b00866d777fe0..d2e5a8f644b80 100644
19736 +--- a/net/netfilter/ipvs/ip_vs_xmit.c
19737 ++++ b/net/netfilter/ipvs/ip_vs_xmit.c
19738 +@@ -609,6 +609,8 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
19739 + if (ret == NF_ACCEPT) {
19740 + nf_reset_ct(skb);
19741 + skb_forward_csum(skb);
19742 ++ if (skb->dev)
19743 ++ skb->tstamp = 0;
19744 + }
19745 + return ret;
19746 + }
19747 +@@ -649,6 +651,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
19748 +
19749 + if (!local) {
19750 + skb_forward_csum(skb);
19751 ++ if (skb->dev)
19752 ++ skb->tstamp = 0;
19753 + NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
19754 + NULL, skb_dst(skb)->dev, dst_output);
19755 + } else
19756 +@@ -669,6 +673,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
19757 + if (!local) {
19758 + ip_vs_drop_early_demux_sk(skb);
19759 + skb_forward_csum(skb);
19760 ++ if (skb->dev)
19761 ++ skb->tstamp = 0;
19762 + NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
19763 + NULL, skb_dst(skb)->dev, dst_output);
19764 + } else
19765 +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
19766 +index 1926fd56df56a..848b137151c26 100644
19767 +--- a/net/netfilter/nf_conntrack_proto_tcp.c
19768 ++++ b/net/netfilter/nf_conntrack_proto_tcp.c
19769 +@@ -541,13 +541,20 @@ static bool tcp_in_window(const struct nf_conn *ct,
19770 + swin = win << sender->td_scale;
19771 + sender->td_maxwin = (swin == 0 ? 1 : swin);
19772 + sender->td_maxend = end + sender->td_maxwin;
19773 +- /*
19774 +- * We haven't seen traffic in the other direction yet
19775 +- * but we have to tweak window tracking to pass III
19776 +- * and IV until that happens.
19777 +- */
19778 +- if (receiver->td_maxwin == 0)
19779 ++ if (receiver->td_maxwin == 0) {
19780 ++ /* We haven't seen traffic in the other
19781 ++ * direction yet but we have to tweak window
19782 ++ * tracking to pass III and IV until that
19783 ++ * happens.
19784 ++ */
19785 + receiver->td_end = receiver->td_maxend = sack;
19786 ++ } else if (sack == receiver->td_end + 1) {
19787 ++ /* Likely a reply to a keepalive.
19788 ++ * Needed for III.
19789 ++ */
19790 ++ receiver->td_end++;
19791 ++ }
19792 ++
19793 + }
19794 + } else if (((state->state == TCP_CONNTRACK_SYN_SENT
19795 + && dir == IP_CT_DIR_ORIGINAL)
19796 +diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
19797 +index 2b01a151eaa80..a579e59ee5c5e 100644
19798 +--- a/net/netfilter/nf_dup_netdev.c
19799 ++++ b/net/netfilter/nf_dup_netdev.c
19800 +@@ -19,6 +19,7 @@ static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
19801 + skb_push(skb, skb->mac_len);
19802 +
19803 + skb->dev = dev;
19804 ++ skb->tstamp = 0;
19805 + dev_queue_xmit(skb);
19806 + }
19807 +
19808 +diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
19809 +index ae5628ddbe6d7..fd7c5f0f5c25b 100644
19810 +--- a/net/netfilter/nf_log_common.c
19811 ++++ b/net/netfilter/nf_log_common.c
19812 +@@ -171,6 +171,18 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
19813 + }
19814 + EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
19815 +
19816 ++void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb)
19817 ++{
19818 ++ u16 vid;
19819 ++
19820 ++ if (!skb_vlan_tag_present(skb))
19821 ++ return;
19822 ++
19823 ++ vid = skb_vlan_tag_get(skb);
19824 ++ nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid);
19825 ++}
19826 ++EXPORT_SYMBOL_GPL(nf_log_dump_vlan);
19827 ++
19828 + /* bridge and netdev logging families share this code. */
19829 + void nf_log_l2packet(struct net *net, u_int8_t pf,
19830 + __be16 protocol,
19831 +diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
19832 +index 3087e23297dbf..b77985986b24e 100644
19833 +--- a/net/netfilter/nft_fwd_netdev.c
19834 ++++ b/net/netfilter/nft_fwd_netdev.c
19835 +@@ -138,6 +138,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
19836 + return;
19837 +
19838 + skb->dev = dev;
19839 ++ skb->tstamp = 0;
19840 + neigh_xmit(neigh_table, dev, addr, skb);
19841 + out:
19842 + regs->verdict.code = verdict;
19843 +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
19844 +index e894254c17d43..8709f3d4e7c4b 100644
19845 +--- a/net/nfc/netlink.c
19846 ++++ b/net/nfc/netlink.c
19847 +@@ -1217,7 +1217,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
19848 + u32 idx;
19849 + char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
19850 +
19851 +- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
19852 ++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME])
19853 + return -EINVAL;
19854 +
19855 + idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
19856 +diff --git a/net/sched/act_api.c b/net/sched/act_api.c
19857 +index aa69fc4ce39d9..3715b1261c6f3 100644
19858 +--- a/net/sched/act_api.c
19859 ++++ b/net/sched/act_api.c
19860 +@@ -722,13 +722,6 @@ int tcf_action_destroy(struct tc_action *actions[], int bind)
19861 + return ret;
19862 + }
19863 +
19864 +-static int tcf_action_destroy_1(struct tc_action *a, int bind)
19865 +-{
19866 +- struct tc_action *actions[] = { a, NULL };
19867 +-
19868 +- return tcf_action_destroy(actions, bind);
19869 +-}
19870 +-
19871 + static int tcf_action_put(struct tc_action *p)
19872 + {
19873 + return __tcf_action_put(p, false);
19874 +@@ -1000,13 +993,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
19875 + if (err < 0)
19876 + goto err_mod;
19877 +
19878 +- if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
19879 +- !rcu_access_pointer(a->goto_chain)) {
19880 +- tcf_action_destroy_1(a, bind);
19881 +- NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
19882 +- return ERR_PTR(-EINVAL);
19883 +- }
19884 +-
19885 + if (!name && tb[TCA_ACT_COOKIE])
19886 + tcf_set_action_cookie(&a->act_cookie, cookie);
19887 +
19888 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
19889 +index 0eb4722cf7cd9..1558126af0d4b 100644
19890 +--- a/net/sched/act_ct.c
19891 ++++ b/net/sched/act_ct.c
19892 +@@ -156,11 +156,11 @@ tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
19893 + __be16 target_dst = target.dst.u.udp.port;
19894 +
19895 + if (target_src != tuple->src.u.udp.port)
19896 +- tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
19897 ++ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
19898 + offsetof(struct udphdr, source),
19899 + 0xFFFF, be16_to_cpu(target_src));
19900 + if (target_dst != tuple->dst.u.udp.port)
19901 +- tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
19902 ++ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
19903 + offsetof(struct udphdr, dest),
19904 + 0xFFFF, be16_to_cpu(target_dst));
19905 + }
19906 +diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
19907 +index 23cf8469a2e7c..e167f0ddfbcd4 100644
19908 +--- a/net/sched/act_tunnel_key.c
19909 ++++ b/net/sched/act_tunnel_key.c
19910 +@@ -458,7 +458,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
19911 +
19912 + metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
19913 + 0, flags,
19914 +- key_id, 0);
19915 ++ key_id, opts_len);
19916 + } else {
19917 + NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
19918 + ret = -EINVAL;
19919 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
19920 +index 4619cb3cb0a8f..8bf6bde1cfe59 100644
19921 +--- a/net/sched/cls_api.c
19922 ++++ b/net/sched/cls_api.c
19923 +@@ -3707,7 +3707,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
19924 + entry->gate.num_entries = tcf_gate_num_entries(act);
19925 + err = tcf_gate_get_entries(entry, act);
19926 + if (err)
19927 +- goto err_out;
19928 ++ goto err_out_locked;
19929 + } else {
19930 + err = -EOPNOTSUPP;
19931 + goto err_out_locked;
19932 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
19933 +index f82a2e5999171..49696f464794f 100644
19934 +--- a/net/smc/smc_core.c
19935 ++++ b/net/smc/smc_core.c
19936 +@@ -1595,7 +1595,7 @@ out:
19937 + return rc;
19938 + }
19939 +
19940 +-#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
19941 ++#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
19942 +
19943 + static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
19944 + bool is_dmb, int bufsize)
19945 +diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
19946 +index df5b0a6ea8488..398f1d9521351 100644
19947 +--- a/net/smc/smc_llc.c
19948 ++++ b/net/smc/smc_llc.c
19949 +@@ -233,8 +233,6 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
19950 + default:
19951 + flow->type = SMC_LLC_FLOW_NONE;
19952 + }
19953 +- if (qentry == lgr->delayed_event)
19954 +- lgr->delayed_event = NULL;
19955 + smc_llc_flow_qentry_set(flow, qentry);
19956 + spin_unlock_bh(&lgr->llc_flow_lock);
19957 + return true;
19958 +@@ -1590,13 +1588,12 @@ static void smc_llc_event_work(struct work_struct *work)
19959 + struct smc_llc_qentry *qentry;
19960 +
19961 + if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
19962 +- if (smc_link_usable(lgr->delayed_event->link)) {
19963 +- smc_llc_event_handler(lgr->delayed_event);
19964 +- } else {
19965 +- qentry = lgr->delayed_event;
19966 +- lgr->delayed_event = NULL;
19967 ++ qentry = lgr->delayed_event;
19968 ++ lgr->delayed_event = NULL;
19969 ++ if (smc_link_usable(qentry->link))
19970 ++ smc_llc_event_handler(qentry);
19971 ++ else
19972 + kfree(qentry);
19973 +- }
19974 + }
19975 +
19976 + again:
19977 +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
19978 +index c28051f7d217d..653c317694406 100644
19979 +--- a/net/sunrpc/auth_gss/svcauth_gss.c
19980 ++++ b/net/sunrpc/auth_gss/svcauth_gss.c
19981 +@@ -1104,9 +1104,9 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
19982 + struct gssp_in_token *in_token)
19983 + {
19984 + struct kvec *argv = &rqstp->rq_arg.head[0];
19985 +- unsigned int page_base, length;
19986 +- int pages, i, res;
19987 +- size_t inlen;
19988 ++ unsigned int length, pgto_offs, pgfrom_offs;
19989 ++ int pages, i, res, pgto, pgfrom;
19990 ++ size_t inlen, to_offs, from_offs;
19991 +
19992 + res = gss_read_common_verf(gc, argv, authp, in_handle);
19993 + if (res)
19994 +@@ -1134,17 +1134,24 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
19995 + memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
19996 + inlen -= length;
19997 +
19998 +- i = 1;
19999 +- page_base = rqstp->rq_arg.page_base;
20000 ++ to_offs = length;
20001 ++ from_offs = rqstp->rq_arg.page_base;
20002 + while (inlen) {
20003 +- length = min_t(unsigned int, inlen, PAGE_SIZE);
20004 +- memcpy(page_address(in_token->pages[i]),
20005 +- page_address(rqstp->rq_arg.pages[i]) + page_base,
20006 ++ pgto = to_offs >> PAGE_SHIFT;
20007 ++ pgfrom = from_offs >> PAGE_SHIFT;
20008 ++ pgto_offs = to_offs & ~PAGE_MASK;
20009 ++ pgfrom_offs = from_offs & ~PAGE_MASK;
20010 ++
20011 ++ length = min_t(unsigned int, inlen,
20012 ++ min_t(unsigned int, PAGE_SIZE - pgto_offs,
20013 ++ PAGE_SIZE - pgfrom_offs));
20014 ++ memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
20015 ++ page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
20016 + length);
20017 +
20018 ++ to_offs += length;
20019 ++ from_offs += length;
20020 + inlen -= length;
20021 +- page_base = 0;
20022 +- i++;
20023 + }
20024 + return 0;
20025 + }
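[The rewritten loop above, like the svc_rdma fix that follows, copies between two page arrays whose offsets advance independently, capping each chunk so it never crosses a page boundary on either side. A self-contained sketch of the same arithmetic, assuming 4 KiB pages; PAGE_SIZE/PAGE_SHIFT stand in for the kernel macros.]

        #include <stddef.h>
        #include <string.h>

        #define PAGE_SHIFT 12
        #define PAGE_SIZE  (1UL << PAGE_SHIFT)
        #define PAGE_MASK  (~(PAGE_SIZE - 1))

        static size_t min3(size_t a, size_t b, size_t c)
        {
                size_t m = a < b ? a : b;
                return m < c ? m : c;
        }

        static void copy_across_pages(char **to_pages, size_t to_offs,
                                      char **from_pages, size_t from_offs,
                                      size_t len)
        {
                while (len) {
                        size_t pgto = to_offs >> PAGE_SHIFT;
                        size_t pgfrom = from_offs >> PAGE_SHIFT;
                        size_t to_in = to_offs & ~PAGE_MASK;
                        size_t from_in = from_offs & ~PAGE_MASK;
                        /* never cross a page on either the source or the
                         * destination side in a single memcpy */
                        size_t n = min3(len, PAGE_SIZE - to_in, PAGE_SIZE - from_in);

                        memcpy(to_pages[pgto] + to_in,
                               from_pages[pgfrom] + from_in, n);
                        to_offs += n;
                        from_offs += n;
                        len -= n;
                }
        }

        int main(void)
        {
                char from0[PAGE_SIZE], from1[PAGE_SIZE];
                char to0[PAGE_SIZE], to1[PAGE_SIZE];
                char *from_pages[] = { from0, from1 };
                char *to_pages[] = { to0, to1 };

                memset(from0, 'a', sizeof(from0));
                memset(from1, 'b', sizeof(from1));
                /* copy 5000 bytes starting 100 bytes into the source pages */
                copy_across_pages(to_pages, 0, from_pages, 100, 5000);
                return 0;
        }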
20026 +diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
20027 +index 38e7c3c8c4a9c..e4f410084c748 100644
20028 +--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
20029 ++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
20030 +@@ -637,10 +637,11 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
20031 + while (remaining) {
20032 + len = min_t(u32, PAGE_SIZE - pageoff, remaining);
20033 +
20034 +- memcpy(dst, page_address(*ppages), len);
20035 ++ memcpy(dst, page_address(*ppages) + pageoff, len);
20036 + remaining -= len;
20037 + dst += len;
20038 + pageoff = 0;
20039 ++ ppages++;
20040 + }
20041 + }
20042 +
20043 +diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
20044 +index 383f87bc10615..f69fb54821e6b 100644
20045 +--- a/net/tipc/bcast.c
20046 ++++ b/net/tipc/bcast.c
20047 +@@ -108,6 +108,8 @@ static void tipc_bcbase_select_primary(struct net *net)
20048 + {
20049 + struct tipc_bc_base *bb = tipc_bc_base(net);
20050 + int all_dests = tipc_link_bc_peers(bb->link);
20051 ++ int max_win = tipc_link_max_win(bb->link);
20052 ++ int min_win = tipc_link_min_win(bb->link);
20053 + int i, mtu, prim;
20054 +
20055 + bb->primary_bearer = INVALID_BEARER_ID;
20056 +@@ -121,8 +123,12 @@ static void tipc_bcbase_select_primary(struct net *net)
20057 + continue;
20058 +
20059 + mtu = tipc_bearer_mtu(net, i);
20060 +- if (mtu < tipc_link_mtu(bb->link))
20061 ++ if (mtu < tipc_link_mtu(bb->link)) {
20062 + tipc_link_set_mtu(bb->link, mtu);
20063 ++ tipc_link_set_queue_limits(bb->link,
20064 ++ min_win,
20065 ++ max_win);
20066 ++ }
20067 + bb->bcast_support &= tipc_bearer_bcast_support(net, i);
20068 + if (bb->dests[i] < all_dests)
20069 + continue;
20070 +@@ -585,7 +591,7 @@ static int tipc_bc_link_set_queue_limits(struct net *net, u32 max_win)
20071 + if (max_win > TIPC_MAX_LINK_WIN)
20072 + return -EINVAL;
20073 + tipc_bcast_lock(net);
20074 +- tipc_link_set_queue_limits(l, BCLINK_WIN_MIN, max_win);
20075 ++ tipc_link_set_queue_limits(l, tipc_link_min_win(l), max_win);
20076 + tipc_bcast_unlock(net);
20077 + return 0;
20078 + }
20079 +diff --git a/net/tipc/msg.c b/net/tipc/msg.c
20080 +index 2776a41e0dece..15b24fbcbe970 100644
20081 +--- a/net/tipc/msg.c
20082 ++++ b/net/tipc/msg.c
20083 +@@ -150,7 +150,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
20084 + if (fragid == FIRST_FRAGMENT) {
20085 + if (unlikely(head))
20086 + goto err;
20087 +- frag = skb_unshare(frag, GFP_ATOMIC);
20088 ++ if (skb_cloned(frag))
20089 ++ frag = skb_copy(frag, GFP_ATOMIC);
20090 + if (unlikely(!frag))
20091 + goto err;
20092 + head = *headbuf = frag;
20093 +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
20094 +index 0cbad566f2811..f19416371bb99 100644
20095 +--- a/net/tls/tls_device.c
20096 ++++ b/net/tls/tls_device.c
20097 +@@ -418,14 +418,14 @@ static int tls_push_data(struct sock *sk,
20098 + struct tls_context *tls_ctx = tls_get_ctx(sk);
20099 + struct tls_prot_info *prot = &tls_ctx->prot_info;
20100 + struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
20101 +- int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
20102 + struct tls_record_info *record = ctx->open_record;
20103 + int tls_push_record_flags;
20104 + struct page_frag *pfrag;
20105 + size_t orig_size = size;
20106 + u32 max_open_record_len;
20107 +- int copy, rc = 0;
20108 ++ bool more = false;
20109 + bool done = false;
20110 ++ int copy, rc = 0;
20111 + long timeo;
20112 +
20113 + if (flags &
20114 +@@ -492,9 +492,8 @@ handle_error:
20115 + if (!size) {
20116 + last_record:
20117 + tls_push_record_flags = flags;
20118 +- if (more) {
20119 +- tls_ctx->pending_open_record_frags =
20120 +- !!record->num_frags;
20121 ++ if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
20122 ++ more = true;
20123 + break;
20124 + }
20125 +
20126 +@@ -526,6 +525,8 @@ last_record:
20127 + }
20128 + } while (!done);
20129 +
20130 ++ tls_ctx->pending_open_record_frags = more;
20131 ++
20132 + if (orig_size - size > 0)
20133 + rc = orig_size - size;
20134 +
20135 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
20136 +index 4d7b255067225..47ab86ee192ac 100644
20137 +--- a/net/wireless/nl80211.c
20138 ++++ b/net/wireless/nl80211.c
20139 +@@ -2355,7 +2355,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
20140 + * case we'll continue with more data in the next round,
20141 + * but break unconditionally so unsplit data stops here.
20142 + */
20143 +- state->split_start++;
20144 ++ if (state->split)
20145 ++ state->split_start++;
20146 ++ else
20147 ++ state->split_start = 0;
20148 + break;
20149 + case 9:
20150 + if (rdev->wiphy.extended_capabilities &&
20151 +@@ -4683,16 +4686,14 @@ static int nl80211_parse_he_obss_pd(struct nlattr *attrs,
20152 + if (err)
20153 + return err;
20154 +
20155 +- if (!tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] ||
20156 +- !tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
20157 +- return -EINVAL;
20158 +-
20159 +- he_obss_pd->min_offset =
20160 +- nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
20161 +- he_obss_pd->max_offset =
20162 +- nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
20163 ++ if (tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET])
20164 ++ he_obss_pd->min_offset =
20165 ++ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
20166 ++ if (tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
20167 ++ he_obss_pd->max_offset =
20168 ++ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
20169 +
20170 +- if (he_obss_pd->min_offset >= he_obss_pd->max_offset)
20171 ++ if (he_obss_pd->min_offset > he_obss_pd->max_offset)
20172 + return -EINVAL;
20173 +
20174 + he_obss_pd->enable = true;
20175 +diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
20176 +index c91e91362a0c6..0151bb0b2fc71 100644
20177 +--- a/samples/bpf/xdpsock_user.c
20178 ++++ b/samples/bpf/xdpsock_user.c
20179 +@@ -921,7 +921,7 @@ static void rx_drop_all(void)
20180 + }
20181 + }
20182 +
20183 +-static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb, int batch_size)
20184 ++static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
20185 + {
20186 + u32 idx;
20187 + unsigned int i;
20188 +@@ -934,14 +934,14 @@ static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb, int batch_size)
20189 + for (i = 0; i < batch_size; i++) {
20190 + struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
20191 + idx + i);
20192 +- tx_desc->addr = (frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
20193 ++ tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
20194 + tx_desc->len = PKT_SIZE;
20195 + }
20196 +
20197 + xsk_ring_prod__submit(&xsk->tx, batch_size);
20198 + xsk->outstanding_tx += batch_size;
20199 +- frame_nb += batch_size;
20200 +- frame_nb %= NUM_FRAMES;
20201 ++ *frame_nb += batch_size;
20202 ++ *frame_nb %= NUM_FRAMES;
20203 + complete_tx_only(xsk, batch_size);
20204 + }
20205 +
20206 +@@ -997,7 +997,7 @@ static void tx_only_all(void)
20207 + }
20208 +
20209 + for (i = 0; i < num_socks; i++)
20210 +- tx_only(xsks[i], frame_nb[i], batch_size);
20211 ++ tx_only(xsks[i], &frame_nb[i], batch_size);
20212 +
20213 + pkt_cnt += batch_size;
20214 +
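[The xdpsock change above is a classic pass-by-value bug: tx_only() updated a local copy of frame_nb, so every batch reused the same frames. Minimal reproduction of the pattern, illustrative only.]

        #include <stdio.h>

        #define NUM_FRAMES 8

        static void advance_by_value(unsigned int fn, int batch)
        {
                fn = (fn + batch) % NUM_FRAMES; /* updates a local copy only */
        }

        static void advance_by_ptr(unsigned int *fn, int batch)
        {
                *fn = (*fn + batch) % NUM_FRAMES;       /* visible to the caller */
        }

        int main(void)
        {
                unsigned int fn = 0;

                advance_by_value(fn, 4);
                printf("by value: %u\n", fn);   /* still 0 -- the bug */
                advance_by_ptr(&fn, 4);
                printf("by ptr:   %u\n", fn);   /* 4 -- the fix */
                return 0;
        }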
20215 +diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c
20216 +index a11bf6c5b53b4..cd3f16a6f5caf 100644
20217 +--- a/samples/mic/mpssd/mpssd.c
20218 ++++ b/samples/mic/mpssd/mpssd.c
20219 +@@ -403,9 +403,9 @@ mic_virtio_copy(struct mic_info *mic, int fd,
20220 +
20221 + static inline unsigned _vring_size(unsigned int num, unsigned long align)
20222 + {
20223 +- return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
20224 ++ return _ALIGN_UP(((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
20225 + + align - 1) & ~(align - 1))
20226 +- + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
20227 ++ + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num, 4);
20228 + }
20229 +
20230 + /*
20231 +diff --git a/scripts/package/builddeb b/scripts/package/builddeb
20232 +index 6df3c9f8b2da6..8277144298a00 100755
20233 +--- a/scripts/package/builddeb
20234 ++++ b/scripts/package/builddeb
20235 +@@ -202,8 +202,10 @@ EOF
20236 + done
20237 +
20238 + if [ "$ARCH" != "um" ]; then
20239 +- deploy_kernel_headers debian/linux-headers
20240 +- create_package linux-headers-$version debian/linux-headers
20241 ++ if is_enabled CONFIG_MODULES; then
20242 ++ deploy_kernel_headers debian/linux-headers
20243 ++ create_package linux-headers-$version debian/linux-headers
20244 ++ fi
20245 +
20246 + deploy_libc_headers debian/linux-libc-dev
20247 + create_package linux-libc-dev debian/linux-libc-dev
20248 +diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
20249 +index df1adbfb8ead0..9342517778bf3 100755
20250 +--- a/scripts/package/mkdebian
20251 ++++ b/scripts/package/mkdebian
20252 +@@ -183,13 +183,6 @@ Description: Linux kernel, version $version
20253 + This package contains the Linux kernel, modules and corresponding other
20254 + files, version: $version.
20255 +
20256 +-Package: $kernel_headers_packagename
20257 +-Architecture: $debarch
20258 +-Description: Linux kernel headers for $version on $debarch
20259 +- This package provides kernel header files for $version on $debarch
20260 +- .
20261 +- This is useful for people who need to build external modules
20262 +-
20263 + Package: linux-libc-dev
20264 + Section: devel
20265 + Provides: linux-kernel-headers
20266 +@@ -200,6 +193,18 @@ Description: Linux support headers for userspace development
20267 + Multi-Arch: same
20268 + EOF
20269 +
20270 ++if is_enabled CONFIG_MODULES; then
20271 ++cat <<EOF >> debian/control
20272 ++
20273 ++Package: $kernel_headers_packagename
20274 ++Architecture: $debarch
20275 ++Description: Linux kernel headers for $version on $debarch
20276 ++ This package provides kernel header files for $version on $debarch
20277 ++ .
20278 ++ This is useful for people who need to build external modules
20279 ++EOF
20280 ++fi
20281 ++
20282 + if is_enabled CONFIG_DEBUG_INFO; then
20283 + cat <<EOF >> debian/control
20284 +
20285 +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
20286 +index 011c3c76af865..21989fa0c1074 100644
20287 +--- a/security/integrity/ima/ima_crypto.c
20288 ++++ b/security/integrity/ima/ima_crypto.c
20289 +@@ -829,6 +829,8 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
20290 + /* now accumulate with current aggregate */
20291 + rc = crypto_shash_update(shash, d.digest,
20292 + crypto_shash_digestsize(tfm));
20293 ++ if (rc != 0)
20294 ++ return rc;
20295 + }
20296 + /*
20297 + * Extend cumulative digest over TPM registers 8-9, which contain
20298 +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
20299 +index c1583d98c5e50..0b8f17570f210 100644
20300 +--- a/security/integrity/ima/ima_main.c
20301 ++++ b/security/integrity/ima/ima_main.c
20302 +@@ -531,6 +531,16 @@ int ima_file_hash(struct file *file, char *buf, size_t buf_size)
20303 + return -EOPNOTSUPP;
20304 +
20305 + mutex_lock(&iint->mutex);
20306 ++
20307 ++ /*
20308 ++ * ima_file_hash can be called before ima_collect_measurement has
20309 ++ * been called, so we might not always have a hash.
20310 ++ */
20311 ++ if (!iint->ima_hash) {
20312 ++ mutex_unlock(&iint->mutex);
20313 ++ return -EOPNOTSUPP;
20314 ++ }
20315 ++
20316 + if (buf) {
20317 + size_t copied_size;
20318 +
20319 +diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
20320 +index c8b9c0b315d8f..250a92b187265 100644
20321 +--- a/sound/core/seq/oss/seq_oss.c
20322 ++++ b/sound/core/seq/oss/seq_oss.c
20323 +@@ -174,9 +174,12 @@ odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
20324 + if (snd_BUG_ON(!dp))
20325 + return -ENXIO;
20326 +
20327 +- mutex_lock(&register_mutex);
20328 ++ if (cmd != SNDCTL_SEQ_SYNC &&
20329 ++ mutex_lock_interruptible(&register_mutex))
20330 ++ return -ERESTARTSYS;
20331 + rc = snd_seq_oss_ioctl(dp, cmd, arg);
20332 +- mutex_unlock(&register_mutex);
20333 ++ if (cmd != SNDCTL_SEQ_SYNC)
20334 ++ mutex_unlock(&register_mutex);
20335 + return rc;
20336 + }
20337 +
20338 +diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c
20339 +index 45b740f44c459..c362eb38ab906 100644
20340 +--- a/sound/firewire/bebob/bebob_hwdep.c
20341 ++++ b/sound/firewire/bebob/bebob_hwdep.c
20342 +@@ -36,12 +36,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
20343 + }
20344 +
20345 + memset(&event, 0, sizeof(event));
20346 ++ count = min_t(long, count, sizeof(event.lock_status));
20347 + if (bebob->dev_lock_changed) {
20348 + event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
20349 + event.lock_status.status = (bebob->dev_lock_count > 0);
20350 + bebob->dev_lock_changed = false;
20351 +-
20352 +- count = min_t(long, count, sizeof(event.lock_status));
20353 + }
20354 +
20355 + spin_unlock_irq(&bebob->lock);
20356 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
20357 +index 4c23b169ac67e..cc51ef98752a9 100644
20358 +--- a/sound/pci/hda/hda_intel.c
20359 ++++ b/sound/pci/hda/hda_intel.c
20360 +@@ -1003,12 +1003,14 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
20361 + azx_init_pci(chip);
20362 + hda_intel_init_chip(chip, true);
20363 +
20364 +- if (status && from_rt) {
20365 +- list_for_each_codec(codec, &chip->bus)
20366 +- if (!codec->relaxed_resume &&
20367 +- (status & (1 << codec->addr)))
20368 +- schedule_delayed_work(&codec->jackpoll_work,
20369 +- codec->jackpoll_interval);
20370 ++ if (from_rt) {
20371 ++ list_for_each_codec(codec, &chip->bus) {
20372 ++ if (codec->relaxed_resume)
20373 ++ continue;
20374 ++
20375 ++ if (codec->forced_resume || (status & (1 << codec->addr)))
20376 ++ pm_request_resume(hda_codec_dev(codec));
20377 ++ }
20378 + }
20379 +
20380 + /* power down again for link-controlled chips */
20381 +diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
20382 +index 02cc682caa55a..588059428d8f5 100644
20383 +--- a/sound/pci/hda/hda_jack.c
20384 ++++ b/sound/pci/hda/hda_jack.c
20385 +@@ -275,6 +275,23 @@ int snd_hda_jack_detect_state_mst(struct hda_codec *codec,
20386 + }
20387 + EXPORT_SYMBOL_GPL(snd_hda_jack_detect_state_mst);
20388 +
20389 ++static struct hda_jack_callback *
20390 ++find_callback_from_list(struct hda_jack_tbl *jack,
20391 ++ hda_jack_callback_fn func)
20392 ++{
20393 ++ struct hda_jack_callback *cb;
20394 ++
20395 ++ if (!func)
20396 ++ return NULL;
20397 ++
20398 ++ for (cb = jack->callback; cb; cb = cb->next) {
20399 ++ if (cb->func == func)
20400 ++ return cb;
20401 ++ }
20402 ++
20403 ++ return NULL;
20404 ++}
20405 ++
20406 + /**
20407 + * snd_hda_jack_detect_enable_mst - enable the jack-detection
20408 + * @codec: the HDA codec
20409 +@@ -297,7 +314,10 @@ snd_hda_jack_detect_enable_callback_mst(struct hda_codec *codec, hda_nid_t nid,
20410 + jack = snd_hda_jack_tbl_new(codec, nid, dev_id);
20411 + if (!jack)
20412 + return ERR_PTR(-ENOMEM);
20413 +- if (func) {
20414 ++
20415 ++ callback = find_callback_from_list(jack, func);
20416 ++
20417 ++ if (func && !callback) {
20418 + callback = kzalloc(sizeof(*callback), GFP_KERNEL);
20419 + if (!callback)
20420 + return ERR_PTR(-ENOMEM);
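[The helper added above keeps the callback list free of duplicates when the enable function is called twice for the same pin. The underlying "find or insert" list idiom, as a generic sketch with invented names.]

        #include <stdio.h>
        #include <stdlib.h>

        struct cb {
                void (*func)(void);
                struct cb *next;
        };

        static struct cb *find_or_add(struct cb **head, void (*func)(void))
        {
                struct cb *c;

                for (c = *head; c; c = c->next)
                        if (c->func == func)
                                return c;       /* already registered: reuse it */

                c = calloc(1, sizeof(*c));
                if (!c)
                        return NULL;
                c->func = func;
                c->next = *head;
                *head = c;
                return c;
        }

        static void on_jack(void) { }

        int main(void)
        {
                struct cb *head = NULL;
                struct cb *a = find_or_add(&head, on_jack);
                struct cb *b = find_or_add(&head, on_jack);

                printf("same node reused: %d\n", a == b);       /* 1 */
                return 0;
        }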
20421 +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
20422 +index 6dfa864d3fe7b..a49c322bdbe9d 100644
20423 +--- a/sound/pci/hda/patch_ca0132.c
20424 ++++ b/sound/pci/hda/patch_ca0132.c
20425 +@@ -1065,6 +1065,7 @@ enum {
20426 + QUIRK_R3DI,
20427 + QUIRK_R3D,
20428 + QUIRK_AE5,
20429 ++ QUIRK_AE7,
20430 + };
20431 +
20432 + #ifdef CONFIG_PCI
20433 +@@ -1184,6 +1185,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
20434 + SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
20435 + SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
20436 + SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
20437 ++ SND_PCI_QUIRK(0x1102, 0x0081, "Sound Blaster AE-7", QUIRK_AE7),
20438 + {}
20439 + };
20440 +
20441 +@@ -4675,6 +4677,15 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
20442 + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
20443 + tmp = FLOAT_THREE;
20444 + break;
20445 ++ case QUIRK_AE7:
20446 ++ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
20447 ++ tmp = FLOAT_THREE;
20448 ++ chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
20449 ++ SR_96_000);
20450 ++ chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
20451 ++ SR_96_000);
20452 ++ dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
20453 ++ break;
20454 + default:
20455 + tmp = FLOAT_ONE;
20456 + break;
20457 +@@ -4720,6 +4731,14 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
20458 + case QUIRK_AE5:
20459 + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
20460 + break;
20461 ++ case QUIRK_AE7:
20462 ++ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x3f);
20463 ++ chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
20464 ++ SR_96_000);
20465 ++ chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
20466 ++ SR_96_000);
20467 ++ dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
20468 ++ break;
20469 + default:
20470 + break;
20471 + }
20472 +@@ -4729,7 +4748,10 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
20473 + if (ca0132_quirk(spec) == QUIRK_R3DI)
20474 + chipio_set_conn_rate(codec, 0x0F, SR_96_000);
20475 +
20476 +- tmp = FLOAT_ZERO;
20477 ++ if (ca0132_quirk(spec) == QUIRK_AE7)
20478 ++ tmp = FLOAT_THREE;
20479 ++ else
20480 ++ tmp = FLOAT_ZERO;
20481 + dspio_set_uint_param(codec, 0x80, 0x00, tmp);
20482 +
20483 + switch (ca0132_quirk(spec)) {
20484 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
20485 +index 419f012b9853c..0d3e996beede1 100644
20486 +--- a/sound/pci/hda/patch_hdmi.c
20487 ++++ b/sound/pci/hda/patch_hdmi.c
20488 +@@ -1989,22 +1989,25 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
20489 + int pinctl;
20490 + int err = 0;
20491 +
20492 ++ mutex_lock(&spec->pcm_lock);
20493 + if (hinfo->nid) {
20494 + pcm_idx = hinfo_to_pcm_index(codec, hinfo);
20495 +- if (snd_BUG_ON(pcm_idx < 0))
20496 +- return -EINVAL;
20497 ++ if (snd_BUG_ON(pcm_idx < 0)) {
20498 ++ err = -EINVAL;
20499 ++ goto unlock;
20500 ++ }
20501 + cvt_idx = cvt_nid_to_cvt_index(codec, hinfo->nid);
20502 +- if (snd_BUG_ON(cvt_idx < 0))
20503 +- return -EINVAL;
20504 ++ if (snd_BUG_ON(cvt_idx < 0)) {
20505 ++ err = -EINVAL;
20506 ++ goto unlock;
20507 ++ }
20508 + per_cvt = get_cvt(spec, cvt_idx);
20509 +-
20510 + snd_BUG_ON(!per_cvt->assigned);
20511 + per_cvt->assigned = 0;
20512 + hinfo->nid = 0;
20513 +
20514 + azx_stream(get_azx_dev(substream))->stripe = 0;
20515 +
20516 +- mutex_lock(&spec->pcm_lock);
20517 + snd_hda_spdif_ctls_unassign(codec, pcm_idx);
20518 + clear_bit(pcm_idx, &spec->pcm_in_use);
20519 + pin_idx = hinfo_to_pin_index(codec, hinfo);
20520 +@@ -2034,10 +2037,11 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
20521 + per_pin->setup = false;
20522 + per_pin->channels = 0;
20523 + mutex_unlock(&per_pin->lock);
20524 +- unlock:
20525 +- mutex_unlock(&spec->pcm_lock);
20526 + }
20527 +
20528 ++unlock:
20529 ++ mutex_unlock(&spec->pcm_lock);
20530 ++
20531 + return err;
20532 + }
20533 +
20534 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
20535 +index 601683e05ccca..e9593abd4e232 100644
20536 +--- a/sound/pci/hda/patch_realtek.c
20537 ++++ b/sound/pci/hda/patch_realtek.c
20538 +@@ -1142,6 +1142,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
20539 + codec->single_adc_amp = 1;
20540 + /* FIXME: do we need this for all Realtek codec models? */
20541 + codec->spdif_status_reset = 1;
20542 ++ codec->forced_resume = 1;
20543 + codec->patch_ops = alc_patch_ops;
20544 +
20545 + err = alc_codec_rename_from_preset(codec);
20546 +@@ -1921,6 +1922,8 @@ enum {
20547 + ALC1220_FIXUP_CLEVO_P950,
20548 + ALC1220_FIXUP_CLEVO_PB51ED,
20549 + ALC1220_FIXUP_CLEVO_PB51ED_PINS,
20550 ++ ALC887_FIXUP_ASUS_AUDIO,
20551 ++ ALC887_FIXUP_ASUS_HMIC,
20552 + };
20553 +
20554 + static void alc889_fixup_coef(struct hda_codec *codec,
20555 +@@ -2133,6 +2136,31 @@ static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
20556 + alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
20557 + }
20558 +
20559 ++static void alc887_asus_hp_automute_hook(struct hda_codec *codec,
20560 ++ struct hda_jack_callback *jack)
20561 ++{
20562 ++ struct alc_spec *spec = codec->spec;
20563 ++ unsigned int vref;
20564 ++
20565 ++ snd_hda_gen_hp_automute(codec, jack);
20566 ++
20567 ++ if (spec->gen.hp_jack_present)
20568 ++ vref = AC_PINCTL_VREF_80;
20569 ++ else
20570 ++ vref = AC_PINCTL_VREF_HIZ;
20571 ++ snd_hda_set_pin_ctl(codec, 0x19, PIN_HP | vref);
20572 ++}
20573 ++
20574 ++static void alc887_fixup_asus_jack(struct hda_codec *codec,
20575 ++ const struct hda_fixup *fix, int action)
20576 ++{
20577 ++ struct alc_spec *spec = codec->spec;
20578 ++ if (action != HDA_FIXUP_ACT_PROBE)
20579 ++ return;
20580 ++ snd_hda_set_pin_ctl_cache(codec, 0x1b, PIN_HP);
20581 ++ spec->gen.hp_automute_hook = alc887_asus_hp_automute_hook;
20582 ++}
20583 ++
20584 + static const struct hda_fixup alc882_fixups[] = {
20585 + [ALC882_FIXUP_ABIT_AW9D_MAX] = {
20586 + .type = HDA_FIXUP_PINS,
20587 +@@ -2390,6 +2418,20 @@ static const struct hda_fixup alc882_fixups[] = {
20588 + .chained = true,
20589 + .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
20590 + },
20591 ++ [ALC887_FIXUP_ASUS_AUDIO] = {
20592 ++ .type = HDA_FIXUP_PINS,
20593 ++ .v.pins = (const struct hda_pintbl[]) {
20594 ++ { 0x15, 0x02a14150 }, /* use as headset mic, without its own jack detect */
20595 ++ { 0x19, 0x22219420 },
20596 ++ {}
20597 ++ },
20598 ++ },
20599 ++ [ALC887_FIXUP_ASUS_HMIC] = {
20600 ++ .type = HDA_FIXUP_FUNC,
20601 ++ .v.func = alc887_fixup_asus_jack,
20602 ++ .chained = true,
20603 ++ .chain_id = ALC887_FIXUP_ASUS_AUDIO,
20604 ++ },
20605 + };
20606 +
20607 + static const struct snd_pci_quirk alc882_fixup_tbl[] = {
20608 +@@ -2423,6 +2465,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
20609 + SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
20610 + SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
20611 + SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
20612 ++ SND_PCI_QUIRK(0x1043, 0x2390, "Asus D700SA", ALC887_FIXUP_ASUS_HMIC),
20613 + SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
20614 + SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
20615 + SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
20616 +@@ -6245,6 +6288,7 @@ enum {
20617 + ALC269_FIXUP_LEMOTE_A190X,
20618 + ALC256_FIXUP_INTEL_NUC8_RUGGED,
20619 + ALC255_FIXUP_XIAOMI_HEADSET_MIC,
20620 ++ ALC274_FIXUP_HP_MIC,
20621 + };
20622 +
20623 + static const struct hda_fixup alc269_fixups[] = {
20624 +@@ -7624,6 +7668,14 @@ static const struct hda_fixup alc269_fixups[] = {
20625 + .chained = true,
20626 + .chain_id = ALC289_FIXUP_ASUS_GA401
20627 + },
20628 ++ [ALC274_FIXUP_HP_MIC] = {
20629 ++ .type = HDA_FIXUP_VERBS,
20630 ++ .v.verbs = (const struct hda_verb[]) {
20631 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
20632 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
20633 ++ { }
20634 ++ },
20635 ++ },
20636 + };
20637 +
20638 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
20639 +@@ -7775,6 +7827,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
20640 + SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
20641 + SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
20642 + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
20643 ++ SND_PCI_QUIRK(0x103c, 0x874e, "HP", ALC274_FIXUP_HP_MIC),
20644 ++ SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
20645 + SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
20646 + SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
20647 + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
20648 +@@ -8100,6 +8154,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
20649 + {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
20650 + {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
20651 + {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
20652 ++ {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
20653 + {}
20654 + };
20655 + #define ALC225_STANDARD_PINS \
20656 +@@ -9634,6 +9689,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
20657 + SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
20658 + SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
20659 + SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
20660 ++ SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
20661 + SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
20662 + SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
20663 + SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
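The new ALC274_FIXUP_HP_MIC entry above programs a codec coefficient with a two-step verb sequence: select coefficient index 0x45, then write value 0x5089. A sketch of walking such a zero-terminated verb list; write_verb() is a hypothetical stand-in for the HDA codec command, and the 0x500/0x400 opcodes mirror AC_VERB_SET_COEF_INDEX and AC_VERB_SET_PROC_COEF:

    #include <stdio.h>

    struct hda_verb { unsigned nid, verb, param; };

    /* stand-in for the single-verb HDA codec command */
    static void write_verb(unsigned nid, unsigned verb, unsigned param)
    {
        printf("nid %#x verb %#x param %#x\n", nid, verb, param);
    }

    static void apply_verbs(const struct hda_verb *v)
    {
        for (; v->nid; v++)          /* { 0 } entry terminates the list */
            write_verb(v->nid, v->verb, v->param);
    }

    int main(void)
    {
        static const struct hda_verb hp_mic_fix[] = {
            { 0x20, 0x500, 0x45 },   /* set COEF index */
            { 0x20, 0x400, 0x5089 }, /* write COEF value */
            { 0 }
        };

        apply_verbs(hp_mic_fix);
        return 0;
    }
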
20664 +diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
20665 +index 986a6308818b2..2a8484f37496c 100644
20666 +--- a/sound/soc/codecs/Kconfig
20667 ++++ b/sound/soc/codecs/Kconfig
20668 +@@ -539,6 +539,7 @@ config SND_SOC_CQ0093VC
20669 + config SND_SOC_CROS_EC_CODEC
20670 + tristate "codec driver for ChromeOS EC"
20671 + depends on CROS_EC
20672 ++ select CRYPTO
20673 + select CRYPTO_LIB_SHA256
20674 + help
20675 + If you say yes here you will get support for the
20676 +diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
20677 +index cf071121c8398..531bf32043813 100644
20678 +--- a/sound/soc/codecs/tas2770.c
20679 ++++ b/sound/soc/codecs/tas2770.c
20680 +@@ -16,7 +16,6 @@
20681 + #include <linux/i2c.h>
20682 + #include <linux/gpio.h>
20683 + #include <linux/gpio/consumer.h>
20684 +-#include <linux/pm_runtime.h>
20685 + #include <linux/regulator/consumer.h>
20686 + #include <linux/firmware.h>
20687 + #include <linux/regmap.h>
20688 +@@ -57,7 +56,12 @@ static int tas2770_set_bias_level(struct snd_soc_component *component,
20689 + TAS2770_PWR_CTRL_MASK,
20690 + TAS2770_PWR_CTRL_ACTIVE);
20691 + break;
20692 +-
20693 ++ case SND_SOC_BIAS_STANDBY:
20694 ++ case SND_SOC_BIAS_PREPARE:
20695 ++ snd_soc_component_update_bits(component,
20696 ++ TAS2770_PWR_CTRL,
20697 ++ TAS2770_PWR_CTRL_MASK, TAS2770_PWR_CTRL_MUTE);
20698 ++ break;
20699 + case SND_SOC_BIAS_OFF:
20700 + snd_soc_component_update_bits(component,
20701 + TAS2770_PWR_CTRL,
20702 +@@ -135,23 +139,18 @@ static int tas2770_dac_event(struct snd_soc_dapm_widget *w,
20703 + TAS2770_PWR_CTRL,
20704 + TAS2770_PWR_CTRL_MASK,
20705 + TAS2770_PWR_CTRL_MUTE);
20706 +- if (ret)
20707 +- goto end;
20708 + break;
20709 + case SND_SOC_DAPM_PRE_PMD:
20710 + ret = snd_soc_component_update_bits(component,
20711 + TAS2770_PWR_CTRL,
20712 + TAS2770_PWR_CTRL_MASK,
20713 + TAS2770_PWR_CTRL_SHUTDOWN);
20714 +- if (ret)
20715 +- goto end;
20716 + break;
20717 + default:
20718 + dev_err(tas2770->dev, "Not supported evevt\n");
20719 + return -EINVAL;
20720 + }
20721 +
20722 +-end:
20723 + if (ret < 0)
20724 + return ret;
20725 +
20726 +@@ -243,6 +242,9 @@ static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
20727 + return -EINVAL;
20728 + }
20729 +
20730 ++ if (ret < 0)
20731 ++ return ret;
20732 ++
20733 + tas2770->channel_size = bitwidth;
20734 +
20735 + ret = snd_soc_component_update_bits(component,
20736 +@@ -251,16 +253,15 @@ static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
20737 + TAS2770_TDM_CFG_REG5_50_MASK,
20738 + TAS2770_TDM_CFG_REG5_VSNS_ENABLE |
20739 + tas2770->v_sense_slot);
20740 +- if (ret)
20741 +- goto end;
20742 ++ if (ret < 0)
20743 ++ return ret;
20744 ++
20745 + ret = snd_soc_component_update_bits(component,
20746 + TAS2770_TDM_CFG_REG6,
20747 + TAS2770_TDM_CFG_REG6_ISNS_MASK |
20748 + TAS2770_TDM_CFG_REG6_50_MASK,
20749 + TAS2770_TDM_CFG_REG6_ISNS_ENABLE |
20750 + tas2770->i_sense_slot);
20751 +-
20752 +-end:
20753 + if (ret < 0)
20754 + return ret;
20755 +
20756 +@@ -278,36 +279,35 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
20757 + TAS2770_TDM_CFG_REG0,
20758 + TAS2770_TDM_CFG_REG0_SMP_MASK,
20759 + TAS2770_TDM_CFG_REG0_SMP_48KHZ);
20760 +- if (ret)
20761 +- goto end;
20762 ++ if (ret < 0)
20763 ++ return ret;
20764 ++
20765 + ret = snd_soc_component_update_bits(component,
20766 + TAS2770_TDM_CFG_REG0,
20767 + TAS2770_TDM_CFG_REG0_31_MASK,
20768 + TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
20769 +- if (ret)
20770 +- goto end;
20771 + break;
20772 + case 44100:
20773 + ret = snd_soc_component_update_bits(component,
20774 + TAS2770_TDM_CFG_REG0,
20775 + TAS2770_TDM_CFG_REG0_SMP_MASK,
20776 + TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
20777 +- if (ret)
20778 +- goto end;
20779 ++ if (ret < 0)
20780 ++ return ret;
20781 ++
20782 + ret = snd_soc_component_update_bits(component,
20783 + TAS2770_TDM_CFG_REG0,
20784 + TAS2770_TDM_CFG_REG0_31_MASK,
20785 + TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
20786 +- if (ret)
20787 +- goto end;
20788 + break;
20789 + case 96000:
20790 + ret = snd_soc_component_update_bits(component,
20791 + TAS2770_TDM_CFG_REG0,
20792 + TAS2770_TDM_CFG_REG0_SMP_MASK,
20793 + TAS2770_TDM_CFG_REG0_SMP_48KHZ);
20794 +- if (ret)
20795 +- goto end;
20796 ++ if (ret < 0)
20797 ++ return ret;
20798 ++
20799 + ret = snd_soc_component_update_bits(component,
20800 + TAS2770_TDM_CFG_REG0,
20801 + TAS2770_TDM_CFG_REG0_31_MASK,
20802 +@@ -318,8 +318,9 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
20803 + TAS2770_TDM_CFG_REG0,
20804 + TAS2770_TDM_CFG_REG0_SMP_MASK,
20805 + TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
20806 +- if (ret)
20807 +- goto end;
20808 ++ if (ret < 0)
20809 ++ return ret;
20810 ++
20811 + ret = snd_soc_component_update_bits(component,
20812 + TAS2770_TDM_CFG_REG0,
20813 + TAS2770_TDM_CFG_REG0_31_MASK,
20814 +@@ -330,22 +331,22 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
20815 + TAS2770_TDM_CFG_REG0,
20816 + TAS2770_TDM_CFG_REG0_SMP_MASK,
20817 + TAS2770_TDM_CFG_REG0_SMP_48KHZ);
20818 +- if (ret)
20819 +- goto end;
20820 ++ if (ret < 0)
20821 ++ return ret;
20822 ++
20823 + ret = snd_soc_component_update_bits(component,
20824 + TAS2770_TDM_CFG_REG0,
20825 + TAS2770_TDM_CFG_REG0_31_MASK,
20826 + TAS2770_TDM_CFG_REG0_31_176_4_192KHZ);
20827 +- if (ret)
20828 +- goto end;
20829 + break;
20830 + case 17640:
20831 + ret = snd_soc_component_update_bits(component,
20832 + TAS2770_TDM_CFG_REG0,
20833 + TAS2770_TDM_CFG_REG0_SMP_MASK,
20834 + TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
20835 +- if (ret)
20836 +- goto end;
20837 ++ if (ret < 0)
20838 ++ return ret;
20839 ++
20840 + ret = snd_soc_component_update_bits(component,
20841 + TAS2770_TDM_CFG_REG0,
20842 + TAS2770_TDM_CFG_REG0_31_MASK,
20843 +@@ -355,7 +356,6 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
20844 + ret = -EINVAL;
20845 + }
20846 +
20847 +-end:
20848 + if (ret < 0)
20849 + return ret;
20850 +
20851 +@@ -574,6 +574,8 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
20852 +
20853 + tas2770->component = component;
20854 +
20855 ++ tas2770_reset(tas2770);
20856 ++
20857 + return 0;
20858 + }
20859 +
20860 +@@ -700,29 +702,28 @@ static int tas2770_parse_dt(struct device *dev, struct tas2770_priv *tas2770)
20861 + rc = fwnode_property_read_u32(dev->fwnode, "ti,asi-format",
20862 + &tas2770->asi_format);
20863 + if (rc) {
20864 +- dev_err(tas2770->dev, "Looking up %s property failed %d\n",
20865 +- "ti,asi-format", rc);
20866 +- goto end;
20867 ++ dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
20868 ++ "ti,asi-format");
20869 ++ tas2770->asi_format = 0;
20870 + }
20871 +
20872 + rc = fwnode_property_read_u32(dev->fwnode, "ti,imon-slot-no",
20873 + &tas2770->i_sense_slot);
20874 + if (rc) {
20875 +- dev_err(tas2770->dev, "Looking up %s property failed %d\n",
20876 +- "ti,imon-slot-no", rc);
20877 +- goto end;
20878 ++ dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
20879 ++ "ti,imon-slot-no");
20880 ++ tas2770->i_sense_slot = 0;
20881 + }
20882 +
20883 + rc = fwnode_property_read_u32(dev->fwnode, "ti,vmon-slot-no",
20884 + &tas2770->v_sense_slot);
20885 + if (rc) {
20886 +- dev_err(tas2770->dev, "Looking up %s property failed %d\n",
20887 +- "ti,vmon-slot-no", rc);
20888 +- goto end;
20889 ++ dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
20890 ++ "ti,vmon-slot-no");
20891 ++ tas2770->v_sense_slot = 2;
20892 + }
20893 +
20894 +-end:
20895 +- return rc;
20896 ++ return 0;
20897 + }
20898 +
20899 + static int tas2770_i2c_probe(struct i2c_client *client,
20900 +@@ -770,8 +771,6 @@ static int tas2770_i2c_probe(struct i2c_client *client,
20901 + tas2770->channel_size = 0;
20902 + tas2770->slot_width = 0;
20903 +
20904 +- tas2770_reset(tas2770);
20905 +-
20906 + result = tas2770_register_codec(tas2770);
20907 + if (result)
20908 + dev_err(tas2770->dev, "Register codec failed.\n");
20909 +@@ -780,13 +779,6 @@ end:
20910 + return result;
20911 + }
20912 +
20913 +-static int tas2770_i2c_remove(struct i2c_client *client)
20914 +-{
20915 +- pm_runtime_disable(&client->dev);
20916 +- return 0;
20917 +-}
20918 +-
20919 +-
20920 + static const struct i2c_device_id tas2770_i2c_id[] = {
20921 + { "tas2770", 0},
20922 + { }
20923 +@@ -807,7 +799,6 @@ static struct i2c_driver tas2770_i2c_driver = {
20924 + .of_match_table = of_match_ptr(tas2770_of_match),
20925 + },
20926 + .probe = tas2770_i2c_probe,
20927 +- .remove = tas2770_i2c_remove,
20928 + .id_table = tas2770_i2c_id,
20929 + };
20930 +
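In tas2770_parse_dt() above, the three optional properties stop being hard probe failures: a missing property now logs an informational message and falls back to a default, and the function always returns 0. The fallback pattern in miniature, with read_u32() as a hypothetical stand-in for fwnode_property_read_u32():

    #include <stdio.h>

    /* stand-in: returns 0 and fills *val on success, negative if absent */
    static int read_u32(const char *prop, unsigned *val)
    {
        (void)prop;
        (void)val;
        return -1;                   /* pretend the property is missing */
    }

    int main(void)
    {
        unsigned v_sense_slot;

        if (read_u32("ti,vmon-slot-no", &v_sense_slot)) {
            printf("ti,vmon-slot-no missing, using default slot\n");
            v_sense_slot = 2;        /* default chosen by the patch */
        }
        printf("v_sense_slot = %u\n", v_sense_slot);
        return 0;                    /* probe continues instead of failing */
    }
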
20931 +diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
20932 +index 03fb50175d876..a6273ccb84013 100644
20933 +--- a/sound/soc/codecs/tlv320adcx140.c
20934 ++++ b/sound/soc/codecs/tlv320adcx140.c
20935 +@@ -154,7 +154,7 @@ static const struct regmap_config adcx140_i2c_regmap = {
20936 + };
20937 +
20938 + /* Digital Volume control. From -100 to 27 dB in 0.5 dB steps */
20939 +-static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10000, 50, 0);
20940 ++static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10050, 50, 0);
20941 +
20942 + /* ADC gain. From 0 to 42 dB in 1 dB steps */
20943 + static DECLARE_TLV_DB_SCALE(adc_tlv, 0, 100, 0);
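DECLARE_TLV_DB_SCALE() takes its minimum and step in 0.01 dB units, so the corrected minimum of -10050 with a step of 50 puts register value 0 at -100.5 dB; assuming the control spans values 0..255, the top step then lands exactly on the +27 dB the comment documents. The mapping is simply min + step * value:

    #include <stdio.h>

    int main(void)
    {
        const int min_cdb = -10050;  /* 0.01 dB units, per the fixed macro */
        const int step_cdb = 50;     /* 0.5 dB per register step */
        int val;

        for (val = 0; val <= 255; val += 255)
            printf("reg %3d -> %7.2f dB\n", val,
                   (min_cdb + step_cdb * val) / 100.0);
        /* reg   0 -> -100.50 dB
         * reg 255 ->   27.00 dB */
        return 0;
    }
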
20944 +diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
20945 +index d087f3b20b1d5..50b66cf9ea8f9 100644
20946 +--- a/sound/soc/codecs/tlv320aic32x4.c
20947 ++++ b/sound/soc/codecs/tlv320aic32x4.c
20948 +@@ -665,7 +665,7 @@ static int aic32x4_set_processing_blocks(struct snd_soc_component *component,
20949 + }
20950 +
20951 + static int aic32x4_setup_clocks(struct snd_soc_component *component,
20952 +- unsigned int sample_rate)
20953 ++ unsigned int sample_rate, unsigned int channels)
20954 + {
20955 + u8 aosr;
20956 + u16 dosr;
20957 +@@ -753,7 +753,9 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
20958 + dosr);
20959 +
20960 + clk_set_rate(clocks[5].clk,
20961 +- sample_rate * 32);
20962 ++ sample_rate * 32 *
20963 ++ channels);
20964 ++
20965 + return 0;
20966 + }
20967 + }
20968 +@@ -775,7 +777,8 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
20969 + u8 iface1_reg = 0;
20970 + u8 dacsetup_reg = 0;
20971 +
20972 +- aic32x4_setup_clocks(component, params_rate(params));
20973 ++ aic32x4_setup_clocks(component, params_rate(params),
20974 ++ params_channels(params));
20975 +
20976 + switch (params_width(params)) {
20977 + case 16:
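The aic32x4 clock fix above scales the bit clock with the channel count: BCLK = sample_rate * 32 * channels, where the 32 appears to be the driver's fixed bits per channel slot; the old sample_rate * 32 value was only correct for a single channel. For 48 kHz stereo that is 48000 * 32 * 2 = 3072000 Hz:

    #include <stdio.h>

    static unsigned long bclk_hz(unsigned rate, unsigned channels)
    {
        return (unsigned long)rate * 32 * channels;
    }

    int main(void)
    {
        printf("%lu\n", bclk_hz(48000, 2));   /* 3072000 */
        return 0;
    }
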
20978 +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
20979 +index 519ca2e696372..18f62fde92537 100644
20980 +--- a/sound/soc/codecs/wm_adsp.c
20981 ++++ b/sound/soc/codecs/wm_adsp.c
20982 +@@ -2043,6 +2043,7 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
20983 + {
20984 + struct wm_coeff_ctl *ctl;
20985 + struct snd_kcontrol *kcontrol;
20986 ++ char ctl_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
20987 + int ret;
20988 +
20989 + ctl = wm_adsp_get_ctl(dsp, name, type, alg);
20990 +@@ -2053,8 +2054,25 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
20991 + return -EINVAL;
20992 +
20993 + ret = wm_coeff_write_ctrl(ctl, buf, len);
20994 ++ if (ret)
20995 ++ return ret;
20996 ++
20997 ++ if (ctl->flags & WMFW_CTL_FLAG_SYS)
20998 ++ return 0;
20999 ++
21000 ++ if (dsp->component->name_prefix)
21001 ++ snprintf(ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s",
21002 ++ dsp->component->name_prefix, ctl->name);
21003 ++ else
21004 ++ snprintf(ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s",
21005 ++ ctl->name);
21006 ++
21007 ++ kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl_name);
21008 ++ if (!kcontrol) {
21009 ++ adsp_err(dsp, "Can't find kcontrol %s\n", ctl_name);
21010 ++ return -EINVAL;
21011 ++ }
21012 +
21013 +- kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl->name);
21014 + snd_ctl_notify(dsp->component->card->snd_card,
21015 + SNDRV_CTL_EVENT_MASK_VALUE, &kcontrol->id);
21016 +
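wm_adsp_write_ctl() above now rebuilds the full control name before looking the kcontrol up, because a machine driver may have registered the control with a component name_prefix ("Left", "Right", ...). The prefix handling is one conditional snprintf into a fixed buffer; the 44 below stands in for SNDRV_CTL_ELEM_ID_NAME_MAXLEN and should be treated as illustrative:

    #include <stdio.h>

    #define NAME_MAXLEN 44           /* illustrative name-length cap */

    static void ctl_full_name(char *out, const char *prefix, const char *name)
    {
        if (prefix)
            snprintf(out, NAME_MAXLEN, "%s %s", prefix, name);
        else
            snprintf(out, NAME_MAXLEN, "%s", name);
    }

    int main(void)
    {
        char buf[NAME_MAXLEN];

        ctl_full_name(buf, "Left", "DSP1 Firmware");
        puts(buf);                   /* "Left DSP1 Firmware" */
        return 0;
    }
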
21017 +diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
21018 +index 7031869a023a1..211e29a73a41a 100644
21019 +--- a/sound/soc/fsl/fsl_sai.c
21020 ++++ b/sound/soc/fsl/fsl_sai.c
21021 +@@ -694,7 +694,7 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
21022 + return 0;
21023 + }
21024 +
21025 +-static struct snd_soc_dai_driver fsl_sai_dai = {
21026 ++static struct snd_soc_dai_driver fsl_sai_dai_template = {
21027 + .probe = fsl_sai_dai_probe,
21028 + .playback = {
21029 + .stream_name = "CPU-Playback",
21030 +@@ -966,12 +966,15 @@ static int fsl_sai_probe(struct platform_device *pdev)
21031 + return ret;
21032 + }
21033 +
21034 ++ memcpy(&sai->cpu_dai_drv, &fsl_sai_dai_template,
21035 ++ sizeof(fsl_sai_dai_template));
21036 ++
21037 + /* Sync Tx with Rx as default by following old DT binding */
21038 + sai->synchronous[RX] = true;
21039 + sai->synchronous[TX] = false;
21040 +- fsl_sai_dai.symmetric_rates = 1;
21041 +- fsl_sai_dai.symmetric_channels = 1;
21042 +- fsl_sai_dai.symmetric_samplebits = 1;
21043 ++ sai->cpu_dai_drv.symmetric_rates = 1;
21044 ++ sai->cpu_dai_drv.symmetric_channels = 1;
21045 ++ sai->cpu_dai_drv.symmetric_samplebits = 1;
21046 +
21047 + if (of_find_property(np, "fsl,sai-synchronous-rx", NULL) &&
21048 + of_find_property(np, "fsl,sai-asynchronous", NULL)) {
21049 +@@ -988,9 +991,9 @@ static int fsl_sai_probe(struct platform_device *pdev)
21050 + /* Discard all settings for asynchronous mode */
21051 + sai->synchronous[RX] = false;
21052 + sai->synchronous[TX] = false;
21053 +- fsl_sai_dai.symmetric_rates = 0;
21054 +- fsl_sai_dai.symmetric_channels = 0;
21055 +- fsl_sai_dai.symmetric_samplebits = 0;
21056 ++ sai->cpu_dai_drv.symmetric_rates = 0;
21057 ++ sai->cpu_dai_drv.symmetric_channels = 0;
21058 ++ sai->cpu_dai_drv.symmetric_samplebits = 0;
21059 + }
21060 +
21061 + if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) &&
21062 +@@ -1019,7 +1022,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
21063 + pm_runtime_enable(&pdev->dev);
21064 +
21065 + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
21066 +- &fsl_sai_dai, 1);
21067 ++ &sai->cpu_dai_drv, 1);
21068 + if (ret)
21069 + goto err_pm_disable;
21070 +
21071 +diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
21072 +index 6aba7d28f5f34..677ecfc1ec68f 100644
21073 +--- a/sound/soc/fsl/fsl_sai.h
21074 ++++ b/sound/soc/fsl/fsl_sai.h
21075 +@@ -180,6 +180,7 @@ struct fsl_sai {
21076 + unsigned int bclk_ratio;
21077 +
21078 + const struct fsl_sai_soc_data *soc_data;
21079 ++ struct snd_soc_dai_driver cpu_dai_drv;
21080 + struct snd_dmaengine_dai_dma_data dma_params_rx;
21081 + struct snd_dmaengine_dai_dma_data dma_params_tx;
21082 + };
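The fsl_sai fix above stops mutating the file-scope snd_soc_dai_driver that every SAI instance shared: probe now copies a const template into per-device storage and flips the symmetric_* flags only on its own copy, so one asynchronous instance can no longer clear the flags for all the others. The fix in miniature:

    #include <stdio.h>
    #include <string.h>

    struct dai_drv { int symmetric_rates; };

    static const struct dai_drv dai_template = { 0 };

    struct sai { struct dai_drv cpu_dai_drv; };

    static void probe_one(struct sai *sai, int synchronous)
    {
        memcpy(&sai->cpu_dai_drv, &dai_template, sizeof(dai_template));
        if (synchronous)
            sai->cpu_dai_drv.symmetric_rates = 1;   /* touches only this copy */
    }

    int main(void)
    {
        struct sai a, b;

        probe_one(&a, 1);
        probe_one(&b, 0);                           /* no longer resets a's flag */
        printf("%d %d\n", a.cpu_dai_drv.symmetric_rates,
               b.cpu_dai_drv.symmetric_rates);      /* 1 0 */
        return 0;
    }
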
21083 +diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c
21084 +index 15a27a2cd0cae..fad1eb6253d53 100644
21085 +--- a/sound/soc/fsl/imx-es8328.c
21086 ++++ b/sound/soc/fsl/imx-es8328.c
21087 +@@ -145,13 +145,13 @@ static int imx_es8328_probe(struct platform_device *pdev)
21088 + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
21089 + if (!data) {
21090 + ret = -ENOMEM;
21091 +- goto fail;
21092 ++ goto put_device;
21093 + }
21094 +
21095 + comp = devm_kzalloc(dev, 3 * sizeof(*comp), GFP_KERNEL);
21096 + if (!comp) {
21097 + ret = -ENOMEM;
21098 +- goto fail;
21099 ++ goto put_device;
21100 + }
21101 +
21102 + data->dev = dev;
21103 +@@ -182,12 +182,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
21104 + ret = snd_soc_of_parse_card_name(&data->card, "model");
21105 + if (ret) {
21106 + dev_err(dev, "Unable to parse card name\n");
21107 +- goto fail;
21108 ++ goto put_device;
21109 + }
21110 + ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
21111 + if (ret) {
21112 + dev_err(dev, "Unable to parse routing: %d\n", ret);
21113 +- goto fail;
21114 ++ goto put_device;
21115 + }
21116 + data->card.num_links = 1;
21117 + data->card.owner = THIS_MODULE;
21118 +@@ -196,10 +196,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
21119 + ret = snd_soc_register_card(&data->card);
21120 + if (ret) {
21121 + dev_err(dev, "Unable to register: %d\n", ret);
21122 +- goto fail;
21123 ++ goto put_device;
21124 + }
21125 +
21126 + platform_set_drvdata(pdev, data);
21127 ++put_device:
21128 ++ put_device(&ssi_pdev->dev);
21129 + fail:
21130 + of_node_put(ssi_np);
21131 + of_node_put(codec_np);
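imx_es8328_probe() above closes a device reference leak: every failure after the SSI device lookup now exits through a put_device() label, and the success path shares it since the reference is only needed during probe. The layered-cleanup skeleton, with take()/drop() as hypothetical stand-ins for get_device()/put_device():

    #include <stdio.h>

    static int take(void) { puts("get"); return 0; }
    static void drop(void) { puts("put"); }

    static int probe(int fail_late)
    {
        int ret = take();

        if (ret)
            goto fail;               /* nothing to drop yet */

        if (fail_late) {
            ret = -1;
            goto put;                /* was "goto fail", leaking the reference */
        }
        /* ... registration work ... */
    put:
        drop();                      /* runs on success and on late errors alike */
    fail:
        return ret;
    }

    int main(void)
    {
        return probe(1) ? 0 : 1;
    }
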
21132 +diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
21133 +index 13a48b0c35aef..11233c3aeadfb 100644
21134 +--- a/sound/soc/intel/boards/sof_rt5682.c
21135 ++++ b/sound/soc/intel/boards/sof_rt5682.c
21136 +@@ -118,6 +118,19 @@ static const struct dmi_system_id sof_rt5682_quirk_table[] = {
21137 + .driver_data = (void *)(SOF_RT5682_MCLK_EN |
21138 + SOF_RT5682_SSP_CODEC(0)),
21139 + },
21140 ++ {
21141 ++ .callback = sof_rt5682_quirk_cb,
21142 ++ .matches = {
21143 ++ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Volteer"),
21144 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Terrador"),
21145 ++ },
21146 ++ .driver_data = (void *)(SOF_RT5682_MCLK_EN |
21147 ++ SOF_RT5682_SSP_CODEC(0) |
21148 ++ SOF_SPEAKER_AMP_PRESENT |
21149 ++ SOF_MAX98373_SPEAKER_AMP_PRESENT |
21150 ++ SOF_RT5682_SSP_AMP(2) |
21151 ++ SOF_RT5682_NUM_HDMIDEV(4)),
21152 ++ },
21153 + {}
21154 + };
21155 +
21156 +diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
21157 +index e00a4af29c13f..f25da84f175ac 100644
21158 +--- a/sound/soc/qcom/lpass-cpu.c
21159 ++++ b/sound/soc/qcom/lpass-cpu.c
21160 +@@ -209,21 +209,6 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
21161 + return 0;
21162 + }
21163 +
21164 +-static int lpass_cpu_daiops_hw_free(struct snd_pcm_substream *substream,
21165 +- struct snd_soc_dai *dai)
21166 +-{
21167 +- struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
21168 +- int ret;
21169 +-
21170 +- ret = regmap_write(drvdata->lpaif_map,
21171 +- LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id),
21172 +- 0);
21173 +- if (ret)
21174 +- dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
21175 +-
21176 +- return ret;
21177 +-}
21178 +-
21179 + static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
21180 + struct snd_soc_dai *dai)
21181 + {
21182 +@@ -304,7 +289,6 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
21183 + .startup = lpass_cpu_daiops_startup,
21184 + .shutdown = lpass_cpu_daiops_shutdown,
21185 + .hw_params = lpass_cpu_daiops_hw_params,
21186 +- .hw_free = lpass_cpu_daiops_hw_free,
21187 + .prepare = lpass_cpu_daiops_prepare,
21188 + .trigger = lpass_cpu_daiops_trigger,
21189 + };
21190 +diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
21191 +index 34f7fd1bab1cf..693839deebfe8 100644
21192 +--- a/sound/soc/qcom/lpass-platform.c
21193 ++++ b/sound/soc/qcom/lpass-platform.c
21194 +@@ -61,7 +61,7 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component,
21195 + int ret, dma_ch, dir = substream->stream;
21196 + struct lpass_pcm_data *data;
21197 +
21198 +- data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
21199 ++ data = kzalloc(sizeof(*data), GFP_KERNEL);
21200 + if (!data)
21201 + return -ENOMEM;
21202 +
21203 +@@ -118,6 +118,7 @@ static int lpass_platform_pcmops_close(struct snd_soc_component *component,
21204 + if (v->free_dma_channel)
21205 + v->free_dma_channel(drvdata, data->dma_ch);
21206 +
21207 ++ kfree(data);
21208 + return 0;
21209 + }
21210 +
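The lpass-platform change above swaps devm_kzalloc() for plain kzalloc()/kfree(): devm memory is only released when the device is unbound, so per-stream data allocated in pcmops_open() would accumulate across open/close cycles. The matched alloc/free per open is the entire fix; in userspace terms:

    #include <stdlib.h>

    struct pcm_data { int dma_ch; };

    static struct pcm_data *pcm_open(void)
    {
        /* per-open state: released in close, not tied to device lifetime */
        return calloc(1, sizeof(struct pcm_data));
    }

    static void pcm_close(struct pcm_data *data)
    {
        free(data);
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 1000; i++)   /* no growth across open/close cycles */
            pcm_close(pcm_open());
        return 0;
    }
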
21211 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
21212 +index 6eaa00c210117..a5460155b3f64 100644
21213 +--- a/sound/soc/soc-topology.c
21214 ++++ b/sound/soc/soc-topology.c
21215 +@@ -592,6 +592,17 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
21216 + k->info = snd_soc_bytes_info_ext;
21217 + k->tlv.c = snd_soc_bytes_tlv_callback;
21218 +
21219 ++ /*
21220 ++ * When a topology-based implementation abuses the
21221 ++ * control interface and uses bytes_ext controls of
21222 ++ * more than 512 bytes, we need to disable the size
21223 ++ * checks, otherwise accesses to such controls will
21224 ++ * return an -EINVAL error and prevent the card from
21225 ++ * being configured.
21226 ++ */
21227 ++ if (IS_ENABLED(CONFIG_SND_CTL_VALIDATION) && sbe->max > 512)
21228 ++ k->access |= SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK;
21229 ++
21230 + ext_ops = tplg->bytes_ext_ops;
21231 + num_ops = tplg->bytes_ext_ops_count;
21232 + for (i = 0; i < num_ops; i++) {
21233 +diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
21234 +index 186eea105bb15..009938d45ddd9 100644
21235 +--- a/sound/soc/sof/control.c
21236 ++++ b/sound/soc/sof/control.c
21237 +@@ -298,6 +298,10 @@ int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
21238 + const struct snd_ctl_tlv __user *tlvd =
21239 + (const struct snd_ctl_tlv __user *)binary_data;
21240 +
21241 ++ /* make sure we have at least a header */
21242 ++ if (size < sizeof(struct snd_ctl_tlv))
21243 ++ return -EINVAL;
21244 ++
21245 + /*
21246 + * The beginning of bytes data contains a header from where
21247 + * the length (as bytes) is needed to know the correct copy
21248 +@@ -306,6 +310,13 @@ int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
21249 + if (copy_from_user(&header, tlvd, sizeof(const struct snd_ctl_tlv)))
21250 + return -EFAULT;
21251 +
21252 ++ /* make sure TLV info is consistent */
21253 ++ if (header.length + sizeof(struct snd_ctl_tlv) > size) {
21254 ++ dev_err_ratelimited(scomp->dev, "error: inconsistent TLV, data %d + header %zu > %d\n",
21255 ++ header.length, sizeof(struct snd_ctl_tlv), size);
21256 ++ return -EINVAL;
21257 ++ }
21258 ++
21259 + /* be->max is coming from topology */
21260 + if (header.length > be->max) {
21261 + dev_err_ratelimited(scomp->dev, "error: Bytes data size %d exceeds max %d.\n",
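snd_sof_bytes_ext_put() above now validates the user-supplied TLV before trusting header.length: the incoming buffer must at least hold a header, and header plus declared payload must fit in the size the caller passed, on top of the existing be->max topology cap. The checks in plain C, with ctl_tlv as a simplified stand-in for struct snd_ctl_tlv:

    #include <stddef.h>
    #include <string.h>

    struct ctl_tlv { unsigned numid, length; };

    static int tlv_ok(const void *buf, size_t size, size_t max_payload)
    {
        struct ctl_tlv h;

        if (size < sizeof(h))                     /* must hold a header */
            return 0;
        memcpy(&h, buf, sizeof(h));
        if ((size_t)h.length + sizeof(h) > size)  /* payload must fit the buffer */
            return 0;
        if (h.length > max_payload)               /* topology-defined be->max cap */
            return 0;
        return 1;
    }

    int main(void)
    {
        struct ctl_tlv h = { 1, 4 };
        char buf[sizeof(h) + 4];

        memcpy(buf, &h, sizeof(h));
        return tlv_ok(buf, sizeof(buf), 16) ? 0 : 1;
    }
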
21262 +diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
21263 +index 63ca920c8e6e0..7152e6d1cf673 100644
21264 +--- a/sound/soc/sof/intel/hda.c
21265 ++++ b/sound/soc/sof/intel/hda.c
21266 +@@ -1179,7 +1179,13 @@ void hda_machine_select(struct snd_sof_dev *sdev)
21267 +
21268 + mach = snd_soc_acpi_find_machine(desc->machines);
21269 + if (mach) {
21270 +- sof_pdata->tplg_filename = mach->sof_tplg_filename;
21271 ++ /*
21272 ++ * If tplg file name is overridden, use it instead of
21273 ++ * the one set in mach table
21274 ++ */
21275 ++ if (!sof_pdata->tplg_filename)
21276 ++ sof_pdata->tplg_filename = mach->sof_tplg_filename;
21277 ++
21278 + sof_pdata->machine = mach;
21279 +
21280 + if (mach->link_mask) {
21281 +diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
21282 +index aa3532ba14349..f3a8140773db5 100644
21283 +--- a/sound/soc/sof/sof-pci-dev.c
21284 ++++ b/sound/soc/sof/sof-pci-dev.c
21285 +@@ -35,8 +35,28 @@ static int sof_pci_debug;
21286 + module_param_named(sof_pci_debug, sof_pci_debug, int, 0444);
21287 + MODULE_PARM_DESC(sof_pci_debug, "SOF PCI debug options (0x0 all off)");
21288 +
21289 ++static const char *sof_override_tplg_name;
21290 ++
21291 + #define SOF_PCI_DISABLE_PM_RUNTIME BIT(0)
21292 +
21293 ++static int sof_tplg_cb(const struct dmi_system_id *id)
21294 ++{
21295 ++ sof_override_tplg_name = id->driver_data;
21296 ++ return 1;
21297 ++}
21298 ++
21299 ++static const struct dmi_system_id sof_tplg_table[] = {
21300 ++ {
21301 ++ .callback = sof_tplg_cb,
21302 ++ .matches = {
21303 ++ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Volteer"),
21304 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Terrador"),
21305 ++ },
21306 ++ .driver_data = "sof-tgl-rt5682-ssp0-max98373-ssp2.tplg",
21307 ++ },
21308 ++ {}
21309 ++};
21310 ++
21311 + static const struct dmi_system_id community_key_platforms[] = {
21312 + {
21313 + .ident = "Up Squared",
21314 +@@ -347,6 +367,10 @@ static int sof_pci_probe(struct pci_dev *pci,
21315 + sof_pdata->tplg_filename_prefix =
21316 + sof_pdata->desc->default_tplg_path;
21317 +
21318 ++ dmi_check_system(sof_tplg_table);
21319 ++ if (sof_override_tplg_name)
21320 ++ sof_pdata->tplg_filename = sof_override_tplg_name;
21321 ++
21322 + #if IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
21323 + /* set callback to enable runtime_pm */
21324 + sof_pdata->sof_probe_complete = sof_pci_probe_complete;
21325 +diff --git a/sound/usb/format.c b/sound/usb/format.c
21326 +index 1b28d01d1f4cd..3bfead393aa34 100644
21327 +--- a/sound/usb/format.c
21328 ++++ b/sound/usb/format.c
21329 +@@ -406,6 +406,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
21330 + case USB_ID(0x0e41, 0x4242): /* Line6 Helix Rack */
21331 + case USB_ID(0x0e41, 0x4244): /* Line6 Helix LT */
21332 + case USB_ID(0x0e41, 0x4246): /* Line6 HX-Stomp */
21333 ++ case USB_ID(0x0e41, 0x4247): /* Line6 Pod Go */
21334 + case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
21335 + case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
21336 + case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
21337 +diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
21338 +index e7818b44b48ee..6e5c907680b1a 100644
21339 +--- a/tools/build/Makefile.feature
21340 ++++ b/tools/build/Makefile.feature
21341 +@@ -38,8 +38,6 @@ FEATURE_TESTS_BASIC := \
21342 + get_current_dir_name \
21343 + gettid \
21344 + glibc \
21345 +- gtk2 \
21346 +- gtk2-infobar \
21347 + libbfd \
21348 + libcap \
21349 + libelf \
21350 +@@ -81,6 +79,8 @@ FEATURE_TESTS_EXTRA := \
21351 + compile-32 \
21352 + compile-x32 \
21353 + cplus-demangle \
21354 ++ gtk2 \
21355 ++ gtk2-infobar \
21356 + hello \
21357 + libbabeltrace \
21358 + libbfd-liberty \
21359 +@@ -110,7 +110,6 @@ FEATURE_DISPLAY ?= \
21360 + dwarf \
21361 + dwarf_getlocations \
21362 + glibc \
21363 +- gtk2 \
21364 + libbfd \
21365 + libcap \
21366 + libelf \
21367 +diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
21368 +index 93b590d81209c..85d341e25eaec 100644
21369 +--- a/tools/build/feature/Makefile
21370 ++++ b/tools/build/feature/Makefile
21371 +@@ -89,7 +89,7 @@ __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(
21372 + ###############################
21373 +
21374 + $(OUTPUT)test-all.bin:
21375 +- $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma
21376 ++ $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd
21377 +
21378 + $(OUTPUT)test-hello.bin:
21379 + $(BUILD)
21380 +diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
21381 +index 5479e543b1947..d2623992ccd61 100644
21382 +--- a/tools/build/feature/test-all.c
21383 ++++ b/tools/build/feature/test-all.c
21384 +@@ -78,14 +78,6 @@
21385 + # include "test-libslang.c"
21386 + #undef main
21387 +
21388 +-#define main main_test_gtk2
21389 +-# include "test-gtk2.c"
21390 +-#undef main
21391 +-
21392 +-#define main main_test_gtk2_infobar
21393 +-# include "test-gtk2-infobar.c"
21394 +-#undef main
21395 +-
21396 + #define main main_test_libbfd
21397 + # include "test-libbfd.c"
21398 + #undef main
21399 +@@ -205,8 +197,6 @@ int main(int argc, char *argv[])
21400 + main_test_libelf_getshdrstrndx();
21401 + main_test_libunwind();
21402 + main_test_libslang();
21403 +- main_test_gtk2(argc, argv);
21404 +- main_test_gtk2_infobar(argc, argv);
21405 + main_test_libbfd();
21406 + main_test_backtrace();
21407 + main_test_libnuma();
21408 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
21409 +index 236c91aff48f8..3e71c2f69afe8 100644
21410 +--- a/tools/lib/bpf/libbpf.c
21411 ++++ b/tools/lib/bpf/libbpf.c
21412 +@@ -3677,6 +3677,36 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
21413 + return 0;
21414 + }
21415 +
21416 ++static int init_map_slots(struct bpf_map *map)
21417 ++{
21418 ++ const struct bpf_map *targ_map;
21419 ++ unsigned int i;
21420 ++ int fd, err;
21421 ++
21422 ++ for (i = 0; i < map->init_slots_sz; i++) {
21423 ++ if (!map->init_slots[i])
21424 ++ continue;
21425 ++
21426 ++ targ_map = map->init_slots[i];
21427 ++ fd = bpf_map__fd(targ_map);
21428 ++ err = bpf_map_update_elem(map->fd, &i, &fd, 0);
21429 ++ if (err) {
21430 ++ err = -errno;
21431 ++ pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
21432 ++ map->name, i, targ_map->name,
21433 ++ fd, err);
21434 ++ return err;
21435 ++ }
21436 ++ pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
21437 ++ map->name, i, targ_map->name, fd);
21438 ++ }
21439 ++
21440 ++ zfree(&map->init_slots);
21441 ++ map->init_slots_sz = 0;
21442 ++
21443 ++ return 0;
21444 ++}
21445 ++
21446 + static int
21447 + bpf_object__create_maps(struct bpf_object *obj)
21448 + {
21449 +@@ -3719,28 +3749,11 @@ bpf_object__create_maps(struct bpf_object *obj)
21450 + }
21451 +
21452 + if (map->init_slots_sz) {
21453 +- for (j = 0; j < map->init_slots_sz; j++) {
21454 +- const struct bpf_map *targ_map;
21455 +- int fd;
21456 +-
21457 +- if (!map->init_slots[j])
21458 +- continue;
21459 +-
21460 +- targ_map = map->init_slots[j];
21461 +- fd = bpf_map__fd(targ_map);
21462 +- err = bpf_map_update_elem(map->fd, &j, &fd, 0);
21463 +- if (err) {
21464 +- err = -errno;
21465 +- pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
21466 +- map->name, j, targ_map->name,
21467 +- fd, err);
21468 +- goto err_out;
21469 +- }
21470 +- pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
21471 +- map->name, j, targ_map->name, fd);
21472 ++ err = init_map_slots(map);
21473 ++ if (err < 0) {
21474 ++ zclose(map->fd);
21475 ++ goto err_out;
21476 + }
21477 +- zfree(&map->init_slots);
21478 +- map->init_slots_sz = 0;
21479 + }
21480 +
21481 + if (map->pin_path && !map->pinned) {
21482 +@@ -5253,7 +5266,7 @@ retry_load:
21483 + free(log_buf);
21484 + goto retry_load;
21485 + }
21486 +- ret = -errno;
21487 ++ ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
21488 + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
21489 + pr_warn("load bpf program failed: %s\n", cp);
21490 + pr_perm_msg(ret);
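The last libbpf hunk above guards the error path against errno being 0: if the failed load did not set errno, -errno would be 0 and the failure would be reported as success. The defensive pattern, with ERR_FALLBACK as an arbitrary stand-in value for LIBBPF_ERRNO__LOAD:

    #include <errno.h>

    #define ERR_FALLBACK 4003        /* illustrative fallback error code */

    static int load_failed(void)
    {
        /* errno may or may not have been set by whatever failed */
        return errno ? -errno : -ERR_FALLBACK;    /* never 0 on failure */
    }

    int main(void)
    {
        errno = 0;
        return load_failed() == -ERR_FALLBACK ? 0 : 1;
    }
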
21491 +diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
21492 +index 6a875a0f01bb0..233592c5a52c7 100644
21493 +--- a/tools/lib/perf/evlist.c
21494 ++++ b/tools/lib/perf/evlist.c
21495 +@@ -45,6 +45,9 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
21496 + if (!evsel->own_cpus || evlist->has_user_cpus) {
21497 + perf_cpu_map__put(evsel->cpus);
21498 + evsel->cpus = perf_cpu_map__get(evlist->cpus);
21499 ++ } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {
21500 ++ perf_cpu_map__put(evsel->cpus);
21501 ++ evsel->cpus = perf_cpu_map__get(evlist->cpus);
21502 + } else if (evsel->cpus != evsel->own_cpus) {
21503 + perf_cpu_map__put(evsel->cpus);
21504 + evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
21505 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
21506 +index 513633809c81e..ab6dbd8ef6cf6 100644
21507 +--- a/tools/perf/Makefile.config
21508 ++++ b/tools/perf/Makefile.config
21509 +@@ -716,12 +716,14 @@ ifndef NO_SLANG
21510 + endif
21511 + endif
21512 +
21513 +-ifndef NO_GTK2
21514 ++ifdef GTK2
21515 + FLAGS_GTK2=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
21516 ++ $(call feature_check,gtk2)
21517 + ifneq ($(feature-gtk2), 1)
21518 + msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
21519 + NO_GTK2 := 1
21520 + else
21521 ++ $(call feature_check,gtk2-infobar)
21522 + ifeq ($(feature-gtk2-infobar), 1)
21523 + GTK_CFLAGS := -DHAVE_GTK_INFO_BAR_SUPPORT
21524 + endif
21525 +diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
21526 +index 86dbb51bb2723..bc45b1a61d3a3 100644
21527 +--- a/tools/perf/Makefile.perf
21528 ++++ b/tools/perf/Makefile.perf
21529 +@@ -48,7 +48,7 @@ include ../scripts/utilities.mak
21530 + #
21531 + # Define NO_SLANG if you do not want TUI support.
21532 + #
21533 +-# Define NO_GTK2 if you do not want GTK+ GUI support.
21534 ++# Define GTK2 if you want GTK+ GUI support.
21535 + #
21536 + # Define NO_DEMANGLE if you do not want C++ symbol demangling.
21537 + #
21538 +@@ -384,7 +384,7 @@ ifneq ($(OUTPUT),)
21539 + CFLAGS += -I$(OUTPUT)
21540 + endif
21541 +
21542 +-ifndef NO_GTK2
21543 ++ifdef GTK2
21544 + ALL_PROGRAMS += $(OUTPUT)libperf-gtk.so
21545 + GTK_IN := $(OUTPUT)gtk-in.o
21546 + endif
21547 +@@ -876,7 +876,7 @@ check: $(OUTPUT)common-cmds.h
21548 +
21549 + ### Installation rules
21550 +
21551 +-ifndef NO_GTK2
21552 ++ifdef GTK2
21553 + install-gtk: $(OUTPUT)libperf-gtk.so
21554 + $(call QUIET_INSTALL, 'GTK UI') \
21555 + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(libdir_SQ)'; \
21556 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
21557 +index 6e2502de755a8..6494383687f89 100644
21558 +--- a/tools/perf/builtin-stat.c
21559 ++++ b/tools/perf/builtin-stat.c
21560 +@@ -1963,8 +1963,10 @@ static void setup_system_wide(int forks)
21561 + struct evsel *counter;
21562 +
21563 + evlist__for_each_entry(evsel_list, counter) {
21564 +- if (!counter->core.system_wide)
21565 ++ if (!counter->core.system_wide &&
21566 ++ strcmp(counter->name, "duration_time")) {
21567 + return;
21568 ++ }
21569 + }
21570 +
21571 + if (evsel_list->core.nr_entries)
21572 +diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
21573 +index 4cbb64edc9983..83e8cd663b4e4 100644
21574 +--- a/tools/perf/builtin-trace.c
21575 ++++ b/tools/perf/builtin-trace.c
21576 +@@ -1762,7 +1762,11 @@ static int trace__read_syscall_info(struct trace *trace, int id)
21577 + if (table == NULL)
21578 + return -ENOMEM;
21579 +
21580 +- memset(table + trace->sctbl->syscalls.max_id, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
21581 ++ // Need to memset from offset 0 and +1 members if brand new
21582 ++ if (trace->syscalls.table == NULL)
21583 ++ memset(table, 0, (id + 1) * sizeof(*sc));
21584 ++ else
21585 ++ memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
21586 +
21587 + trace->syscalls.table = table;
21588 + trace->sctbl->syscalls.max_id = id;
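trace__read_syscall_info() above grows a table indexed 0..max_id inclusive, so a freshly allocated table must be zeroed from slot 0 through id, while a grown one only needs the slots past the old max_id, and both memsets need the +1 the old code dropped. A sketch with realloc():

    #include <stdlib.h>
    #include <string.h>

    struct sc { const char *name; };

    static struct sc *grow(struct sc *table, int old_max_id, int id)
    {
        struct sc *t = realloc(table, (id + 1) * sizeof(*t));

        if (!t)
            return NULL;
        if (!table)                          /* brand new: clear 0..id */
            memset(t, 0, (id + 1) * sizeof(*t));
        else                                 /* grown: clear old_max_id+1..id */
            memset(t + old_max_id + 1, 0,
                   (id - old_max_id) * sizeof(*t));
        return t;
    }

    int main(void)
    {
        struct sc *t = grow(NULL, -1, 10);

        if (t)
            t = grow(t, 10, 20);
        free(t);
        return 0;
    }
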
21589 +diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
21590 +index 05cf2af9e2c27..d09ec2f030719 100644
21591 +--- a/tools/perf/builtin-version.c
21592 ++++ b/tools/perf/builtin-version.c
21593 +@@ -60,7 +60,6 @@ static void library_status(void)
21594 + STATUS(HAVE_DWARF_SUPPORT, dwarf);
21595 + STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations);
21596 + STATUS(HAVE_GLIBC_SUPPORT, glibc);
21597 +- STATUS(HAVE_GTK2_SUPPORT, gtk2);
21598 + #ifndef HAVE_SYSCALL_TABLE_SUPPORT
21599 + STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit);
21600 + #endif
21601 +diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
21602 +index 9357b5f62c273..bc88175e377ce 100644
21603 +--- a/tools/perf/util/intel-pt.c
21604 ++++ b/tools/perf/util/intel-pt.c
21605 +@@ -1071,6 +1071,8 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
21606 +
21607 + if (queue->tid == -1 || pt->have_sched_switch) {
21608 + ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
21609 ++ if (ptq->tid == -1)
21610 ++ ptq->pid = -1;
21611 + thread__zput(ptq->thread);
21612 + }
21613 +
21614 +@@ -2561,10 +2563,8 @@ static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
21615 + tid = sample->tid;
21616 + }
21617 +
21618 +- if (tid == -1) {
21619 +- pr_err("context_switch event has no tid\n");
21620 +- return -EINVAL;
21621 +- }
21622 ++ if (tid == -1)
21623 ++ intel_pt_log("context_switch event has no tid\n");
21624 +
21625 + intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
21626 + cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
21627 +diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
21628 +index 8995092d541ec..3b796dd5e5772 100644
21629 +--- a/tools/testing/radix-tree/idr-test.c
21630 ++++ b/tools/testing/radix-tree/idr-test.c
21631 +@@ -523,8 +523,27 @@ static void *ida_random_fn(void *arg)
21632 + return NULL;
21633 + }
21634 +
21635 ++static void *ida_leak_fn(void *arg)
21636 ++{
21637 ++ struct ida *ida = arg;
21638 ++ time_t s = time(NULL);
21639 ++ int i, ret;
21640 ++
21641 ++ rcu_register_thread();
21642 ++
21643 ++ do for (i = 0; i < 1000; i++) {
21644 ++ ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
21645 ++ if (ret >= 0)
21646 ++ ida_free(ida, 128);
21647 ++ } while (time(NULL) < s + 2);
21648 ++
21649 ++ rcu_unregister_thread();
21650 ++ return NULL;
21651 ++}
21652 ++
21653 + void ida_thread_tests(void)
21654 + {
21655 ++ DEFINE_IDA(ida);
21656 + pthread_t threads[20];
21657 + int i;
21658 +
21659 +@@ -536,6 +555,16 @@ void ida_thread_tests(void)
21660 +
21661 + while (i--)
21662 + pthread_join(threads[i], NULL);
21663 ++
21664 ++ for (i = 0; i < ARRAY_SIZE(threads); i++)
21665 ++ if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
21666 ++ perror("creating ida thread");
21667 ++ exit(1);
21668 ++ }
21669 ++
21670 ++ while (i--)
21671 ++ pthread_join(threads[i], NULL);
21672 ++ assert(ida_is_empty(&ida));
21673 + }
21674 +
21675 + void ida_tests(void)
21676 +diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
21677 +index 944ad4721c83c..da14eaac71d03 100644
21678 +--- a/tools/testing/selftests/bpf/bench.c
21679 ++++ b/tools/testing/selftests/bpf/bench.c
21680 +@@ -311,7 +311,6 @@ extern const struct bench bench_rename_kretprobe;
21681 + extern const struct bench bench_rename_rawtp;
21682 + extern const struct bench bench_rename_fentry;
21683 + extern const struct bench bench_rename_fexit;
21684 +-extern const struct bench bench_rename_fmodret;
21685 + extern const struct bench bench_trig_base;
21686 + extern const struct bench bench_trig_tp;
21687 + extern const struct bench bench_trig_rawtp;
21688 +@@ -332,7 +331,6 @@ static const struct bench *benchs[] = {
21689 + &bench_rename_rawtp,
21690 + &bench_rename_fentry,
21691 + &bench_rename_fexit,
21692 +- &bench_rename_fmodret,
21693 + &bench_trig_base,
21694 + &bench_trig_tp,
21695 + &bench_trig_rawtp,
21696 +@@ -462,4 +460,3 @@ int main(int argc, char **argv)
21697 +
21698 + return 0;
21699 + }
21700 +-
21701 +diff --git a/tools/testing/selftests/bpf/benchs/bench_rename.c b/tools/testing/selftests/bpf/benchs/bench_rename.c
21702 +index e74cff40f4fea..a967674098ada 100644
21703 +--- a/tools/testing/selftests/bpf/benchs/bench_rename.c
21704 ++++ b/tools/testing/selftests/bpf/benchs/bench_rename.c
21705 +@@ -106,12 +106,6 @@ static void setup_fexit()
21706 + attach_bpf(ctx.skel->progs.prog5);
21707 + }
21708 +
21709 +-static void setup_fmodret()
21710 +-{
21711 +- setup_ctx();
21712 +- attach_bpf(ctx.skel->progs.prog6);
21713 +-}
21714 +-
21715 + static void *consumer(void *input)
21716 + {
21717 + return NULL;
21718 +@@ -182,14 +176,3 @@ const struct bench bench_rename_fexit = {
21719 + .report_progress = hits_drops_report_progress,
21720 + .report_final = hits_drops_report_final,
21721 + };
21722 +-
21723 +-const struct bench bench_rename_fmodret = {
21724 +- .name = "rename-fmodret",
21725 +- .validate = validate,
21726 +- .setup = setup_fmodret,
21727 +- .producer_thread = producer,
21728 +- .consumer_thread = consumer,
21729 +- .measure = measure,
21730 +- .report_progress = hits_drops_report_progress,
21731 +- .report_final = hits_drops_report_final,
21732 +-};
21733 +diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
21734 +index 47fa04adc1471..21c2d265c3e8e 100644
21735 +--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c
21736 ++++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
21737 +@@ -265,7 +265,7 @@ void test_sk_assign(void)
21738 + TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false),
21739 + TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true),
21740 + };
21741 +- int server = -1;
21742 ++ __s64 server = -1;
21743 + int server_map;
21744 + int self_net;
21745 +
21746 +diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
21747 +index 5f54c6aec7f07..b25c9c45c1484 100644
21748 +--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
21749 ++++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
21750 +@@ -45,9 +45,9 @@ static int getsetsockopt(void)
21751 + goto err;
21752 + }
21753 +
21754 +- if (*(int *)big_buf != 0x08) {
21755 ++ if (*big_buf != 0x08) {
21756 + log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08",
21757 +- *(int *)big_buf);
21758 ++ (int)*big_buf);
21759 + goto err;
21760 + }
21761 +
21762 +diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
21763 +index 2702df2b23433..9966685866fdf 100644
21764 +--- a/tools/testing/selftests/bpf/prog_tests/test_overhead.c
21765 ++++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
21766 +@@ -61,10 +61,9 @@ void test_test_overhead(void)
21767 + const char *raw_tp_name = "raw_tp/task_rename";
21768 + const char *fentry_name = "fentry/__set_task_comm";
21769 + const char *fexit_name = "fexit/__set_task_comm";
21770 +- const char *fmodret_name = "fmod_ret/__set_task_comm";
21771 + const char *kprobe_func = "__set_task_comm";
21772 + struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog;
21773 +- struct bpf_program *fentry_prog, *fexit_prog, *fmodret_prog;
21774 ++ struct bpf_program *fentry_prog, *fexit_prog;
21775 + struct bpf_object *obj;
21776 + struct bpf_link *link;
21777 + int err, duration = 0;
21778 +@@ -97,11 +96,6 @@ void test_test_overhead(void)
21779 + if (CHECK(!fexit_prog, "find_probe",
21780 + "prog '%s' not found\n", fexit_name))
21781 + goto cleanup;
21782 +- fmodret_prog = bpf_object__find_program_by_title(obj, fmodret_name);
21783 +- if (CHECK(!fmodret_prog, "find_probe",
21784 +- "prog '%s' not found\n", fmodret_name))
21785 +- goto cleanup;
21786 +-
21787 + err = bpf_object__load(obj);
21788 + if (CHECK(err, "obj_load", "err %d\n", err))
21789 + goto cleanup;
21790 +@@ -148,12 +142,6 @@ void test_test_overhead(void)
21791 + test_run("fexit");
21792 + bpf_link__destroy(link);
21793 +
21794 +- /* attach fmod_ret */
21795 +- link = bpf_program__attach_trace(fmodret_prog);
21796 +- if (CHECK(IS_ERR(link), "attach fmod_ret", "err %ld\n", PTR_ERR(link)))
21797 +- goto cleanup;
21798 +- test_run("fmod_ret");
21799 +- bpf_link__destroy(link);
21800 + cleanup:
21801 + prctl(PR_SET_NAME, comm, 0L, 0L, 0L);
21802 + bpf_object__close(obj);
21803 +diff --git a/tools/testing/selftests/bpf/progs/test_overhead.c b/tools/testing/selftests/bpf/progs/test_overhead.c
21804 +index 42403d088abc9..abb7344b531f4 100644
21805 +--- a/tools/testing/selftests/bpf/progs/test_overhead.c
21806 ++++ b/tools/testing/selftests/bpf/progs/test_overhead.c
21807 +@@ -39,10 +39,4 @@ int BPF_PROG(prog5, struct task_struct *tsk, const char *buf, bool exec)
21808 + return 0;
21809 + }
21810 +
21811 +-SEC("fmod_ret/__set_task_comm")
21812 +-int BPF_PROG(prog6, struct task_struct *tsk, const char *buf, bool exec)
21813 +-{
21814 +- return !tsk;
21815 +-}
21816 +-
21817 + char _license[] SEC("license") = "GPL";
21818 +diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
21819 +index 458b0d69133e4..553a282d816ab 100644
21820 +--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
21821 ++++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
21822 +@@ -18,11 +18,11 @@
21823 + #define MAX_ULONG_STR_LEN 7
21824 + #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
21825 +
21826 ++const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
21827 + static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
21828 + {
21829 +- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
21830 + unsigned char i;
21831 +- char name[64];
21832 ++ char name[sizeof(tcp_mem_name)];
21833 + int ret;
21834 +
21835 + memset(name, 0, sizeof(name));
21836 +diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
21837 +index b2e6f9b0894d8..2b64bc563a12e 100644
21838 +--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
21839 ++++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
21840 +@@ -18,11 +18,11 @@
21841 + #define MAX_ULONG_STR_LEN 7
21842 + #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
21843 +
21844 ++const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
21845 + static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
21846 + {
21847 +- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
21848 + unsigned char i;
21849 +- char name[64];
21850 ++ char name[sizeof(tcp_mem_name)];
21851 + int ret;
21852 +
21853 + memset(name, 0, sizeof(name));
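Both test_sysctl_loop fixes above hoist the long sysctl name to a const global and size the scratch buffer with sizeof(tcp_mem_name) instead of a hard-coded 64, which the loop2 string had silently outgrown. sizeof on a string array counts the terminating NUL, so the buffer is always exactly large enough:

    #include <stdio.h>

    static const char tcp_mem_name[] =
        "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";

    int main(void)
    {
        char name[sizeof(tcp_mem_name)];     /* string length + NUL */

        printf("%zu bytes\n", sizeof(name));
        return 0;
    }
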
21854 +diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c
21855 +index 5611b564d3b1c..f54b2293c490f 100644
21856 +--- a/tools/testing/selftests/bpf/progs/test_vmlinux.c
21857 ++++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c
21858 +@@ -19,12 +19,14 @@ SEC("tp/syscalls/sys_enter_nanosleep")
21859 + int handle__tp(struct trace_event_raw_sys_enter *args)
21860 + {
21861 + struct __kernel_timespec *ts;
21862 ++ long tv_nsec;
21863 +
21864 + if (args->id != __NR_nanosleep)
21865 + return 0;
21866 +
21867 + ts = (void *)args->args[0];
21868 +- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
21869 ++ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
21870 ++ tv_nsec != MY_TV_NSEC)
21871 + return 0;
21872 +
21873 + tp_called = true;
21874 +@@ -35,12 +37,14 @@ SEC("raw_tp/sys_enter")
21875 + int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
21876 + {
21877 + struct __kernel_timespec *ts;
21878 ++ long tv_nsec;
21879 +
21880 + if (id != __NR_nanosleep)
21881 + return 0;
21882 +
21883 + ts = (void *)PT_REGS_PARM1_CORE(regs);
21884 +- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
21885 ++ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
21886 ++ tv_nsec != MY_TV_NSEC)
21887 + return 0;
21888 +
21889 + raw_tp_called = true;
21890 +@@ -51,12 +55,14 @@ SEC("tp_btf/sys_enter")
21891 + int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
21892 + {
21893 + struct __kernel_timespec *ts;
21894 ++ long tv_nsec;
21895 +
21896 + if (id != __NR_nanosleep)
21897 + return 0;
21898 +
21899 + ts = (void *)PT_REGS_PARM1_CORE(regs);
21900 +- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
21901 ++ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
21902 ++ tv_nsec != MY_TV_NSEC)
21903 + return 0;
21904 +
21905 + tp_btf_called = true;
21906 +diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
21907 +index 7449a4b8f1f9a..9098f1e7433fd 100644
21908 +--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
21909 ++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
21910 +@@ -25,12 +25,12 @@ echo 'wakeup_latency u64 lat pid_t pid' >> synthetic_events
21911 + echo 'hist:keys=pid:ts1=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger
21912 + echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts1:onmatch(sched.sched_wakeup).wakeup_latency($wakeup_lat,next_pid) if next_comm=="ping"' > events/sched/sched_switch/trigger
21913 +
21914 +-echo 'waking+wakeup_latency u64 lat; pid_t pid' >> synthetic_events
21915 +-echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking+wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger
21916 +-echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking+wakeup_latency/trigger
21917 ++echo 'waking_plus_wakeup_latency u64 lat; pid_t pid' >> synthetic_events
21918 ++echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking_plus_wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger
21919 ++echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking_plus_wakeup_latency/trigger
21920 +
21921 + ping $LOCALHOST -c 3
21922 +-if ! grep -q "pid:" events/synthetic/waking+wakeup_latency/hist; then
21923 ++if ! grep -q "pid:" events/synthetic/waking_plus_wakeup_latency/hist; then
21924 + fail "Failed to create combined histogram"
21925 + fi
21926 +
21927 +diff --git a/tools/testing/selftests/lkdtm/run.sh b/tools/testing/selftests/lkdtm/run.sh
21928 +index 8383eb89d88a9..bb7a1775307b8 100755
21929 +--- a/tools/testing/selftests/lkdtm/run.sh
21930 ++++ b/tools/testing/selftests/lkdtm/run.sh
21931 +@@ -82,7 +82,7 @@ dmesg > "$DMESG"
21932 + ($SHELL -c 'cat <(echo '"$test"') >'"$TRIGGER" 2>/dev/null) || true
21933 +
21934 + # Record and dump the results
21935 +-dmesg | diff --changed-group-format='%>' --unchanged-group-format='' "$DMESG" - > "$LOG" || true
21936 ++dmesg | comm --nocheck-order -13 "$DMESG" - > "$LOG" || true
21937 +
21938 + cat "$LOG"
21939 + # Check for expected output
21940 +diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
21941 +index 3b42c06b59858..c5e50ab2ced60 100644
21942 +--- a/tools/testing/selftests/net/config
21943 ++++ b/tools/testing/selftests/net/config
21944 +@@ -31,3 +31,4 @@ CONFIG_NET_SCH_ETF=m
21945 + CONFIG_NET_SCH_NETEM=y
21946 + CONFIG_TEST_BLACKHOLE_DEV=m
21947 + CONFIG_KALLSYMS=y
21948 ++CONFIG_NET_FOU=m
21949 +diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
21950 +index a0b5f57d6bd31..0727e2012b685 100755
21951 +--- a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
21952 ++++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
21953 +@@ -215,10 +215,16 @@ switch_create()
21954 +
21955 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
21956 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
21957 ++
21958 ++ sysctl_set net.ipv4.conf.all.rp_filter 0
21959 ++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
21960 ++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
21961 + }
21962 +
21963 + switch_destroy()
21964 + {
21965 ++ sysctl_restore net.ipv4.conf.all.rp_filter
21966 ++
21967 + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
21968 + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
21969 +
21970 +@@ -359,6 +365,10 @@ ns_switch_create()
21971 +
21972 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
21973 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
21974 ++
21975 ++ sysctl_set net.ipv4.conf.all.rp_filter 0
21976 ++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
21977 ++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
21978 + }
21979 + export -f ns_switch_create
21980 +
21981 +diff --git a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
21982 +index 1209031bc794d..5d97fa347d75a 100755
21983 +--- a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
21984 ++++ b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
21985 +@@ -237,10 +237,16 @@ switch_create()
21986 +
21987 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
21988 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
21989 ++
21990 ++ sysctl_set net.ipv4.conf.all.rp_filter 0
21991 ++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
21992 ++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
21993 + }
21994 +
21995 + switch_destroy()
21996 + {
21997 ++ sysctl_restore net.ipv4.conf.all.rp_filter
21998 ++
21999 + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
22000 + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
22001 +
22002 +@@ -402,6 +408,10 @@ ns_switch_create()
22003 +
22004 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
22005 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
22006 ++
22007 ++ sysctl_set net.ipv4.conf.all.rp_filter 0
22008 ++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
22009 ++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
22010 + }
22011 + export -f ns_switch_create
22012 +
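The rp_filter changes above disable strict reverse-path filtering so the routed VXLAN traffic in these tests is not dropped by the reverse-path check, and the sysctl_set/sysctl_restore helpers (defined in net/forwarding/lib.sh) save and restore the previous values. A simplified sketch of that save-and-restore pattern, not the library's actual implementation:

    declare -A SYSCTL_ORIG      # bash associative array keyed by sysctl name

    sysctl_set()
    {
            local key=$1; shift
            SYSCTL_ORIG[$key]=$(sysctl -n "$key")
            sysctl -qw "$key=$1"
    }

    sysctl_restore()
    {
            local key=$1
            sysctl -qw "$key=${SYSCTL_ORIG[$key]}"
    }

    sysctl_set net.ipv4.conf.all.rp_filter 0
    # ... run the test ...
    sysctl_restore net.ipv4.conf.all.rp_filter
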
22013 +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
22014 +index acf02e156d20f..ed163e4ad4344 100755
22015 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
22016 ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
22017 +@@ -421,9 +421,9 @@ do_transfer()
22018 + duration=$(printf "(duration %05sms)" $duration)
22019 + if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
22020 + echo "$duration [ FAIL ] client exit code $retc, server $rets" 1>&2
22021 +- echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
22022 ++ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
22023 + ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
22024 +- echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
22025 ++ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
22026 + ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
22027 +
22028 + cat "$capout"
22029 +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
22030 +index dd42c2f692d01..9cb0c6af326ba 100755
22031 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
22032 ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
22033 +@@ -167,9 +167,9 @@ do_transfer()
22034 +
22035 + if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
22036 + echo " client exit code $retc, server $rets" 1>&2
22037 +- echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
22038 ++ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
22039 + ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
22040 +- echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
22041 ++ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
22042 + ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
22043 +
22044 + cat "$capout"
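The echo -e fixes above matter because bash's builtin echo prints backslash escapes literally unless -e is given (dash's echo, by contrast, interprets them), so the original lines emitted a literal \n. A quick illustration; printf is the fully portable alternative:

    echo "a\nb"         # bash: prints a\nb on one line
    echo -e "a\nb"      # bash: prints a and b on separate lines
    printf 'a\nb\n'     # same output, portable across POSIX shells
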
22045 +diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
22046 +index bdbf4b3125b6a..28ea3753da207 100755
22047 +--- a/tools/testing/selftests/net/rtnetlink.sh
22048 ++++ b/tools/testing/selftests/net/rtnetlink.sh
22049 +@@ -521,6 +521,11 @@ kci_test_encap_fou()
22050 + return $ksft_skip
22051 + fi
22052 +
22053 ++ if ! /sbin/modprobe -q -n fou; then
22054 ++ echo "SKIP: module fou is not found"
22055 ++ return $ksft_skip
22056 ++ fi
22057 ++ /sbin/modprobe -q fou
22058 + ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null
22059 + if [ $? -ne 0 ];then
22060 + echo "FAIL: can't add fou port 7777, skipping test"
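The new guard above uses modprobe's dry-run mode to probe for the module before loading it: -n does everything except actually insert the module, and -q keeps the failure quiet so the test can skip cleanly. The same pattern with a hypothetical module name:

    mod=foo_bar
    if ! /sbin/modprobe -q -n "$mod"; then
            echo "SKIP: module $mod is not found"
            exit 4      # kselftest skip exit code
    fi
    /sbin/modprobe -q "$mod"
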
22061 +diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
22062 +index 8a8d0f456946c..0d783e1065c86 100755
22063 +--- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
22064 ++++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
22065 +@@ -1,17 +1,19 @@
22066 + #!/bin/sh
22067 + # SPDX-License-Identifier: GPL-2.0-only
22068 +
22069 ++KSELFTESTS_SKIP=4
22070 ++
22071 + . ./eeh-functions.sh
22072 +
22073 + if ! eeh_supported ; then
22074 + echo "EEH not supported on this system, skipping"
22075 +- exit 0;
22076 ++ exit $KSELFTESTS_SKIP;
22077 + fi
22078 +
22079 + if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \
22080 + [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then
22081 + echo "debugfs EEH testing files are missing. Is debugfs mounted?"
22082 +- exit 1;
22083 ++ exit $KSELFTESTS_SKIP;
22084 + fi
22085 +
22086 + pre_lspci=`mktemp`
22087 +@@ -84,4 +86,5 @@ echo "$failed devices failed to recover ($dev_count tested)"
22088 + lspci | diff -u $pre_lspci -
22089 + rm -f $pre_lspci
22090 +
22091 +-exit $failed
22092 ++test "$failed" == 0
22093 ++exit $?
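The exit-code changes above align the script with the kselftest convention (see tools/testing/selftests/kselftest.h): 0 means pass, 1 fail, and 4 skip; the final test/exit pair collapses any nonzero failure count into a plain exit status of 1 rather than leaking the raw count. A sketch of the skip convention, with a hypothetical prerequisite check:

    KSELFTESTS_SKIP=4
    if ! have_required_hardware; then   # hypothetical prerequisite check
            echo "prerequisite missing, skipping"
            exit $KSELFTESTS_SKIP
    fi
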
22094 +diff --git a/tools/testing/selftests/vm/config b/tools/testing/selftests/vm/config
22095 +index 3ba674b64fa9f..69dd0d1aa30b2 100644
22096 +--- a/tools/testing/selftests/vm/config
22097 ++++ b/tools/testing/selftests/vm/config
22098 +@@ -3,3 +3,4 @@ CONFIG_USERFAULTFD=y
22099 + CONFIG_TEST_VMALLOC=m
22100 + CONFIG_DEVICE_PRIVATE=y
22101 + CONFIG_TEST_HMM=m
22102 ++CONFIG_GUP_BENCHMARK=y