From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Fri, 26 Nov 2021 11:57:48 +0000 (UTC)
Message-Id: 1637927847.622e893ac3faa125fb76d52b63229cee5b8c0fae.mpagano@gentoo
commit:     622e893ac3faa125fb76d52b63229cee5b8c0fae
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Nov 26 11:57:27 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Nov 26 11:57:27 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=622e893a

Linux patch 5.10.82

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1081_linux-5.10.82.patch | 6378 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6382 insertions(+)

diff --git a/0000_README b/0000_README
index f4fa5656..db2b4487 100644
--- a/0000_README
+++ b/0000_README
@@ -367,6 +367,10 @@ Patch: 1080_linux-5.10.81.patch
From: http://www.kernel.org
Desc: Linux 5.10.81

+Patch: 1081_linux-5.10.82.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.82
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1081_linux-5.10.82.patch b/1081_linux-5.10.82.patch
new file mode 100644
index 00000000..3518a7c9
--- /dev/null
+++ b/1081_linux-5.10.82.patch
@@ -0,0 +1,6378 @@
+diff --git a/Makefile b/Makefile
+index 1baeadb574f1c..84b15766ad66f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 81
++SUBLEVEL = 82
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
+index 605b6d2f4a569..1dae02bb82c2d 100644
+--- a/arch/arm/boot/dts/bcm-nsp.dtsi
++++ b/arch/arm/boot/dts/bcm-nsp.dtsi
+@@ -77,7 +77,7 @@
+ interrupt-affinity = <&cpu0>, <&cpu1>;
+ };
+
+- mpcore@19000000 {
++ mpcore-bus@19000000 {
+ compatible = "simple-bus";
+ ranges = <0x00000000 0x19000000 0x00023000>;
+ #address-cells = <1>;
+@@ -219,7 +219,7 @@
+ status = "disabled";
+ };
+
+- sdio: sdhci@21000 {
++ sdio: mmc@21000 {
+ compatible = "brcm,sdhci-iproc-cygnus";
+ reg = <0x21000 0x100>;
+ interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm/boot/dts/bcm53016-meraki-mr32.dts b/arch/arm/boot/dts/bcm53016-meraki-mr32.dts
+index 612d61852bfb9..577a4dc604d93 100644
+--- a/arch/arm/boot/dts/bcm53016-meraki-mr32.dts
++++ b/arch/arm/boot/dts/bcm53016-meraki-mr32.dts
+@@ -195,3 +195,25 @@
+ };
+ };
+ };
++
++&srab {
++ status = "okay";
++
++ ports {
++ port@0 {
++ reg = <0>;
++ label = "poe";
++ };
++
++ port@5 {
++ reg = <5>;
++ label = "cpu";
++ ethernet = <&gmac0>;
++
++ fixed-link {
++ speed = <1000>;
++ duplex-full;
++ };
++ };
++ };
++};
+diff --git a/arch/arm/boot/dts/ls1021a-tsn.dts b/arch/arm/boot/dts/ls1021a-tsn.dts
+index 9d8f0c2a8aba3..aca78b5eddf20 100644
+--- a/arch/arm/boot/dts/ls1021a-tsn.dts
++++ b/arch/arm/boot/dts/ls1021a-tsn.dts
+@@ -251,7 +251,7 @@
+
+ flash@0 {
+ /* Rev. A uses 64MB flash, Rev. B & C use 32MB flash */
+- compatible = "jedec,spi-nor", "s25fl256s1", "s25fl512s";
++ compatible = "jedec,spi-nor";
+ spi-max-frequency = <20000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
+index 827373ef1a547..37026b2fa6497 100644
+--- a/arch/arm/boot/dts/ls1021a.dtsi
++++ b/arch/arm/boot/dts/ls1021a.dtsi
+@@ -331,39 +331,6 @@
+ #thermal-sensor-cells = <1>;
+ };
+
+- thermal-zones {
+- cpu_thermal: cpu-thermal {
+- polling-delay-passive = <1000>;
+- polling-delay = <5000>;
+-
+- thermal-sensors = <&tmu 0>;
+-
+- trips {
+- cpu_alert: cpu-alert {
+- temperature = <85000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+- cpu_crit: cpu-crit {
+- temperature = <95000>;
+- hysteresis = <2000>;
+- type = "critical";
+- };
+- };
+-
+- cooling-maps {
+- map0 {
+- trip = <&cpu_alert>;
+- cooling-device =
+- <&cpu0 THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu1 THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- };
+- };
+- };
+- };
+-
+ dspi0: spi@2100000 {
+ compatible = "fsl,ls1021a-v1.0-dspi";
+ #address-cells = <1>;
+@@ -1018,4 +985,37 @@
+ big-endian;
+ };
+ };
++
++ thermal-zones {
++ cpu_thermal: cpu-thermal {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++
++ thermal-sensors = <&tmu 0>;
++
++ trips {
++ cpu_alert: cpu-alert {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++ cpu_crit: cpu-crit {
++ temperature = <95000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu1 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++ };
+ };
+diff --git a/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi
+index 7f6aefd134514..e7534fe9c53cf 100644
+--- a/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi
++++ b/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi
+@@ -29,7 +29,7 @@
+ compatible = "smsc,lan9221","smsc,lan9115";
+ bank-width = <2>;
+
+- gpmc,mux-add-data;
++ gpmc,mux-add-data = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <42>;
+ gpmc,cs-wr-off-ns = <36>;
+diff --git a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
+index e5da3bc6f1050..218a10c0d8159 100644
+--- a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
++++ b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
+@@ -22,7 +22,7 @@
+ compatible = "smsc,lan9221","smsc,lan9115";
+ bank-width = <2>;
+
+- gpmc,mux-add-data;
++ gpmc,mux-add-data = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <42>;
+ gpmc,cs-wr-off-ns = <36>;
+diff --git a/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts b/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts
+index 282b89ce3d451..33545cf40f3ab 100644
+--- a/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts
++++ b/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts
+@@ -19,12 +19,12 @@
+ stdout-path = "serial0:115200n8";
+ };
+
+- memory@0 {
++ memory@42000000 {
+ reg = <0x42000000 0x3e000000>;
+ device_type = "memory";
+ };
+
+- mdio0: mdio@0 {
++ mdio0: mdio-0 {
+ status = "okay";
+ compatible = "virtual,mdio-gpio";
+ gpios = <&qcom_pinmux 1 GPIO_ACTIVE_HIGH>,
+@@ -91,7 +91,7 @@
+ };
+ };
+
+- mdio1: mdio@1 {
++ mdio1: mdio-1 {
+ status = "okay";
+ compatible = "virtual,mdio-gpio";
+ gpios = <&qcom_pinmux 11 GPIO_ACTIVE_HIGH>,
+diff --git a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
+index 27722c42b61c4..08bddbf0336da 100644
+--- a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
++++ b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
+@@ -262,10 +262,10 @@
+ };
+
+ ab8500_ldo_aux2 {
+- /* Supplies the Cypress TMA140 touchscreen only with 3.3V */
++ /* Supplies the Cypress TMA140 touchscreen only with 3.0V */
+ regulator-name = "AUX2";
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
+ };
+
+ ab8500_ldo_aux3 {
+@@ -284,9 +284,9 @@
+
+ ab8500_ldo_aux5 {
+ regulator-name = "AUX5";
++ /* Intended for 1V8 for touchscreen but actually left unused */
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <2790000>;
+- regulator-always-on;
+ };
+
+ ab8500_ldo_aux6 {
+diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
+index c458f5fb124fb..46f4242e9f95d 100644
+--- a/arch/arm/boot/dts/sun8i-a33.dtsi
++++ b/arch/arm/boot/dts/sun8i-a33.dtsi
+@@ -46,7 +46,7 @@
+ #include <dt-bindings/thermal/thermal.h>
+
+ / {
+- cpu0_opp_table: opp_table0 {
++ cpu0_opp_table: opp-table-cpu {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+@@ -164,7 +164,7 @@
+ io-channels = <&ths>;
+ };
+
+- mali_opp_table: gpu-opp-table {
++ mali_opp_table: opp-table-gpu {
+ compatible = "operating-points-v2";
+
+ opp-144000000 {
+diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
+index c010b27fdb6a6..a746e449b0bae 100644
+--- a/arch/arm/boot/dts/sun8i-a83t.dtsi
++++ b/arch/arm/boot/dts/sun8i-a83t.dtsi
+@@ -200,7 +200,7 @@
+ status = "disabled";
+ };
+
+- cpu0_opp_table: opp_table0 {
++ cpu0_opp_table: opp-table-cluster0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+@@ -253,7 +253,7 @@
+ };
+ };
+
+- cpu1_opp_table: opp_table1 {
++ cpu1_opp_table: opp-table-cluster1 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
+index 4e89701df91f8..ae4f933abb895 100644
+--- a/arch/arm/boot/dts/sun8i-h3.dtsi
++++ b/arch/arm/boot/dts/sun8i-h3.dtsi
+@@ -44,7 +44,7 @@
+ #include <dt-bindings/thermal/thermal.h>
+
+ / {
+- cpu0_opp_table: opp_table0 {
++ cpu0_opp_table: opp-table-cpu {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+@@ -112,7 +112,7 @@
+ };
+ };
+
+- gpu_opp_table: gpu-opp-table {
++ gpu_opp_table: opp-table-gpu {
+ compatible = "operating-points-v2";
+
+ opp-120000000 {
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi
+index cc321c04f1219..f6d7d7f7fdabe 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi
+@@ -343,19 +343,19 @@
+ };
+
+ thermal-zones {
+- cpu-thermal-zone {
++ cpu-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&ths 0>;
+ };
+
+- ddr-thermal-zone {
++ ddr-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&ths 2>;
+ };
+
+- gpu-thermal-zone {
++ gpu-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&ths 1>;
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-cpu-opp.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-cpu-opp.dtsi
+index 578c37490d901..e39db51eb4489 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-cpu-opp.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-cpu-opp.dtsi
+@@ -4,7 +4,7 @@
+ */
+
+ / {
+- cpu0_opp_table: opp_table0 {
++ cpu0_opp_table: opp-table-cpu {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-cpu-opp.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5-cpu-opp.dtsi
+index b2657201957eb..1afad8b437d72 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-cpu-opp.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-cpu-opp.dtsi
+@@ -2,7 +2,7 @@
+ // Copyright (C) 2020 Chen-Yu Tsai <wens@××××.org>
+
+ / {
+- cpu_opp_table: cpu-opp-table {
++ cpu_opp_table: opp-table-cpu {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+index 10489e5086956..0ee8a5adf02b0 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+@@ -204,7 +204,7 @@
+ };
+ };
+
+- gpu_thermal {
++ gpu-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&ths 1>;
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi
+index 1a5eddc5a40f3..653452926d857 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi
+@@ -3,7 +3,7 @@
+ // Copyright (C) 2020 Clément Péron <peron.clem@×××××.com>
+
+ / {
+- cpu_opp_table: cpu-opp-table {
++ cpu_opp_table: opp-table-cpu {
+ compatible = "allwinner,sun50i-h6-operating-points";
+ nvmem-cells = <&cpu_speed_grade>;
+ opp-shared;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+index 692d8f4a206da..334af263d7b5d 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+@@ -673,56 +673,56 @@
+ };
+
+ cluster1_core0_watchdog: wdt@c000000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc000000 0x0 0x1000>;
+ clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster1_core1_watchdog: wdt@c010000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc010000 0x0 0x1000>;
+ clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster1_core2_watchdog: wdt@c020000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc020000 0x0 0x1000>;
+ clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster1_core3_watchdog: wdt@c030000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc030000 0x0 0x1000>;
+ clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster2_core0_watchdog: wdt@c100000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc100000 0x0 0x1000>;
+ clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster2_core1_watchdog: wdt@c110000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc110000 0x0 0x1000>;
+ clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster2_core2_watchdog: wdt@c120000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc120000 0x0 0x1000>;
+ clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster2_core3_watchdog: wdt@c130000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc130000 0x0 0x1000>;
+ clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clock-names = "wdog_clk", "apb_pclk";
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+index 4d34d82b898a4..eb6641a3566e1 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+@@ -351,56 +351,56 @@
+ };
+
+ cluster1_core0_watchdog: wdt@c000000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc000000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster1_core1_watchdog: wdt@c010000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc010000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster2_core0_watchdog: wdt@c100000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc100000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster2_core1_watchdog: wdt@c110000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc110000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster3_core0_watchdog: wdt@c200000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc200000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster3_core1_watchdog: wdt@c210000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc210000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster4_core0_watchdog: wdt@c300000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc300000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "wdog_clk", "apb_pclk";
+ };
+
+ cluster4_core1_watchdog: wdt@c310000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc310000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "wdog_clk", "apb_pclk";
+diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+index 994140fbc916e..fe4dce23ef7e1 100644
+--- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
++++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+@@ -1086,7 +1086,7 @@
+ };
+
+ watchdog0: watchdog@e8a06000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xe8a06000 0x0 0x1000>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_OSC32K>,
+@@ -1095,7 +1095,7 @@
+ };
+
+ watchdog1: watchdog@e8a07000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xe8a07000 0x0 0x1000>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_OSC32K>,
+diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+index 014735a9bc731..fbce014bdc270 100644
+--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
++++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+@@ -840,7 +840,7 @@
+ };
+
+ watchdog0: watchdog@f8005000 {
+- compatible = "arm,sp805-wdt", "arm,primecell";
++ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xf8005000 0x0 0x1000>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ao_ctrl HI6220_WDT0_PCLK>,
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index 3ceb36cac512f..9cb8f7a052df9 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -200,7 +200,7 @@
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <1>;
+- qcom,controlled-remotely = <1>;
++ qcom,controlled-remotely;
+ qcom,config-pipe-trust-reg = <0>;
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+index c45870600909f..9e04ac3f596d0 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+@@ -300,38 +300,42 @@
+ LITTLE_CPU_SLEEP_0: cpu-sleep-0-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "little-retention";
++ /* CPU Retention (C2D), L2 Active */
+ arm,psci-suspend-param = <0x00000002>;
+ entry-latency-us = <81>;
+ exit-latency-us = <86>;
+- min-residency-us = <200>;
++ min-residency-us = <504>;
+ };
+
+ LITTLE_CPU_SLEEP_1: cpu-sleep-0-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "little-power-collapse";
++ /* CPU + L2 Power Collapse (C3, D4) */
+ arm,psci-suspend-param = <0x40000003>;
+- entry-latency-us = <273>;
+- exit-latency-us = <612>;
+- min-residency-us = <1000>;
++ entry-latency-us = <814>;
++ exit-latency-us = <4562>;
++ min-residency-us = <9183>;
+ local-timer-stop;
+ };
+
+ BIG_CPU_SLEEP_0: cpu-sleep-1-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "big-retention";
++ /* CPU Retention (C2D), L2 Active */
+ arm,psci-suspend-param = <0x00000002>;
+ entry-latency-us = <79>;
+ exit-latency-us = <82>;
+- min-residency-us = <200>;
++ min-residency-us = <1302>;
+ };
+
+ BIG_CPU_SLEEP_1: cpu-sleep-1-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "big-power-collapse";
++ /* CPU + L2 Power Collapse (C3, D4) */
+ arm,psci-suspend-param = <0x40000003>;
+- entry-latency-us = <336>;
+- exit-latency-us = <525>;
+- min-residency-us = <1000>;
++ entry-latency-us = <724>;
++ exit-latency-us = <2027>;
++ min-residency-us = <9419>;
+ local-timer-stop;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+index 219b7507a10fb..4297c1db5a413 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+@@ -379,10 +379,6 @@
+ };
+ };
+
+-&cdn_dp {
+- status = "okay";
+-};
+-
+ &cpu_b0 {
+ cpu-supply = <&vdd_cpu_b>;
+ };
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
+index 4a86efa32d687..f7124e15f0ff6 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
++++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
+@@ -131,7 +131,7 @@
+ reg = <0>;
+
+ partition@0 {
+- label = "data";
++ label = "spi0-data";
+ reg = <0x0 0x100000>;
+ };
+ };
+@@ -149,7 +149,7 @@
+ reg = <0>;
+
+ partition@0 {
+- label = "data";
++ label = "spi1-data";
+ reg = <0x0 0x84000>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+index 771f60e0346d0..9e198cacc37dd 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
++++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+@@ -688,7 +688,7 @@
+ };
+
+ uart0: serial@ff000000 {
+- compatible = "cdns,uart-r1p12", "xlnx,xuartps";
++ compatible = "xlnx,zynqmp-uart", "cdns,uart-r1p12";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 21 4>;
+@@ -698,7 +698,7 @@
+ };
+
+ uart1: serial@ff010000 {
+- compatible = "cdns,uart-r1p12", "xlnx,xuartps";
++ compatible = "xlnx,zynqmp-uart", "cdns,uart-r1p12";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 22 4>;
+diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
+index 79280c53b9a61..a463b9bceed41 100644
+--- a/arch/arm64/kernel/vdso32/Makefile
++++ b/arch/arm64/kernel/vdso32/Makefile
+@@ -48,7 +48,8 @@ cc32-as-instr = $(call try-run,\
+ # As a result we set our own flags here.
+
+ # KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile
+-VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include)
++VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc
++VDSO_CPPFLAGS += -isystem $(shell $(CC_COMPAT) -print-file-name=include 2>/dev/null)
+ VDSO_CPPFLAGS += $(LINUXINCLUDE)
+
+ # Common C and assembly flags
+diff --git a/arch/hexagon/include/asm/timer-regs.h b/arch/hexagon/include/asm/timer-regs.h
+deleted file mode 100644
+index ee6c61423a058..0000000000000
+--- a/arch/hexagon/include/asm/timer-regs.h
++++ /dev/null
+@@ -1,26 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-only */
+-/*
+- * Timer support for Hexagon
+- *
+- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+- */
+-
+-#ifndef _ASM_TIMER_REGS_H
+-#define _ASM_TIMER_REGS_H
+-
+-/* This stuff should go into a platform specific file */
+-#define TCX0_CLK_RATE 19200
+-#define TIMER_ENABLE 0
+-#define TIMER_CLR_ON_MATCH 1
+-
+-/*
+- * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
+- * release 1.1, and then it's "adjustable" and probably not defaulted.
+- */
+-#define RTOS_TIMER_INT 3
+-#ifdef CONFIG_HEXAGON_COMET
+-#define RTOS_TIMER_REGS_ADDR 0xAB000000UL
+-#endif
+-#define SLEEP_CLK_RATE 32000
+-
+-#endif
+diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h
+index 8d4ec76fceb45..dfe69e118b2be 100644
+--- a/arch/hexagon/include/asm/timex.h
++++ b/arch/hexagon/include/asm/timex.h
+@@ -7,11 +7,10 @@
+ #define _ASM_TIMEX_H
+
+ #include <asm-generic/timex.h>
+-#include <asm/timer-regs.h>
+ #include <asm/hexagon_vm.h>
+
+ /* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. */
+-#define CLOCK_TICK_RATE TCX0_CLK_RATE
++#define CLOCK_TICK_RATE 19200
+
+ #define ARCH_HAS_READ_CURRENT_TIMER
+
+diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c
+index feffe527ac929..febc95714d756 100644
+--- a/arch/hexagon/kernel/time.c
++++ b/arch/hexagon/kernel/time.c
+@@ -17,9 +17,10 @@
+ #include <linux/of_irq.h>
+ #include <linux/module.h>
+
+-#include <asm/timer-regs.h>
+ #include <asm/hexagon_vm.h>
+
++#define TIMER_ENABLE BIT(0)
++
+ /*
+ * For the clocksource we need:
+ * pcycle frequency (600MHz)
+@@ -33,6 +34,13 @@ cycles_t pcycle_freq_mhz;
+ cycles_t thread_freq_mhz;
+ cycles_t sleep_clk_freq;
+
++/*
++ * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
++ * release 1.1, and then it's "adjustable" and probably not defaulted.
++ */
++#define RTOS_TIMER_INT 3
++#define RTOS_TIMER_REGS_ADDR 0xAB000000UL
++
+ static struct resource rtos_timer_resources[] = {
+ {
+ .start = RTOS_TIMER_REGS_ADDR,
+@@ -80,7 +88,7 @@ static int set_next_event(unsigned long delta, struct clock_event_device *evt)
+ iowrite32(0, &rtos_timer->clear);
+
+ iowrite32(delta, &rtos_timer->match);
+- iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable);
++ iowrite32(TIMER_ENABLE, &rtos_timer->enable);
+ return 0;
+ }
+
+diff --git a/arch/hexagon/lib/io.c b/arch/hexagon/lib/io.c
+index d35d69d6588c4..55f75392857b0 100644
+--- a/arch/hexagon/lib/io.c
++++ b/arch/hexagon/lib/io.c
+@@ -27,6 +27,7 @@ void __raw_readsw(const void __iomem *addr, void *data, int len)
+ *dst++ = *src;
+
+ }
++EXPORT_SYMBOL(__raw_readsw);
+
+ /*
+ * __raw_writesw - read words a short at a time
+@@ -47,6 +48,7 @@ void __raw_writesw(void __iomem *addr, const void *data, int len)
+
+
+ }
++EXPORT_SYMBOL(__raw_writesw);
+
+ /* Pretty sure len is pre-adjusted for the length of the access already */
+ void __raw_readsl(const void __iomem *addr, void *data, int len)
+@@ -62,6 +64,7 @@ void __raw_readsl(const void __iomem *addr, void *data, int len)
+
+
+ }
++EXPORT_SYMBOL(__raw_readsl);
+
+ void __raw_writesl(void __iomem *addr, const void *data, int len)
+ {
+@@ -76,3 +79,4 @@ void __raw_writesl(void __iomem *addr, const void *data, int len)
+
+
+ }
++EXPORT_SYMBOL(__raw_writesl);
861 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
862 +index 5c6e9ed9b2a75..94a748e95231b 100644
863 +--- a/arch/mips/Kconfig
864 ++++ b/arch/mips/Kconfig
865 +@@ -320,6 +320,9 @@ config BCM63XX
866 + select SYS_SUPPORTS_32BIT_KERNEL
867 + select SYS_SUPPORTS_BIG_ENDIAN
868 + select SYS_HAS_EARLY_PRINTK
869 ++ select SYS_HAS_CPU_BMIPS32_3300
870 ++ select SYS_HAS_CPU_BMIPS4350
871 ++ select SYS_HAS_CPU_BMIPS4380
872 + select SWAP_IO_SPACE
873 + select GPIOLIB
874 + select MIPS_L1_CACHE_SHIFT_4
875 +diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
876 +index 164115944a7fd..aba6e2d6a736c 100644
877 +--- a/arch/mips/bcm63xx/clk.c
878 ++++ b/arch/mips/bcm63xx/clk.c
879 +@@ -381,6 +381,12 @@ void clk_disable(struct clk *clk)
880 +
881 + EXPORT_SYMBOL(clk_disable);
882 +
883 ++struct clk *clk_get_parent(struct clk *clk)
884 ++{
885 ++ return NULL;
886 ++}
887 ++EXPORT_SYMBOL(clk_get_parent);
888 ++
889 + unsigned long clk_get_rate(struct clk *clk)
890 + {
891 + if (!clk)
892 +diff --git a/arch/mips/generic/yamon-dt.c b/arch/mips/generic/yamon-dt.c
893 +index a3aa22c77cadc..a07a5edbcda78 100644
894 +--- a/arch/mips/generic/yamon-dt.c
895 ++++ b/arch/mips/generic/yamon-dt.c
896 +@@ -75,7 +75,7 @@ static unsigned int __init gen_fdt_mem_array(
897 + __init int yamon_dt_append_memory(void *fdt,
898 + const struct yamon_mem_region *regions)
899 + {
900 +- unsigned long phys_memsize, memsize;
901 ++ unsigned long phys_memsize = 0, memsize;
902 + __be32 mem_array[2 * MAX_MEM_ARRAY_ENTRIES];
903 + unsigned int mem_entries;
904 + int i, err, mem_off;
905 +diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
906 +index dd819e31fcbbf..4916cccf378fd 100644
907 +--- a/arch/mips/lantiq/clk.c
908 ++++ b/arch/mips/lantiq/clk.c
909 +@@ -158,6 +158,12 @@ void clk_deactivate(struct clk *clk)
910 + }
911 + EXPORT_SYMBOL(clk_deactivate);
912 +
913 ++struct clk *clk_get_parent(struct clk *clk)
914 ++{
915 ++ return NULL;
916 ++}
917 ++EXPORT_SYMBOL(clk_get_parent);
918 ++
919 + static inline u32 get_counter_resolution(void)
920 + {
921 + u32 res;
922 +diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
923 +index 240bb68ec2478..ff3ba7e778901 100644
924 +--- a/arch/mips/sni/time.c
925 ++++ b/arch/mips/sni/time.c
926 +@@ -18,14 +18,14 @@ static int a20r_set_periodic(struct clock_event_device *evt)
927 + {
928 + *(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0x34;
929 + wmb();
930 +- *(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV;
931 ++ *(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV & 0xff;
932 + wmb();
933 + *(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV >> 8;
934 + wmb();
935 +
936 + *(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0xb4;
937 + wmb();
938 +- *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV;
939 ++ *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV & 0xff;
940 + wmb();
941 + *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV >> 8;
942 + wmb();
943 +diff --git a/arch/powerpc/boot/dts/charon.dts b/arch/powerpc/boot/dts/charon.dts
944 +index 408b486b13dff..cd589539f313f 100644
945 +--- a/arch/powerpc/boot/dts/charon.dts
946 ++++ b/arch/powerpc/boot/dts/charon.dts
947 +@@ -35,7 +35,7 @@
948 + };
949 + };
950 +
951 +- memory {
952 ++ memory@0 {
953 + device_type = "memory";
954 + reg = <0x00000000 0x08000000>; // 128MB
955 + };
956 +diff --git a/arch/powerpc/boot/dts/digsy_mtc.dts b/arch/powerpc/boot/dts/digsy_mtc.dts
957 +index 0e5e9d3acf79f..19a14e62e65f4 100644
958 +--- a/arch/powerpc/boot/dts/digsy_mtc.dts
959 ++++ b/arch/powerpc/boot/dts/digsy_mtc.dts
960 +@@ -16,7 +16,7 @@
961 + model = "intercontrol,digsy-mtc";
962 + compatible = "intercontrol,digsy-mtc";
963 +
964 +- memory {
965 ++ memory@0 {
966 + reg = <0x00000000 0x02000000>; // 32MB
967 + };
968 +
969 +diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts
970 +index cb2782dd6132c..e7b194775d783 100644
971 +--- a/arch/powerpc/boot/dts/lite5200.dts
972 ++++ b/arch/powerpc/boot/dts/lite5200.dts
973 +@@ -32,7 +32,7 @@
974 + };
975 + };
976 +
977 +- memory {
978 ++ memory@0 {
979 + device_type = "memory";
980 + reg = <0x00000000 0x04000000>; // 64MB
981 + };
982 +diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
983 +index 2b86c81f90485..547cbe726ff23 100644
984 +--- a/arch/powerpc/boot/dts/lite5200b.dts
985 ++++ b/arch/powerpc/boot/dts/lite5200b.dts
986 +@@ -31,7 +31,7 @@
987 + led4 { gpios = <&gpio_simple 2 1>; };
988 + };
989 +
990 +- memory {
991 ++ memory@0 {
992 + reg = <0x00000000 0x10000000>; // 256MB
993 + };
994 +
995 +diff --git a/arch/powerpc/boot/dts/media5200.dts b/arch/powerpc/boot/dts/media5200.dts
996 +index 61cae9dcddef4..f3188018faceb 100644
997 +--- a/arch/powerpc/boot/dts/media5200.dts
998 ++++ b/arch/powerpc/boot/dts/media5200.dts
999 +@@ -32,7 +32,7 @@
1000 + };
1001 + };
1002 +
1003 +- memory {
1004 ++ memory@0 {
1005 + reg = <0x00000000 0x08000000>; // 128MB RAM
1006 + };
1007 +
1008 +diff --git a/arch/powerpc/boot/dts/mpc5200b.dtsi b/arch/powerpc/boot/dts/mpc5200b.dtsi
1009 +index 648fe31795f49..8b796f3b11da7 100644
1010 +--- a/arch/powerpc/boot/dts/mpc5200b.dtsi
1011 ++++ b/arch/powerpc/boot/dts/mpc5200b.dtsi
1012 +@@ -33,7 +33,7 @@
1013 + };
1014 + };
1015 +
1016 +- memory: memory {
1017 ++ memory: memory@0 {
1018 + device_type = "memory";
1019 + reg = <0x00000000 0x04000000>; // 64MB
1020 + };
1021 +diff --git a/arch/powerpc/boot/dts/o2d.dts b/arch/powerpc/boot/dts/o2d.dts
1022 +index 24a46f65e5299..e0a8d3034417f 100644
1023 +--- a/arch/powerpc/boot/dts/o2d.dts
1024 ++++ b/arch/powerpc/boot/dts/o2d.dts
1025 +@@ -12,7 +12,7 @@
1026 + model = "ifm,o2d";
1027 + compatible = "ifm,o2d";
1028 +
1029 +- memory {
1030 ++ memory@0 {
1031 + reg = <0x00000000 0x08000000>; // 128MB
1032 + };
1033 +
1034 +diff --git a/arch/powerpc/boot/dts/o2d.dtsi b/arch/powerpc/boot/dts/o2d.dtsi
1035 +index 6661955a2be47..b55a9e5bd828c 100644
1036 +--- a/arch/powerpc/boot/dts/o2d.dtsi
1037 ++++ b/arch/powerpc/boot/dts/o2d.dtsi
1038 +@@ -19,7 +19,7 @@
1039 + model = "ifm,o2d";
1040 + compatible = "ifm,o2d";
1041 +
1042 +- memory {
1043 ++ memory@0 {
1044 + reg = <0x00000000 0x04000000>; // 64MB
1045 + };
1046 +
1047 +diff --git a/arch/powerpc/boot/dts/o2dnt2.dts b/arch/powerpc/boot/dts/o2dnt2.dts
1048 +index eeba7f5507d5d..c2eedbd1f5fcb 100644
1049 +--- a/arch/powerpc/boot/dts/o2dnt2.dts
1050 ++++ b/arch/powerpc/boot/dts/o2dnt2.dts
1051 +@@ -12,7 +12,7 @@
1052 + model = "ifm,o2dnt2";
1053 + compatible = "ifm,o2d";
1054 +
1055 +- memory {
1056 ++ memory@0 {
1057 + reg = <0x00000000 0x08000000>; // 128MB
1058 + };
1059 +
1060 +diff --git a/arch/powerpc/boot/dts/o3dnt.dts b/arch/powerpc/boot/dts/o3dnt.dts
1061 +index fd00396b0593e..e4c1bdd412716 100644
1062 +--- a/arch/powerpc/boot/dts/o3dnt.dts
1063 ++++ b/arch/powerpc/boot/dts/o3dnt.dts
1064 +@@ -12,7 +12,7 @@
1065 + model = "ifm,o3dnt";
1066 + compatible = "ifm,o2d";
1067 +
1068 +- memory {
1069 ++ memory@0 {
1070 + reg = <0x00000000 0x04000000>; // 64MB
1071 + };
1072 +
1073 +diff --git a/arch/powerpc/boot/dts/pcm032.dts b/arch/powerpc/boot/dts/pcm032.dts
1074 +index 780e13d99e7b8..1895bc95900cc 100644
1075 +--- a/arch/powerpc/boot/dts/pcm032.dts
1076 ++++ b/arch/powerpc/boot/dts/pcm032.dts
1077 +@@ -20,7 +20,7 @@
1078 + model = "phytec,pcm032";
1079 + compatible = "phytec,pcm032";
1080 +
1081 +- memory {
1082 ++ memory@0 {
1083 + reg = <0x00000000 0x08000000>; // 128MB
1084 + };
1085 +
1086 +diff --git a/arch/powerpc/boot/dts/tqm5200.dts b/arch/powerpc/boot/dts/tqm5200.dts
1087 +index 9ed0bc78967e1..5bb25a9e40a01 100644
1088 +--- a/arch/powerpc/boot/dts/tqm5200.dts
1089 ++++ b/arch/powerpc/boot/dts/tqm5200.dts
1090 +@@ -32,7 +32,7 @@
1091 + };
1092 + };
1093 +
1094 +- memory {
1095 ++ memory@0 {
1096 + device_type = "memory";
1097 + reg = <0x00000000 0x04000000>; // 64MB
1098 + };
1099 +diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
1100 +index ce5fd93499a74..a61b4ff3b7102 100644
1101 +--- a/arch/powerpc/kernel/head_8xx.S
1102 ++++ b/arch/powerpc/kernel/head_8xx.S
1103 +@@ -766,6 +766,7 @@ _GLOBAL(mmu_pin_tlb)
1104 + #ifdef CONFIG_PIN_TLB_DATA
1105 + LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
1106 + LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
1107 ++ li r8, 0
1108 + #ifdef CONFIG_PIN_TLB_IMMR
1109 + li r0, 3
1110 + #else
1111 +@@ -774,26 +775,26 @@ _GLOBAL(mmu_pin_tlb)
1112 + mtctr r0
1113 + cmpwi r4, 0
1114 + beq 4f
1115 +- LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
1116 + LOAD_REG_ADDR(r9, _sinittext)
1117 +
1118 + 2: ori r0, r6, MD_EVALID
1119 ++ ori r12, r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
1120 + mtspr SPRN_MD_CTR, r5
1121 + mtspr SPRN_MD_EPN, r0
1122 + mtspr SPRN_MD_TWC, r7
1123 +- mtspr SPRN_MD_RPN, r8
1124 ++ mtspr SPRN_MD_RPN, r12
1125 + addi r5, r5, 0x100
1126 + addis r6, r6, SZ_8M@h
1127 + addis r8, r8, SZ_8M@h
1128 + cmplw r6, r9
1129 + bdnzt lt, 2b
1130 +-
1131 +-4: LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
1132 ++4:
1133 + 2: ori r0, r6, MD_EVALID
1134 ++ ori r12, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
1135 + mtspr SPRN_MD_CTR, r5
1136 + mtspr SPRN_MD_EPN, r0
1137 + mtspr SPRN_MD_TWC, r7
1138 +- mtspr SPRN_MD_RPN, r8
1139 ++ mtspr SPRN_MD_RPN, r12
1140 + addi r5, r5, 0x100
1141 + addis r6, r6, SZ_8M@h
1142 + addis r8, r8, SZ_8M@h
1143 +@@ -814,7 +815,7 @@ _GLOBAL(mmu_pin_tlb)
1144 + #endif
1145 + #if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA)
1146 + lis r0, (MD_RSV4I | MD_TWAM)@h
1147 +- mtspr SPRN_MI_CTR, r0
1148 ++ mtspr SPRN_MD_CTR, r0
1149 + #endif
1150 + mtspr SPRN_SRR1, r10
1151 + mtspr SPRN_SRR0, r11
1152 +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
1153 +index db78123166a8b..b1d9afffd8419 100644
1154 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
1155 ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
1156 +@@ -2539,7 +2539,7 @@ hcall_real_table:
1157 + .globl hcall_real_table_end
1158 + hcall_real_table_end:
1159 +
1160 +-_GLOBAL(kvmppc_h_set_xdabr)
1161 ++_GLOBAL_TOC(kvmppc_h_set_xdabr)
1162 + EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
1163 + andi. r0, r5, DABRX_USER | DABRX_KERNEL
1164 + beq 6f
1165 +@@ -2549,7 +2549,7 @@ EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
1166 + 6: li r3, H_PARAMETER
1167 + blr
1168 +
1169 +-_GLOBAL(kvmppc_h_set_dabr)
1170 ++_GLOBAL_TOC(kvmppc_h_set_dabr)
1171 + EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
1172 + li r5, DABRX_USER | DABRX_KERNEL
1173 + 3:
1174 +diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S
1175 +index efeeb1b885a17..329b9c4ae5429 100644
1176 +--- a/arch/powerpc/sysdev/dcr-low.S
1177 ++++ b/arch/powerpc/sysdev/dcr-low.S
1178 +@@ -11,7 +11,7 @@
1179 + #include <asm/export.h>
1180 +
1181 + #define DCR_ACCESS_PROLOG(table) \
1182 +- cmpli cr0,r3,1024; \
1183 ++ cmplwi cr0,r3,1024; \
1184 + rlwinm r3,r3,4,18,27; \
1185 + lis r5,table@h; \
1186 + ori r5,r5,table@l; \
1187 +diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
1188 +index ea398a05f6432..7f3c9ac34bd8d 100644
1189 +--- a/arch/s390/include/asm/kexec.h
1190 ++++ b/arch/s390/include/asm/kexec.h
1191 +@@ -74,6 +74,12 @@ void *kexec_file_add_components(struct kimage *image,
1192 + int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
1193 + unsigned long addr);
1194 +
1195 ++#define ARCH_HAS_KIMAGE_ARCH
1196 ++
1197 ++struct kimage_arch {
1198 ++ void *ipl_buf;
1199 ++};
1200 ++
1201 + extern const struct kexec_file_ops s390_kexec_image_ops;
1202 + extern const struct kexec_file_ops s390_kexec_elf_ops;
1203 +
1204 +diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
1205 +index 98b3aca1de8e1..6da06905ddce5 100644
1206 +--- a/arch/s390/kernel/ipl.c
1207 ++++ b/arch/s390/kernel/ipl.c
1208 +@@ -2156,7 +2156,7 @@ void *ipl_report_finish(struct ipl_report *report)
1209 +
1210 + buf = vzalloc(report->size);
1211 + if (!buf)
1212 +- return ERR_PTR(-ENOMEM);
1213 ++ goto out;
1214 + ptr = buf;
1215 +
1216 + memcpy(ptr, report->ipib, report->ipib->hdr.len);
1217 +@@ -2195,6 +2195,7 @@ void *ipl_report_finish(struct ipl_report *report)
1218 + }
1219 +
1220 + BUG_ON(ptr > buf + report->size);
1221 ++out:
1222 + return buf;
1223 + }
1224 +
1225 +diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
1226 +index f9e4baa64b675..e7435f3a3d2d2 100644
1227 +--- a/arch/s390/kernel/machine_kexec_file.c
1228 ++++ b/arch/s390/kernel/machine_kexec_file.c
1229 +@@ -12,6 +12,7 @@
1230 + #include <linux/kexec.h>
1231 + #include <linux/module_signature.h>
1232 + #include <linux/verification.h>
1233 ++#include <linux/vmalloc.h>
1234 + #include <asm/boot_data.h>
1235 + #include <asm/ipl.h>
1236 + #include <asm/setup.h>
1237 +@@ -170,6 +171,7 @@ static int kexec_file_add_ipl_report(struct kimage *image,
1238 + struct kexec_buf buf;
1239 + unsigned long addr;
1240 + void *ptr, *end;
1241 ++ int ret;
1242 +
1243 + buf.image = image;
1244 +
1245 +@@ -199,9 +201,13 @@ static int kexec_file_add_ipl_report(struct kimage *image,
1246 + ptr += len;
1247 + }
1248 +
1249 ++ ret = -ENOMEM;
1250 + buf.buffer = ipl_report_finish(data->report);
1251 ++ if (!buf.buffer)
1252 ++ goto out;
1253 + buf.bufsz = data->report->size;
1254 + buf.memsz = buf.bufsz;
1255 ++ image->arch.ipl_buf = buf.buffer;
1256 +
1257 + data->memsz += buf.memsz;
1258 +
1259 +@@ -209,7 +215,9 @@ static int kexec_file_add_ipl_report(struct kimage *image,
1260 + data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
1261 + *lc_ipl_parmblock_ptr = (__u32)buf.mem;
1262 +
1263 +- return kexec_add_buffer(&buf);
1264 ++ ret = kexec_add_buffer(&buf);
1265 ++out:
1266 ++ return ret;
1267 + }
1268 +
1269 + void *kexec_file_add_components(struct kimage *image,
1270 +@@ -321,3 +329,11 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
1271 +
1272 + return kexec_image_probe_default(image, buf, buf_len);
1273 + }
1274 ++
1275 ++int arch_kimage_file_post_load_cleanup(struct kimage *image)
1276 ++{
1277 ++ vfree(image->arch.ipl_buf);
1278 ++ image->arch.ipl_buf = NULL;
1279 ++
1280 ++ return kexec_image_post_load_cleanup_default(image);
1281 ++}
1282 +diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
1283 +index 28a43d63bde1f..97b0e26cf05a1 100644
1284 +--- a/arch/sh/Kconfig.debug
1285 ++++ b/arch/sh/Kconfig.debug
1286 +@@ -57,6 +57,7 @@ config DUMP_CODE
1287 +
1288 + config DWARF_UNWINDER
1289 + bool "Enable the DWARF unwinder for stacktraces"
1290 ++ depends on DEBUG_KERNEL
1291 + select FRAME_POINTER
1292 + default n
1293 + help
1294 +diff --git a/arch/sh/include/asm/sfp-machine.h b/arch/sh/include/asm/sfp-machine.h
1295 +index cbc7cf8c97ce6..2d2423478b71d 100644
1296 +--- a/arch/sh/include/asm/sfp-machine.h
1297 ++++ b/arch/sh/include/asm/sfp-machine.h
1298 +@@ -13,6 +13,14 @@
1299 + #ifndef _SFP_MACHINE_H
1300 + #define _SFP_MACHINE_H
1301 +
1302 ++#ifdef __BIG_ENDIAN__
1303 ++#define __BYTE_ORDER __BIG_ENDIAN
1304 ++#define __LITTLE_ENDIAN 0
1305 ++#else
1306 ++#define __BYTE_ORDER __LITTLE_ENDIAN
1307 ++#define __BIG_ENDIAN 0
1308 ++#endif
1309 ++
1310 + #define _FP_W_TYPE_SIZE 32
1311 + #define _FP_W_TYPE unsigned long
1312 + #define _FP_WS_TYPE signed long
1313 +diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
1314 +index f8a2bec0f260b..1261dc7b84e8b 100644
1315 +--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
1316 ++++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
1317 +@@ -73,8 +73,9 @@ static void shx3_prepare_cpus(unsigned int max_cpus)
1318 + BUILD_BUG_ON(SMP_MSG_NR >= 8);
1319 +
1320 + for (i = 0; i < SMP_MSG_NR; i++)
1321 +- request_irq(104 + i, ipi_interrupt_handler,
1322 +- IRQF_PERCPU, "IPI", (void *)(long)i);
1323 ++ if (request_irq(104 + i, ipi_interrupt_handler,
1324 ++ IRQF_PERCPU, "IPI", (void *)(long)i))
1325 ++ pr_err("Failed to request irq %d\n", i);
1326 +
1327 + for (i = 0; i < max_cpus; i++)
1328 + set_cpu_present(i, true);
1329 +diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c
1330 +index e8be0eca0444a..615ba932c398e 100644
1331 +--- a/arch/sh/math-emu/math.c
1332 ++++ b/arch/sh/math-emu/math.c
1333 +@@ -467,109 +467,6 @@ static int fpu_emulate(u16 code, struct sh_fpu_soft_struct *fregs, struct pt_reg
1334 + return id_sys(fregs, regs, code);
1335 + }
1336 +
1337 +-/**
1338 +- * denormal_to_double - Given denormalized float number,
1339 +- * store double float
1340 +- *
1341 +- * @fpu: Pointer to sh_fpu_soft structure
1342 +- * @n: Index to FP register
1343 +- */
1344 +-static void denormal_to_double(struct sh_fpu_soft_struct *fpu, int n)
1345 +-{
1346 +- unsigned long du, dl;
1347 +- unsigned long x = fpu->fpul;
1348 +- int exp = 1023 - 126;
1349 +-
1350 +- if (x != 0 && (x & 0x7f800000) == 0) {
1351 +- du = (x & 0x80000000);
1352 +- while ((x & 0x00800000) == 0) {
1353 +- x <<= 1;
1354 +- exp--;
1355 +- }
1356 +- x &= 0x007fffff;
1357 +- du |= (exp << 20) | (x >> 3);
1358 +- dl = x << 29;
1359 +-
1360 +- fpu->fp_regs[n] = du;
1361 +- fpu->fp_regs[n+1] = dl;
1362 +- }
1363 +-}
1364 +-
1365 +-/**
1366 +- * ieee_fpe_handler - Handle denormalized number exception
1367 +- *
1368 +- * @regs: Pointer to register structure
1369 +- *
1370 +- * Returns 1 when it's handled (should not cause exception).
1371 +- */
1372 +-static int ieee_fpe_handler(struct pt_regs *regs)
1373 +-{
1374 +- unsigned short insn = *(unsigned short *)regs->pc;
1375 +- unsigned short finsn;
1376 +- unsigned long nextpc;
1377 +- int nib[4] = {
1378 +- (insn >> 12) & 0xf,
1379 +- (insn >> 8) & 0xf,
1380 +- (insn >> 4) & 0xf,
1381 +- insn & 0xf};
1382 +-
1383 +- if (nib[0] == 0xb ||
1384 +- (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
1385 +- regs->pr = regs->pc + 4;
1386 +-
1387 +- if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
1388 +- nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
1389 +- finsn = *(unsigned short *) (regs->pc + 2);
1390 +- } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
1391 +- if (regs->sr & 1)
1392 +- nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
1393 +- else
1394 +- nextpc = regs->pc + 4;
1395 +- finsn = *(unsigned short *) (regs->pc + 2);
1396 +- } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
1397 +- if (regs->sr & 1)
1398 +- nextpc = regs->pc + 4;
1399 +- else
1400 +- nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
1401 +- finsn = *(unsigned short *) (regs->pc + 2);
1402 +- } else if (nib[0] == 0x4 && nib[3] == 0xb &&
1403 +- (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
1404 +- nextpc = regs->regs[nib[1]];
1405 +- finsn = *(unsigned short *) (regs->pc + 2);
1406 +- } else if (nib[0] == 0x0 && nib[3] == 0x3 &&
1407 +- (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
1408 +- nextpc = regs->pc + 4 + regs->regs[nib[1]];
1409 +- finsn = *(unsigned short *) (regs->pc + 2);
1410 +- } else if (insn == 0x000b) { /* rts */
1411 +- nextpc = regs->pr;
1412 +- finsn = *(unsigned short *) (regs->pc + 2);
1413 +- } else {
1414 +- nextpc = regs->pc + 2;
1415 +- finsn = insn;
1416 +- }
1417 +-
1418 +- if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
1419 +- struct task_struct *tsk = current;
1420 +-
1421 +- if ((tsk->thread.xstate->softfpu.fpscr & (1 << 17))) {
1422 +- /* FPU error */
1423 +- denormal_to_double (&tsk->thread.xstate->softfpu,
1424 +- (finsn >> 8) & 0xf);
1425 +- tsk->thread.xstate->softfpu.fpscr &=
1426 +- ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
1427 +- task_thread_info(tsk)->status |= TS_USEDFPU;
1428 +- } else {
1429 +- force_sig_fault(SIGFPE, FPE_FLTINV,
1430 +- (void __user *)regs->pc);
1431 +- }
1432 +-
1433 +- regs->pc = nextpc;
1434 +- return 1;
1435 +- }
1436 +-
1437 +- return 0;
1438 +-}
1439 +-
1440 + /**
1441 + * fpu_init - Initialize FPU registers
1442 + * @fpu: Pointer to software emulated FPU registers.
1443 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
1444 +index 2b5957b27a3d6..a853ed7240eef 100644
1445 +--- a/arch/x86/Kconfig
1446 ++++ b/arch/x86/Kconfig
1447 +@@ -1266,7 +1266,8 @@ config TOSHIBA
1448 +
1449 + config I8K
1450 + tristate "Dell i8k legacy laptop support"
1451 +- select HWMON
1452 ++ depends on HWMON
1453 ++ depends on PROC_FS
1454 + select SENSORS_DELL_SMM
1455 + help
1456 + This option enables legacy /proc/i8k userspace interface in hwmon
1457 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
1458 +index 4684bf9fcc428..a521135247eb6 100644
1459 +--- a/arch/x86/events/intel/core.c
1460 ++++ b/arch/x86/events/intel/core.c
1461 +@@ -2879,8 +2879,10 @@ intel_vlbr_constraints(struct perf_event *event)
1462 + {
1463 + struct event_constraint *c = &vlbr_constraint;
1464 +
1465 +- if (unlikely(constraint_match(c, event->hw.config)))
1466 ++ if (unlikely(constraint_match(c, event->hw.config))) {
1467 ++ event->hw.flags |= c->flags;
1468 + return c;
1469 ++ }
1470 +
1471 + return NULL;
1472 + }
1473 +diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
1474 +index c01b51d1cbdff..ba26792d96731 100644
1475 +--- a/arch/x86/events/intel/uncore_snbep.c
1476 ++++ b/arch/x86/events/intel/uncore_snbep.c
1477 +@@ -3545,6 +3545,9 @@ static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *ev
1478 + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1479 + struct extra_reg *er;
1480 + int idx = 0;
1481 ++ /* Any of the CHA events may be filtered by Thread/Core-ID.*/
1482 ++ if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
1483 ++ idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
1484 +
1485 + for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
1486 + if (er->event != (event->hw.config & er->config_mask))
1487 +@@ -3612,6 +3615,7 @@ static struct event_constraint skx_uncore_iio_constraints[] = {
1488 + UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
1489 + UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
1490 + UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
1491 ++ UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
1492 + EVENT_CONSTRAINT_END
1493 + };
1494 +
1495 +diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
1496 +index 3cf4030232590..01860c0d324d7 100644
1497 +--- a/arch/x86/hyperv/hv_init.c
1498 ++++ b/arch/x86/hyperv/hv_init.c
1499 +@@ -176,6 +176,9 @@ void set_hv_tscchange_cb(void (*cb)(void))
1500 + return;
1501 + }
1502 +
1503 ++ if (!hv_vp_index)
1504 ++ return;
1505 ++
1506 + hv_reenlightenment_cb = cb;
1507 +
1508 + /* Make sure callback is registered before we write to MSRs */
1509 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
1510 +index d5f24a2f3e916..257ec2cbf69a4 100644
1511 +--- a/arch/x86/kvm/vmx/nested.c
1512 ++++ b/arch/x86/kvm/vmx/nested.c
1513 +@@ -2851,6 +2851,17 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
1514 + return 0;
1515 + }
1516 +
1517 ++static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
1518 ++ struct vmcs12 *vmcs12)
1519 ++{
1520 ++#ifdef CONFIG_X86_64
1521 ++ if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
1522 ++ !!(vcpu->arch.efer & EFER_LMA)))
1523 ++ return -EINVAL;
1524 ++#endif
1525 ++ return 0;
1526 ++}
1527 ++
1528 + static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
1529 + struct vmcs12 *vmcs12)
1530 + {
1531 +@@ -2875,18 +2886,16 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
1532 + return -EINVAL;
1533 +
1534 + #ifdef CONFIG_X86_64
1535 +- ia32e = !!(vcpu->arch.efer & EFER_LMA);
1536 ++ ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
1537 + #else
1538 + ia32e = false;
1539 + #endif
1540 +
1541 + if (ia32e) {
1542 +- if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) ||
1543 +- CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
1544 ++ if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
1545 + return -EINVAL;
1546 + } else {
1547 +- if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) ||
1548 +- CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
1549 ++ if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
1550 + CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
1551 + CC((vmcs12->host_rip) >> 32))
1552 + return -EINVAL;
1553 +@@ -3555,6 +3564,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
1554 + if (nested_vmx_check_controls(vcpu, vmcs12))
1555 + return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
1556 +
1557 ++ if (nested_vmx_check_address_space_size(vcpu, vmcs12))
1558 ++ return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
1559 ++
1560 + if (nested_vmx_check_host_state(vcpu, vmcs12))
1561 + return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
1562 +
1563 +diff --git a/block/blk-core.c b/block/blk-core.c
1564 +index fbc39756f37de..26664f2a139eb 100644
1565 +--- a/block/blk-core.c
1566 ++++ b/block/blk-core.c
1567 +@@ -897,10 +897,8 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
1568 + if (unlikely(!current->io_context))
1569 + create_task_io_context(current, GFP_ATOMIC, q->node);
1570 +
1571 +- if (blk_throtl_bio(bio)) {
1572 +- blkcg_bio_issue_init(bio);
1573 ++ if (blk_throtl_bio(bio))
1574 + return false;
1575 +- }
1576 +
1577 + blk_cgroup_bio_start(bio);
1578 + blkcg_bio_issue_init(bio);
1579 +diff --git a/block/ioprio.c b/block/ioprio.c
1580 +index 364d2294ba904..84da6c71b2ccb 100644
1581 +--- a/block/ioprio.c
1582 ++++ b/block/ioprio.c
1583 +@@ -69,7 +69,14 @@ int ioprio_check_cap(int ioprio)
1584 +
1585 + switch (class) {
1586 + case IOPRIO_CLASS_RT:
1587 +- if (!capable(CAP_SYS_NICE) && !capable(CAP_SYS_ADMIN))
1588 ++ /*
1589 ++ * Originally this only checked for CAP_SYS_ADMIN,
1590 ++ * which was implicitly allowed for pid 0 by security
1591 ++ * modules such as SELinux. Make sure we check
1592 ++ * CAP_SYS_ADMIN first to avoid a denial/avc for
1593 ++ * possibly missing CAP_SYS_NICE permission.
1594 ++ */
1595 ++ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
1596 + return -EPERM;
1597 + fallthrough;
1598 + /* rt has prio field too */
1599 +diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
1600 +index f41e4e4993d37..1372f40d0371f 100644
1601 +--- a/drivers/base/firmware_loader/main.c
1602 ++++ b/drivers/base/firmware_loader/main.c
1603 +@@ -99,12 +99,15 @@ static struct firmware_cache fw_cache;
1604 + extern struct builtin_fw __start_builtin_fw[];
1605 + extern struct builtin_fw __end_builtin_fw[];
1606 +
1607 +-static void fw_copy_to_prealloc_buf(struct firmware *fw,
1608 ++static bool fw_copy_to_prealloc_buf(struct firmware *fw,
1609 + void *buf, size_t size)
1610 + {
1611 +- if (!buf || size < fw->size)
1612 +- return;
1613 ++ if (!buf)
1614 ++ return true;
1615 ++ if (size < fw->size)
1616 ++ return false;
1617 + memcpy(buf, fw->data, fw->size);
1618 ++ return true;
1619 + }
1620 +
1621 + static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
1622 +@@ -116,9 +119,7 @@ static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
1623 + if (strcmp(name, b_fw->name) == 0) {
1624 + fw->size = b_fw->size;
1625 + fw->data = b_fw->data;
1626 +- fw_copy_to_prealloc_buf(fw, buf, size);
1627 +-
1628 +- return true;
1629 ++ return fw_copy_to_prealloc_buf(fw, buf, size);
1630 + }
1631 + }
1632 +
1633 +diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
1634 +index 2ff437e5c7051..43603dc9da430 100644
1635 +--- a/drivers/bus/ti-sysc.c
1636 ++++ b/drivers/bus/ti-sysc.c
1637 +@@ -6,6 +6,7 @@
1638 + #include <linux/io.h>
1639 + #include <linux/clk.h>
1640 + #include <linux/clkdev.h>
1641 ++#include <linux/cpu_pm.h>
1642 + #include <linux/delay.h>
1643 + #include <linux/list.h>
1644 + #include <linux/module.h>
1645 +@@ -52,11 +53,18 @@ struct sysc_address {
1646 + struct list_head node;
1647 + };
1648 +
1649 ++struct sysc_module {
1650 ++ struct sysc *ddata;
1651 ++ struct list_head node;
1652 ++};
1653 ++
1654 + struct sysc_soc_info {
1655 + unsigned long general_purpose:1;
1656 + enum sysc_soc soc;
1657 +- struct mutex list_lock; /* disabled modules list lock */
1658 ++ struct mutex list_lock; /* disabled and restored modules list lock */
1659 + struct list_head disabled_modules;
1660 ++ struct list_head restored_modules;
1661 ++ struct notifier_block nb;
1662 + };
1663 +
1664 + enum sysc_clocks {
1665 +@@ -1555,7 +1563,7 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
1666 + 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
1667 + SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
1668 + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
1669 +- SYSC_QUIRK_REINIT_ON_RESUME),
1670 ++ SYSC_QUIRK_REINIT_ON_CTX_LOST),
1671 + SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
1672 + SYSC_MODULE_QUIRK_WDT),
1673 + /* PRUSS on am3, am4 and am5 */
1674 +@@ -2429,6 +2437,79 @@ static struct dev_pm_domain sysc_child_pm_domain = {
1675 + }
1676 + };
1677 +
1678 ++/* Caller needs to take list_lock if ever used outside of cpu_pm */
1679 ++static void sysc_reinit_modules(struct sysc_soc_info *soc)
1680 ++{
1681 ++ struct sysc_module *module;
1682 ++ struct list_head *pos;
1683 ++ struct sysc *ddata;
1684 ++ int error = 0;
1685 ++
1686 ++ list_for_each(pos, &sysc_soc->restored_modules) {
1687 ++ module = list_entry(pos, struct sysc_module, node);
1688 ++ ddata = module->ddata;
1689 ++ error = sysc_reinit_module(ddata, ddata->enabled);
1690 ++ }
1691 ++}
1692 ++
1693 ++/**
1694 ++ * sysc_context_notifier - optionally reset and restore module after idle
1695 ++ * @nb: notifier block
1696 ++ * @cmd: CPU_PM notifier event
1697 ++ * @v: unused
1698 ++ *
1699 ++ * Some interconnect target modules need to be restored, or reset and restored
1700 ++ * on the CPU_PM CPU_CLUSTER_PM_EXIT notifier. This is needed at least for am335x
1701 ++ * OTG and GPMC target modules even if the modules are unused.
1702 ++ */
1703 ++static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd,
1704 ++ void *v)
1705 ++{
1706 ++ struct sysc_soc_info *soc;
1707 ++
1708 ++ soc = container_of(nb, struct sysc_soc_info, nb);
1709 ++
1710 ++ switch (cmd) {
1711 ++ case CPU_CLUSTER_PM_ENTER:
1712 ++ break;
1713 ++ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
1714 ++ break;
1715 ++ case CPU_CLUSTER_PM_EXIT:
1716 ++ sysc_reinit_modules(soc);
1717 ++ break;
1718 ++ }
1719 ++
1720 ++ return NOTIFY_OK;
1721 ++}
1722 ++
1723 ++/**
1724 ++ * sysc_add_restored - optionally add reset and restore quirk handling
1725 ++ * @ddata: device data
1726 ++ */
1727 ++static void sysc_add_restored(struct sysc *ddata)
1728 ++{
1729 ++ struct sysc_module *restored_module;
1730 ++
1731 ++ restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL);
1732 ++ if (!restored_module)
1733 ++ return;
1734 ++
1735 ++ restored_module->ddata = ddata;
1736 ++
1737 ++ mutex_lock(&sysc_soc->list_lock);
1738 ++
1739 ++ list_add(&restored_module->node, &sysc_soc->restored_modules);
1740 ++
1741 ++ if (sysc_soc->nb.notifier_call)
1742 ++ goto out_unlock;
1743 ++
1744 ++ sysc_soc->nb.notifier_call = sysc_context_notifier;
1745 ++ cpu_pm_register_notifier(&sysc_soc->nb);
1746 ++
1747 ++out_unlock:
1748 ++ mutex_unlock(&sysc_soc->list_lock);
1749 ++}
1750 ++
1751 + /**
1752 + * sysc_legacy_idle_quirk - handle children in omap_device compatible way
1753 + * @ddata: device driver data
1754 +@@ -2928,12 +3009,14 @@ static int sysc_add_disabled(unsigned long base)
1755 + }
1756 +
1757 + /*
1758 +- * One time init to detect the booted SoC and disable unavailable features.
1759 ++ * One time init to detect the booted SoC, disable unavailable features
1760 ++ * and initialize the list for the optional cpu_pm notifier.
1761 ++ *
1762 + * Note that we initialize static data shared across all ti-sysc instances
1763 + * so ddata is only used for SoC type. This can be called from module_init
1764 + * once we no longer need to rely on platform data.
1765 + */
1766 +-static int sysc_init_soc(struct sysc *ddata)
1767 ++static int sysc_init_static_data(struct sysc *ddata)
1768 + {
1769 + const struct soc_device_attribute *match;
1770 + struct ti_sysc_platform_data *pdata;
1771 +@@ -2948,6 +3031,7 @@ static int sysc_init_soc(struct sysc *ddata)
1772 +
1773 + mutex_init(&sysc_soc->list_lock);
1774 + INIT_LIST_HEAD(&sysc_soc->disabled_modules);
1775 ++ INIT_LIST_HEAD(&sysc_soc->restored_modules);
1776 + sysc_soc->general_purpose = true;
1777 +
1778 + pdata = dev_get_platdata(ddata->dev);
1779 +@@ -2994,15 +3078,24 @@ static int sysc_init_soc(struct sysc *ddata)
1780 + return 0;
1781 + }
1782 +
1783 +-static void sysc_cleanup_soc(void)
1784 ++static void sysc_cleanup_static_data(void)
1785 + {
1786 ++ struct sysc_module *restored_module;
1787 + struct sysc_address *disabled_module;
1788 + struct list_head *pos, *tmp;
1789 +
1790 + if (!sysc_soc)
1791 + return;
1792 +
1793 ++ if (sysc_soc->nb.notifier_call)
1794 ++ cpu_pm_unregister_notifier(&sysc_soc->nb);
1795 ++
1796 + mutex_lock(&sysc_soc->list_lock);
1797 ++ list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) {
1798 ++ restored_module = list_entry(pos, struct sysc_module, node);
1799 ++ list_del(pos);
1800 ++ kfree(restored_module);
1801 ++ }
1802 + list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
1803 + disabled_module = list_entry(pos, struct sysc_address, node);
1804 + list_del(pos);
1805 +@@ -3067,7 +3160,7 @@ static int sysc_probe(struct platform_device *pdev)
1806 + ddata->dev = &pdev->dev;
1807 + platform_set_drvdata(pdev, ddata);
1808 +
1809 +- error = sysc_init_soc(ddata);
1810 ++ error = sysc_init_static_data(ddata);
1811 + if (error)
1812 + return error;
1813 +
1814 +@@ -3166,6 +3259,9 @@ static int sysc_probe(struct platform_device *pdev)
1815 + pm_runtime_put(&pdev->dev);
1816 + }
1817 +
1818 ++ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST)
1819 ++ sysc_add_restored(ddata);
1820 ++
1821 + return 0;
1822 +
1823 + err:
1824 +@@ -3248,7 +3344,7 @@ static void __exit sysc_exit(void)
1825 + {
1826 + bus_unregister_notifier(&platform_bus_type, &sysc_nb);
1827 + platform_driver_unregister(&sysc_driver);
1828 +- sysc_cleanup_soc();
1829 ++ sysc_cleanup_static_data();
1830 + }
1831 + module_exit(sysc_exit);
1832 +
1833 +diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
1834 +index bc3be5f3eae15..24dab2312bc6f 100644
1835 +--- a/drivers/clk/clk-ast2600.c
1836 ++++ b/drivers/clk/clk-ast2600.c
1837 +@@ -51,6 +51,8 @@ static DEFINE_SPINLOCK(aspeed_g6_clk_lock);
1838 + static struct clk_hw_onecell_data *aspeed_g6_clk_data;
1839 +
1840 + static void __iomem *scu_g6_base;
1841 ++/* AST2600 revision: A0, A1, A2, etc. */
1842 ++static u8 soc_rev;
1843 +
1844 + /*
1845 + * Clocks marked with CLK_IS_CRITICAL:
1846 +@@ -191,9 +193,8 @@ static struct clk_hw *ast2600_calc_pll(const char *name, u32 val)
1847 + static struct clk_hw *ast2600_calc_apll(const char *name, u32 val)
1848 + {
1849 + unsigned int mult, div;
1850 +- u32 chip_id = readl(scu_g6_base + ASPEED_G6_SILICON_REV);
1851 +
1852 +- if (((chip_id & CHIP_REVISION_ID) >> 16) >= 2) {
1853 ++ if (soc_rev >= 2) {
1854 + if (val & BIT(24)) {
1855 + /* Pass through mode */
1856 + mult = div = 1;
1857 +@@ -707,7 +708,7 @@ static const u32 ast2600_a1_axi_ahb200_tbl[] = {
1858 + static void __init aspeed_g6_cc(struct regmap *map)
1859 + {
1860 + struct clk_hw *hw;
1861 +- u32 val, div, divbits, chip_id, axi_div, ahb_div;
1862 ++ u32 val, div, divbits, axi_div, ahb_div;
1863 +
1864 + clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000);
1865 +
1866 +@@ -738,8 +739,7 @@ static void __init aspeed_g6_cc(struct regmap *map)
1867 + axi_div = 2;
1868 +
1869 + divbits = (val >> 11) & 0x3;
1870 +- regmap_read(map, ASPEED_G6_SILICON_REV, &chip_id);
1871 +- if (chip_id & BIT(16)) {
1872 ++ if (soc_rev >= 1) {
1873 + if (!divbits) {
1874 + ahb_div = ast2600_a1_axi_ahb200_tbl[(val >> 8) & 0x3];
1875 + if (val & BIT(16))
1876 +@@ -784,6 +784,8 @@ static void __init aspeed_g6_cc_init(struct device_node *np)
1877 + if (!scu_g6_base)
1878 + return;
1879 +
1880 ++ soc_rev = (readl(scu_g6_base + ASPEED_G6_SILICON_REV) & CHIP_REVISION_ID) >> 16;
1881 ++
1882 + aspeed_g6_clk_data = kzalloc(struct_size(aspeed_g6_clk_data, hws,
1883 + ASPEED_G6_NUM_CLKS), GFP_KERNEL);
1884 + if (!aspeed_g6_clk_data)
1885 +diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
1886 +index 5dbb6a9377324..206e4c43f68f8 100644
1887 +--- a/drivers/clk/imx/clk-imx6ul.c
1888 ++++ b/drivers/clk/imx/clk-imx6ul.c
1889 +@@ -161,7 +161,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
1890 + hws[IMX6UL_PLL5_BYPASS] = imx_clk_hw_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
1891 + hws[IMX6UL_PLL6_BYPASS] = imx_clk_hw_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
1892 + hws[IMX6UL_PLL7_BYPASS] = imx_clk_hw_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
1893 +- hws[IMX6UL_CLK_CSI_SEL] = imx_clk_hw_mux_flags("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels), CLK_SET_RATE_PARENT);
1894 +
1895 + /* Do not bypass PLLs initially */
1896 + clk_set_parent(hws[IMX6UL_PLL1_BYPASS]->clk, hws[IMX6UL_CLK_PLL1]->clk);
1897 +@@ -270,6 +269,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
1898 + hws[IMX6UL_CLK_ECSPI_SEL] = imx_clk_hw_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
1899 + hws[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_hw_mux_flags("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels), CLK_SET_RATE_PARENT);
1900 + hws[IMX6UL_CLK_LCDIF_SEL] = imx_clk_hw_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
1901 ++ hws[IMX6UL_CLK_CSI_SEL] = imx_clk_hw_mux("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels));
1902 +
1903 + hws[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_hw_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
1904 + hws[IMX6UL_CLK_LDB_DI1_DIV_SEL] = imx_clk_hw_mux("ldb_di1", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels));
1905 +diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
1906 +index c8e9cb6c8e39c..2b9bb7d55efc8 100644
1907 +--- a/drivers/clk/ingenic/cgu.c
1908 ++++ b/drivers/clk/ingenic/cgu.c
1909 +@@ -425,15 +425,15 @@ ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
1910 + }
1911 +
1912 + /* Impose hardware constraints */
1913 +- div = min_t(unsigned, div, 1 << clk_info->div.bits);
1914 +- div = max_t(unsigned, div, 1);
1915 ++ div = clamp_t(unsigned int, div, clk_info->div.div,
1916 ++ clk_info->div.div << clk_info->div.bits);
1917 +
1918 + /*
1919 + * If the divider value itself must be divided before being written to
1920 + * the divider register, we must ensure we don't have any bits set that
1921 + * would be lost as a result of doing so.
1922 + */
1923 +- div /= clk_info->div.div;
1924 ++ div = DIV_ROUND_UP(div, clk_info->div.div);
1925 + div *= clk_info->div.div;
1926 +
1927 + return div;
1928 +diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
1929 +index 3c3a7ff045621..9b1674b28d45d 100644
1930 +--- a/drivers/clk/qcom/gcc-msm8996.c
1931 ++++ b/drivers/clk/qcom/gcc-msm8996.c
1932 +@@ -2937,20 +2937,6 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
1933 + },
1934 + };
1935 +
1936 +-static struct clk_branch gcc_aggre1_pnoc_ahb_clk = {
1937 +- .halt_reg = 0x82014,
1938 +- .clkr = {
1939 +- .enable_reg = 0x82014,
1940 +- .enable_mask = BIT(0),
1941 +- .hw.init = &(struct clk_init_data){
1942 +- .name = "gcc_aggre1_pnoc_ahb_clk",
1943 +- .parent_names = (const char *[]){ "periph_noc_clk_src" },
1944 +- .num_parents = 1,
1945 +- .ops = &clk_branch2_ops,
1946 +- },
1947 +- },
1948 +-};
1949 +-
1950 + static struct clk_branch gcc_aggre2_ufs_axi_clk = {
1951 + .halt_reg = 0x83014,
1952 + .clkr = {
1953 +@@ -3474,7 +3460,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
1954 + [GCC_AGGRE0_CNOC_AHB_CLK] = &gcc_aggre0_cnoc_ahb_clk.clkr,
1955 + [GCC_SMMU_AGGRE0_AXI_CLK] = &gcc_smmu_aggre0_axi_clk.clkr,
1956 + [GCC_SMMU_AGGRE0_AHB_CLK] = &gcc_smmu_aggre0_ahb_clk.clkr,
1957 +- [GCC_AGGRE1_PNOC_AHB_CLK] = &gcc_aggre1_pnoc_ahb_clk.clkr,
1958 + [GCC_AGGRE2_UFS_AXI_CLK] = &gcc_aggre2_ufs_axi_clk.clkr,
1959 + [GCC_AGGRE2_USB3_AXI_CLK] = &gcc_aggre2_usb3_axi_clk.clkr,
1960 + [GCC_QSPI_AHB_CLK] = &gcc_qspi_ahb_clk.clkr,
1961 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1962 +index b9c11c2b2885a..0de66f59adb8a 100644
1963 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1964 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1965 +@@ -827,6 +827,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
1966 +
1967 + amdgpu_connector_get_edid(connector);
1968 + ret = amdgpu_connector_ddc_get_modes(connector);
1969 ++ amdgpu_get_native_mode(connector);
1970 +
1971 + return ret;
1972 + }
1973 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
1974 +index 3121816546467..53ac826935328 100644
1975 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
1976 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
1977 +@@ -1852,7 +1852,9 @@ static void swizzle_to_dml_params(
1978 + case DC_SW_VAR_D_X:
1979 + *sw_mode = dm_sw_var_d_x;
1980 + break;
1981 +-
1982 ++ case DC_SW_VAR_R_X:
1983 ++ *sw_mode = dm_sw_var_r_x;
1984 ++ break;
1985 + default:
1986 + ASSERT(0); /* Not supported */
1987 + break;
1988 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
1989 +index 64f9c735f74d8..e73cee275729c 100644
1990 +--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
1991 ++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
1992 +@@ -80,11 +80,11 @@ enum dm_swizzle_mode {
1993 + dm_sw_SPARE_13 = 24,
1994 + dm_sw_64kb_s_x = 25,
1995 + dm_sw_64kb_d_x = 26,
1996 +- dm_sw_SPARE_14 = 27,
1997 ++ dm_sw_64kb_r_x = 27,
1998 + dm_sw_SPARE_15 = 28,
1999 + dm_sw_var_s_x = 29,
2000 + dm_sw_var_d_x = 30,
2001 +- dm_sw_64kb_r_x,
2002 ++ dm_sw_var_r_x = 31,
2003 + dm_sw_gfx7_2d_thin_l_vp,
2004 + dm_sw_gfx7_2d_thin_gl,
2005 + };
2006 +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
2007 +index 65d73eb5e155c..1c1931f5c958b 100644
2008 +--- a/drivers/gpu/drm/i915/display/intel_dp.c
2009 ++++ b/drivers/gpu/drm/i915/display/intel_dp.c
2010 +@@ -154,6 +154,12 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
2011 + enum pipe pipe);
2012 + static void intel_dp_unset_edid(struct intel_dp *intel_dp);
2013 +
2014 ++static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
2015 ++{
2016 ++ intel_dp->sink_rates[0] = 162000;
2017 ++ intel_dp->num_sink_rates = 1;
2018 ++}
2019 ++
2020 + /* update sink rates from dpcd */
2021 + static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
2022 + {
2023 +@@ -4678,6 +4684,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
2024 + */
2025 + intel_psr_init_dpcd(intel_dp);
2026 +
2027 ++ /* Clear the default sink rates */
2028 ++ intel_dp->num_sink_rates = 0;
2029 ++
2030 + /* Read the eDP 1.4+ supported link rates. */
2031 + if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
2032 + __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
2033 +@@ -7779,6 +7788,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
2034 + return false;
2035 +
2036 + intel_dp_set_source_rates(intel_dp);
2037 ++ intel_dp_set_default_sink_rates(intel_dp);
2038 ++ intel_dp_set_common_rates(intel_dp);
2039 +
2040 + intel_dp->reset_link_params = true;
2041 + intel_dp->pps_pipe = INVALID_PIPE;
2042 +diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
2043 +index 42fc5c813a9bb..ac96b6ab44c07 100644
2044 +--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
2045 ++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
2046 +@@ -557,6 +557,7 @@ nouveau_drm_device_init(struct drm_device *dev)
2047 + nvkm_dbgopt(nouveau_debug, "DRM");
2048 +
2049 + INIT_LIST_HEAD(&drm->clients);
2050 ++ mutex_init(&drm->clients_lock);
2051 + spin_lock_init(&drm->tile.lock);
2052 +
2053 + /* workaround an odd issue on nvc1 by disabling the device's
2054 +@@ -627,6 +628,7 @@ fail_alloc:
2055 + static void
2056 + nouveau_drm_device_fini(struct drm_device *dev)
2057 + {
2058 ++ struct nouveau_cli *cli, *temp_cli;
2059 + struct nouveau_drm *drm = nouveau_drm(dev);
2060 +
2061 + if (nouveau_pmops_runtime()) {
2062 +@@ -651,9 +653,28 @@ nouveau_drm_device_fini(struct drm_device *dev)
2063 + nouveau_ttm_fini(drm);
2064 + nouveau_vga_fini(drm);
2065 +
2066 ++ /*
2067 ++ * There may be existing clients from as-yet unclosed files. For now,
2068 ++ * clean them up here rather than deferring until the file is closed,
2069 ++ * but this is likely not correct if we want to support hot-unplugging
2070 ++ * properly.
2071 ++ */
2072 ++ mutex_lock(&drm->clients_lock);
2073 ++ list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) {
2074 ++ list_del(&cli->head);
2075 ++ mutex_lock(&cli->mutex);
2076 ++ if (cli->abi16)
2077 ++ nouveau_abi16_fini(cli->abi16);
2078 ++ mutex_unlock(&cli->mutex);
2079 ++ nouveau_cli_fini(cli);
2080 ++ kfree(cli);
2081 ++ }
2082 ++ mutex_unlock(&drm->clients_lock);
2083 ++
2084 + nouveau_cli_fini(&drm->client);
2085 + nouveau_cli_fini(&drm->master);
2086 + nvif_parent_dtor(&drm->parent);
2087 ++ mutex_destroy(&drm->clients_lock);
2088 + kfree(drm);
2089 + }
2090 +
2091 +@@ -792,7 +813,7 @@ nouveau_drm_device_remove(struct drm_device *dev)
2092 + struct nvkm_client *client;
2093 + struct nvkm_device *device;
2094 +
2095 +- drm_dev_unregister(dev);
2096 ++ drm_dev_unplug(dev);
2097 +
2098 + dev->irq_enabled = false;
2099 + client = nvxx_client(&drm->client.base);
2100 +@@ -1086,9 +1107,9 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
2101 +
2102 + fpriv->driver_priv = cli;
2103 +
2104 +- mutex_lock(&drm->client.mutex);
2105 ++ mutex_lock(&drm->clients_lock);
2106 + list_add(&cli->head, &drm->clients);
2107 +- mutex_unlock(&drm->client.mutex);
2108 ++ mutex_unlock(&drm->clients_lock);
2109 +
2110 + done:
2111 + if (ret && cli) {
2112 +@@ -1106,6 +1127,16 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
2113 + {
2114 + struct nouveau_cli *cli = nouveau_cli(fpriv);
2115 + struct nouveau_drm *drm = nouveau_drm(dev);
2116 ++ int dev_index;
2117 ++
2118 ++ /*
2119 ++ * The device is gone, and as it currently stands all clients are
2120 ++ * cleaned up in the removal codepath. In the future this may change
2121 ++ * so that we can support hot-unplugging, but for now we immediately
2122 ++ * return to avoid a double-free situation.
2123 ++ */
2124 ++ if (!drm_dev_enter(dev, &dev_index))
2125 ++ return;
2126 +
2127 + pm_runtime_get_sync(dev->dev);
2128 +
2129 +@@ -1114,14 +1145,15 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
2130 + nouveau_abi16_fini(cli->abi16);
2131 + mutex_unlock(&cli->mutex);
2132 +
2133 +- mutex_lock(&drm->client.mutex);
2134 ++ mutex_lock(&drm->clients_lock);
2135 + list_del(&cli->head);
2136 +- mutex_unlock(&drm->client.mutex);
2137 ++ mutex_unlock(&drm->clients_lock);
2138 +
2139 + nouveau_cli_fini(cli);
2140 + kfree(cli);
2141 + pm_runtime_mark_last_busy(dev->dev);
2142 + pm_runtime_put_autosuspend(dev->dev);
2143 ++ drm_dev_exit(dev_index);
2144 + }
2145 +
2146 + static const struct drm_ioctl_desc
2147 +diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
2148 +index b8025507a9e4c..8b252dca0fc3e 100644
2149 +--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
2150 ++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
2151 +@@ -142,6 +142,11 @@ struct nouveau_drm {
2152 +
2153 + struct list_head clients;
2154 +
2155 ++ /**
2156 ++ * @clients_lock: Protects access to the @clients list of &struct nouveau_cli.
2157 ++ */
2158 ++ struct mutex clients_lock;
2159 ++
2160 + u8 old_pm_cap;
2161 +
2162 + struct {
2163 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
2164 +index 6e3c450eaacef..3ff49344abc77 100644
2165 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
2166 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
2167 +@@ -62,7 +62,6 @@ gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
2168 + nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header);
2169 + nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low);
2170 + nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high);
2171 +- nvkm_wr32(device, 0x6f0110 + hdmi, 0x00000000);
2172 + nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000);
2173 + nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000);
2174 + nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000);
2175 +diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
2176 +index cdc1c42e16695..aac41a809924e 100644
2177 +--- a/drivers/gpu/drm/udl/udl_connector.c
2178 ++++ b/drivers/gpu/drm/udl/udl_connector.c
2179 +@@ -30,7 +30,7 @@ static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
2180 + ret = usb_control_msg(udl->udev,
2181 + usb_rcvctrlpipe(udl->udev, 0),
2182 + (0x02), (0x80 | (0x02 << 5)), bval,
2183 +- 0xA1, read_buff, 2, HZ);
2184 ++ 0xA1, read_buff, 2, 1000);
2185 + if (ret < 1) {
2186 + DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
2187 + kfree(read_buff);
2188 +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
2189 +index 2ab1ac5a2412f..558ca3843bb95 100644
2190 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
2191 ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
2192 +@@ -1465,6 +1465,8 @@ st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u32 req_odr)
2193 + int err;
2194 +
2195 + switch (sensor->id) {
2196 ++ case ST_LSM6DSX_ID_GYRO:
2197 ++ break;
2198 + case ST_LSM6DSX_ID_EXT0:
2199 + case ST_LSM6DSX_ID_EXT1:
2200 + case ST_LSM6DSX_ID_EXT2:
2201 +@@ -1490,8 +1492,8 @@ st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u32 req_odr)
2202 + }
2203 + break;
2204 + }
2205 +- default:
2206 +- break;
2207 ++ default: /* should never occur */
2208 ++ return -EINVAL;
2209 + }
2210 +
2211 + if (req_odr > 0) {
2212 +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
2213 +index 441952a5eca4a..10d77f50f818b 100644
2214 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
2215 ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
2216 +@@ -3368,8 +3368,11 @@ static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
2217 + struct ib_wc *wc,
2218 + struct bnxt_qplib_cqe *cqe)
2219 + {
2220 ++ struct bnxt_re_dev *rdev;
2221 ++ u16 vlan_id = 0;
2222 + u8 nw_type;
2223 +
2224 ++ rdev = qp->rdev;
2225 + wc->opcode = IB_WC_RECV;
2226 + wc->status = __rc_to_ib_wc_status(cqe->status);
2227 +
2228 +@@ -3381,9 +3384,12 @@ static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
2229 + memcpy(wc->smac, cqe->smac, ETH_ALEN);
2230 + wc->wc_flags |= IB_WC_WITH_SMAC;
2231 + if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
2232 +- wc->vlan_id = (cqe->cfa_meta & 0xFFF);
2233 +- if (wc->vlan_id < 0x1000)
2234 +- wc->wc_flags |= IB_WC_WITH_VLAN;
2235 ++ vlan_id = (cqe->cfa_meta & 0xFFF);
2236 ++ }
2237 ++ /* Mark only if vlan_id is non-zero */
2238 ++ if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
2239 ++ wc->vlan_id = vlan_id;
2240 ++ wc->wc_flags |= IB_WC_WITH_VLAN;
2241 + }
2242 + nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
2243 + CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
2244 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
2245 +index 1835d2e451c01..fc7fce642666c 100644
2246 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
2247 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
2248 +@@ -635,11 +635,13 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
2249 + {
2250 + int i, rc;
2251 + struct bnx2x_ilt *ilt = BP_ILT(bp);
2252 +- struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
2253 ++ struct ilt_client_info *ilt_cli;
2254 +
2255 + if (!ilt || !ilt->lines)
2256 + return -1;
2257 +
2258 ++ ilt_cli = &ilt->clients[cli_num];
2259 ++
2260 + if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
2261 + return 0;
2262 +
2263 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
2264 +index 2186706cf9130..3e9b1f59e381d 100644
2265 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
2266 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
2267 +@@ -1854,7 +1854,7 @@ static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
2268 + struct flow_cls_offload *flower = type_data;
2269 + struct bnxt *bp = priv->bp;
2270 +
2271 +- if (flower->common.chain_index)
2272 ++ if (!tc_cls_can_offload_and_chain0(bp->dev, type_data))
2273 + return -EOPNOTSUPP;
2274 +
2275 + switch (type) {
2276 +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
2277 +index f91c67489e629..a4ef35216e2f7 100644
2278 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
2279 ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
2280 +@@ -4432,10 +4432,10 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
2281 +
2282 + fsl_mc_portal_free(priv->mc_io);
2283 +
2284 +- free_netdev(net_dev);
2285 +-
2286 + dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
2287 +
2288 ++ free_netdev(net_dev);
2289 ++
2290 + return 0;
2291 + }
2292 +
2293 +diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
2294 +index ee86ea12fa379..9295a9a1efc73 100644
2295 +--- a/drivers/net/ethernet/intel/e100.c
2296 ++++ b/drivers/net/ethernet/intel/e100.c
2297 +@@ -2997,9 +2997,10 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
2298 + struct net_device *netdev = pci_get_drvdata(pdev);
2299 + struct nic *nic = netdev_priv(netdev);
2300 +
2301 ++ netif_device_detach(netdev);
2302 ++
2303 + if (netif_running(netdev))
2304 + e100_down(nic);
2305 +- netif_device_detach(netdev);
2306 +
2307 + if ((nic->flags & wol_magic) | e100_asf(nic)) {
2308 + /* enable reverse auto-negotiation */
2309 +@@ -3016,7 +3017,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
2310 + *enable_wake = false;
2311 + }
2312 +
2313 +- pci_clear_master(pdev);
2314 ++ pci_disable_device(pdev);
2315 + }
2316 +
2317 + static int __e100_power_off(struct pci_dev *pdev, bool wake)
2318 +@@ -3036,8 +3037,6 @@ static int __maybe_unused e100_suspend(struct device *dev_d)
2319 +
2320 + __e100_shutdown(to_pci_dev(dev_d), &wake);
2321 +
2322 +- device_wakeup_disable(dev_d);
2323 +-
2324 + return 0;
2325 + }
2326 +
2327 +@@ -3045,6 +3044,14 @@ static int __maybe_unused e100_resume(struct device *dev_d)
2328 + {
2329 + struct net_device *netdev = dev_get_drvdata(dev_d);
2330 + struct nic *nic = netdev_priv(netdev);
2331 ++ int err;
2332 ++
2333 ++ err = pci_enable_device(to_pci_dev(dev_d));
2334 ++ if (err) {
2335 ++ netdev_err(netdev, "Resume cannot enable PCI device, aborting\n");
2336 ++ return err;
2337 ++ }
2338 ++ pci_set_master(to_pci_dev(dev_d));
2339 +
2340 + /* disable reverse auto-negotiation */
2341 + if (nic->phy == phy_82552_v) {
2342 +@@ -3056,10 +3063,11 @@ static int __maybe_unused e100_resume(struct device *dev_d)
2343 + smartspeed & ~(E100_82552_REV_ANEG));
2344 + }
2345 +
2346 +- netif_device_attach(netdev);
2347 + if (netif_running(netdev))
2348 + e100_up(nic);
2349 +
2350 ++ netif_device_attach(netdev);
2351 ++
2352 + return 0;
2353 + }
2354 +
2355 +diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
2356 +index fe1258778cbc4..5b83d1bc0e74d 100644
2357 +--- a/drivers/net/ethernet/intel/i40e/i40e.h
2358 ++++ b/drivers/net/ethernet/intel/i40e/i40e.h
2359 +@@ -159,6 +159,7 @@ enum i40e_vsi_state_t {
2360 + __I40E_VSI_OVERFLOW_PROMISC,
2361 + __I40E_VSI_REINIT_REQUESTED,
2362 + __I40E_VSI_DOWN_REQUESTED,
2363 ++ __I40E_VSI_RELEASING,
2364 + /* This must be last as it determines the size of the BITMAP */
2365 + __I40E_VSI_STATE_SIZE__,
2366 + };
2367 +@@ -1144,6 +1145,7 @@ void i40e_ptp_save_hw_time(struct i40e_pf *pf);
2368 + void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
2369 + void i40e_ptp_init(struct i40e_pf *pf);
2370 + void i40e_ptp_stop(struct i40e_pf *pf);
2371 ++int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
2372 + int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
2373 + i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
2374 + i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
2375 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
2376 +index 52c2d6fdeb7a0..583eae71cda4b 100644
2377 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
2378 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
2379 +@@ -1789,6 +1789,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
2380 + bool is_add)
2381 + {
2382 + struct i40e_pf *pf = vsi->back;
2383 ++ u16 num_tc_qps = 0;
2384 + u16 sections = 0;
2385 + u8 netdev_tc = 0;
2386 + u16 numtc = 1;
2387 +@@ -1796,13 +1797,33 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
2388 + u8 offset;
2389 + u16 qmap;
2390 + int i;
2391 +- u16 num_tc_qps = 0;
2392 +
2393 + sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2394 + offset = 0;
2395 ++ /* zero out queue mapping, it will get updated on the end of the function */
2396 ++ memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
2397 ++
2398 ++ if (vsi->type == I40E_VSI_MAIN) {
2399 ++ /* This code helps to add more queues to the VSI if we have
2400 ++ * more cores than RSS can support; the higher cores will
2401 ++ * be served by ATR or other filters. Furthermore, a
2402 ++ * non-zero req_queue_pairs says that the user requested a new
2403 ++ * queue count via ethtool's set_channels, so use this
2404 ++ * value for queue distribution across traffic classes.
2405 ++ */
2406 ++ if (vsi->req_queue_pairs > 0)
2407 ++ vsi->num_queue_pairs = vsi->req_queue_pairs;
2408 ++ else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2409 ++ vsi->num_queue_pairs = pf->num_lan_msix;
2410 ++ }
2411 +
2412 + /* Number of queues per enabled TC */
2413 +- num_tc_qps = vsi->alloc_queue_pairs;
2414 ++ if (vsi->type == I40E_VSI_MAIN ||
2415 ++ (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
2416 ++ num_tc_qps = vsi->num_queue_pairs;
2417 ++ else
2418 ++ num_tc_qps = vsi->alloc_queue_pairs;
2419 ++
2420 + if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2421 + /* Find numtc from enabled TC bitmap */
2422 + for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2423 +@@ -1880,15 +1901,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
2424 + }
2425 + ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
2426 + }
2427 +-
2428 +- /* Set actual Tx/Rx queue pairs */
2429 +- vsi->num_queue_pairs = offset;
2430 +- if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
2431 +- if (vsi->req_queue_pairs > 0)
2432 +- vsi->num_queue_pairs = vsi->req_queue_pairs;
2433 +- else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2434 +- vsi->num_queue_pairs = pf->num_lan_msix;
2435 +- }
2436 ++ /* Do not change previously set num_queue_pairs for PFs and VFs */
2437 ++ if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
2438 ++ (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
2439 ++ (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
2440 ++ vsi->num_queue_pairs = offset;
2441 +
2442 + /* Scheduler section valid can only be set for ADD VSI */
2443 + if (is_add) {
2444 +@@ -2622,7 +2639,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2445 +
2446 + for (v = 0; v < pf->num_alloc_vsi; v++) {
2447 + if (pf->vsi[v] &&
2448 +- (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2449 ++ (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
2450 ++ !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
2451 + int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2452 +
2453 + if (ret) {
2454 +@@ -5393,6 +5411,58 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
2455 + sizeof(vsi->info.tc_mapping));
2456 + }
2457 +
2458 ++/**
2459 ++ * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
2460 ++ * @vsi: the VSI being reconfigured
2461 ++ * @vsi_offset: offset from main VF VSI
2462 ++ */
2463 ++int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
2464 ++{
2465 ++ struct i40e_vsi_context ctxt = {};
2466 ++ struct i40e_pf *pf;
2467 ++ struct i40e_hw *hw;
2468 ++ int ret;
2469 ++
2470 ++ if (!vsi)
2471 ++ return I40E_ERR_PARAM;
2472 ++ pf = vsi->back;
2473 ++ hw = &pf->hw;
2474 ++
2475 ++ ctxt.seid = vsi->seid;
2476 ++ ctxt.pf_num = hw->pf_id;
2477 ++ ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
2478 ++ ctxt.uplink_seid = vsi->uplink_seid;
2479 ++ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
2480 ++ ctxt.flags = I40E_AQ_VSI_TYPE_VF;
2481 ++ ctxt.info = vsi->info;
2482 ++
2483 ++ i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
2484 ++ false);
2485 ++ if (vsi->reconfig_rss) {
2486 ++ vsi->rss_size = min_t(int, pf->alloc_rss_size,
2487 ++ vsi->num_queue_pairs);
2488 ++ ret = i40e_vsi_config_rss(vsi);
2489 ++ if (ret) {
2490 ++ dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
2491 ++ return ret;
2492 ++ }
2493 ++ vsi->reconfig_rss = false;
2494 ++ }
2495 ++
2496 ++ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2497 ++ if (ret) {
2498 ++ dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
2499 ++ i40e_stat_str(hw, ret),
2500 ++ i40e_aq_str(hw, hw->aq.asq_last_status));
2501 ++ return ret;
2502 ++ }
2503 ++ /* update the local VSI info with updated queue map */
2504 ++ i40e_vsi_update_queue_map(vsi, &ctxt);
2505 ++ vsi->info.valid_sections = 0;
2506 ++
2507 ++ return ret;
2508 ++}
2509 ++
2510 + /**
2511 + * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
2512 + * @vsi: VSI to be configured
2513 +@@ -5683,24 +5753,6 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
2514 + INIT_LIST_HEAD(&vsi->ch_list);
2515 + }
2516 +
2517 +-/**
2518 +- * i40e_is_any_channel - channel exist or not
2519 +- * @vsi: ptr to VSI to which channels are associated with
2520 +- *
2521 +- * Returns true or false if channel(s) exist for associated VSI or not
2522 +- **/
2523 +-static bool i40e_is_any_channel(struct i40e_vsi *vsi)
2524 +-{
2525 +- struct i40e_channel *ch, *ch_tmp;
2526 +-
2527 +- list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
2528 +- if (ch->initialized)
2529 +- return true;
2530 +- }
2531 +-
2532 +- return false;
2533 +-}
2534 +-
2535 + /**
2536 + * i40e_get_max_queues_for_channel
2537 + * @vsi: ptr to VSI to which channels are associated with
2538 +@@ -6206,26 +6258,15 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
2539 + /* By default we are in VEPA mode, if this is the first VF/VMDq
2540 + * VSI to be added switch to VEB mode.
2541 + */
2542 +- if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
2543 +- (!i40e_is_any_channel(vsi))) {
2544 +- if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
2545 +- dev_dbg(&pf->pdev->dev,
2546 +- "Failed to create channel. Override queues (%u) not power of 2\n",
2547 +- vsi->tc_config.tc_info[0].qcount);
2548 +- return -EINVAL;
2549 +- }
2550 +
2551 +- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
2552 +- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
2553 ++ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
2554 ++ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
2555 +
2556 +- if (vsi->type == I40E_VSI_MAIN) {
2557 +- if (pf->flags & I40E_FLAG_TC_MQPRIO)
2558 +- i40e_do_reset(pf, I40E_PF_RESET_FLAG,
2559 +- true);
2560 +- else
2561 +- i40e_do_reset_safe(pf,
2562 +- I40E_PF_RESET_FLAG);
2563 +- }
2564 ++ if (vsi->type == I40E_VSI_MAIN) {
2565 ++ if (pf->flags & I40E_FLAG_TC_MQPRIO)
2566 ++ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
2567 ++ else
2568 ++ i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
2569 + }
2570 + /* now onwards for main VSI, number of queues will be value
2571 + * of TC0's queue count
2572 +@@ -7552,12 +7593,20 @@ config_tc:
2573 + vsi->seid);
2574 + need_reset = true;
2575 + goto exit;
2576 +- } else {
2577 +- dev_info(&vsi->back->pdev->dev,
2578 +- "Setup channel (id:%u) utilizing num_queues %d\n",
2579 +- vsi->seid, vsi->tc_config.tc_info[0].qcount);
2580 ++ } else if (enabled_tc &&
2581 ++ (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
2582 ++ netdev_info(netdev,
2583 ++ "Failed to create channel. Override queues (%u) not power of 2\n",
2584 ++ vsi->tc_config.tc_info[0].qcount);
2585 ++ ret = -EINVAL;
2586 ++ need_reset = true;
2587 ++ goto exit;
2588 + }
2589 +
2590 ++ dev_info(&vsi->back->pdev->dev,
2591 ++ "Setup channel (id:%u) utilizing num_queues %d\n",
2592 ++ vsi->seid, vsi->tc_config.tc_info[0].qcount);
2593 ++
2594 + if (pf->flags & I40E_FLAG_TC_MQPRIO) {
2595 + if (vsi->mqprio_qopt.max_rate[0]) {
2596 + u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
2597 +@@ -8122,9 +8171,8 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
2598 + err = i40e_add_del_cloud_filter(vsi, filter, true);
2599 +
2600 + if (err) {
2601 +- dev_err(&pf->pdev->dev,
2602 +- "Failed to add cloud filter, err %s\n",
2603 +- i40e_stat_str(&pf->hw, err));
2604 ++ dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
2605 ++ err);
2606 + goto err;
2607 + }
2608 +
2609 +@@ -13308,7 +13356,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
2610 + dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
2611 + return -ENODEV;
2612 + }
2613 +-
2614 ++ set_bit(__I40E_VSI_RELEASING, vsi->state);
2615 + uplink_seid = vsi->uplink_seid;
2616 + if (vsi->type != I40E_VSI_SRIOV) {
2617 + if (vsi->netdev_registered) {
2618 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2619 +index a02167cce81e1..41c0a103119c1 100644
2620 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2621 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2622 +@@ -130,17 +130,18 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
2623 + /***********************misc routines*****************************/
2624 +
2625 + /**
2626 +- * i40e_vc_disable_vf
2627 ++ * i40e_vc_reset_vf
2628 + * @vf: pointer to the VF info
2629 +- *
2630 +- * Disable the VF through a SW reset.
2631 ++ * @notify_vf: notify vf about reset or not
2632 ++ * Reset VF handler.
2633 + **/
2634 +-static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
2635 ++static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
2636 + {
2637 + struct i40e_pf *pf = vf->pf;
2638 + int i;
2639 +
2640 +- i40e_vc_notify_vf_reset(vf);
2641 ++ if (notify_vf)
2642 ++ i40e_vc_notify_vf_reset(vf);
2643 +
2644 + /* We want to ensure that an actual reset occurs initiated after this
2645 + * function was called. However, we do not want to wait forever, so
2646 +@@ -158,9 +159,14 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
2647 + usleep_range(10000, 20000);
2648 + }
2649 +
2650 +- dev_warn(&vf->pf->pdev->dev,
2651 +- "Failed to initiate reset for VF %d after 200 milliseconds\n",
2652 +- vf->vf_id);
2653 ++ if (notify_vf)
2654 ++ dev_warn(&vf->pf->pdev->dev,
2655 ++ "Failed to initiate reset for VF %d after 200 milliseconds\n",
2656 ++ vf->vf_id);
2657 ++ else
2658 ++ dev_dbg(&vf->pf->pdev->dev,
2659 ++ "Failed to initiate reset for VF %d after 200 milliseconds\n",
2660 ++ vf->vf_id);
2661 + }
2662 +
2663 + /**
2664 +@@ -621,14 +627,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
2665 + u16 vsi_queue_id,
2666 + struct virtchnl_rxq_info *info)
2667 + {
2668 ++ u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
2669 + struct i40e_pf *pf = vf->pf;
2670 ++ struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2671 + struct i40e_hw *hw = &pf->hw;
2672 + struct i40e_hmc_obj_rxq rx_ctx;
2673 +- u16 pf_queue_id;
2674 + int ret = 0;
2675 +
2676 +- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
2677 +-
2678 + /* clear the context structure first */
2679 + memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2680 +
2681 +@@ -666,6 +671,10 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
2682 + }
2683 + rx_ctx.rxmax = info->max_pkt_size;
2684 +
2685 ++ /* if port VLAN is configured, increase the max packet size */
2686 ++ if (vsi->info.pvid)
2687 ++ rx_ctx.rxmax += VLAN_HLEN;
2688 ++
2689 + /* enable 32bytes desc always */
2690 + rx_ctx.dsize = 1;
2691 +
2692 +@@ -2051,20 +2060,6 @@ err:
2693 + return ret;
2694 + }
2695 +
2696 +-/**
2697 +- * i40e_vc_reset_vf_msg
2698 +- * @vf: pointer to the VF info
2699 +- *
2700 +- * called from the VF to reset itself,
2701 +- * unlike other virtchnl messages, PF driver
2702 +- * doesn't send the response back to the VF
2703 +- **/
2704 +-static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
2705 +-{
2706 +- if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2707 +- i40e_reset_vf(vf, false);
2708 +-}
2709 +-
2710 + /**
2711 + * i40e_vc_config_promiscuous_mode_msg
2712 + * @vf: pointer to the VF info
2713 +@@ -2163,11 +2158,12 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2714 + struct virtchnl_vsi_queue_config_info *qci =
2715 + (struct virtchnl_vsi_queue_config_info *)msg;
2716 + struct virtchnl_queue_pair_info *qpi;
2717 +- struct i40e_pf *pf = vf->pf;
2718 + u16 vsi_id, vsi_queue_id = 0;
2719 +- u16 num_qps_all = 0;
2720 ++ struct i40e_pf *pf = vf->pf;
2721 + i40e_status aq_ret = 0;
2722 + int i, j = 0, idx = 0;
2723 ++ struct i40e_vsi *vsi;
2724 ++ u16 num_qps_all = 0;
2725 +
2726 + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2727 + aq_ret = I40E_ERR_PARAM;
2728 +@@ -2256,9 +2252,15 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2729 + pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2730 + qci->num_queue_pairs;
2731 + } else {
2732 +- for (i = 0; i < vf->num_tc; i++)
2733 +- pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
2734 +- vf->ch[i].num_qps;
2735 ++ for (i = 0; i < vf->num_tc; i++) {
2736 ++ vsi = pf->vsi[vf->ch[i].vsi_idx];
2737 ++ vsi->num_queue_pairs = vf->ch[i].num_qps;
2738 ++
2739 ++ if (i40e_update_adq_vsi_queues(vsi, i)) {
2740 ++ aq_ret = I40E_ERR_CONFIG;
2741 ++ goto error_param;
2742 ++ }
2743 ++ }
2744 + }
2745 +
2746 + error_param:
2747 +@@ -2553,8 +2555,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2748 + } else {
2749 + /* successful request */
2750 + vf->num_req_queues = req_pairs;
2751 +- i40e_vc_notify_vf_reset(vf);
2752 +- i40e_reset_vf(vf, false);
2753 ++ i40e_vc_reset_vf(vf, true);
2754 + return 0;
2755 + }
2756 +
2757 +@@ -3767,8 +3768,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
2758 + vf->num_req_queues = 0;
2759 +
2760 + /* reset the VF in order to allocate resources */
2761 +- i40e_vc_notify_vf_reset(vf);
2762 +- i40e_reset_vf(vf, false);
2763 ++ i40e_vc_reset_vf(vf, true);
2764 +
2765 + return I40E_SUCCESS;
2766 +
2767 +@@ -3808,8 +3808,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
2768 + }
2769 +
2770 + /* reset the VF in order to allocate resources */
2771 +- i40e_vc_notify_vf_reset(vf);
2772 +- i40e_reset_vf(vf, false);
2773 ++ i40e_vc_reset_vf(vf, true);
2774 +
2775 + return I40E_SUCCESS;
2776 +
2777 +@@ -3871,7 +3870,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
2778 + i40e_vc_notify_vf_link_state(vf);
2779 + break;
2780 + case VIRTCHNL_OP_RESET_VF:
2781 +- i40e_vc_reset_vf_msg(vf);
2782 ++ i40e_vc_reset_vf(vf, false);
2783 + ret = 0;
2784 + break;
2785 + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
2786 +@@ -4125,7 +4124,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2787 + /* Force the VF interface down so it has to bring up with new MAC
2788 + * address
2789 + */
2790 +- i40e_vc_disable_vf(vf);
2791 ++ i40e_vc_reset_vf(vf, true);
2792 + dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
2793 +
2794 + error_param:
2795 +@@ -4133,34 +4132,6 @@ error_param:
2796 + return ret;
2797 + }
2798 +
2799 +-/**
2800 +- * i40e_vsi_has_vlans - True if VSI has configured VLANs
2801 +- * @vsi: pointer to the vsi
2802 +- *
2803 +- * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
2804 +- * we have no configured VLANs. Do not call while holding the
2805 +- * mac_filter_hash_lock.
2806 +- */
2807 +-static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
2808 +-{
2809 +- bool have_vlans;
2810 +-
2811 +- /* If we have a port VLAN, then the VSI cannot have any VLANs
2812 +- * configured, as all MAC/VLAN filters will be assigned to the PVID.
2813 +- */
2814 +- if (vsi->info.pvid)
2815 +- return false;
2816 +-
2817 +- /* Since we don't have a PVID, we know that if the device is in VLAN
2818 +- * mode it must be because of a VLAN filter configured on this VSI.
2819 +- */
2820 +- spin_lock_bh(&vsi->mac_filter_hash_lock);
2821 +- have_vlans = i40e_is_vsi_in_vlan(vsi);
2822 +- spin_unlock_bh(&vsi->mac_filter_hash_lock);
2823 +-
2824 +- return have_vlans;
2825 +-}
2826 +-
2827 + /**
2828 + * i40e_ndo_set_vf_port_vlan
2829 + * @netdev: network interface device structure
2830 +@@ -4217,19 +4188,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
2831 + /* duplicate request, so just return success */
2832 + goto error_pvid;
2833 +
2834 +- if (i40e_vsi_has_vlans(vsi)) {
2835 +- dev_err(&pf->pdev->dev,
2836 +- "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
2837 +- vf_id);
2838 +- /* Administrator Error - knock the VF offline until he does
2839 +- * the right thing by reconfiguring his network correctly
2840 +- * and then reloading the VF driver.
2841 +- */
2842 +- i40e_vc_disable_vf(vf);
2843 +- /* During reset the VF got a new VSI, so refresh the pointer. */
2844 +- vsi = pf->vsi[vf->lan_vsi_idx];
2845 +- }
2846 +-
2847 ++ i40e_vc_reset_vf(vf, true);
2848 ++ /* During reset the VF got a new VSI, so refresh the pointer. */
2849 ++ vsi = pf->vsi[vf->lan_vsi_idx];
2850 + /* Locked once because multiple functions below iterate list */
2851 + spin_lock_bh(&vsi->mac_filter_hash_lock);
2852 +
2853 +@@ -4610,7 +4571,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
2854 + goto out;
2855 +
2856 + vf->trusted = setting;
2857 +- i40e_vc_disable_vf(vf);
2858 ++ i40e_vc_reset_vf(vf, true);
2859 + dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
2860 + vf_id, setting ? "" : "un");
2861 +
2862 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
2863 +index c93567f4d0f79..ea85b06857fa2 100644
2864 +--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
2865 ++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
2866 +@@ -892,6 +892,7 @@ static int iavf_set_channels(struct net_device *netdev,
2867 + {
2868 + struct iavf_adapter *adapter = netdev_priv(netdev);
2869 + u32 num_req = ch->combined_count;
2870 ++ int i;
2871 +
2872 + if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
2873 + adapter->num_tc) {
2874 +@@ -902,7 +903,7 @@ static int iavf_set_channels(struct net_device *netdev,
2875 + /* All of these should have already been checked by ethtool before this
2876 + * even gets to us, but just to be sure.
2877 + */
2878 +- if (num_req > adapter->vsi_res->num_queue_pairs)
2879 ++ if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
2880 + return -EINVAL;
2881 +
2882 + if (num_req == adapter->num_active_queues)
2883 +@@ -914,6 +915,20 @@ static int iavf_set_channels(struct net_device *netdev,
2884 + adapter->num_req_queues = num_req;
2885 + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
2886 + iavf_schedule_reset(adapter);
2887 ++
2888 ++ /* wait for the reset to finish */
2889 ++ for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
2890 ++ msleep(IAVF_RESET_WAIT_MS);
2891 ++ if (adapter->flags & IAVF_FLAG_RESET_PENDING)
2892 ++ continue;
2893 ++ break;
2894 ++ }
2895 ++ if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
2896 ++ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2897 ++ adapter->num_active_queues = num_req;
2898 ++ return -EOPNOTSUPP;
2899 ++ }
2900 ++
2901 + return 0;
2902 + }
2903 +
2904 +@@ -960,14 +975,13 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
2905 +
2906 + if (hfunc)
2907 + *hfunc = ETH_RSS_HASH_TOP;
2908 +- if (!indir)
2909 +- return 0;
2910 +-
2911 +- memcpy(key, adapter->rss_key, adapter->rss_key_size);
2912 ++ if (key)
2913 ++ memcpy(key, adapter->rss_key, adapter->rss_key_size);
2914 +
2915 +- /* Each 32 bits pointed by 'indir' is stored with a lut entry */
2916 +- for (i = 0; i < adapter->rss_lut_size; i++)
2917 +- indir[i] = (u32)adapter->rss_lut[i];
2918 ++ if (indir)
2919 ++ /* Each 32 bits pointed to by 'indir' is stored with a lut entry */
2920 ++ for (i = 0; i < adapter->rss_lut_size; i++)
2921 ++ indir[i] = (u32)adapter->rss_lut[i];
2922 +
2923 + return 0;
2924 + }
2925 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
2926 +index f06c079e812ec..643679cad8657 100644
2927 +--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
2928 ++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
2929 +@@ -1616,8 +1616,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
2930 + iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
2931 + return 0;
2932 + }
2933 +-
2934 +- if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
2935 ++ if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
2936 + (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
2937 + iavf_set_promiscuous(adapter, 0);
2938 + return 0;
2939 +@@ -2047,8 +2046,8 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
2940 +
2941 + iavf_free_misc_irq(adapter);
2942 + iavf_reset_interrupt_capability(adapter);
2943 +- iavf_free_queues(adapter);
2944 + iavf_free_q_vectors(adapter);
2945 ++ iavf_free_queues(adapter);
2946 + memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2947 + iavf_shutdown_adminq(&adapter->hw);
2948 + adapter->netdev->flags &= ~IFF_UP;
2949 +@@ -2330,7 +2329,7 @@ static void iavf_adminq_task(struct work_struct *work)
2950 +
2951 + /* check for error indications */
2952 + val = rd32(hw, hw->aq.arq.len);
2953 +- if (val == 0xdeadbeef) /* indicates device in reset */
2954 ++ if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
2955 + goto freedom;
2956 + oldval = val;
2957 + if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
2958 +@@ -3028,11 +3027,11 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
2959 + /* start out with flow type and eth type IPv4 to begin with */
2960 + filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
2961 + err = iavf_parse_cls_flower(adapter, cls_flower, filter);
2962 +- if (err < 0)
2963 ++ if (err)
2964 + goto err;
2965 +
2966 + err = iavf_handle_tclass(adapter, tc, filter);
2967 +- if (err < 0)
2968 ++ if (err)
2969 + goto err;
2970 +
2971 + /* add filter to the list */
2972 +@@ -3419,7 +3418,8 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
2973 + {
2974 + struct iavf_adapter *adapter = netdev_priv(netdev);
2975 +
2976 +- if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
2977 ++ if (adapter->vf_res &&
2978 ++ !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
2979 + features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
2980 + NETIF_F_HW_VLAN_CTAG_RX |
2981 + NETIF_F_HW_VLAN_CTAG_FILTER);
2982 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
2983 +index 66d92a0cfef35..5b67d24b2b5ed 100644
2984 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
2985 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
2986 +@@ -4361,9 +4361,6 @@ static void ice_remove(struct pci_dev *pdev)
2987 + struct ice_pf *pf = pci_get_drvdata(pdev);
2988 + int i;
2989 +
2990 +- if (!pf)
2991 +- return;
2992 +-
2993 + for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
2994 + if (!ice_is_reset_in_progress(pf->state))
2995 + break;
2996 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
2997 +index c74600be570ed..68d7ca17b6f51 100644
2998 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
2999 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
3000 +@@ -163,13 +163,14 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
3001 + MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
3002 + MLX5_SET(destroy_cq_in, in, uid, cq->uid);
3003 + err = mlx5_cmd_exec_in(dev, destroy_cq, in);
3004 ++ if (err)
3005 ++ return err;
3006 +
3007 + synchronize_irq(cq->irqn);
3008 +-
3009 + mlx5_cq_put(cq);
3010 + wait_for_completion(&cq->free);
3011 +
3012 +- return err;
3013 ++ return 0;
3014 + }
3015 + EXPORT_SYMBOL(mlx5_core_destroy_cq);
3016 +
3017 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
3018 +index 07c8d9811bc81..10d195042ab55 100644
3019 +--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
3020 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
3021 +@@ -507,6 +507,8 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
3022 + if (!mlx5_debugfs_root)
3023 + return;
3024 +
3025 +- if (cq->dbg)
3026 ++ if (cq->dbg) {
3027 + rem_res_tree(cq->dbg);
3028 ++ cq->dbg = NULL;
3029 ++ }
3030 + }
3031 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
3032 +index 401b2f5128dd4..78cc6f0bbc72b 100644
3033 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
3034 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
3035 +@@ -1663,7 +1663,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
3036 + if (!ESW_ALLOWED(esw))
3037 + return 0;
3038 +
3039 +- mutex_lock(&esw->mode_lock);
3040 ++ down_write(&esw->mode_lock);
3041 + if (esw->mode == MLX5_ESWITCH_NONE) {
3042 + ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
3043 + } else {
3044 +@@ -1675,7 +1675,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
3045 + if (!ret)
3046 + esw->esw_funcs.num_vfs = num_vfs;
3047 + }
3048 +- mutex_unlock(&esw->mode_lock);
3049 ++ up_write(&esw->mode_lock);
3050 + return ret;
3051 + }
3052 +
3053 +@@ -1719,10 +1719,10 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
3054 + if (!ESW_ALLOWED(esw))
3055 + return;
3056 +
3057 +- mutex_lock(&esw->mode_lock);
3058 ++ down_write(&esw->mode_lock);
3059 + mlx5_eswitch_disable_locked(esw, clear_vf);
3060 + esw->esw_funcs.num_vfs = 0;
3061 +- mutex_unlock(&esw->mode_lock);
3062 ++ up_write(&esw->mode_lock);
3063 + }
3064 +
3065 + int mlx5_eswitch_init(struct mlx5_core_dev *dev)
3066 +@@ -1778,7 +1778,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
3067 + atomic64_set(&esw->offloads.num_flows, 0);
3068 + ida_init(&esw->offloads.vport_metadata_ida);
3069 + mutex_init(&esw->state_lock);
3070 +- mutex_init(&esw->mode_lock);
3071 ++ init_rwsem(&esw->mode_lock);
3072 +
3073 + mlx5_esw_for_all_vports(esw, i, vport) {
3074 + vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
3075 +@@ -1813,7 +1813,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
3076 + esw->dev->priv.eswitch = NULL;
3077 + destroy_workqueue(esw->work_queue);
3078 + esw_offloads_cleanup_reps(esw);
3079 +- mutex_destroy(&esw->mode_lock);
3080 + mutex_destroy(&esw->state_lock);
3081 + ida_destroy(&esw->offloads.vport_metadata_ida);
3082 + mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
3083 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
3084 +index cf87de94418ff..59c674f157a8c 100644
3085 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
3086 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
3087 +@@ -262,7 +262,7 @@ struct mlx5_eswitch {
3088 + /* Protects eswitch mode change that occurs via one or more
3089 + * user commands, i.e. sriov state change, devlink commands.
3090 + */
3091 +- struct mutex mode_lock;
3092 ++ struct rw_semaphore mode_lock;
3093 +
3094 + struct {
3095 + bool enabled;
3096 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
3097 +index 5801f55ff0771..e06b1ba7d2349 100644
3098 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
3099 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
3100 +@@ -2508,7 +2508,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
3101 + if (esw_mode_from_devlink(mode, &mlx5_mode))
3102 + return -EINVAL;
3103 +
3104 +- mutex_lock(&esw->mode_lock);
3105 ++ down_write(&esw->mode_lock);
3106 + cur_mlx5_mode = esw->mode;
3107 + if (cur_mlx5_mode == mlx5_mode)
3108 + goto unlock;
3109 +@@ -2521,7 +2521,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
3110 + err = -EINVAL;
3111 +
3112 + unlock:
3113 +- mutex_unlock(&esw->mode_lock);
3114 ++ up_write(&esw->mode_lock);
3115 + return err;
3116 + }
3117 +
3118 +@@ -2534,14 +2534,14 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3119 + if (IS_ERR(esw))
3120 + return PTR_ERR(esw);
3121 +
3122 +- mutex_lock(&esw->mode_lock);
3123 ++ down_write(&esw->mode_lock);
3124 + err = eswitch_devlink_esw_mode_check(esw);
3125 + if (err)
3126 + goto unlock;
3127 +
3128 + err = esw_mode_to_devlink(esw->mode, mode);
3129 + unlock:
3130 +- mutex_unlock(&esw->mode_lock);
3131 ++ up_write(&esw->mode_lock);
3132 + return err;
3133 + }
3134 +
3135 +@@ -2557,7 +2557,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
3136 + if (IS_ERR(esw))
3137 + return PTR_ERR(esw);
3138 +
3139 +- mutex_lock(&esw->mode_lock);
3140 ++ down_write(&esw->mode_lock);
3141 + err = eswitch_devlink_esw_mode_check(esw);
3142 + if (err)
3143 + goto out;
3144 +@@ -2599,7 +2599,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
3145 + }
3146 +
3147 + esw->offloads.inline_mode = mlx5_mode;
3148 +- mutex_unlock(&esw->mode_lock);
3149 ++ up_write(&esw->mode_lock);
3150 + return 0;
3151 +
3152 + revert_inline_mode:
3153 +@@ -2609,7 +2609,7 @@ revert_inline_mode:
3154 + vport,
3155 + esw->offloads.inline_mode);
3156 + out:
3157 +- mutex_unlock(&esw->mode_lock);
3158 ++ up_write(&esw->mode_lock);
3159 + return err;
3160 + }
3161 +
3162 +@@ -2622,14 +2622,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
3163 + if (IS_ERR(esw))
3164 + return PTR_ERR(esw);
3165 +
3166 +- mutex_lock(&esw->mode_lock);
3167 ++ down_write(&esw->mode_lock);
3168 + err = eswitch_devlink_esw_mode_check(esw);
3169 + if (err)
3170 + goto unlock;
3171 +
3172 + err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
3173 + unlock:
3174 +- mutex_unlock(&esw->mode_lock);
3175 ++ up_write(&esw->mode_lock);
3176 + return err;
3177 + }
3178 +
3179 +@@ -2645,7 +2645,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
3180 + if (IS_ERR(esw))
3181 + return PTR_ERR(esw);
3182 +
3183 +- mutex_lock(&esw->mode_lock);
3184 ++ down_write(&esw->mode_lock);
3185 + err = eswitch_devlink_esw_mode_check(esw);
3186 + if (err)
3187 + goto unlock;
3188 +@@ -2691,7 +2691,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
3189 + }
3190 +
3191 + unlock:
3192 +- mutex_unlock(&esw->mode_lock);
3193 ++ up_write(&esw->mode_lock);
3194 + return err;
3195 + }
3196 +
3197 +@@ -2706,15 +2706,15 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
3198 + return PTR_ERR(esw);
3199 +
3200 +
3201 +- mutex_lock(&esw->mode_lock);
3202 ++ down_write(&esw->mode_lock);
3203 + err = eswitch_devlink_esw_mode_check(esw);
3204 + if (err)
3205 + goto unlock;
3206 +
3207 + *encap = esw->offloads.encap;
3208 + unlock:
3209 +- mutex_unlock(&esw->mode_lock);
3210 +- return 0;
3211 ++ up_write(&esw->mode_lock);
3212 ++ return err;
3213 + }
3214 +
3215 + static bool
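
The eswitch hunks above convert esw->mode_lock from a mutex to an rw_semaphore (every existing site takes it for write, leaving room for readers later) and also fix mlx5_devlink_eswitch_encap_mode_get() to return err instead of an unconditional 0. A sketch of the mutex-to-rwsem mapping, assuming a file-scope lock:

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(mode_lock);        /* replaces DEFINE_MUTEX(mode_lock) */

    static void exclusive_path(void)
    {
            down_write(&mode_lock);         /* was mutex_lock()   */
            /* ... mode changes ... */
            up_write(&mode_lock);           /* was mutex_unlock() */
    }

    static void shared_path(void)
    {
            down_read(&mode_lock);          /* readers may run concurrently */
            /* ... read-only queries ... */
            up_read(&mode_lock);
    }

An rw_semaphore needs no destroy step, which is why the mutex_destroy() call drops out of mlx5_eswitch_cleanup().
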
3216 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
3217 +index fe5476a76464f..11cc3ea5010aa 100644
3218 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
3219 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
3220 +@@ -365,6 +365,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
3221 + bool is_bonded, is_in_lag, mode_supported;
3222 + int bond_status = 0;
3223 + int num_slaves = 0;
3224 ++ int changed = 0;
3225 + int idx;
3226 +
3227 + if (!netif_is_lag_master(upper))
3228 +@@ -401,27 +402,27 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
3229 + */
3230 + is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;
3231 +
3232 +- if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
3233 +- NL_SET_ERR_MSG_MOD(info->info.extack,
3234 +- "Can't activate LAG offload, PF is configured with more than 64 VFs");
3235 +- return 0;
3236 +- }
3237 +-
3238 + /* Lag mode must be activebackup or hash. */
3239 + mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
3240 + tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
3241 +
3242 +- if (is_in_lag && !mode_supported)
3243 +- NL_SET_ERR_MSG_MOD(info->info.extack,
3244 +- "Can't activate LAG offload, TX type isn't supported");
3245 +-
3246 + is_bonded = is_in_lag && mode_supported;
3247 + if (tracker->is_bonded != is_bonded) {
3248 + tracker->is_bonded = is_bonded;
3249 +- return 1;
3250 ++ changed = 1;
3251 + }
3252 +
3253 +- return 0;
3254 ++ if (!is_in_lag)
3255 ++ return changed;
3256 ++
3257 ++ if (!mlx5_lag_is_ready(ldev))
3258 ++ NL_SET_ERR_MSG_MOD(info->info.extack,
3259 ++ "Can't activate LAG offload, PF is configured with more than 64 VFs");
3260 ++ else if (!mode_supported)
3261 ++ NL_SET_ERR_MSG_MOD(info->info.extack,
3262 ++ "Can't activate LAG offload, TX type isn't supported");
3263 ++
3264 ++ return changed;
3265 + }
3266 +
3267 + static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
3268 +@@ -464,9 +465,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
3269 +
3270 + ldev = container_of(this, struct mlx5_lag, nb);
3271 +
3272 +- if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
3273 +- return NOTIFY_DONE;
3274 +-
3275 + tracker = ldev->tracker;
3276 +
3277 + switch (event) {
3278 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
3279 +index 143b2cb13bf94..e7fbc9b30bf96 100644
3280 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
3281 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
3282 +@@ -21,6 +21,7 @@
3283 + #include <linux/delay.h>
3284 + #include <linux/mfd/syscon.h>
3285 + #include <linux/regmap.h>
3286 ++#include <linux/pm_runtime.h>
3287 +
3288 + #include "stmmac_platform.h"
3289 +
3290 +@@ -1335,6 +1336,8 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
3291 + return ret;
3292 + }
3293 +
3294 ++ pm_runtime_get_sync(dev);
3295 ++
3296 + if (bsp_priv->integrated_phy)
3297 + rk_gmac_integrated_phy_powerup(bsp_priv);
3298 +
3299 +@@ -1346,6 +1349,8 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
3300 + if (gmac->integrated_phy)
3301 + rk_gmac_integrated_phy_powerdown(gmac);
3302 +
3303 ++ pm_runtime_put_sync(&gmac->pdev->dev);
3304 ++
3305 + phy_power_on(gmac, false);
3306 + gmac_clk_enable(gmac, false);
3307 + }
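
The dwmac-rk hunk pins the device active via runtime PM for as long as the MAC is powered up, dropping the reference on powerdown. A sketch of the get/put pairing, assuming dev is the driver's struct device pointer:

    #include <linux/pm_runtime.h>

    static int hw_powerup(struct device *dev)
    {
            int ret = pm_runtime_get_sync(dev);     /* resume and take a ref */

            if (ret < 0) {
                    pm_runtime_put_noidle(dev);     /* balance the failed get */
                    return ret;
            }
            return 0;
    }

    static void hw_powerdown(struct device *dev)
    {
            pm_runtime_put_sync(dev);               /* drop ref, allow suspend */
    }

(The hunk itself ignores the pm_runtime_get_sync() return value; the error check here is the usual defensive form.)
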
3308 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
3309 +index 70d41783329dd..f37b6d57b2fe2 100644
3310 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
3311 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
3312 +@@ -485,8 +485,28 @@ static int socfpga_dwmac_resume(struct device *dev)
3313 + }
3314 + #endif /* CONFIG_PM_SLEEP */
3315 +
3316 +-static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
3317 +- socfpga_dwmac_resume);
3318 ++static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev)
3319 ++{
3320 ++ struct net_device *ndev = dev_get_drvdata(dev);
3321 ++ struct stmmac_priv *priv = netdev_priv(ndev);
3322 ++
3323 ++ stmmac_bus_clks_config(priv, false);
3324 ++
3325 ++ return 0;
3326 ++}
3327 ++
3328 ++static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev)
3329 ++{
3330 ++ struct net_device *ndev = dev_get_drvdata(dev);
3331 ++ struct stmmac_priv *priv = netdev_priv(ndev);
3332 ++
3333 ++ return stmmac_bus_clks_config(priv, true);
3334 ++}
3335 ++
3336 ++static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
3337 ++ SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume)
3338 ++ SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL)
3339 ++};
3340 +
3341 + static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
3342 + .set_phy_mode = socfpga_gen5_set_phy_mode,
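
SIMPLE_DEV_PM_OPS() only fills in the system-sleep callbacks, so the socfpga hunk open-codes the dev_pm_ops table to add runtime-PM handlers alongside them. A sketch with placeholder foo_* callbacks:

    #include <linux/pm.h>

    static int __maybe_unused foo_suspend(struct device *dev) { return 0; }
    static int __maybe_unused foo_resume(struct device *dev) { return 0; }
    static int __maybe_unused foo_runtime_suspend(struct device *dev) { return 0; }
    static int __maybe_unused foo_runtime_resume(struct device *dev) { return 0; }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };

The trailing NULL is the optional runtime_idle callback; __maybe_unused keeps the build clean when CONFIG_PM is off.
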
3343 +diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
3344 +index b40b711cf4bd5..a37aae00e128f 100644
3345 +--- a/drivers/net/ipa/ipa_endpoint.c
3346 ++++ b/drivers/net/ipa/ipa_endpoint.c
3347 +@@ -703,6 +703,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
3348 + u32 offset;
3349 + u32 val;
3350 +
3351 ++ /* This should only be changed when HOL_BLOCK_EN is disabled */
3352 + offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
3353 + val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
3354 + iowrite32(val, ipa->reg_virt + offset);
3355 +@@ -730,6 +731,7 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
3356 + if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
3357 + continue;
3358 +
3359 ++ ipa_endpoint_init_hol_block_enable(endpoint, false);
3360 + ipa_endpoint_init_hol_block_timer(endpoint, 0);
3361 + ipa_endpoint_init_hol_block_enable(endpoint, true);
3362 + }
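
The IPA fix quiesces head-of-line blocking before rewriting its timer register, then re-arms it, since the timer field must not change while HOL_BLOCK_EN is set. The general disable-modify-enable shape, with hypothetical register offsets:

    #include <linux/io.h>
    #include <linux/bits.h>

    #define CTRL_OFF        0x00            /* hypothetical offsets/bits */
    #define TIMER_OFF       0x04
    #define CTRL_EN         BIT(0)

    /* A field guarded by an enable bit is only safe to rewrite while
     * the feature is disabled.
     */
    static void set_guarded_timer(void __iomem *base, u32 usec)
    {
            u32 ctrl = readl(base + CTRL_OFF);

            writel(ctrl & ~CTRL_EN, base + CTRL_OFF);       /* 1. disable   */
            writel(usec, base + TIMER_OFF);                 /* 2. reprogram */
            writel(ctrl | CTRL_EN, base + CTRL_OFF);        /* 3. re-enable */
    }
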
3363 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
3364 +index c671d8e257741..ffbc7eda95eed 100644
3365 +--- a/drivers/net/tun.c
3366 ++++ b/drivers/net/tun.c
3367 +@@ -1021,6 +1021,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
3368 + {
3369 + struct tun_struct *tun = netdev_priv(dev);
3370 + int txq = skb->queue_mapping;
3371 ++ struct netdev_queue *queue;
3372 + struct tun_file *tfile;
3373 + int len = skb->len;
3374 +
3375 +@@ -1065,6 +1066,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
3376 + if (ptr_ring_produce(&tfile->tx_ring, skb))
3377 + goto drop;
3378 +
3379 ++ /* NETIF_F_LLTX requires us to do our own update of trans_start */
3380 ++ queue = netdev_get_tx_queue(dev, txq);
3381 ++ queue->trans_start = jiffies;
3382 ++
3383 + /* Notify and wake up reader process */
3384 + if (tfile->flags & TUN_FASYNC)
3385 + kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
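
Drivers that advertise NETIF_F_LLTX bypass the core TX queue lock, so the core stops updating trans_start for them and the netdev watchdog would otherwise see a stale timestamp. The tun hunk therefore stamps the queue in its own xmit path. A sketch of the idiom as it exists on 5.10 (later kernels add txq_trans_cond_update() for this):

    #include <linux/netdevice.h>
    #include <linux/jiffies.h>

    static void lltx_stamp_queue(struct net_device *dev, u16 txq_idx)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, txq_idx);

            txq->trans_start = jiffies;     /* keep the TX watchdog happy */
    }
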
3386 +diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
3387 +index c51793f6546f1..fdfd7b8f3a76d 100644
3388 +--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
3389 ++++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
3390 +@@ -1310,6 +1310,7 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
3391 + .ngpios = 151,
3392 + .wakeirq_map = sdm845_pdc_map,
3393 + .nwakeirq_map = ARRAY_SIZE(sdm845_pdc_map),
3394 ++ .wakeirq_dual_edge_errata = true,
3395 + };
3396 +
3397 + static const struct msm_pinctrl_soc_data sdm845_acpi_pinctrl = {
3398 +diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
3399 +index 8c0867bda8280..0dfaa1a43b674 100644
3400 +--- a/drivers/platform/x86/hp_accel.c
3401 ++++ b/drivers/platform/x86/hp_accel.c
3402 +@@ -372,9 +372,11 @@ static int lis3lv02d_add(struct acpi_device *device)
3403 + INIT_WORK(&hpled_led.work, delayed_set_status_worker);
3404 + ret = led_classdev_register(NULL, &hpled_led.led_classdev);
3405 + if (ret) {
3406 ++ i8042_remove_filter(hp_accel_i8042_filter);
3407 + lis3lv02d_joystick_disable(&lis3_dev);
3408 + lis3lv02d_poweroff(&lis3_dev);
3409 + flush_work(&hpled_led.work);
3410 ++ lis3lv02d_remove_fs(&lis3_dev);
3411 + return ret;
3412 + }
3413 +
3414 +diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
3415 +index c2c7850ff7b42..727d8f019eddd 100644
3416 +--- a/drivers/scsi/advansys.c
3417 ++++ b/drivers/scsi/advansys.c
3418 +@@ -3366,8 +3366,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
3419 + shost->host_no);
3420 +
3421 + seq_printf(m,
3422 +- " iop_base 0x%lx, cable_detect: %X, err_code %u\n",
3423 +- (unsigned long)v->iop_base,
3424 ++ " iop_base 0x%p, cable_detect: %X, err_code %u\n",
3425 ++ v->iop_base,
3426 + AdvReadWordRegister(iop_base,IOPW_SCSI_CFG1) & CABLE_DETECT,
3427 + v->err_code);
3428 +
3429 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
3430 +index 990b700de6892..06a23718a7c7f 100644
3431 +--- a/drivers/scsi/lpfc/lpfc_sli.c
3432 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
3433 +@@ -20080,6 +20080,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
3434 + fail_msg,
3435 + piocbq->iotag, piocbq->sli4_xritag);
3436 + list_add_tail(&piocbq->list, &completions);
3437 ++ fail_msg = NULL;
3438 + }
3439 + spin_unlock_irqrestore(&pring->ring_lock, iflags);
3440 + }
3441 +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
3442 +index 4ebd8851a0c9f..734745f450211 100644
3443 +--- a/drivers/scsi/qla2xxx/qla_mbx.c
3444 ++++ b/drivers/scsi/qla2xxx/qla_mbx.c
3445 +@@ -1650,10 +1650,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
3446 + mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
3447 + if (IS_FWI2_CAPABLE(vha->hw))
3448 + mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
3449 +- if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
3450 +- mcp->in_mb |= MBX_15;
3451 +- mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23;
3452 +- }
3453 ++ if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
3454 ++ mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;
3455 +
3456 + mcp->tov = MBX_TOV_SECONDS;
3457 + mcp->flags = 0;
3458 +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
3459 +index b6540b92f5661..3fc7c2a31c191 100644
3460 +--- a/drivers/scsi/scsi_debug.c
3461 ++++ b/drivers/scsi/scsi_debug.c
3462 +@@ -1855,7 +1855,7 @@ static int resp_readcap16(struct scsi_cmnd *scp,
3463 + {
3464 + unsigned char *cmd = scp->cmnd;
3465 + unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
3466 +- int alloc_len;
3467 ++ u32 alloc_len;
3468 +
3469 + alloc_len = get_unaligned_be32(cmd + 10);
3470 + /* following just in case virtual_gb changed */
3471 +@@ -1884,7 +1884,7 @@ static int resp_readcap16(struct scsi_cmnd *scp,
3472 + }
3473 +
3474 + return fill_from_dev_buffer(scp, arr,
3475 +- min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
3476 ++ min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
3477 + }
3478 +
3479 + #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
3480 +@@ -1895,8 +1895,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd *scp,
3481 + unsigned char *cmd = scp->cmnd;
3482 + unsigned char *arr;
3483 + int host_no = devip->sdbg_host->shost->host_no;
3484 +- int n, ret, alen, rlen;
3485 + int port_group_a, port_group_b, port_a, port_b;
3486 ++ u32 alen, n, rlen;
3487 ++ int ret;
3488 +
3489 + alen = get_unaligned_be32(cmd + 6);
3490 + arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
3491 +@@ -1958,9 +1959,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd *scp,
3492 + * - The constructed command length
3493 + * - The maximum array size
3494 + */
3495 +- rlen = min_t(int, alen, n);
3496 ++ rlen = min(alen, n);
3497 + ret = fill_from_dev_buffer(scp, arr,
3498 +- min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
3499 ++ min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
3500 + kfree(arr);
3501 + return ret;
3502 + }
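
The scsi_debug hunks change ALLOCATION LENGTH handling from int to u32: the CDB field is an unsigned 32-bit value, so reading it into a signed int lets anything above INT_MAX wrap negative, and min_t(int, ...) would then pick the bogus negative length. A sketch of the safe form:

    #include <linux/kernel.h>
    #include <asm/unaligned.h>

    /* cdb points at the CDB; buf_sz is the local response buffer size. */
    static u32 clamp_alloc_len(const u8 *cdb, u32 buf_sz)
    {
            u32 alloc_len = get_unaligned_be32(cdb + 10);   /* bytes 10..13 */

            return min_t(u32, alloc_len, buf_sz);   /* unsigned, cannot go negative */
    }
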
3503 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
3504 +index 1378bb1a7371c..8de67679a8782 100644
3505 +--- a/drivers/scsi/scsi_sysfs.c
3506 ++++ b/drivers/scsi/scsi_sysfs.c
3507 +@@ -796,6 +796,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
3508 + int i, ret;
3509 + struct scsi_device *sdev = to_scsi_device(dev);
3510 + enum scsi_device_state state = 0;
3511 ++ bool rescan_dev = false;
3512 +
3513 + for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
3514 + const int len = strlen(sdev_states[i].name);
3515 +@@ -814,20 +815,27 @@ store_state_field(struct device *dev, struct device_attribute *attr,
3516 + }
3517 +
3518 + mutex_lock(&sdev->state_mutex);
3519 +- ret = scsi_device_set_state(sdev, state);
3520 +- /*
3521 +- * If the device state changes to SDEV_RUNNING, we need to
3522 +- * run the queue to avoid I/O hang, and rescan the device
3523 +- * to revalidate it. Running the queue first is necessary
3524 +- * because another thread may be waiting inside
3525 +- * blk_mq_freeze_queue_wait() and because that call may be
3526 +- * waiting for pending I/O to finish.
3527 +- */
3528 +- if (ret == 0 && state == SDEV_RUNNING) {
3529 ++ if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
3530 ++ ret = count;
3531 ++ } else {
3532 ++ ret = scsi_device_set_state(sdev, state);
3533 ++ if (ret == 0 && state == SDEV_RUNNING)
3534 ++ rescan_dev = true;
3535 ++ }
3536 ++ mutex_unlock(&sdev->state_mutex);
3537 ++
3538 ++ if (rescan_dev) {
3539 ++ /*
3540 ++ * If the device state changes to SDEV_RUNNING, we need to
3541 ++ * run the queue to avoid I/O hang, and rescan the device
3542 ++ * to revalidate it. Running the queue first is necessary
3543 ++ * because another thread may be waiting inside
3544 ++ * blk_mq_freeze_queue_wait() and because that call may be
3545 ++ * waiting for pending I/O to finish.
3546 ++ */
3547 + blk_mq_run_hw_queues(sdev->request_queue, true);
3548 + scsi_rescan_device(dev);
3549 + }
3550 +- mutex_unlock(&sdev->state_mutex);
3551 +
3552 + return ret == 0 ? count : -EINVAL;
3553 + }
3554 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
3555 +index 930f35863cbb5..e3a9a02cadf5a 100644
3556 +--- a/drivers/scsi/ufs/ufshcd.c
3557 ++++ b/drivers/scsi/ufs/ufshcd.c
3558 +@@ -6099,27 +6099,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
3559 + return retval;
3560 + }
3561 +
3562 +-struct ctm_info {
3563 +- struct ufs_hba *hba;
3564 +- unsigned long pending;
3565 +- unsigned int ncpl;
3566 +-};
3567 +-
3568 +-static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
3569 +-{
3570 +- struct ctm_info *const ci = priv;
3571 +- struct completion *c;
3572 +-
3573 +- WARN_ON_ONCE(reserved);
3574 +- if (test_bit(req->tag, &ci->pending))
3575 +- return true;
3576 +- ci->ncpl++;
3577 +- c = req->end_io_data;
3578 +- if (c)
3579 +- complete(c);
3580 +- return true;
3581 +-}
3582 +-
3583 + /**
3584 + * ufshcd_tmc_handler - handle task management function completion
3585 + * @hba: per adapter instance
3586 +@@ -6130,14 +6109,22 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
3587 + */
3588 + static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
3589 + {
3590 +- struct request_queue *q = hba->tmf_queue;
3591 +- struct ctm_info ci = {
3592 +- .hba = hba,
3593 +- .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
3594 +- };
3595 ++ unsigned long pending, issued;
3596 ++ irqreturn_t ret = IRQ_NONE;
3597 ++ int tag;
3598 +
3599 +- blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
3600 +- return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
3601 ++ pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
3602 ++
3603 ++ issued = hba->outstanding_tasks & ~pending;
3604 ++ for_each_set_bit(tag, &issued, hba->nutmrs) {
3605 ++ struct request *req = hba->tmf_rqs[tag];
3606 ++ struct completion *c = req->end_io_data;
3607 ++
3608 ++ complete(c);
3609 ++ ret = IRQ_HANDLED;
3610 ++ }
3611 ++
3612 ++ return ret;
3613 + }
3614 +
3615 + /**
3616 +@@ -6267,9 +6254,9 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
3617 + ufshcd_hold(hba, false);
3618 +
3619 + spin_lock_irqsave(host->host_lock, flags);
3620 +- blk_mq_start_request(req);
3621 +
3622 + task_tag = req->tag;
3623 ++ hba->tmf_rqs[req->tag] = req;
3624 + treq->req_header.dword_0 |= cpu_to_be32(task_tag);
3625 +
3626 + memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
3627 +@@ -6293,11 +6280,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
3628 + err = wait_for_completion_io_timeout(&wait,
3629 + msecs_to_jiffies(TM_CMD_TIMEOUT));
3630 + if (!err) {
3631 +- /*
3632 +- * Make sure that ufshcd_compl_tm() does not trigger a
3633 +- * use-after-free.
3634 +- */
3635 +- req->end_io_data = NULL;
3636 + ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
3637 + dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
3638 + __func__, tm_function);
3639 +@@ -6313,6 +6295,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
3640 + }
3641 +
3642 + spin_lock_irqsave(hba->host->host_lock, flags);
3643 ++ hba->tmf_rqs[req->tag] = NULL;
3644 + __clear_bit(task_tag, &hba->outstanding_tasks);
3645 + spin_unlock_irqrestore(hba->host->host_lock, flags);
3646 +
3647 +@@ -9235,6 +9218,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
3648 + err = PTR_ERR(hba->tmf_queue);
3649 + goto free_tmf_tag_set;
3650 + }
3651 ++ hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
3652 ++ sizeof(*hba->tmf_rqs), GFP_KERNEL);
3653 ++ if (!hba->tmf_rqs) {
3654 ++ err = -ENOMEM;
3655 ++ goto free_tmf_queue;
3656 ++ }
3657 +
3658 + /* Reset the attached device */
3659 + ufshcd_vops_device_reset(hba);
3660 +diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
3661 +index 1ba9c786feb6d..35dd5197ccb96 100644
3662 +--- a/drivers/scsi/ufs/ufshcd.h
3663 ++++ b/drivers/scsi/ufs/ufshcd.h
3664 +@@ -734,6 +734,7 @@ struct ufs_hba {
3665 +
3666 + struct blk_mq_tag_set tmf_tag_set;
3667 + struct request_queue *tmf_queue;
3668 ++ struct request **tmf_rqs;
3669 +
3670 + struct uic_command *active_uic_cmd;
3671 + struct mutex uic_cmd_mutex;
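
The UFS rework drops the blk_mq_tagset_busy_iter() walk (racy from IRQ context once a timed-out request is reused) in favor of hba->tmf_rqs, a tag-indexed array that the issue path fills under the host lock and the IRQ handler consults directly. A simplified sketch of the lookup side, locking omitted:

    #include <linux/bitops.h>
    #include <linux/blkdev.h>
    #include <linux/completion.h>

    struct tmf_table {
            struct request **rqs;           /* [nr_tags], NULL when slot idle */
            unsigned int nr_tags;
    };

    static void tmf_complete_done(struct tmf_table *t, unsigned long done_bits)
    {
            unsigned int tag;

            for_each_set_bit(tag, &done_bits, t->nr_tags) {
                    struct completion *c = t->rqs[tag]->end_io_data;

                    complete(c);            /* wake the waiting issue path */
            }
    }
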
3672 +diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
3673 +index e5d7fb81ad665..44a931d41a132 100644
3674 +--- a/drivers/sh/maple/maple.c
3675 ++++ b/drivers/sh/maple/maple.c
3676 +@@ -835,8 +835,10 @@ static int __init maple_bus_init(void)
3677 +
3678 + maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);
3679 +
3680 +- if (!maple_queue_cache)
3681 ++ if (!maple_queue_cache) {
3682 ++ retval = -ENOMEM;
3683 + goto cleanup_bothirqs;
3684 ++ }
3685 +
3686 + INIT_LIST_HEAD(&maple_waitq);
3687 + INIT_LIST_HEAD(&maple_sentq);
3688 +@@ -849,6 +851,7 @@ static int __init maple_bus_init(void)
3689 + if (!mdev[i]) {
3690 + while (i-- > 0)
3691 + maple_free_dev(mdev[i]);
3692 ++ retval = -ENOMEM;
3693 + goto cleanup_cache;
3694 + }
3695 + baseunits[i] = mdev[i];
3696 +diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
3697 +index b912ad2f4b720..4df6d04315e39 100644
3698 +--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
3699 ++++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
3700 +@@ -6679,7 +6679,6 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
3701 + struct sta_info *psta_bmc;
3702 + struct list_head *xmitframe_plist, *xmitframe_phead;
3703 + struct xmit_frame *pxmitframe = NULL;
3704 +- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
3705 + struct sta_priv *pstapriv = &padapter->stapriv;
3706 +
3707 + /* for BC/MC Frames */
3708 +@@ -6690,8 +6689,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
3709 + if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) {
3710 + msleep(10);/* 10ms, ATIM(HIQ) Windows */
3711 +
3712 +- /* spin_lock_bh(&psta_bmc->sleep_q.lock); */
3713 +- spin_lock_bh(&pxmitpriv->lock);
3714 ++ spin_lock_bh(&psta_bmc->sleep_q.lock);
3715 +
3716 + xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
3717 + xmitframe_plist = get_next(xmitframe_phead);
3718 +@@ -6717,8 +6715,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
3719 + rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
3720 + }
3721 +
3722 +- /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
3723 +- spin_unlock_bh(&pxmitpriv->lock);
3724 ++ spin_unlock_bh(&psta_bmc->sleep_q.lock);
3725 +
3726 + /* check hi queue and bmc_sleepq */
3727 + rtw_chk_hi_queue_cmd(padapter);
3728 +diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
3729 +index 6979f8dbccb84..0d47e6e121777 100644
3730 +--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
3731 ++++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
3732 +@@ -1144,10 +1144,8 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_
3733 + if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) {
3734 + struct list_head *xmitframe_plist, *xmitframe_phead;
3735 + struct xmit_frame *pxmitframe = NULL;
3736 +- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
3737 +
3738 +- /* spin_lock_bh(&psta->sleep_q.lock); */
3739 +- spin_lock_bh(&pxmitpriv->lock);
3740 ++ spin_lock_bh(&psta->sleep_q.lock);
3741 +
3742 + xmitframe_phead = get_list_head(&psta->sleep_q);
3743 + xmitframe_plist = get_next(xmitframe_phead);
3744 +@@ -1182,12 +1180,10 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_
3745 + update_beacon(padapter, _TIM_IE_, NULL, true);
3746 + }
3747 +
3748 +- /* spin_unlock_bh(&psta->sleep_q.lock); */
3749 +- spin_unlock_bh(&pxmitpriv->lock);
3750 ++ spin_unlock_bh(&psta->sleep_q.lock);
3751 +
3752 + } else {
3753 +- /* spin_unlock_bh(&psta->sleep_q.lock); */
3754 +- spin_unlock_bh(&pxmitpriv->lock);
3755 ++ spin_unlock_bh(&psta->sleep_q.lock);
3756 +
3757 + /* DBG_871X("no buffered packets to xmit\n"); */
3758 + if (pstapriv->tim_bitmap&BIT(psta->aid)) {
3759 +diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
3760 +index e3f56c6cc882e..b1784b4e466f3 100644
3761 +--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
3762 ++++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
3763 +@@ -330,46 +330,48 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
3764 +
3765 + /* list_del_init(&psta->wakeup_list); */
3766 +
3767 +- spin_lock_bh(&pxmitpriv->lock);
3768 +-
3769 ++ spin_lock_bh(&psta->sleep_q.lock);
3770 + rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
3771 + psta->sleepq_len = 0;
3772 ++ spin_unlock_bh(&psta->sleep_q.lock);
3773 ++
3774 ++ spin_lock_bh(&pxmitpriv->lock);
3775 +
3776 + /* vo */
3777 +- /* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */
3778 ++ spin_lock_bh(&pstaxmitpriv->vo_q.sta_pending.lock);
3779 + rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
3780 + list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
3781 + phwxmit = pxmitpriv->hwxmits;
3782 + phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt;
3783 + pstaxmitpriv->vo_q.qcnt = 0;
3784 +- /* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */
3785 ++ spin_unlock_bh(&pstaxmitpriv->vo_q.sta_pending.lock);
3786 +
3787 + /* vi */
3788 +- /* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */
3789 ++ spin_lock_bh(&pstaxmitpriv->vi_q.sta_pending.lock);
3790 + rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
3791 + list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
3792 + phwxmit = pxmitpriv->hwxmits+1;
3793 + phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt;
3794 + pstaxmitpriv->vi_q.qcnt = 0;
3795 +- /* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */
3796 ++ spin_unlock_bh(&pstaxmitpriv->vi_q.sta_pending.lock);
3797 +
3798 + /* be */
3799 +- /* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */
3800 ++ spin_lock_bh(&pstaxmitpriv->be_q.sta_pending.lock);
3801 + rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
3802 + list_del_init(&(pstaxmitpriv->be_q.tx_pending));
3803 + phwxmit = pxmitpriv->hwxmits+2;
3804 + phwxmit->accnt -= pstaxmitpriv->be_q.qcnt;
3805 + pstaxmitpriv->be_q.qcnt = 0;
3806 +- /* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */
3807 ++ spin_unlock_bh(&pstaxmitpriv->be_q.sta_pending.lock);
3808 +
3809 + /* bk */
3810 +- /* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */
3811 ++ spin_lock_bh(&pstaxmitpriv->bk_q.sta_pending.lock);
3812 + rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
3813 + list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
3814 + phwxmit = pxmitpriv->hwxmits+3;
3815 + phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt;
3816 + pstaxmitpriv->bk_q.qcnt = 0;
3817 +- /* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */
3818 ++ spin_unlock_bh(&pstaxmitpriv->bk_q.sta_pending.lock);
3819 +
3820 + spin_unlock_bh(&pxmitpriv->lock);
3821 +
3822 +diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
3823 +index 6ecaff9728fd4..d78cff7ed6a01 100644
3824 +--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
3825 ++++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
3826 +@@ -1871,8 +1871,6 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
3827 + struct list_head *plist, *phead;
3828 + struct xmit_frame *pxmitframe;
3829 +
3830 +- spin_lock_bh(&pframequeue->lock);
3831 +-
3832 + phead = get_list_head(pframequeue);
3833 + plist = get_next(phead);
3834 +
3835 +@@ -1883,7 +1881,6 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
3836 +
3837 + rtw_free_xmitframe(pxmitpriv, pxmitframe);
3838 + }
3839 +- spin_unlock_bh(&pframequeue->lock);
3840 + }
3841 +
3842 + s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
3843 +@@ -1946,6 +1943,7 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
3844 + struct sta_info *psta;
3845 + struct tx_servq *ptxservq;
3846 + struct pkt_attrib *pattrib = &pxmitframe->attrib;
3847 ++ struct xmit_priv *xmit_priv = &padapter->xmitpriv;
3848 + struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
3849 + sint res = _SUCCESS;
3850 +
3851 +@@ -1974,12 +1972,14 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
3852 +
3853 + ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
3854 +
3855 ++ spin_lock_bh(&xmit_priv->lock);
3856 + if (list_empty(&ptxservq->tx_pending))
3857 + list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
3858 +
3859 + list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
3860 + ptxservq->qcnt++;
3861 + phwxmits[ac_index].accnt++;
3862 ++ spin_unlock_bh(&xmit_priv->lock);
3863 +
3864 + exit:
3865 +
3866 +@@ -2397,11 +2397,10 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
3867 + struct list_head *xmitframe_plist, *xmitframe_phead;
3868 + struct xmit_frame *pxmitframe = NULL;
3869 + struct sta_priv *pstapriv = &padapter->stapriv;
3870 +- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
3871 +
3872 + psta_bmc = rtw_get_bcmc_stainfo(padapter);
3873 +
3874 +- spin_lock_bh(&pxmitpriv->lock);
3875 ++ spin_lock_bh(&psta->sleep_q.lock);
3876 +
3877 + xmitframe_phead = get_list_head(&psta->sleep_q);
3878 + xmitframe_plist = get_next(xmitframe_phead);
3879 +@@ -2509,7 +2508,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
3880 +
3881 + _exit:
3882 +
3883 +- spin_unlock_bh(&pxmitpriv->lock);
3884 ++ spin_unlock_bh(&psta->sleep_q.lock);
3885 +
3886 + if (update_mask)
3887 + update_beacon(padapter, _TIM_IE_, NULL, true);
3888 +@@ -2521,9 +2520,8 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
3889 + struct list_head *xmitframe_plist, *xmitframe_phead;
3890 + struct xmit_frame *pxmitframe = NULL;
3891 + struct sta_priv *pstapriv = &padapter->stapriv;
3892 +- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
3893 +
3894 +- spin_lock_bh(&pxmitpriv->lock);
3895 ++ spin_lock_bh(&psta->sleep_q.lock);
3896 +
3897 + xmitframe_phead = get_list_head(&psta->sleep_q);
3898 + xmitframe_plist = get_next(xmitframe_phead);
3899 +@@ -2579,7 +2577,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
3900 + }
3901 + }
3902 +
3903 +- spin_unlock_bh(&pxmitpriv->lock);
3904 ++ spin_unlock_bh(&psta->sleep_q.lock);
3905 + }
3906 +
3907 + void enqueue_pending_xmitbuf(
3908 +diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
3909 +index 44799c4a9f35b..ce5bf2861d0c1 100644
3910 +--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
3911 ++++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
3912 +@@ -572,9 +572,7 @@ s32 rtl8723bs_hal_xmit(
3913 + rtw_issue_addbareq_cmd(padapter, pxmitframe);
3914 + }
3915 +
3916 +- spin_lock_bh(&pxmitpriv->lock);
3917 + err = rtw_xmitframe_enqueue(padapter, pxmitframe);
3918 +- spin_unlock_bh(&pxmitpriv->lock);
3919 + if (err != _SUCCESS) {
3920 + RT_TRACE(_module_hal_xmit_c_, _drv_err_, ("rtl8723bs_hal_xmit: enqueue xmitframe fail\n"));
3921 + rtw_free_xmitframe(pxmitpriv, pxmitframe);
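
The rtl8723bs hunks replace the single driver-wide pxmitpriv->lock around sleep-queue traversal with the per-queue sleep_q/sta_pending locks, re-adding pxmitpriv->lock only where the shared hwxmits accounting is touched. The granularity change, sketched with an illustrative struct txq:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct txq {
            spinlock_t lock;                /* protects just this queue */
            struct list_head frames;
    };

    static void txq_enqueue(struct txq *q, struct list_head *frame)
    {
            spin_lock_bh(&q->lock);         /* other queues stay unblocked */
            list_add_tail(frame, &q->frames);
            spin_unlock_bh(&q->lock);
    }
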
3922 +diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c
3923 +index e06d7e1ebe9c3..61b8cc05f2935 100644
3924 +--- a/drivers/staging/wfx/bus_sdio.c
3925 ++++ b/drivers/staging/wfx/bus_sdio.c
3926 +@@ -120,19 +120,22 @@ static int wfx_sdio_irq_subscribe(void *priv)
3927 + return ret;
3928 + }
3929 +
3930 ++ flags = irq_get_trigger_type(bus->of_irq);
3931 ++ if (!flags)
3932 ++ flags = IRQF_TRIGGER_HIGH;
3933 ++ flags |= IRQF_ONESHOT;
3934 ++ ret = devm_request_threaded_irq(&bus->func->dev, bus->of_irq, NULL,
3935 ++ wfx_sdio_irq_handler_ext, flags,
3936 ++ "wfx", bus);
3937 ++ if (ret)
3938 ++ return ret;
3939 + sdio_claim_host(bus->func);
3940 + cccr = sdio_f0_readb(bus->func, SDIO_CCCR_IENx, NULL);
3941 + cccr |= BIT(0);
3942 + cccr |= BIT(bus->func->num);
3943 + sdio_f0_writeb(bus->func, cccr, SDIO_CCCR_IENx, NULL);
3944 + sdio_release_host(bus->func);
3945 +- flags = irq_get_trigger_type(bus->of_irq);
3946 +- if (!flags)
3947 +- flags = IRQF_TRIGGER_HIGH;
3948 +- flags |= IRQF_ONESHOT;
3949 +- return devm_request_threaded_irq(&bus->func->dev, bus->of_irq, NULL,
3950 +- wfx_sdio_irq_handler_ext, flags,
3951 +- "wfx", bus);
3952 ++ return 0;
3953 + }
3954 +
3955 + static int wfx_sdio_irq_unsubscribe(void *priv)
3956 +diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
3957 +index 6b72afee2f8b7..b240bd1ccb71d 100644
3958 +--- a/drivers/target/target_core_alua.c
3959 ++++ b/drivers/target/target_core_alua.c
3960 +@@ -1702,7 +1702,6 @@ int core_alua_set_tg_pt_gp_id(
3961 + pr_err("Maximum ALUA alua_tg_pt_gps_count:"
3962 + " 0x0000ffff reached\n");
3963 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
3964 +- kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
3965 + return -ENOSPC;
3966 + }
3967 + again:
3968 +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
3969 +index 405d82d447176..109f019d21480 100644
3970 +--- a/drivers/target/target_core_device.c
3971 ++++ b/drivers/target/target_core_device.c
3972 +@@ -758,6 +758,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
3973 + INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
3974 + spin_lock_init(&dev->t10_alua.lba_map_lock);
3975 +
3976 ++ INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
3977 ++
3978 + dev->t10_wwn.t10_dev = dev;
3979 + dev->t10_alua.t10_dev = dev;
3980 +
3981 +diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
3982 +index e7b3c6e5d5744..e4f072a680d41 100644
3983 +--- a/drivers/target/target_core_internal.h
3984 ++++ b/drivers/target/target_core_internal.h
3985 +@@ -150,6 +150,7 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
3986 + void transport_clear_lun_ref(struct se_lun *);
3987 + sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
3988 + void target_qf_do_work(struct work_struct *work);
3989 ++void target_do_delayed_work(struct work_struct *work);
3990 + bool target_check_wce(struct se_device *dev);
3991 + bool target_check_fua(struct se_device *dev);
3992 + void __target_execute_cmd(struct se_cmd *, bool);
3993 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
3994 +index 61b79804d462c..bca3a32a4bfb7 100644
3995 +--- a/drivers/target/target_core_transport.c
3996 ++++ b/drivers/target/target_core_transport.c
3997 +@@ -2065,32 +2065,35 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
3998 + */
3999 + switch (cmd->sam_task_attr) {
4000 + case TCM_HEAD_TAG:
4001 ++ atomic_inc_mb(&dev->non_ordered);
4002 + pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
4003 + cmd->t_task_cdb[0]);
4004 + return false;
4005 + case TCM_ORDERED_TAG:
4006 +- atomic_inc_mb(&dev->dev_ordered_sync);
4007 ++ atomic_inc_mb(&dev->delayed_cmd_count);
4008 +
4009 + pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
4010 + cmd->t_task_cdb[0]);
4011 +-
4012 +- /*
4013 +- * Execute an ORDERED command if no other older commands
4014 +- * exist that need to be completed first.
4015 +- */
4016 +- if (!atomic_read(&dev->simple_cmds))
4017 +- return false;
4018 + break;
4019 + default:
4020 + /*
4021 + * For SIMPLE and UNTAGGED Task Attribute commands
4022 + */
4023 +- atomic_inc_mb(&dev->simple_cmds);
4024 ++ atomic_inc_mb(&dev->non_ordered);
4025 ++
4026 ++ if (atomic_read(&dev->delayed_cmd_count) == 0)
4027 ++ return false;
4028 + break;
4029 + }
4030 +
4031 +- if (atomic_read(&dev->dev_ordered_sync) == 0)
4032 +- return false;
4033 ++ if (cmd->sam_task_attr != TCM_ORDERED_TAG) {
4034 ++ atomic_inc_mb(&dev->delayed_cmd_count);
4035 ++ /*
4036 ++ * We will account for this when we dequeue from the delayed
4037 ++ * list.
4038 ++ */
4039 ++ atomic_dec_mb(&dev->non_ordered);
4040 ++ }
4041 +
4042 + spin_lock(&dev->delayed_cmd_lock);
4043 + list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
4044 +@@ -2098,6 +2101,12 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
4045 +
4046 + pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
4047 + cmd->t_task_cdb[0], cmd->sam_task_attr);
4048 ++ /*
4049 ++ * We may have no non-ordered cmds when this function started, or we
4050 ++ * could have raced with the last simple/head cmd completing, so kick
4051 ++ * the delayed handler here.
4052 ++ */
4053 ++ schedule_work(&dev->delayed_cmd_work);
4054 + return true;
4055 + }
4056 +
4057 +@@ -2135,29 +2144,48 @@ EXPORT_SYMBOL(target_execute_cmd);
4058 + * Process all commands up to the last received ORDERED task attribute which
4059 + * requires another blocking boundary
4060 + */
4061 +-static void target_restart_delayed_cmds(struct se_device *dev)
4062 ++void target_do_delayed_work(struct work_struct *work)
4063 + {
4064 +- for (;;) {
4065 ++ struct se_device *dev = container_of(work, struct se_device,
4066 ++ delayed_cmd_work);
4067 ++
4068 ++ spin_lock(&dev->delayed_cmd_lock);
4069 ++ while (!dev->ordered_sync_in_progress) {
4070 + struct se_cmd *cmd;
4071 +
4072 +- spin_lock(&dev->delayed_cmd_lock);
4073 +- if (list_empty(&dev->delayed_cmd_list)) {
4074 +- spin_unlock(&dev->delayed_cmd_lock);
4075 ++ if (list_empty(&dev->delayed_cmd_list))
4076 + break;
4077 +- }
4078 +
4079 + cmd = list_entry(dev->delayed_cmd_list.next,
4080 + struct se_cmd, se_delayed_node);
4081 ++
4082 ++ if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
4083 ++ /*
4084 ++ * Check if we started with:
4085 ++ * [ordered] [simple] [ordered]
4086 ++ * and we are now at the last ordered so we have to wait
4087 ++ * for the simple cmd.
4088 ++ */
4089 ++ if (atomic_read(&dev->non_ordered) > 0)
4090 ++ break;
4091 ++
4092 ++ dev->ordered_sync_in_progress = true;
4093 ++ }
4094 ++
4095 + list_del(&cmd->se_delayed_node);
4096 ++ atomic_dec_mb(&dev->delayed_cmd_count);
4097 + spin_unlock(&dev->delayed_cmd_lock);
4098 +
4099 ++ if (cmd->sam_task_attr != TCM_ORDERED_TAG)
4100 ++ atomic_inc_mb(&dev->non_ordered);
4101 ++
4102 + cmd->transport_state |= CMD_T_SENT;
4103 +
4104 + __target_execute_cmd(cmd, true);
4105 +
4106 +- if (cmd->sam_task_attr == TCM_ORDERED_TAG)
4107 +- break;
4108 ++ spin_lock(&dev->delayed_cmd_lock);
4109 + }
4110 ++ spin_unlock(&dev->delayed_cmd_lock);
4111 + }
4112 +
4113 + /*
4114 +@@ -2175,14 +2203,17 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
4115 + goto restart;
4116 +
4117 + if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
4118 +- atomic_dec_mb(&dev->simple_cmds);
4119 ++ atomic_dec_mb(&dev->non_ordered);
4120 + dev->dev_cur_ordered_id++;
4121 + } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
4122 ++ atomic_dec_mb(&dev->non_ordered);
4123 + dev->dev_cur_ordered_id++;
4124 + pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
4125 + dev->dev_cur_ordered_id);
4126 + } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
4127 +- atomic_dec_mb(&dev->dev_ordered_sync);
4128 ++ spin_lock(&dev->delayed_cmd_lock);
4129 ++ dev->ordered_sync_in_progress = false;
4130 ++ spin_unlock(&dev->delayed_cmd_lock);
4131 +
4132 + dev->dev_cur_ordered_id++;
4133 + pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
4134 +@@ -2191,7 +2222,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
4135 + cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
4136 +
4137 + restart:
4138 +- target_restart_delayed_cmds(dev);
4139 ++ if (atomic_read(&dev->delayed_cmd_count) > 0)
4140 ++ schedule_work(&dev->delayed_cmd_work);
4141 + }
4142 +
4143 + static void transport_complete_qf(struct se_cmd *cmd)
4144 +diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
4145 +index bd2d91546e327..0fc473321d3e3 100644
4146 +--- a/drivers/tty/tty_buffer.c
4147 ++++ b/drivers/tty/tty_buffer.c
4148 +@@ -534,6 +534,9 @@ static void flush_to_ldisc(struct work_struct *work)
4149 + if (!count)
4150 + break;
4151 + head->read += count;
4152 ++
4153 ++ if (need_resched())
4154 ++ cond_resched();
4155 + }
4156 +
4157 + mutex_unlock(&buf->lock);
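
flush_to_ldisc() can loop over a large amount of buffered input; the added check yields the CPU between chunks so a flood of data cannot stall other work. A sketch of the idiom (more_data()/process_chunk() are stand-ins for the tty logic; cond_resched() alone would also do, since it re-checks need_resched() internally):

    #include <linux/sched.h>

    static bool more_data(void);
    static void process_chunk(void);

    static void drain_loop(void)
    {
            while (more_data()) {
                    process_chunk();
                    if (need_resched())
                            cond_resched(); /* yield inside a long loop */
            }
    }
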
4158 +diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
4159 +index c86d413226eb9..b875da01c5309 100644
4160 +--- a/drivers/usb/host/max3421-hcd.c
4161 ++++ b/drivers/usb/host/max3421-hcd.c
4162 +@@ -125,8 +125,6 @@ struct max3421_hcd {
4163 +
4164 + struct task_struct *spi_thread;
4165 +
4166 +- struct max3421_hcd *next;
4167 +-
4168 + enum max3421_rh_state rh_state;
4169 + /* lower 16 bits contain port status, upper 16 bits the change mask: */
4170 + u32 port_status;
4171 +@@ -174,8 +172,6 @@ struct max3421_ep {
4172 + u8 retransmit; /* packet needs retransmission */
4173 + };
4174 +
4175 +-static struct max3421_hcd *max3421_hcd_list;
4176 +-
4177 + #define MAX3421_FIFO_SIZE 64
4178 +
4179 + #define MAX3421_SPI_DIR_RD 0 /* read register from MAX3421 */
4180 +@@ -1882,9 +1878,8 @@ max3421_probe(struct spi_device *spi)
4181 + }
4182 + set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
4183 + max3421_hcd = hcd_to_max3421(hcd);
4184 +- max3421_hcd->next = max3421_hcd_list;
4185 +- max3421_hcd_list = max3421_hcd;
4186 + INIT_LIST_HEAD(&max3421_hcd->ep_list);
4187 ++ spi_set_drvdata(spi, max3421_hcd);
4188 +
4189 + max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL);
4190 + if (!max3421_hcd->tx)
4191 +@@ -1934,28 +1929,18 @@ error:
4192 + static int
4193 + max3421_remove(struct spi_device *spi)
4194 + {
4195 +- struct max3421_hcd *max3421_hcd = NULL, **prev;
4196 +- struct usb_hcd *hcd = NULL;
4197 ++ struct max3421_hcd *max3421_hcd;
4198 ++ struct usb_hcd *hcd;
4199 + unsigned long flags;
4200 +
4201 +- for (prev = &max3421_hcd_list; *prev; prev = &(*prev)->next) {
4202 +- max3421_hcd = *prev;
4203 +- hcd = max3421_to_hcd(max3421_hcd);
4204 +- if (hcd->self.controller == &spi->dev)
4205 +- break;
4206 +- }
4207 +- if (!max3421_hcd) {
4208 +- dev_err(&spi->dev, "no MAX3421 HCD found for SPI device %p\n",
4209 +- spi);
4210 +- return -ENODEV;
4211 +- }
4212 ++ max3421_hcd = spi_get_drvdata(spi);
4213 ++ hcd = max3421_to_hcd(max3421_hcd);
4214 +
4215 + usb_remove_hcd(hcd);
4216 +
4217 + spin_lock_irqsave(&max3421_hcd->lock, flags);
4218 +
4219 + kthread_stop(max3421_hcd->spi_thread);
4220 +- *prev = max3421_hcd->next;
4221 +
4222 + spin_unlock_irqrestore(&max3421_hcd->lock, flags);
4223 +
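
The max3421 hunks delete a hand-rolled global HCD list (and the O(n) search in remove) in favor of the standard per-device drvdata slot. A sketch of the pattern, with a placeholder private struct:

    #include <linux/spi/spi.h>
    #include <linux/slab.h>

    struct foo_priv {
            int state;                      /* whatever the driver tracks */
    };

    static int foo_probe(struct spi_device *spi)
    {
            struct foo_priv *priv = devm_kzalloc(&spi->dev, sizeof(*priv),
                                                 GFP_KERNEL);

            if (!priv)
                    return -ENOMEM;
            spi_set_drvdata(spi, priv);     /* stash it on the device */
            return 0;
    }

    static int foo_remove(struct spi_device *spi)
    {
            struct foo_priv *priv = spi_get_drvdata(spi);

            /* tear down priv; no global list walk needed */
            (void)priv;
            return 0;
    }
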
4224 +diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
4225 +index 08ec2ab0d95a5..3f3d62dc06746 100644
4226 +--- a/drivers/usb/host/ohci-tmio.c
4227 ++++ b/drivers/usb/host/ohci-tmio.c
4228 +@@ -199,7 +199,7 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev)
4229 + if (usb_disabled())
4230 + return -ENODEV;
4231 +
4232 +- if (!cell)
4233 ++ if (!cell || !regs || !config || !sram)
4234 + return -EINVAL;
4235 +
4236 + if (irq < 0)
4237 +diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
4238 +index 0c2afed4131bc..038307f661985 100644
4239 +--- a/drivers/usb/musb/tusb6010.c
4240 ++++ b/drivers/usb/musb/tusb6010.c
4241 +@@ -1103,6 +1103,11 @@ static int tusb_musb_init(struct musb *musb)
4242 +
4243 + /* dma address for async dma */
4244 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4245 ++ if (!mem) {
4246 ++ pr_debug("no async dma resource?\n");
4247 ++ ret = -ENODEV;
4248 ++ goto done;
4249 ++ }
4250 + musb->async = mem->start;
4251 +
4252 + /* dma address for sync dma */
4253 +diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
4254 +index 30bfc314b743c..6cb5c8e2c8535 100644
4255 +--- a/drivers/usb/typec/tps6598x.c
4256 ++++ b/drivers/usb/typec/tps6598x.c
4257 +@@ -109,7 +109,7 @@ tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
4258 + u8 data[TPS_MAX_LEN + 1];
4259 + int ret;
4260 +
4261 +- if (WARN_ON(len + 1 > sizeof(data)))
4262 ++ if (len + 1 > sizeof(data))
4263 + return -EINVAL;
4264 +
4265 + if (!tps->i2c_protocol)
4266 +diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
4267 +index 1b451165311c9..40496e9e9b438 100644
4268 +--- a/drivers/video/console/sticon.c
4269 ++++ b/drivers/video/console/sticon.c
4270 +@@ -332,13 +332,13 @@ static u8 sticon_build_attr(struct vc_data *conp, u8 color,
4271 + bool blink, bool underline, bool reverse,
4272 + bool italic)
4273 + {
4274 +- u8 attr = ((color & 0x70) >> 1) | ((color & 7));
4275 ++ u8 fg = color & 7;
4276 ++ u8 bg = (color & 0x70) >> 4;
4277 +
4278 +- if (reverse) {
4279 +- color = ((color >> 3) & 0x7) | ((color & 0x7) << 3);
4280 +- }
4281 +-
4282 +- return attr;
4283 ++ if (reverse)
4284 ++ return (fg << 3) | bg;
4285 ++ else
4286 ++ return (bg << 3) | fg;
4287 + }
4288 +
4289 + static void sticon_invert_region(struct vc_data *conp, u16 *p, int count)
4290 +diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
4291 +index 309516e6a9682..43c89952b7d25 100644
4292 +--- a/fs/btrfs/async-thread.c
4293 ++++ b/fs/btrfs/async-thread.c
4294 +@@ -234,6 +234,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
4295 + ordered_list);
4296 + if (!test_bit(WORK_DONE_BIT, &work->flags))
4297 + break;
4298 ++ /*
4299 ++ * Orders all subsequent loads after reading WORK_DONE_BIT;
4300 ++ * paired with the smp_mb__before_atomic() in btrfs_work_helper(),
4301 ++ * this guarantees that the ordered function will see all
4302 ++ * updates from the ordinary work function.
4303 ++ */
4304 ++ smp_rmb();
4305 +
4306 + /*
4307 + * we are going to call the ordered done function, but
4308 +@@ -317,6 +324,13 @@ static void btrfs_work_helper(struct work_struct *normal_work)
4309 + thresh_exec_hook(wq);
4310 + work->func(work);
4311 + if (need_order) {
4312 ++ /*
4313 ++ * Ensures all memory accesses done in the work function are
4314 ++ * ordered before setting the WORK_DONE_BIT, ensuring the thread
4315 ++ * which is going to execute the ordered work sees them.
4316 ++ * Pairs with the smp_rmb() in run_ordered_work().
4317 ++ */
4318 ++ smp_mb__before_atomic();
4319 + set_bit(WORK_DONE_BIT, &work->flags);
4320 + run_ordered_work(wq, work);
4321 + } else {
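
The btrfs hunks pair a full barrier before set_bit() in the producer with a read barrier after test_bit() in the consumer, so data written by the normal work function is visible to whoever runs the ordered function. The classic publish/consume shape, with illustrative payload/flags:

    #include <linux/atomic.h>
    #include <linux/bitops.h>
    #include <linux/bug.h>

    static unsigned long flags;
    static int payload;

    static void producer(void)
    {
            payload = 42;                   /* A: plain stores           */
            smp_mb__before_atomic();        /* order A before the set    */
            set_bit(0, &flags);             /* B: publish                */
    }

    static void consumer(void)
    {
            if (!test_bit(0, &flags))       /* saw B?                    */
                    return;
            smp_rmb();                      /* order later loads after B */
            WARN_ON(payload != 42);         /* A is guaranteed visible   */
    }
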
4322 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
4323 +index d9e582e40b5b7..e462de9917237 100644
4324 +--- a/fs/btrfs/volumes.c
4325 ++++ b/fs/btrfs/volumes.c
4326 +@@ -14,6 +14,7 @@
4327 + #include <linux/semaphore.h>
4328 + #include <linux/uuid.h>
4329 + #include <linux/list_sort.h>
4330 ++#include <linux/namei.h>
4331 + #include "misc.h"
4332 + #include "ctree.h"
4333 + #include "extent_map.h"
4334 +@@ -1871,18 +1872,22 @@ out:
4335 + /*
4336 + * Function to update ctime/mtime for a given device path.
4337 + * Mainly used for ctime/mtime-based probes like libblkid.
4338 ++ *
4339 ++ * We don't care about errors here, this is just to be kind to userspace.
4340 + */
4341 +-static void update_dev_time(struct block_device *bdev)
4342 ++static void update_dev_time(const char *device_path)
4343 + {
4344 +- struct inode *inode = bdev->bd_inode;
4345 ++ struct path path;
4346 + struct timespec64 now;
4347 ++ int ret;
4348 +
4349 +- /* Shouldn't happen but just in case. */
4350 +- if (!inode)
4351 ++ ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
4352 ++ if (ret)
4353 + return;
4354 +
4355 +- now = current_time(inode);
4356 +- generic_update_time(inode, &now, S_MTIME | S_CTIME);
4357 ++ now = current_time(d_inode(path.dentry));
4358 ++ inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
4359 ++ path_put(&path);
4360 + }
4361 +
4362 + static int btrfs_rm_dev_item(struct btrfs_device *device)
4363 +@@ -2057,7 +2062,7 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
4364 + btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
4365 +
4366 + /* Update ctime/mtime for device path for libblkid */
4367 +- update_dev_time(bdev);
4368 ++ update_dev_time(device_path);
4369 + }
4370 +
4371 + int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
4372 +@@ -2700,7 +2705,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
4373 + btrfs_forget_devices(device_path);
4374 +
4375 + /* Update ctime/mtime for blkid or udev */
4376 +- update_dev_time(bdev);
4377 ++ update_dev_time(device_path);
4378 +
4379 + return ret;
4380 +
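
update_dev_time() now resolves the path itself instead of reaching through bdev->bd_inode, which also works once the block device is gone; failures are ignored because the timestamp bump is only a courtesy to libblkid/udev. The hunk's shape, restated as a standalone sketch (inode_update_time() is the helper this same patch set exports from fs/inode.c):

    #include <linux/namei.h>
    #include <linux/fs.h>

    static void touch_device_node(const char *device_path)
    {
            struct path path;
            struct timespec64 now;

            if (kern_path(device_path, LOOKUP_FOLLOW, &path))
                    return;                 /* best effort only */
            now = current_time(d_inode(path.dentry));
            inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
            path_put(&path);
    }
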
4381 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
4382 +index 2d7799bd30b10..bc488a7d01903 100644
4383 +--- a/fs/f2fs/f2fs.h
4384 ++++ b/fs/f2fs/f2fs.h
4385 +@@ -3908,8 +3908,7 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
4386 +
4387 + if (!f2fs_compressed_file(inode))
4388 + return true;
4389 +- if (S_ISREG(inode->i_mode) &&
4390 +- (get_dirty_pages(inode) || atomic_read(&fi->i_compr_blocks)))
4391 ++ if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
4392 + return false;
4393 +
4394 + fi->i_flags &= ~F2FS_COMPR_FL;
4395 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
4396 +index de543168b3708..b7287b722e9e1 100644
4397 +--- a/fs/f2fs/super.c
4398 ++++ b/fs/f2fs/super.c
4399 +@@ -1020,7 +1020,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
4400 + /* Do not pass down write hints if the number of active logs is
4401 + * less than NR_CURSEG_PERSIST_TYPE.
4402 + */
4403 +- if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
4404 ++ if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE)
4405 + F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
4406 + return 0;
4407 + }
4408 +@@ -3081,7 +3081,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
4409 + NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
4410 + f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
4411 + cp_payload, nat_bits_blocks);
4412 +- return -EFSCORRUPTED;
4413 ++ return 1;
4414 + }
4415 +
4416 + if (unlikely(f2fs_cp_error(sbi))) {
4417 +diff --git a/fs/inode.c b/fs/inode.c
4418 +index 5eea9912a0b9d..638d5d5bf42df 100644
4419 +--- a/fs/inode.c
4420 ++++ b/fs/inode.c
4421 +@@ -1772,12 +1772,13 @@ EXPORT_SYMBOL(generic_update_time);
4422 + * This does the actual work of updating an inodes time or version. Must have
4423 + * had called mnt_want_write() before calling this.
4424 + */
4425 +-static int update_time(struct inode *inode, struct timespec64 *time, int flags)
4426 ++int inode_update_time(struct inode *inode, struct timespec64 *time, int flags)
4427 + {
4428 + if (inode->i_op->update_time)
4429 + return inode->i_op->update_time(inode, time, flags);
4430 + return generic_update_time(inode, time, flags);
4431 + }
4432 ++EXPORT_SYMBOL(inode_update_time);
4433 +
4434 + /**
4435 + * touch_atime - update the access time
4436 +@@ -1847,7 +1848,7 @@ void touch_atime(const struct path *path)
4437 + * of the fs read only, e.g. subvolumes in Btrfs.
4438 + */
4439 + now = current_time(inode);
4440 +- update_time(inode, &now, S_ATIME);
4441 ++ inode_update_time(inode, &now, S_ATIME);
4442 + __mnt_drop_write(mnt);
4443 + skip_update:
4444 + sb_end_write(inode->i_sb);
4445 +@@ -1991,7 +1992,7 @@ int file_update_time(struct file *file)
4446 + if (__mnt_want_write_file(file))
4447 + return 0;
4448 +
4449 +- ret = update_time(inode, &now, sync_it);
4450 ++ ret = inode_update_time(inode, &now, sync_it);
4451 + __mnt_drop_write_file(file);
4452 +
4453 + return ret;
4454 +diff --git a/fs/udf/dir.c b/fs/udf/dir.c
4455 +index c19dba45aa209..d0f92a52e3bab 100644
4456 +--- a/fs/udf/dir.c
4457 ++++ b/fs/udf/dir.c
4458 +@@ -31,6 +31,7 @@
4459 + #include <linux/mm.h>
4460 + #include <linux/slab.h>
4461 + #include <linux/bio.h>
4462 ++#include <linux/iversion.h>
4463 +
4464 + #include "udf_i.h"
4465 + #include "udf_sb.h"
4466 +@@ -44,7 +45,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
4467 + struct fileIdentDesc *fi = NULL;
4468 + struct fileIdentDesc cfi;
4469 + udf_pblk_t block, iblock;
4470 +- loff_t nf_pos;
4471 ++ loff_t nf_pos, emit_pos = 0;
4472 + int flen;
4473 + unsigned char *fname = NULL, *copy_name = NULL;
4474 + unsigned char *nameptr;
4475 +@@ -58,6 +59,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
4476 + int i, num, ret = 0;
4477 + struct extent_position epos = { NULL, 0, {0, 0} };
4478 + struct super_block *sb = dir->i_sb;
4479 ++ bool pos_valid = false;
4480 +
4481 + if (ctx->pos == 0) {
4482 + if (!dir_emit_dot(file, ctx))
4483 +@@ -68,6 +70,21 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
4484 + if (nf_pos >= size)
4485 + goto out;
4486 +
4487 ++ /*
4488 ++ * Something changed since last readdir (either lseek was called or dir
4489 ++ * changed)? We need to verify the position correctly points at the
4490 ++ * beginning of some dir entry so that the directory parsing code does
4491 ++ * not get confused. Since UDF does not have any reliable way of
4492 ++ * identifying beginning of dir entry (names are under user control),
4493 ++ * we need to scan the directory from the beginning.
4494 ++ */
4495 ++ if (!inode_eq_iversion(dir, file->f_version)) {
4496 ++ emit_pos = nf_pos;
4497 ++ nf_pos = 0;
4498 ++ } else {
4499 ++ pos_valid = true;
4500 ++ }
4501 ++
4502 + fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
4503 + if (!fname) {
4504 + ret = -ENOMEM;
4505 +@@ -123,13 +140,21 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
4506 +
4507 + while (nf_pos < size) {
4508 + struct kernel_lb_addr tloc;
4509 ++ loff_t cur_pos = nf_pos;
4510 +
4511 +- ctx->pos = (nf_pos >> 2) + 1;
4512 ++ /* Update file position only if we got past the current one */
4513 ++ if (nf_pos >= emit_pos) {
4514 ++ ctx->pos = (nf_pos >> 2) + 1;
4515 ++ pos_valid = true;
4516 ++ }
4517 +
4518 + fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
4519 + &elen, &offset);
4520 + if (!fi)
4521 + goto out;
4522 ++ /* Still not at offset where user asked us to read from? */
4523 ++ if (cur_pos < emit_pos)
4524 ++ continue;
4525 +
4526 + liu = le16_to_cpu(cfi.lengthOfImpUse);
4527 + lfi = cfi.lengthFileIdent;
4528 +@@ -187,8 +212,11 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
4529 + } /* end while */
4530 +
4531 + ctx->pos = (nf_pos >> 2) + 1;
4532 ++ pos_valid = true;
4533 +
4534 + out:
4535 ++ if (pos_valid)
4536 ++ file->f_version = inode_query_iversion(dir);
4537 + if (fibh.sbh != fibh.ebh)
4538 + brelse(fibh.ebh);
4539 + brelse(fibh.sbh);
4540 +diff --git a/fs/udf/namei.c b/fs/udf/namei.c
4541 +index f4a72ff8cf959..9f3aced46c68f 100644
4542 +--- a/fs/udf/namei.c
4543 ++++ b/fs/udf/namei.c
4544 +@@ -30,6 +30,7 @@
4545 + #include <linux/sched.h>
4546 + #include <linux/crc-itu-t.h>
4547 + #include <linux/exportfs.h>
4548 ++#include <linux/iversion.h>
4549 +
4550 + static inline int udf_match(int len1, const unsigned char *name1, int len2,
4551 + const unsigned char *name2)
4552 +@@ -135,6 +136,8 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
4553 + mark_buffer_dirty_inode(fibh->ebh, inode);
4554 + mark_buffer_dirty_inode(fibh->sbh, inode);
4555 + }
4556 ++ inode_inc_iversion(inode);
4557 ++
4558 + return 0;
4559 + }
4560 +
4561 +diff --git a/fs/udf/super.c b/fs/udf/super.c
4562 +index 5d2b820ef303a..3448098e54768 100644
4563 +--- a/fs/udf/super.c
4564 ++++ b/fs/udf/super.c
4565 +@@ -57,6 +57,7 @@
4566 + #include <linux/crc-itu-t.h>
4567 + #include <linux/log2.h>
4568 + #include <asm/byteorder.h>
4569 ++#include <linux/iversion.h>
4570 +
4571 + #include "udf_sb.h"
4572 + #include "udf_i.h"
4573 +@@ -149,6 +150,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
4574 + init_rwsem(&ei->i_data_sem);
4575 + ei->cached_extent.lstart = -1;
4576 + spin_lock_init(&ei->i_extent_cache_lock);
4577 ++ inode_set_iversion(&ei->vfs_inode, 1);
4578 +
4579 + return &ei->vfs_inode;
4580 + }
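
The UDF changes bump the directory's i_version on every entry write and make readdir compare it against the version cached in file->f_version; on mismatch (directory changed, or llseek moved f_pos somewhere arbitrary) the directory is rescanned from the start, because a raw UDF offset may point into the middle of an entry. A simplified sketch of the validity check:

    #include <linux/iversion.h>
    #include <linux/fs.h>

    /* Returns true if the cached position is still trustworthy;
     * otherwise refreshes the cached version and asks for a rescan.
     */
    static bool dir_pos_still_valid(struct inode *dir, struct file *file)
    {
            if (inode_eq_iversion(dir, file->f_version))
                    return true;
            file->f_version = inode_query_iversion(dir);
            return false;                   /* caller restarts from 0 */
    }
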
4581 +diff --git a/include/linux/fs.h b/include/linux/fs.h
4582 +index 43bb6a51e42d9..42d246a942283 100644
4583 +--- a/include/linux/fs.h
4584 ++++ b/include/linux/fs.h
4585 +@@ -2214,6 +2214,8 @@ enum file_time_flags {
4586 +
4587 + extern bool atime_needs_update(const struct path *, struct inode *);
4588 + extern void touch_atime(const struct path *);
4589 ++int inode_update_time(struct inode *inode, struct timespec64 *time, int flags);
4590 ++
4591 + static inline void file_accessed(struct file *file)
4592 + {
4593 + if (!(file->f_flags & O_NOATIME))
4594 +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
4595 +index c095e713cf08f..ce14fb2772b5b 100644
4596 +--- a/include/linux/perf_event.h
4597 ++++ b/include/linux/perf_event.h
4598 +@@ -607,7 +607,6 @@ struct swevent_hlist {
4599 + #define PERF_ATTACH_TASK_DATA 0x08
4600 + #define PERF_ATTACH_ITRACE 0x10
4601 + #define PERF_ATTACH_SCHED_CB 0x20
4602 +-#define PERF_ATTACH_CHILD 0x40
4603 +
4604 + struct perf_cgroup;
4605 + struct perf_buffer;
4606 +diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
4607 +index 9837fb011f2fb..989aa30c598dc 100644
4608 +--- a/include/linux/platform_data/ti-sysc.h
4609 ++++ b/include/linux/platform_data/ti-sysc.h
4610 +@@ -50,6 +50,7 @@ struct sysc_regbits {
4611 + s8 emufree_shift;
4612 + };
4613 +
4614 ++#define SYSC_QUIRK_REINIT_ON_CTX_LOST BIT(28)
4615 + #define SYSC_QUIRK_REINIT_ON_RESUME BIT(27)
4616 + #define SYSC_QUIRK_GPMC_DEBUG BIT(26)
4617 + #define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25)
4618 +diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
4619 +index d321fe5ad1a14..c57b79301a75e 100644
4620 +--- a/include/linux/trace_events.h
4621 ++++ b/include/linux/trace_events.h
4622 +@@ -571,7 +571,7 @@ struct trace_event_file {
4623 +
4624 + #define PERF_MAX_TRACE_SIZE 2048
4625 +
4626 +-#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
4627 ++#define MAX_FILTER_STR_VAL 256U /* Should handle KSYM_SYMBOL_LEN */
4628 +
4629 + enum event_trigger_type {
4630 + ETT_NONE = (0),
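The U suffix matters because this constant now participates in min()-style comparisons against unsigned field sizes (see the trace_events_hist.c hunks below), and the kernel's min() macro type-checks its arguments. A reduced illustration of the constraint; demo_clamp() is mine:

#include <linux/minmax.h>

static unsigned int demo_clamp(unsigned int field_size)
{
        /*
         * min(field_size, 256) mixes unsigned int with int and trips
         * the macro's type check; the unsigned literal matches.
         */
        return min(field_size, 256U);
}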
4631 +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
4632 +index b465f8f3e554f..04e87f4b9417c 100644
4633 +--- a/include/linux/virtio_net.h
4634 ++++ b/include/linux/virtio_net.h
4635 +@@ -120,10 +120,15 @@ retry:
4636 +
4637 + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
4638 + u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
4639 ++ unsigned int nh_off = p_off;
4640 + struct skb_shared_info *shinfo = skb_shinfo(skb);
4641 +
4642 ++ /* UFO may not include transport header in gso_size. */
4643 ++ if (gso_type & SKB_GSO_UDP)
4644 ++ nh_off -= thlen;
4645 ++
4646 + /* Too small packets are not really GSO ones. */
4647 +- if (skb->len - p_off > gso_size) {
4648 ++ if (skb->len - nh_off > gso_size) {
4649 + shinfo->gso_size = gso_size;
4650 + shinfo->gso_type = gso_type;
4651 +
4652 +diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
4653 +index 33979017b7824..004e49f748419 100644
4654 +--- a/include/net/nfc/nci_core.h
4655 ++++ b/include/net/nfc/nci_core.h
4656 +@@ -30,6 +30,7 @@ enum nci_flag {
4657 + NCI_UP,
4658 + NCI_DATA_EXCHANGE,
4659 + NCI_DATA_EXCHANGE_TO,
4660 ++ NCI_UNREG,
4661 + };
4662 +
4663 + /* NCI device states */
4664 +diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
4665 +index 2758d9df71ee9..c2a79aeee113c 100644
4666 +--- a/include/rdma/rdma_netlink.h
4667 ++++ b/include/rdma/rdma_netlink.h
4668 +@@ -30,7 +30,7 @@ enum rdma_nl_flags {
4669 + * constant as well and the compiler checks they are the same.
4670 + */
4671 + #define MODULE_ALIAS_RDMA_NETLINK(_index, _val) \
4672 +- static inline void __chk_##_index(void) \
4673 ++ static inline void __maybe_unused __chk_##_index(void) \
4674 + { \
4675 + BUILD_BUG_ON(_index != _val); \
4676 + } \
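__chk_##_index() is never called; it exists only so BUILD_BUG_ON() runs at compile time, which is why W=1 builds flagged it as an unused function until __maybe_unused was added. The idiom in isolation, with a made-up constant:

#include <linux/build_bug.h>
#include <linux/compiler.h>

#define DEMO_INDEX 4

/* never called: the body is a compile-time assertion only */
static inline void __maybe_unused demo_chk(void)
{
        BUILD_BUG_ON(DEMO_INDEX != 4);
}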
4677 +diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
4678 +index 7abf74c1c4740..75048ea178f62 100644
4679 +--- a/include/sound/hdaudio_ext.h
4680 ++++ b/include/sound/hdaudio_ext.h
4681 +@@ -88,6 +88,8 @@ struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus,
4682 + struct snd_pcm_substream *substream,
4683 + int type);
4684 + void snd_hdac_ext_stream_release(struct hdac_ext_stream *azx_dev, int type);
4685 ++void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus,
4686 ++ struct hdac_ext_stream *azx_dev, bool decouple);
4687 + void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
4688 + struct hdac_ext_stream *azx_dev, bool decouple);
4689 + void snd_hdac_ext_stop_streams(struct hdac_bus *bus);
4690 +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
4691 +index 549947d407cfd..18a5dcd275f88 100644
4692 +--- a/include/target/target_core_base.h
4693 ++++ b/include/target/target_core_base.h
4694 +@@ -788,8 +788,9 @@ struct se_device {
4695 + atomic_long_t read_bytes;
4696 + atomic_long_t write_bytes;
4697 + /* Active commands on this virtual SE device */
4698 +- atomic_t simple_cmds;
4699 +- atomic_t dev_ordered_sync;
4700 ++ atomic_t non_ordered;
4701 ++ bool ordered_sync_in_progress;
4702 ++ atomic_t delayed_cmd_count;
4703 + atomic_t dev_qf_count;
4704 + u32 export_count;
4705 + spinlock_t delayed_cmd_lock;
4706 +@@ -811,6 +812,7 @@ struct se_device {
4707 + struct list_head dev_sep_list;
4708 + struct list_head dev_tmr_list;
4709 + struct work_struct qf_work_queue;
4710 ++ struct work_struct delayed_cmd_work;
4711 + struct list_head delayed_cmd_list;
4712 + struct list_head state_list;
4713 + struct list_head qf_cmd_list;
4714 +diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
4715 +index 56b113e3cd6aa..df293bc7f03b8 100644
4716 +--- a/include/trace/events/f2fs.h
4717 ++++ b/include/trace/events/f2fs.h
4718 +@@ -807,20 +807,20 @@ TRACE_EVENT(f2fs_lookup_start,
4719 + TP_STRUCT__entry(
4720 + __field(dev_t, dev)
4721 + __field(ino_t, ino)
4722 +- __field(const char *, name)
4723 ++ __string(name, dentry->d_name.name)
4724 + __field(unsigned int, flags)
4725 + ),
4726 +
4727 + TP_fast_assign(
4728 + __entry->dev = dir->i_sb->s_dev;
4729 + __entry->ino = dir->i_ino;
4730 +- __entry->name = dentry->d_name.name;
4731 ++ __assign_str(name, dentry->d_name.name);
4732 + __entry->flags = flags;
4733 + ),
4734 +
4735 + TP_printk("dev = (%d,%d), pino = %lu, name:%s, flags:%u",
4736 + show_dev_ino(__entry),
4737 +- __entry->name,
4738 ++ __get_str(name),
4739 + __entry->flags)
4740 + );
4741 +
4742 +@@ -834,7 +834,7 @@ TRACE_EVENT(f2fs_lookup_end,
4743 + TP_STRUCT__entry(
4744 + __field(dev_t, dev)
4745 + __field(ino_t, ino)
4746 +- __field(const char *, name)
4747 ++ __string(name, dentry->d_name.name)
4748 + __field(nid_t, cino)
4749 + __field(int, err)
4750 + ),
4751 +@@ -842,14 +842,14 @@ TRACE_EVENT(f2fs_lookup_end,
4752 + TP_fast_assign(
4753 + __entry->dev = dir->i_sb->s_dev;
4754 + __entry->ino = dir->i_ino;
4755 +- __entry->name = dentry->d_name.name;
4756 ++ __assign_str(name, dentry->d_name.name);
4757 + __entry->cino = ino;
4758 + __entry->err = err;
4759 + ),
4760 +
4761 + TP_printk("dev = (%d,%d), pino = %lu, name:%s, ino:%u, err:%d",
4762 + show_dev_ino(__entry),
4763 +- __entry->name,
4764 ++ __get_str(name),
4765 + __entry->cino,
4766 + __entry->err)
4767 + );
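A __field(const char *, name) entry records only the pointer in the ring buffer, and the dentry name may be freed before the event is read, so the printed string can be garbage. __string()/__assign_str()/__get_str() reserve space inside the event and copy the characters instead. The idiom reduced to a sketch; the event name and argument are illustrative:

#include <linux/tracepoint.h>

TRACE_EVENT(demo_lookup,
        TP_PROTO(const char *name),
        TP_ARGS(name),
        TP_STRUCT__entry(
                __string(name, name)            /* reserves strlen+1 bytes */
        ),
        TP_fast_assign(
                __assign_str(name, name);       /* copies the characters */
        ),
        TP_printk("name:%s", __get_str(name))
);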
4768 +diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
4769 +index cfcb10b754838..62db78b9c1a0a 100644
4770 +--- a/include/uapi/linux/tcp.h
4771 ++++ b/include/uapi/linux/tcp.h
4772 +@@ -349,5 +349,7 @@ struct tcp_zerocopy_receive {
4773 + __u32 recv_skip_hint; /* out: amount of bytes to skip */
4774 + __u32 inq; /* out: amount of bytes in read queue */
4775 + __s32 err; /* out: socket error */
4776 ++ __u64 copybuf_address; /* in: copybuf address (small reads) */
4777 ++ __s32 copybuf_len; /* in/out: copybuf bytes avail/used or error */
4778 + };
4779 + #endif /* _UAPI_LINUX_TCP_H */
4780 +diff --git a/ipc/util.c b/ipc/util.c
4781 +index cfa0045e748d5..bbb5190af6d9f 100644
4782 +--- a/ipc/util.c
4783 ++++ b/ipc/util.c
4784 +@@ -446,8 +446,8 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
4785 + static void ipc_kht_remove(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
4786 + {
4787 + if (ipcp->key != IPC_PRIVATE)
4788 +- rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode,
4789 +- ipc_kht_params);
4790 ++ WARN_ON_ONCE(rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode,
4791 ++ ipc_kht_params));
4792 + }
4793 +
4794 + /**
4795 +@@ -462,7 +462,7 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
4796 + {
4797 + int idx = ipcid_to_idx(ipcp->id);
4798 +
4799 +- idr_remove(&ids->ipcs_idr, idx);
4800 ++ WARN_ON_ONCE(idr_remove(&ids->ipcs_idr, idx) != ipcp);
4801 + ipc_kht_remove(ids, ipcp);
4802 + ids->in_use--;
4803 + ipcp->deleted = true;
4804 +diff --git a/kernel/events/core.c b/kernel/events/core.c
4805 +index 908417736f4e9..639b99a318db1 100644
4806 +--- a/kernel/events/core.c
4807 ++++ b/kernel/events/core.c
4808 +@@ -2209,26 +2209,6 @@ out:
4809 + perf_event__header_size(leader);
4810 + }
4811 +
4812 +-static void sync_child_event(struct perf_event *child_event);
4813 +-
4814 +-static void perf_child_detach(struct perf_event *event)
4815 +-{
4816 +- struct perf_event *parent_event = event->parent;
4817 +-
4818 +- if (!(event->attach_state & PERF_ATTACH_CHILD))
4819 +- return;
4820 +-
4821 +- event->attach_state &= ~PERF_ATTACH_CHILD;
4822 +-
4823 +- if (WARN_ON_ONCE(!parent_event))
4824 +- return;
4825 +-
4826 +- lockdep_assert_held(&parent_event->child_mutex);
4827 +-
4828 +- sync_child_event(event);
4829 +- list_del_init(&event->child_list);
4830 +-}
4831 +-
4832 + static bool is_orphaned_event(struct perf_event *event)
4833 + {
4834 + return event->state == PERF_EVENT_STATE_DEAD;
4835 +@@ -2336,7 +2316,6 @@ group_sched_out(struct perf_event *group_event,
4836 + }
4837 +
4838 + #define DETACH_GROUP 0x01UL
4839 +-#define DETACH_CHILD 0x02UL
4840 +
4841 + /*
4842 + * Cross CPU call to remove a performance event
4843 +@@ -2360,8 +2339,6 @@ __perf_remove_from_context(struct perf_event *event,
4844 + event_sched_out(event, cpuctx, ctx);
4845 + if (flags & DETACH_GROUP)
4846 + perf_group_detach(event);
4847 +- if (flags & DETACH_CHILD)
4848 +- perf_child_detach(event);
4849 + list_del_event(event, ctx);
4850 +
4851 + if (!ctx->nr_events && ctx->is_active) {
4852 +@@ -2390,21 +2367,25 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
4853 +
4854 + lockdep_assert_held(&ctx->mutex);
4855 +
4856 ++ event_function_call(event, __perf_remove_from_context, (void *)flags);
4857 ++
4858 + /*
4859 +- * Because of perf_event_exit_task(), perf_remove_from_context() ought
4860 +- * to work in the face of TASK_TOMBSTONE, unlike every other
4861 +- * event_function_call() user.
4862 ++ * The above event_function_call() can NO-OP when it hits
4863 ++ * TASK_TOMBSTONE. In that case we must already have been detached
4864 ++ * from the context (by perf_event_exit_event()) but the grouping
4865 ++ * might still be intact.
4866 + */
4867 +- raw_spin_lock_irq(&ctx->lock);
4868 +- if (!ctx->is_active) {
4869 +- __perf_remove_from_context(event, __get_cpu_context(ctx),
4870 +- ctx, (void *)flags);
4871 ++ WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
4872 ++ if ((flags & DETACH_GROUP) &&
4873 ++ (event->attach_state & PERF_ATTACH_GROUP)) {
4874 ++ /*
4875 ++ * Since in that case we cannot possibly be scheduled, simply
4876 ++ * detach now.
4877 ++ */
4878 ++ raw_spin_lock_irq(&ctx->lock);
4879 ++ perf_group_detach(event);
4880 + raw_spin_unlock_irq(&ctx->lock);
4881 +- return;
4882 + }
4883 +- raw_spin_unlock_irq(&ctx->lock);
4884 +-
4885 +- event_function_call(event, __perf_remove_from_context, (void *)flags);
4886 + }
4887 +
4888 + /*
4889 +@@ -12296,17 +12277,14 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
4890 + }
4891 + EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
4892 +
4893 +-static void sync_child_event(struct perf_event *child_event)
4894 ++static void sync_child_event(struct perf_event *child_event,
4895 ++ struct task_struct *child)
4896 + {
4897 + struct perf_event *parent_event = child_event->parent;
4898 + u64 child_val;
4899 +
4900 +- if (child_event->attr.inherit_stat) {
4901 +- struct task_struct *task = child_event->ctx->task;
4902 +-
4903 +- if (task && task != TASK_TOMBSTONE)
4904 +- perf_event_read_event(child_event, task);
4905 +- }
4906 ++ if (child_event->attr.inherit_stat)
4907 ++ perf_event_read_event(child_event, child);
4908 +
4909 + child_val = perf_event_count(child_event);
4910 +
4911 +@@ -12321,53 +12299,60 @@ static void sync_child_event(struct perf_event *child_event)
4912 + }
4913 +
4914 + static void
4915 +-perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
4916 ++perf_event_exit_event(struct perf_event *child_event,
4917 ++ struct perf_event_context *child_ctx,
4918 ++ struct task_struct *child)
4919 + {
4920 +- struct perf_event *parent_event = event->parent;
4921 +- unsigned long detach_flags = 0;
4922 +-
4923 +- if (parent_event) {
4924 +- /*
4925 +- * Do not destroy the 'original' grouping; because of the
4926 +- * context switch optimization the original events could've
4927 +- * ended up in a random child task.
4928 +- *
4929 +- * If we were to destroy the original group, all group related
4930 +- * operations would cease to function properly after this
4931 +- * random child dies.
4932 +- *
4933 +- * Do destroy all inherited groups, we don't care about those
4934 +- * and being thorough is better.
4935 +- */
4936 +- detach_flags = DETACH_GROUP | DETACH_CHILD;
4937 +- mutex_lock(&parent_event->child_mutex);
4938 +- }
4939 ++ struct perf_event *parent_event = child_event->parent;
4940 +
4941 +- perf_remove_from_context(event, detach_flags);
4942 ++ /*
4943 ++ * Do not destroy the 'original' grouping; because of the context
4944 ++ * switch optimization the original events could've ended up in a
4945 ++ * random child task.
4946 ++ *
4947 ++ * If we were to destroy the original group, all group related
4948 ++ * operations would cease to function properly after this random
4949 ++ * child dies.
4950 ++ *
4951 ++ * Do destroy all inherited groups, we don't care about those
4952 ++ * and being thorough is better.
4953 ++ */
4954 ++ raw_spin_lock_irq(&child_ctx->lock);
4955 ++ WARN_ON_ONCE(child_ctx->is_active);
4956 +
4957 +- raw_spin_lock_irq(&ctx->lock);
4958 +- if (event->state > PERF_EVENT_STATE_EXIT)
4959 +- perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
4960 +- raw_spin_unlock_irq(&ctx->lock);
4961 ++ if (parent_event)
4962 ++ perf_group_detach(child_event);
4963 ++ list_del_event(child_event, child_ctx);
4964 ++ perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
4965 ++ raw_spin_unlock_irq(&child_ctx->lock);
4966 +
4967 + /*
4968 +- * Child events can be freed.
4969 ++ * Parent events are governed by their filedesc, retain them.
4970 + */
4971 +- if (parent_event) {
4972 +- mutex_unlock(&parent_event->child_mutex);
4973 +- /*
4974 +- * Kick perf_poll() for is_event_hup();
4975 +- */
4976 +- perf_event_wakeup(parent_event);
4977 +- free_event(event);
4978 +- put_event(parent_event);
4979 ++ if (!parent_event) {
4980 ++ perf_event_wakeup(child_event);
4981 + return;
4982 + }
4983 ++ /*
4984 ++ * Child events can be cleaned up.
4985 ++ */
4986 ++
4987 ++ sync_child_event(child_event, child);
4988 +
4989 + /*
4990 +- * Parent events are governed by their filedesc, retain them.
4991 ++ * Remove this event from the parent's list
4992 ++ */
4993 ++ WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4994 ++ mutex_lock(&parent_event->child_mutex);
4995 ++ list_del_init(&child_event->child_list);
4996 ++ mutex_unlock(&parent_event->child_mutex);
4997 ++
4998 ++ /*
4999 ++ * Kick perf_poll() for is_event_hup().
5000 + */
5001 +- perf_event_wakeup(event);
5002 ++ perf_event_wakeup(parent_event);
5003 ++ free_event(child_event);
5004 ++ put_event(parent_event);
5005 + }
5006 +
5007 + static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
5008 +@@ -12424,7 +12409,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
5009 + perf_event_task(child, child_ctx, 0);
5010 +
5011 + list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
5012 +- perf_event_exit_event(child_event, child_ctx);
5013 ++ perf_event_exit_event(child_event, child_ctx, child);
5014 +
5015 + mutex_unlock(&child_ctx->mutex);
5016 +
5017 +@@ -12684,7 +12669,6 @@ inherit_event(struct perf_event *parent_event,
5018 + */
5019 + raw_spin_lock_irqsave(&child_ctx->lock, flags);
5020 + add_event_to_ctx(child_event, child_ctx);
5021 +- child_event->attach_state |= PERF_ATTACH_CHILD;
5022 + raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
5023 +
5024 + /*
5025 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
5026 +index bc8ff11e60242..e456cce772a3a 100644
5027 +--- a/kernel/sched/core.c
5028 ++++ b/kernel/sched/core.c
5029 +@@ -2650,6 +2650,9 @@ out:
5030 +
5031 + bool cpus_share_cache(int this_cpu, int that_cpu)
5032 + {
5033 ++ if (this_cpu == that_cpu)
5034 ++ return true;
5035 ++
5036 + return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
5037 + }
5038 +
5039 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
5040 +index 1b7f90e00eb05..c2ec467a5766b 100644
5041 +--- a/kernel/trace/trace_events_hist.c
5042 ++++ b/kernel/trace/trace_events_hist.c
5043 +@@ -1684,9 +1684,10 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
5044 + if (!hist_field->type)
5045 + goto free;
5046 +
5047 +- if (field->filter_type == FILTER_STATIC_STRING)
5048 ++ if (field->filter_type == FILTER_STATIC_STRING) {
5049 + hist_field->fn = hist_field_string;
5050 +- else if (field->filter_type == FILTER_DYN_STRING)
5051 ++ hist_field->size = field->size;
5052 ++ } else if (field->filter_type == FILTER_DYN_STRING)
5053 + hist_field->fn = hist_field_dynstring;
5054 + else
5055 + hist_field->fn = hist_field_pstring;
5056 +@@ -2623,8 +2624,10 @@ static inline void __update_field_vars(struct tracing_map_elt *elt,
5057 + if (val->flags & HIST_FIELD_FL_STRING) {
5058 + char *str = elt_data->field_var_str[j++];
5059 + char *val_str = (char *)(uintptr_t)var_val;
5060 ++ unsigned int size;
5061 +
5062 +- strscpy(str, val_str, STR_VAR_LEN_MAX);
5063 ++ size = min(val->size, STR_VAR_LEN_MAX);
5064 ++ strscpy(str, val_str, size);
5065 + var_val = (u64)(uintptr_t)str;
5066 + }
5067 + tracing_map_set_var(elt, var_idx, var_val);
5068 +@@ -4464,6 +4467,7 @@ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
5069 + if (hist_field->flags & HIST_FIELD_FL_STRING) {
5070 + unsigned int str_start, var_str_idx, idx;
5071 + char *str, *val_str;
5072 ++ unsigned int size;
5073 +
5074 + str_start = hist_data->n_field_var_str +
5075 + hist_data->n_save_var_str;
5076 +@@ -4472,7 +4476,9 @@ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
5077 +
5078 + str = elt_data->field_var_str[idx];
5079 + val_str = (char *)(uintptr_t)hist_val;
5080 +- strscpy(str, val_str, STR_VAR_LEN_MAX);
5081 ++
5082 ++ size = min(hist_field->size, STR_VAR_LEN_MAX);
5083 ++ strscpy(str, val_str, size);
5084 +
5085 + hist_val = (u64)(uintptr_t)str;
5086 + }
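Both histogram hunks fix the same overread: the source is a fixed-size char array of field->size bytes that need not be NUL-terminated, while strscpy() was given only the destination capacity STR_VAR_LEN_MAX and would scan the source for a NUL. Clamping to the field size bounds the read. The guarded copy, reduced; demo_copy_field() is mine:

#include <linux/minmax.h>
#include <linux/string.h>

static void demo_copy_field(char *dst, const char *src,
                            unsigned int field_size, unsigned int dst_cap)
{
        /* never scan past the (possibly unterminated) source field */
        unsigned int size = min(field_size, dst_cap);

        strscpy(dst, src, size);
}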
5087 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
5088 +index 6e92ab0ae070f..fce705fc2848a 100644
5089 +--- a/mm/hugetlb.c
5090 ++++ b/mm/hugetlb.c
5091 +@@ -3913,6 +3913,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5092 + struct hstate *h = hstate_vma(vma);
5093 + unsigned long sz = huge_page_size(h);
5094 + struct mmu_notifier_range range;
5095 ++ bool force_flush = false;
5096 +
5097 + WARN_ON(!is_vm_hugetlb_page(vma));
5098 + BUG_ON(start & ~huge_page_mask(h));
5099 +@@ -3941,10 +3942,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5100 + ptl = huge_pte_lock(h, mm, ptep);
5101 + if (huge_pmd_unshare(mm, vma, &address, ptep)) {
5102 + spin_unlock(ptl);
5103 +- /*
5104 +- * We just unmapped a page of PMDs by clearing a PUD.
5105 +- * The caller's TLB flush range should cover this area.
5106 +- */
5107 ++ tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5108 ++ force_flush = true;
5109 + continue;
5110 + }
5111 +
5112 +@@ -4001,6 +4000,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5113 + }
5114 + mmu_notifier_invalidate_range_end(&range);
5115 + tlb_end_vma(tlb, vma);
5116 ++
5117 ++ /*
5118 ++ * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5119 ++ * could defer the flush until now, since by holding i_mmap_rwsem we
5120 ++ * guaranteed that the last reference would not be dropped. But we must
5121 ++ * do the flushing before we return, as otherwise i_mmap_rwsem will be
5122 ++ * dropped and the last reference to the shared PMDs page might be
5123 ++ * dropped as well.
5124 ++ *
5125 ++ * In theory we could defer the freeing of the PMD pages as well, but
5126 ++ * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5127 ++ * detect sharing, so we cannot defer the release of the page either.
5128 ++ * Instead, do the flush now.
5129 ++ */
5130 ++ if (force_flush)
5131 ++ tlb_flush_mmu_tlbonly(tlb);
5132 + }
5133 +
5134 + void __unmap_hugepage_range_final(struct mmu_gather *tlb,
5135 +diff --git a/mm/slab.h b/mm/slab.h
5136 +index 944e8b2040ae2..6952e10cf33b4 100644
5137 +--- a/mm/slab.h
5138 ++++ b/mm/slab.h
5139 +@@ -147,7 +147,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
5140 + #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
5141 + SLAB_TEMPORARY | SLAB_ACCOUNT)
5142 + #else
5143 +-#define SLAB_CACHE_FLAGS (0)
5144 ++#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
5145 + #endif
5146 +
5147 + /* Common flags available with current configuration */
5148 +diff --git a/net/core/sock.c b/net/core/sock.c
5149 +index f9c835167391d..6d9af4ef93d7a 100644
5150 +--- a/net/core/sock.c
5151 ++++ b/net/core/sock.c
5152 +@@ -1883,123 +1883,120 @@ static void sk_init_common(struct sock *sk)
5153 + struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
5154 + {
5155 + struct proto *prot = READ_ONCE(sk->sk_prot);
5156 +- struct sock *newsk;
5157 ++ struct sk_filter *filter;
5158 + bool is_charged = true;
5159 ++ struct sock *newsk;
5160 +
5161 + newsk = sk_prot_alloc(prot, priority, sk->sk_family);
5162 +- if (newsk != NULL) {
5163 +- struct sk_filter *filter;
5164 ++ if (!newsk)
5165 ++ goto out;
5166 +
5167 +- sock_copy(newsk, sk);
5168 ++ sock_copy(newsk, sk);
5169 +
5170 +- newsk->sk_prot_creator = prot;
5171 ++ newsk->sk_prot_creator = prot;
5172 +
5173 +- /* SANITY */
5174 +- if (likely(newsk->sk_net_refcnt))
5175 +- get_net(sock_net(newsk));
5176 +- sk_node_init(&newsk->sk_node);
5177 +- sock_lock_init(newsk);
5178 +- bh_lock_sock(newsk);
5179 +- newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
5180 +- newsk->sk_backlog.len = 0;
5181 ++ /* SANITY */
5182 ++ if (likely(newsk->sk_net_refcnt)) {
5183 ++ get_net(sock_net(newsk));
5184 ++ sock_inuse_add(sock_net(newsk), 1);
5185 ++ }
5186 ++ sk_node_init(&newsk->sk_node);
5187 ++ sock_lock_init(newsk);
5188 ++ bh_lock_sock(newsk);
5189 ++ newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
5190 ++ newsk->sk_backlog.len = 0;
5191 +
5192 +- atomic_set(&newsk->sk_rmem_alloc, 0);
5193 +- /*
5194 +- * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
5195 +- */
5196 +- refcount_set(&newsk->sk_wmem_alloc, 1);
5197 +- atomic_set(&newsk->sk_omem_alloc, 0);
5198 +- sk_init_common(newsk);
5199 ++ atomic_set(&newsk->sk_rmem_alloc, 0);
5200 +
5201 +- newsk->sk_dst_cache = NULL;
5202 +- newsk->sk_dst_pending_confirm = 0;
5203 +- newsk->sk_wmem_queued = 0;
5204 +- newsk->sk_forward_alloc = 0;
5205 +- atomic_set(&newsk->sk_drops, 0);
5206 +- newsk->sk_send_head = NULL;
5207 +- newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
5208 +- atomic_set(&newsk->sk_zckey, 0);
5209 ++ /* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */
5210 ++ refcount_set(&newsk->sk_wmem_alloc, 1);
5211 +
5212 +- sock_reset_flag(newsk, SOCK_DONE);
5213 ++ atomic_set(&newsk->sk_omem_alloc, 0);
5214 ++ sk_init_common(newsk);
5215 +
5216 +- /* sk->sk_memcg will be populated at accept() time */
5217 +- newsk->sk_memcg = NULL;
5218 ++ newsk->sk_dst_cache = NULL;
5219 ++ newsk->sk_dst_pending_confirm = 0;
5220 ++ newsk->sk_wmem_queued = 0;
5221 ++ newsk->sk_forward_alloc = 0;
5222 ++ atomic_set(&newsk->sk_drops, 0);
5223 ++ newsk->sk_send_head = NULL;
5224 ++ newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
5225 ++ atomic_set(&newsk->sk_zckey, 0);
5226 +
5227 +- cgroup_sk_clone(&newsk->sk_cgrp_data);
5228 ++ sock_reset_flag(newsk, SOCK_DONE);
5229 +
5230 +- rcu_read_lock();
5231 +- filter = rcu_dereference(sk->sk_filter);
5232 +- if (filter != NULL)
5233 +- /* though it's an empty new sock, the charging may fail
5234 +- * if sysctl_optmem_max was changed between creation of
5235 +- * original socket and cloning
5236 +- */
5237 +- is_charged = sk_filter_charge(newsk, filter);
5238 +- RCU_INIT_POINTER(newsk->sk_filter, filter);
5239 +- rcu_read_unlock();
5240 ++ /* sk->sk_memcg will be populated at accept() time */
5241 ++ newsk->sk_memcg = NULL;
5242 +
5243 +- if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
5244 +- /* We need to make sure that we don't uncharge the new
5245 +- * socket if we couldn't charge it in the first place
5246 +- * as otherwise we uncharge the parent's filter.
5247 +- */
5248 +- if (!is_charged)
5249 +- RCU_INIT_POINTER(newsk->sk_filter, NULL);
5250 +- sk_free_unlock_clone(newsk);
5251 +- newsk = NULL;
5252 +- goto out;
5253 +- }
5254 +- RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
5255 ++ cgroup_sk_clone(&newsk->sk_cgrp_data);
5256 +
5257 +- if (bpf_sk_storage_clone(sk, newsk)) {
5258 +- sk_free_unlock_clone(newsk);
5259 +- newsk = NULL;
5260 +- goto out;
5261 +- }
5262 ++ rcu_read_lock();
5263 ++ filter = rcu_dereference(sk->sk_filter);
5264 ++ if (filter != NULL)
5265 ++ /* though it's an empty new sock, the charging may fail
5266 ++ * if sysctl_optmem_max was changed between creation of
5267 ++ * original socket and cloning
5268 ++ */
5269 ++ is_charged = sk_filter_charge(newsk, filter);
5270 ++ RCU_INIT_POINTER(newsk->sk_filter, filter);
5271 ++ rcu_read_unlock();
5272 +
5273 +- /* Clear sk_user_data if parent had the pointer tagged
5274 +- * as not suitable for copying when cloning.
5275 ++ if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
5276 ++ /* We need to make sure that we don't uncharge the new
5277 ++ * socket if we couldn't charge it in the first place
5278 ++ * as otherwise we uncharge the parent's filter.
5279 + */
5280 +- if (sk_user_data_is_nocopy(newsk))
5281 +- newsk->sk_user_data = NULL;
5282 ++ if (!is_charged)
5283 ++ RCU_INIT_POINTER(newsk->sk_filter, NULL);
5284 ++ sk_free_unlock_clone(newsk);
5285 ++ newsk = NULL;
5286 ++ goto out;
5287 ++ }
5288 ++ RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
5289 +
5290 +- newsk->sk_err = 0;
5291 +- newsk->sk_err_soft = 0;
5292 +- newsk->sk_priority = 0;
5293 +- newsk->sk_incoming_cpu = raw_smp_processor_id();
5294 +- if (likely(newsk->sk_net_refcnt))
5295 +- sock_inuse_add(sock_net(newsk), 1);
5296 ++ if (bpf_sk_storage_clone(sk, newsk)) {
5297 ++ sk_free_unlock_clone(newsk);
5298 ++ newsk = NULL;
5299 ++ goto out;
5300 ++ }
5301 +
5302 +- /*
5303 +- * Before updating sk_refcnt, we must commit prior changes to memory
5304 +- * (Documentation/RCU/rculist_nulls.rst for details)
5305 +- */
5306 +- smp_wmb();
5307 +- refcount_set(&newsk->sk_refcnt, 2);
5308 ++ /* Clear sk_user_data if parent had the pointer tagged
5309 ++ * as not suitable for copying when cloning.
5310 ++ */
5311 ++ if (sk_user_data_is_nocopy(newsk))
5312 ++ newsk->sk_user_data = NULL;
5313 +
5314 +- /*
5315 +- * Increment the counter in the same struct proto as the master
5316 +- * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
5317 +- * is the same as sk->sk_prot->socks, as this field was copied
5318 +- * with memcpy).
5319 +- *
5320 +- * This _changes_ the previous behaviour, where
5321 +- * tcp_create_openreq_child always was incrementing the
5322 +- * equivalent to tcp_prot->socks (inet_sock_nr), so this have
5323 +- * to be taken into account in all callers. -acme
5324 +- */
5325 +- sk_refcnt_debug_inc(newsk);
5326 +- sk_set_socket(newsk, NULL);
5327 +- sk_tx_queue_clear(newsk);
5328 +- RCU_INIT_POINTER(newsk->sk_wq, NULL);
5329 ++ newsk->sk_err = 0;
5330 ++ newsk->sk_err_soft = 0;
5331 ++ newsk->sk_priority = 0;
5332 ++ newsk->sk_incoming_cpu = raw_smp_processor_id();
5333 +
5334 +- if (newsk->sk_prot->sockets_allocated)
5335 +- sk_sockets_allocated_inc(newsk);
5336 ++ /* Before updating sk_refcnt, we must commit prior changes to memory
5337 ++ * (Documentation/RCU/rculist_nulls.rst for details)
5338 ++ */
5339 ++ smp_wmb();
5340 ++ refcount_set(&newsk->sk_refcnt, 2);
5341 +
5342 +- if (sock_needs_netstamp(sk) &&
5343 +- newsk->sk_flags & SK_FLAGS_TIMESTAMP)
5344 +- net_enable_timestamp();
5345 +- }
5346 ++ /* Increment the counter in the same struct proto as the master
5347 ++ * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
5348 ++ * is the same as sk->sk_prot->socks, as this field was copied
5349 ++ * with memcpy).
5350 ++ *
5351 ++ * This _changes_ the previous behaviour, where
5352 ++ * tcp_create_openreq_child always was incrementing the
5353 ++ * equivalent to tcp_prot->socks (inet_sock_nr), so this have
5354 ++ * to be taken into account in all callers. -acme
5355 ++ */
5356 ++ sk_refcnt_debug_inc(newsk);
5357 ++ sk_set_socket(newsk, NULL);
5358 ++ sk_tx_queue_clear(newsk);
5359 ++ RCU_INIT_POINTER(newsk->sk_wq, NULL);
5360 ++
5361 ++ if (newsk->sk_prot->sockets_allocated)
5362 ++ sk_sockets_allocated_inc(newsk);
5363 ++
5364 ++ if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP)
5365 ++ net_enable_timestamp();
5366 + out:
5367 + return newsk;
5368 + }
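Nearly all of the sk_clone_lock() churn is mechanical: the old if (newsk != NULL) { ... } wrapper around the whole body becomes an early goto out, un-indenting every line, and sock_inuse_add() moves next to get_net() so both refcount-dependent steps sit under the single sk_net_refcnt test. The shape of that refactor in miniature, with made-up names:

#include <linux/slab.h>

struct demo { int a; };

static struct demo *demo_clone(gfp_t gfp)
{
        struct demo *d = kzalloc(sizeof(*d), gfp);

        if (!d)
                goto out;       /* bail early, keep the success path flat */
        d->a = 1;
out:
        return d;
}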
5369 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
5370 +index e8aca226c4ae3..bb16c88f58a3c 100644
5371 +--- a/net/ipv4/tcp.c
5372 ++++ b/net/ipv4/tcp.c
5373 +@@ -1746,6 +1746,77 @@ int tcp_mmap(struct file *file, struct socket *sock,
5374 + }
5375 + EXPORT_SYMBOL(tcp_mmap);
5376 +
5377 ++static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
5378 ++ u32 *offset_frag)
5379 ++{
5380 ++ skb_frag_t *frag;
5381 ++
5382 ++ if (unlikely(offset_skb >= skb->len))
5383 ++ return NULL;
5384 ++
5385 ++ offset_skb -= skb_headlen(skb);
5386 ++ if ((int)offset_skb < 0 || skb_has_frag_list(skb))
5387 ++ return NULL;
5388 ++
5389 ++ frag = skb_shinfo(skb)->frags;
5390 ++ while (offset_skb) {
5391 ++ if (skb_frag_size(frag) > offset_skb) {
5392 ++ *offset_frag = offset_skb;
5393 ++ return frag;
5394 ++ }
5395 ++ offset_skb -= skb_frag_size(frag);
5396 ++ ++frag;
5397 ++ }
5398 ++ *offset_frag = 0;
5399 ++ return frag;
5400 ++}
5401 ++
5402 ++static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc,
5403 ++ struct sk_buff *skb, u32 copylen,
5404 ++ u32 *offset, u32 *seq)
5405 ++{
5406 ++ unsigned long copy_address = (unsigned long)zc->copybuf_address;
5407 ++ struct msghdr msg = {};
5408 ++ struct iovec iov;
5409 ++ int err;
5410 ++
5411 ++ if (copy_address != zc->copybuf_address)
5412 ++ return -EINVAL;
5413 ++
5414 ++ err = import_single_range(READ, (void __user *)copy_address,
5415 ++ copylen, &iov, &msg.msg_iter);
5416 ++ if (err)
5417 ++ return err;
5418 ++ err = skb_copy_datagram_msg(skb, *offset, &msg, copylen);
5419 ++ if (err)
5420 ++ return err;
5421 ++ zc->recv_skip_hint -= copylen;
5422 ++ *offset += copylen;
5423 ++ *seq += copylen;
5424 ++ return (__s32)copylen;
5425 ++}
5426 ++
5427 ++static int tcp_zerocopy_handle_leftover_data(struct tcp_zerocopy_receive *zc,
5428 ++ struct sock *sk,
5429 ++ struct sk_buff *skb,
5430 ++ u32 *seq,
5431 ++ s32 copybuf_len)
5432 ++{
5433 ++ u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint);
5434 ++
5435 ++ if (!copylen)
5436 ++ return 0;
5437 ++ /* skb is null if inq < PAGE_SIZE. */
5438 ++ if (skb)
5439 ++ offset = *seq - TCP_SKB_CB(skb)->seq;
5440 ++ else
5441 ++ skb = tcp_recv_skb(sk, *seq, &offset);
5442 ++
5443 ++ zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset,
5444 ++ seq);
5445 ++ return zc->copybuf_len < 0 ? 0 : copylen;
5446 ++}
5447 ++
5448 + static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma,
5449 + struct page **pages,
5450 + unsigned long pages_to_map,
5451 +@@ -1779,8 +1850,10 @@ static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma,
5452 + static int tcp_zerocopy_receive(struct sock *sk,
5453 + struct tcp_zerocopy_receive *zc)
5454 + {
5455 ++ u32 length = 0, offset, vma_len, avail_len, aligned_len, copylen = 0;
5456 + unsigned long address = (unsigned long)zc->address;
5457 +- u32 length = 0, seq, offset, zap_len;
5458 ++ s32 copybuf_len = zc->copybuf_len;
5459 ++ struct tcp_sock *tp = tcp_sk(sk);
5460 + #define PAGE_BATCH_SIZE 8
5461 + struct page *pages[PAGE_BATCH_SIZE];
5462 + const skb_frag_t *frags = NULL;
5463 +@@ -1788,10 +1861,12 @@ static int tcp_zerocopy_receive(struct sock *sk,
5464 + struct sk_buff *skb = NULL;
5465 + unsigned long pg_idx = 0;
5466 + unsigned long curr_addr;
5467 +- struct tcp_sock *tp;
5468 +- int inq;
5469 ++ u32 seq = tp->copied_seq;
5470 ++ int inq = tcp_inq(sk);
5471 + int ret;
5472 +
5473 ++ zc->copybuf_len = 0;
5474 ++
5475 + if (address & (PAGE_SIZE - 1) || address != zc->address)
5476 + return -EINVAL;
5477 +
5478 +@@ -1800,8 +1875,6 @@ static int tcp_zerocopy_receive(struct sock *sk,
5479 +
5480 + sock_rps_record_flow(sk);
5481 +
5482 +- tp = tcp_sk(sk);
5483 +-
5484 + mmap_read_lock(current->mm);
5485 +
5486 + vma = find_vma(current->mm, address);
5487 +@@ -1809,22 +1882,23 @@ static int tcp_zerocopy_receive(struct sock *sk,
5488 + mmap_read_unlock(current->mm);
5489 + return -EINVAL;
5490 + }
5491 +- zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
5492 +-
5493 +- seq = tp->copied_seq;
5494 +- inq = tcp_inq(sk);
5495 +- zc->length = min_t(u32, zc->length, inq);
5496 +- zap_len = zc->length & ~(PAGE_SIZE - 1);
5497 +- if (zap_len) {
5498 +- zap_page_range(vma, address, zap_len);
5499 ++ vma_len = min_t(unsigned long, zc->length, vma->vm_end - address);
5500 ++ avail_len = min_t(u32, vma_len, inq);
5501 ++ aligned_len = avail_len & ~(PAGE_SIZE - 1);
5502 ++ if (aligned_len) {
5503 ++ zap_page_range(vma, address, aligned_len);
5504 ++ zc->length = aligned_len;
5505 + zc->recv_skip_hint = 0;
5506 + } else {
5507 +- zc->recv_skip_hint = zc->length;
5508 ++ zc->length = avail_len;
5509 ++ zc->recv_skip_hint = avail_len;
5510 + }
5511 + ret = 0;
5512 + curr_addr = address;
5513 + while (length + PAGE_SIZE <= zc->length) {
5514 + if (zc->recv_skip_hint < PAGE_SIZE) {
5515 ++ u32 offset_frag;
5516 ++
5517 + /* If we're here, finish the current batch. */
5518 + if (pg_idx) {
5519 + ret = tcp_zerocopy_vm_insert_batch(vma, pages,
5520 +@@ -1845,16 +1919,9 @@ static int tcp_zerocopy_receive(struct sock *sk,
5521 + skb = tcp_recv_skb(sk, seq, &offset);
5522 + }
5523 + zc->recv_skip_hint = skb->len - offset;
5524 +- offset -= skb_headlen(skb);
5525 +- if ((int)offset < 0 || skb_has_frag_list(skb))
5526 ++ frags = skb_advance_to_frag(skb, offset, &offset_frag);
5527 ++ if (!frags || offset_frag)
5528 + break;
5529 +- frags = skb_shinfo(skb)->frags;
5530 +- while (offset) {
5531 +- if (skb_frag_size(frags) > offset)
5532 +- goto out;
5533 +- offset -= skb_frag_size(frags);
5534 +- frags++;
5535 +- }
5536 + }
5537 + if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) {
5538 + int remaining = zc->recv_skip_hint;
5539 +@@ -1888,13 +1955,18 @@ static int tcp_zerocopy_receive(struct sock *sk,
5540 + }
5541 + out:
5542 + mmap_read_unlock(current->mm);
5543 +- if (length) {
5544 ++ /* Try to copy straggler data. */
5545 ++ if (!ret)
5546 ++ copylen = tcp_zerocopy_handle_leftover_data(zc, sk, skb, &seq,
5547 ++ copybuf_len);
5548 ++
5549 ++ if (length + copylen) {
5550 + WRITE_ONCE(tp->copied_seq, seq);
5551 + tcp_rcv_space_adjust(sk);
5552 +
5553 + /* Clean up data we have read: This will do ACK frames. */
5554 + tcp_recv_skb(sk, seq, &offset);
5555 +- tcp_cleanup_rbuf(sk, length);
5556 ++ tcp_cleanup_rbuf(sk, length + copylen);
5557 + ret = 0;
5558 + if (length == zc->length)
5559 + zc->recv_skip_hint = 0;
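The new copybuf fields give TCP_ZEROCOPY_RECEIVE a fallback: whole pages are still mapped into the caller's VMA, while the sub-page tail ("straggler data") is copied into a small user buffer instead of needing a separate recv(). A hedged userspace sketch of a caller; the buffer sizes and error handling are placeholders:

#include <linux/tcp.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* assumes addr is a page-aligned window previously mmap()ed on fd */
static int demo_zc_read(int fd, void *addr, size_t len,
                        char *bounce, int bounce_len)
{
        struct tcp_zerocopy_receive zc;
        socklen_t zc_len = sizeof(zc);

        memset(&zc, 0, sizeof(zc));
        zc.address = (__u64)(unsigned long)addr;
        zc.length = len;
        zc.copybuf_address = (__u64)(unsigned long)bounce;
        zc.copybuf_len = bounce_len;

        if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
                       &zc, &zc_len))
                return -1;

        /* zc.length bytes are now mapped at addr; if zc.copybuf_len is
         * positive, that many tail bytes landed in bounce. */
        return 0;
}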
5560 +diff --git a/net/nfc/core.c b/net/nfc/core.c
5561 +index eb377f87bcae8..6800470dd6df7 100644
5562 +--- a/net/nfc/core.c
5563 ++++ b/net/nfc/core.c
5564 +@@ -94,13 +94,13 @@ int nfc_dev_up(struct nfc_dev *dev)
5565 +
5566 + device_lock(&dev->dev);
5567 +
5568 +- if (dev->rfkill && rfkill_blocked(dev->rfkill)) {
5569 +- rc = -ERFKILL;
5570 ++ if (!device_is_registered(&dev->dev)) {
5571 ++ rc = -ENODEV;
5572 + goto error;
5573 + }
5574 +
5575 +- if (!device_is_registered(&dev->dev)) {
5576 +- rc = -ENODEV;
5577 ++ if (dev->rfkill && rfkill_blocked(dev->rfkill)) {
5578 ++ rc = -ERFKILL;
5579 + goto error;
5580 + }
5581 +
5582 +@@ -1117,11 +1117,7 @@ int nfc_register_device(struct nfc_dev *dev)
5583 + if (rc)
5584 + pr_err("Could not register llcp device\n");
5585 +
5586 +- rc = nfc_genl_device_added(dev);
5587 +- if (rc)
5588 +- pr_debug("The userspace won't be notified that the device %s was added\n",
5589 +- dev_name(&dev->dev));
5590 +-
5591 ++ device_lock(&dev->dev);
5592 + dev->rfkill = rfkill_alloc(dev_name(&dev->dev), &dev->dev,
5593 + RFKILL_TYPE_NFC, &nfc_rfkill_ops, dev);
5594 + if (dev->rfkill) {
5595 +@@ -1130,6 +1126,12 @@ int nfc_register_device(struct nfc_dev *dev)
5596 + dev->rfkill = NULL;
5597 + }
5598 + }
5599 ++ device_unlock(&dev->dev);
5600 ++
5601 ++ rc = nfc_genl_device_added(dev);
5602 ++ if (rc)
5603 ++ pr_debug("The userspace won't be notified that the device %s was added\n",
5604 ++ dev_name(&dev->dev));
5605 +
5606 + return 0;
5607 + }
5608 +@@ -1146,10 +1148,17 @@ void nfc_unregister_device(struct nfc_dev *dev)
5609 +
5610 + pr_debug("dev_name=%s\n", dev_name(&dev->dev));
5611 +
5612 ++ rc = nfc_genl_device_removed(dev);
5613 ++ if (rc)
5614 ++ pr_debug("The userspace won't be notified that the device %s "
5615 ++ "was removed\n", dev_name(&dev->dev));
5616 ++
5617 ++ device_lock(&dev->dev);
5618 + if (dev->rfkill) {
5619 + rfkill_unregister(dev->rfkill);
5620 + rfkill_destroy(dev->rfkill);
5621 + }
5622 ++ device_unlock(&dev->dev);
5623 +
5624 + if (dev->ops->check_presence) {
5625 + device_lock(&dev->dev);
5626 +@@ -1159,11 +1168,6 @@ void nfc_unregister_device(struct nfc_dev *dev)
5627 + cancel_work_sync(&dev->check_pres_work);
5628 + }
5629 +
5630 +- rc = nfc_genl_device_removed(dev);
5631 +- if (rc)
5632 +- pr_debug("The userspace won't be notified that the device %s "
5633 +- "was removed\n", dev_name(&dev->dev));
5634 +-
5635 + nfc_llcp_unregister_device(dev);
5636 +
5637 + mutex_lock(&nfc_devlist_mutex);
5638 +diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
5639 +index 32e8154363cab..e38719e2ee582 100644
5640 +--- a/net/nfc/nci/core.c
5641 ++++ b/net/nfc/nci/core.c
5642 +@@ -144,12 +144,15 @@ inline int nci_request(struct nci_dev *ndev,
5643 + {
5644 + int rc;
5645 +
5646 +- if (!test_bit(NCI_UP, &ndev->flags))
5647 +- return -ENETDOWN;
5648 +-
5649 + /* Serialize all requests */
5650 + mutex_lock(&ndev->req_lock);
5651 +- rc = __nci_request(ndev, req, opt, timeout);
5652 ++ /* check the state after obtaining the lock against any races
5653 ++ * from nci_close_device when the device gets removed.
5654 ++ */
5655 ++ if (test_bit(NCI_UP, &ndev->flags))
5656 ++ rc = __nci_request(ndev, req, opt, timeout);
5657 ++ else
5658 ++ rc = -ENETDOWN;
5659 + mutex_unlock(&ndev->req_lock);
5660 +
5661 + return rc;
5662 +@@ -470,6 +473,11 @@ static int nci_open_device(struct nci_dev *ndev)
5663 +
5664 + mutex_lock(&ndev->req_lock);
5665 +
5666 ++ if (test_bit(NCI_UNREG, &ndev->flags)) {
5667 ++ rc = -ENODEV;
5668 ++ goto done;
5669 ++ }
5670 ++
5671 + if (test_bit(NCI_UP, &ndev->flags)) {
5672 + rc = -EALREADY;
5673 + goto done;
5674 +@@ -533,6 +541,10 @@ done:
5675 + static int nci_close_device(struct nci_dev *ndev)
5676 + {
5677 + nci_req_cancel(ndev, ENODEV);
5678 ++
5679 ++ /* This mutex needs to be held as a barrier for the
5680 ++ * caller, nci_unregister_device()
5681 ++ */
5682 + mutex_lock(&ndev->req_lock);
5683 +
5684 + if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
5685 +@@ -565,13 +577,13 @@ static int nci_close_device(struct nci_dev *ndev)
5686 +
5687 + clear_bit(NCI_INIT, &ndev->flags);
5688 +
5689 +- del_timer_sync(&ndev->cmd_timer);
5690 +-
5691 + /* Flush cmd wq */
5692 + flush_workqueue(ndev->cmd_wq);
5693 +
5694 +- /* Clear flags */
5695 +- ndev->flags = 0;
5696 ++ del_timer_sync(&ndev->cmd_timer);
5697 ++
5698 ++ /* Clear flags except NCI_UNREG */
5699 ++ ndev->flags &= BIT(NCI_UNREG);
5700 +
5701 + mutex_unlock(&ndev->req_lock);
5702 +
5703 +@@ -1256,6 +1268,12 @@ void nci_unregister_device(struct nci_dev *ndev)
5704 + {
5705 + struct nci_conn_info *conn_info, *n;
5706 +
5707 ++ /* This set_bit is not protected by a dedicated barrier;
5708 ++ * however, that is fine because the mutex_lock(&ndev->req_lock)
5709 ++ * in nci_close_device() will emit one.
5710 ++ */
5711 ++ set_bit(NCI_UNREG, &ndev->flags);
5712 ++
5713 + nci_close_device(ndev);
5714 +
5715 + destroy_workqueue(ndev->cmd_wq);
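The two NCI hunks implement one rule: a flag that the unregister path changes must be tested after taking the lock that path also holds, and teardown must leave the sticky NCI_UNREG marker so late openers bail out. The check-under-lock half, reduced to a generic sketch; the demo_* names are mine:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/mutex.h>

#define DEMO_UP         0
#define DEMO_UNREG      1

struct demo_dev {
        unsigned long flags;
        struct mutex req_lock;
};

static int demo_request(struct demo_dev *dev, int (*req)(struct demo_dev *))
{
        int rc;

        mutex_lock(&dev->req_lock);
        /*
         * Re-check under the lock: teardown clears DEMO_UP while
         * holding req_lock, so this test cannot race with it.
         */
        if (test_bit(DEMO_UP, &dev->flags) &&
            !test_bit(DEMO_UNREG, &dev->flags))
                rc = req(dev);
        else
                rc = -ENETDOWN;
        mutex_unlock(&dev->req_lock);

        return rc;
}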
5716 +diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
5717 +index 0b0eb18919c09..24d561d8d9c97 100644
5718 +--- a/net/sched/act_mirred.c
5719 ++++ b/net/sched/act_mirred.c
5720 +@@ -19,6 +19,7 @@
5721 + #include <linux/if_arp.h>
5722 + #include <net/net_namespace.h>
5723 + #include <net/netlink.h>
5724 ++#include <net/dst.h>
5725 + #include <net/pkt_sched.h>
5726 + #include <net/pkt_cls.h>
5727 + #include <linux/tc_act/tc_mirred.h>
5728 +@@ -218,6 +219,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
5729 + bool want_ingress;
5730 + bool is_redirect;
5731 + bool expects_nh;
5732 ++ bool at_ingress;
5733 + int m_eaction;
5734 + int mac_len;
5735 + bool at_nh;
5736 +@@ -253,7 +255,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
5737 + * ingress - that covers the TC S/W datapath.
5738 + */
5739 + is_redirect = tcf_mirred_is_act_redirect(m_eaction);
5740 +- use_reinsert = skb_at_tc_ingress(skb) && is_redirect &&
5741 ++ at_ingress = skb_at_tc_ingress(skb);
5742 ++ use_reinsert = at_ingress && is_redirect &&
5743 + tcf_mirred_can_reinsert(retval);
5744 + if (!use_reinsert) {
5745 + skb2 = skb_clone(skb, GFP_ATOMIC);
5746 +@@ -261,10 +264,12 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
5747 + goto out;
5748 + }
5749 +
5750 ++ want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
5751 ++
5752 + /* All mirred/redirected skbs should clear previous ct info */
5753 + nf_reset_ct(skb2);
5754 +-
5755 +- want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
5756 ++ if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
5757 ++ skb_dst_drop(skb2);
5758 +
5759 + expects_nh = want_ingress || !m_mac_header_xmit;
5760 + at_nh = skb->data == skb_network_header(skb);
5761 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
5762 +index c491dd8e67cda..109d790eaebe2 100644
5763 +--- a/net/smc/smc_core.c
5764 ++++ b/net/smc/smc_core.c
5765 +@@ -287,13 +287,14 @@ static u8 smcr_next_link_id(struct smc_link_group *lgr)
5766 + int i;
5767 +
5768 + while (1) {
5769 ++again:
5770 + link_id = ++lgr->next_link_id;
5771 + if (!link_id) /* skip zero as link_id */
5772 + link_id = ++lgr->next_link_id;
5773 + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
5774 + if (smc_link_usable(&lgr->lnk[i]) &&
5775 + lgr->lnk[i].link_id == link_id)
5776 +- continue;
5777 ++ goto again;
5778 + }
5779 + break;
5780 + }
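The SMC fix corrects a classic nested-loop bug: continue inside the inner for only advanced i, so a link_id that collided with an existing link was still accepted once the scan finished; goto again restarts the outer loop and draws a fresh id. The hazard as a self-contained sketch:

#define DEMO_LINKS 8

static unsigned char demo_next_id(unsigned char *counter)
{
        if (!++*counter)        /* skip zero, as the SMC code does */
                ++*counter;
        return *counter;
}

static unsigned char demo_pick_id(const unsigned char used[DEMO_LINKS],
                                  unsigned char *counter)
{
        unsigned char id;
        int i;

        while (1) {
again:
                id = demo_next_id(counter);
                for (i = 0; i < DEMO_LINKS; i++)
                        if (used[i] == id)
                                goto again;     /* a "continue" here would
                                                 * only advance i */
                break;
        }
        return id;
}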
5781 +diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
5782 +index 23b100f36ee48..d8a2f424786fc 100644
5783 +--- a/net/tipc/crypto.c
5784 ++++ b/net/tipc/crypto.c
5785 +@@ -590,6 +590,10 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
5786 + tmp->cloned = NULL;
5787 + tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
5788 + tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
5789 ++ if (!tmp->key) {
5790 ++ tipc_aead_free(&tmp->rcu);
5791 ++ return -ENOMEM;
5792 ++ }
5793 + memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
5794 + atomic_set(&tmp->users, 0);
5795 + atomic64_set(&tmp->seqno, 0);
5796 +diff --git a/net/tipc/link.c b/net/tipc/link.c
5797 +index c92e6984933cb..29591955d08a5 100644
5798 +--- a/net/tipc/link.c
5799 ++++ b/net/tipc/link.c
5800 +@@ -1258,8 +1258,11 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
5801 + return false;
5802 + #ifdef CONFIG_TIPC_CRYPTO
5803 + case MSG_CRYPTO:
5804 +- tipc_crypto_msg_rcv(l->net, skb);
5805 +- return true;
5806 ++ if (TIPC_SKB_CB(skb)->decrypted) {
5807 ++ tipc_crypto_msg_rcv(l->net, skb);
5808 ++ return true;
5809 ++ }
5810 ++ fallthrough;
5811 + #endif
5812 + default:
5813 + pr_warn("Dropping received illegal msg type\n");
5814 +diff --git a/net/wireless/util.c b/net/wireless/util.c
5815 +index 3f8c46bb6d9a4..4b32e85c2d9a1 100644
5816 +--- a/net/wireless/util.c
5817 ++++ b/net/wireless/util.c
5818 +@@ -1044,6 +1044,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
5819 +
5820 + switch (otype) {
5821 + case NL80211_IFTYPE_AP:
5822 ++ case NL80211_IFTYPE_P2P_GO:
5823 + cfg80211_stop_ap(rdev, dev, true);
5824 + break;
5825 + case NL80211_IFTYPE_ADHOC:
5826 +diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
5827 +index dab8c25c739b9..7335f67ce54eb 100644
5828 +--- a/security/selinux/ss/hashtab.c
5829 ++++ b/security/selinux/ss/hashtab.c
5830 +@@ -30,13 +30,20 @@ static u32 hashtab_compute_size(u32 nel)
5831 +
5832 + int hashtab_init(struct hashtab *h, u32 nel_hint)
5833 + {
5834 +- h->size = hashtab_compute_size(nel_hint);
5835 ++ u32 size = hashtab_compute_size(nel_hint);
5836 ++
5837 ++ /* should already be zeroed, but better be safe */
5838 + h->nel = 0;
5839 +- if (!h->size)
5840 +- return 0;
5841 ++ h->size = 0;
5842 ++ h->htable = NULL;
5843 +
5844 +- h->htable = kcalloc(h->size, sizeof(*h->htable), GFP_KERNEL);
5845 +- return h->htable ? 0 : -ENOMEM;
5846 ++ if (size) {
5847 ++ h->htable = kcalloc(size, sizeof(*h->htable), GFP_KERNEL);
5848 ++ if (!h->htable)
5849 ++ return -ENOMEM;
5850 ++ h->size = size;
5851 ++ }
5852 ++ return 0;
5853 + }
5854 +
5855 + int __hashtab_insert(struct hashtab *h, struct hashtab_node **dst,
5856 +diff --git a/sound/core/Makefile b/sound/core/Makefile
5857 +index ee4a4a6b99ba7..d123587c0fd8f 100644
5858 +--- a/sound/core/Makefile
5859 ++++ b/sound/core/Makefile
5860 +@@ -9,7 +9,9 @@ ifneq ($(CONFIG_SND_PROC_FS),)
5861 + snd-y += info.o
5862 + snd-$(CONFIG_SND_OSSEMUL) += info_oss.o
5863 + endif
5864 ++ifneq ($(CONFIG_M68K),y)
5865 + snd-$(CONFIG_ISA_DMA_API) += isadma.o
5866 ++endif
5867 + snd-$(CONFIG_SND_OSSEMUL) += sound_oss.o
5868 + snd-$(CONFIG_SND_VMASTER) += vmaster.o
5869 + snd-$(CONFIG_SND_JACK) += ctljack.o jack.o
5870 +diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
5871 +index c4d54a838773c..1e6e4cf428cda 100644
5872 +--- a/sound/hda/ext/hdac_ext_stream.c
5873 ++++ b/sound/hda/ext/hdac_ext_stream.c
5874 +@@ -106,20 +106,14 @@ void snd_hdac_stream_free_all(struct hdac_bus *bus)
5875 + }
5876 + EXPORT_SYMBOL_GPL(snd_hdac_stream_free_all);
5877 +
5878 +-/**
5879 +- * snd_hdac_ext_stream_decouple - decouple the hdac stream
5880 +- * @bus: HD-audio core bus
5881 +- * @stream: HD-audio ext core stream object to initialize
5882 +- * @decouple: flag to decouple
5883 +- */
5884 +-void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
5885 +- struct hdac_ext_stream *stream, bool decouple)
5886 ++void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus,
5887 ++ struct hdac_ext_stream *stream,
5888 ++ bool decouple)
5889 + {
5890 + struct hdac_stream *hstream = &stream->hstream;
5891 + u32 val;
5892 + int mask = AZX_PPCTL_PROCEN(hstream->index);
5893 +
5894 +- spin_lock_irq(&bus->reg_lock);
5895 + val = readw(bus->ppcap + AZX_REG_PP_PPCTL) & mask;
5896 +
5897 + if (decouple && !val)
5898 +@@ -128,6 +122,20 @@ void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
5899 + snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, mask, 0);
5900 +
5901 + stream->decoupled = decouple;
5902 ++}
5903 ++EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_decouple_locked);
5904 ++
5905 ++/**
5906 ++ * snd_hdac_ext_stream_decouple - decouple the hdac stream
5907 ++ * @bus: HD-audio core bus
5908 ++ * @stream: HD-audio ext core stream object to initialize
5909 ++ * @decouple: flag to decouple
5910 ++ */
5911 ++void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
5912 ++ struct hdac_ext_stream *stream, bool decouple)
5913 ++{
5914 ++ spin_lock_irq(&bus->reg_lock);
5915 ++ snd_hdac_ext_stream_decouple_locked(bus, stream, decouple);
5916 + spin_unlock_irq(&bus->reg_lock);
5917 + }
5918 + EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_decouple);
5919 +@@ -252,6 +260,7 @@ hdac_ext_link_stream_assign(struct hdac_bus *bus,
5920 + return NULL;
5921 + }
5922 +
5923 ++ spin_lock_irq(&bus->reg_lock);
5924 + list_for_each_entry(stream, &bus->stream_list, list) {
5925 + struct hdac_ext_stream *hstream = container_of(stream,
5926 + struct hdac_ext_stream,
5927 +@@ -266,17 +275,16 @@ hdac_ext_link_stream_assign(struct hdac_bus *bus,
5928 + }
5929 +
5930 + if (!hstream->link_locked) {
5931 +- snd_hdac_ext_stream_decouple(bus, hstream, true);
5932 ++ snd_hdac_ext_stream_decouple_locked(bus, hstream, true);
5933 + res = hstream;
5934 + break;
5935 + }
5936 + }
5937 + if (res) {
5938 +- spin_lock_irq(&bus->reg_lock);
5939 + res->link_locked = 1;
5940 + res->link_substream = substream;
5941 +- spin_unlock_irq(&bus->reg_lock);
5942 + }
5943 ++ spin_unlock_irq(&bus->reg_lock);
5944 + return res;
5945 + }
5946 +
5947 +@@ -292,6 +300,7 @@ hdac_ext_host_stream_assign(struct hdac_bus *bus,
5948 + return NULL;
5949 + }
5950 +
5951 ++ spin_lock_irq(&bus->reg_lock);
5952 + list_for_each_entry(stream, &bus->stream_list, list) {
5953 + struct hdac_ext_stream *hstream = container_of(stream,
5954 + struct hdac_ext_stream,
5955 +@@ -301,18 +310,17 @@ hdac_ext_host_stream_assign(struct hdac_bus *bus,
5956 +
5957 + if (!stream->opened) {
5958 + if (!hstream->decoupled)
5959 +- snd_hdac_ext_stream_decouple(bus, hstream, true);
5960 ++ snd_hdac_ext_stream_decouple_locked(bus, hstream, true);
5961 + res = hstream;
5962 + break;
5963 + }
5964 + }
5965 + if (res) {
5966 +- spin_lock_irq(&bus->reg_lock);
5967 + res->hstream.opened = 1;
5968 + res->hstream.running = 0;
5969 + res->hstream.substream = substream;
5970 +- spin_unlock_irq(&bus->reg_lock);
5971 + }
5972 ++ spin_unlock_irq(&bus->reg_lock);
5973 +
5974 + return res;
5975 + }
5976 +@@ -378,15 +386,17 @@ void snd_hdac_ext_stream_release(struct hdac_ext_stream *stream, int type)
5977 + break;
5978 +
5979 + case HDAC_EXT_STREAM_TYPE_HOST:
5980 ++ spin_lock_irq(&bus->reg_lock);
5981 + if (stream->decoupled && !stream->link_locked)
5982 +- snd_hdac_ext_stream_decouple(bus, stream, false);
5983 ++ snd_hdac_ext_stream_decouple_locked(bus, stream, false);
5984 ++ spin_unlock_irq(&bus->reg_lock);
5985 + snd_hdac_stream_release(&stream->hstream);
5986 + break;
5987 +
5988 + case HDAC_EXT_STREAM_TYPE_LINK:
5989 +- if (stream->decoupled && !stream->hstream.opened)
5990 +- snd_hdac_ext_stream_decouple(bus, stream, false);
5991 + spin_lock_irq(&bus->reg_lock);
5992 ++ if (stream->decoupled && !stream->hstream.opened)
5993 ++ snd_hdac_ext_stream_decouple_locked(bus, stream, false);
5994 + stream->link_locked = 0;
5995 + stream->link_substream = NULL;
5996 + spin_unlock_irq(&bus->reg_lock);
5997 +diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
5998 +index abe7a1b16fe1e..ce77a53201639 100644
5999 +--- a/sound/hda/hdac_stream.c
6000 ++++ b/sound/hda/hdac_stream.c
6001 +@@ -296,6 +296,7 @@ struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
6002 + int key = (substream->pcm->device << 16) | (substream->number << 2) |
6003 + (substream->stream + 1);
6004 +
6005 ++ spin_lock_irq(&bus->reg_lock);
6006 + list_for_each_entry(azx_dev, &bus->stream_list, list) {
6007 + if (azx_dev->direction != substream->stream)
6008 + continue;
6009 +@@ -309,13 +310,12 @@ struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
6010 + res = azx_dev;
6011 + }
6012 + if (res) {
6013 +- spin_lock_irq(&bus->reg_lock);
6014 + res->opened = 1;
6015 + res->running = 0;
6016 + res->assigned_key = key;
6017 + res->substream = substream;
6018 +- spin_unlock_irq(&bus->reg_lock);
6019 + }
6020 ++ spin_unlock_irq(&bus->reg_lock);
6021 + return res;
6022 + }
6023 + EXPORT_SYMBOL_GPL(snd_hdac_stream_assign);
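Every sound/hda hunk applies the same refactor: the scan that finds a free stream and the writes that claim it used to be separated by a lock drop, because snd_hdac_ext_stream_decouple() took bus->reg_lock itself, so two callers could pick the same stream. A *_locked variant lets the whole scan-and-claim run under one hold of the lock. The idiom with generic names, as a sketch:

#include <linux/spinlock.h>

struct demo_stream {
        bool decoupled;
        bool busy;
};

static DEFINE_SPINLOCK(demo_lock);

/* caller must hold demo_lock */
static void demo_decouple_locked(struct demo_stream *s, bool decouple)
{
        s->decoupled = decouple;
}

/* convenience wrapper for callers that do not hold the lock */
static void demo_decouple(struct demo_stream *s, bool decouple)
{
        spin_lock_irq(&demo_lock);
        demo_decouple_locked(s, decouple);
        spin_unlock_irq(&demo_lock);
}

/* scan and claim atomically by using the _locked variant inside */
static struct demo_stream *demo_assign(struct demo_stream *pool, int n)
{
        struct demo_stream *res = NULL;
        int i;

        spin_lock_irq(&demo_lock);
        for (i = 0; i < n; i++) {
                if (!pool[i].busy) {
                        demo_decouple_locked(&pool[i], true);
                        pool[i].busy = true;
                        res = &pool[i];
                        break;
                }
        }
        spin_unlock_irq(&demo_lock);

        return res;
}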
6024 +diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
6025 +index 61e1de6d7be0a..6cdb3db7507b1 100644
6026 +--- a/sound/hda/intel-dsp-config.c
6027 ++++ b/sound/hda/intel-dsp-config.c
6028 +@@ -30,6 +30,7 @@ struct config_entry {
6029 + u32 flags;
6030 + u16 device;
6031 + const struct dmi_system_id *dmi_table;
6032 ++ u8 codec_hid[ACPI_ID_LEN];
6033 + };
6034 +
6035 + /*
6036 +@@ -55,7 +56,7 @@ static const struct config_entry config_table[] = {
6037 + /*
6038 + * Apollolake (Broxton-P)
6039 + * the legacy HDAudio driver is used except on Up Squared (SOF) and
6040 +- * Chromebooks (SST)
6041 ++ * Chromebooks (SST), as well as devices based on the ES8336 codec
6042 + */
6043 + #if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
6044 + {
6045 +@@ -72,6 +73,11 @@ static const struct config_entry config_table[] = {
6046 + {}
6047 + }
6048 + },
6049 ++ {
6050 ++ .flags = FLAG_SOF,
6051 ++ .device = 0x5a98,
6052 ++ .codec_hid = "ESSX8336",
6053 ++ },
6054 + #endif
6055 + #if IS_ENABLED(CONFIG_SND_SOC_INTEL_APL)
6056 + {
6057 +@@ -136,7 +142,7 @@ static const struct config_entry config_table[] = {
6058 +
6059 + /*
6060 + * Geminilake uses legacy HDAudio driver except for Google
6061 +- * Chromebooks
6062 ++ * Chromebooks and devices based on the ES8336 codec
6063 + */
6064 + /* Geminilake */
6065 + #if IS_ENABLED(CONFIG_SND_SOC_SOF_GEMINILAKE)
6066 +@@ -153,6 +159,11 @@ static const struct config_entry config_table[] = {
6067 + {}
6068 + }
6069 + },
6070 ++ {
6071 ++ .flags = FLAG_SOF,
6072 ++ .device = 0x3198,
6073 ++ .codec_hid = "ESSX8336",
6074 ++ },
6075 + #endif
6076 +
6077 + /*
6078 +@@ -310,6 +321,11 @@ static const struct config_entry config_table[] = {
6079 + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
6080 + .device = 0x43c8,
6081 + },
6082 ++ {
6083 ++ .flags = FLAG_SOF,
6084 ++ .device = 0xa0c8,
6085 ++ .codec_hid = "ESSX8336",
6086 ++ },
6087 + #endif
6088 +
6089 + /* Elkhart Lake */
6090 +@@ -337,6 +353,8 @@ static const struct config_entry *snd_intel_dsp_find_config
6091 + continue;
6092 + if (table->dmi_table && !dmi_check_system(table->dmi_table))
6093 + continue;
6094 ++ if (table->codec_hid[0] && !acpi_dev_present(table->codec_hid, NULL, -1))
6095 ++ continue;
6096 + return table;
6097 + }
6098 + return NULL;
6099 +diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
6100 +index 6ffa48dd59830..570b88e0b2018 100644
6101 +--- a/sound/isa/Kconfig
6102 ++++ b/sound/isa/Kconfig
6103 +@@ -22,7 +22,7 @@ config SND_SB16_DSP
6104 + menuconfig SND_ISA
6105 + bool "ISA sound devices"
6106 + depends on ISA || COMPILE_TEST
6107 +- depends on ISA_DMA_API
6108 ++ depends on ISA_DMA_API && !M68K
6109 + default y
6110 + help
6111 + Support for sound devices connected via the ISA bus.
6112 +diff --git a/sound/isa/gus/gus_dma.c b/sound/isa/gus/gus_dma.c
6113 +index a1c770d826dda..6d664dd8dde0b 100644
6114 +--- a/sound/isa/gus/gus_dma.c
6115 ++++ b/sound/isa/gus/gus_dma.c
6116 +@@ -126,6 +126,8 @@ static void snd_gf1_dma_interrupt(struct snd_gus_card * gus)
6117 + }
6118 + block = snd_gf1_dma_next_block(gus);
6119 + spin_unlock(&gus->dma_lock);
6120 ++ if (!block)
6121 ++ return;
6122 + snd_gf1_dma_program(gus, block->addr, block->buf_addr, block->count, (unsigned short) block->cmd);
6123 + kfree(block);
6124 + #if 0
6125 +diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
6126 +index 93bc9bef7641f..41ce125971777 100644
6127 +--- a/sound/pci/Kconfig
6128 ++++ b/sound/pci/Kconfig
6129 +@@ -279,6 +279,7 @@ config SND_CS46XX_NEW_DSP
6130 + config SND_CS5530
6131 + tristate "CS5530 Audio"
6132 + depends on ISA_DMA_API && (X86_32 || COMPILE_TEST)
6133 ++ depends on !M68K
6134 + select SND_SB16_DSP
6135 + help
6136 + Say Y here to include support for audio on Cyrix/NatSemi CS5530 chips.
6137 +diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
6138 +index 15bd8335f6678..c8ccfa2fff848 100644
6139 +--- a/sound/soc/codecs/nau8824.c
6140 ++++ b/sound/soc/codecs/nau8824.c
6141 +@@ -8,6 +8,7 @@
6142 +
6143 + #include <linux/module.h>
6144 + #include <linux/delay.h>
6145 ++#include <linux/dmi.h>
6146 + #include <linux/init.h>
6147 + #include <linux/i2c.h>
6148 + #include <linux/regmap.h>
6149 +@@ -27,6 +28,12 @@
6150 +
6151 + #include "nau8824.h"
6152 +
6153 ++#define NAU8824_JD_ACTIVE_HIGH BIT(0)
6154 ++
6155 ++static int nau8824_quirk;
6156 ++static int quirk_override = -1;
6157 ++module_param_named(quirk, quirk_override, uint, 0444);
6158 ++MODULE_PARM_DESC(quirk, "Board-specific quirk override");
6159 +
6160 + static int nau8824_config_sysclk(struct nau8824 *nau8824,
6161 + int clk_id, unsigned int freq);
6162 +@@ -1875,6 +1882,34 @@ static int nau8824_read_device_properties(struct device *dev,
6163 + return 0;
6164 + }
6165 +
6166 ++/* Please keep this list alphabetically sorted */
6167 ++static const struct dmi_system_id nau8824_quirk_table[] = {
6168 ++ {
6169 ++ /* Cyberbook T116 rugged tablet */
6170 ++ .matches = {
6171 ++ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
6172 ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
6173 ++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "20170531"),
6174 ++ },
6175 ++ .driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
6176 ++ },
6177 ++ {}
6178 ++};
6179 ++
6180 ++static void nau8824_check_quirks(void)
6181 ++{
6182 ++ const struct dmi_system_id *dmi_id;
6183 ++
6184 ++ if (quirk_override != -1) {
6185 ++ nau8824_quirk = quirk_override;
6186 ++ return;
6187 ++ }
6188 ++
6189 ++ dmi_id = dmi_first_match(nau8824_quirk_table);
6190 ++ if (dmi_id)
6191 ++ nau8824_quirk = (unsigned long)dmi_id->driver_data;
6192 ++}
6193 ++
6194 + static int nau8824_i2c_probe(struct i2c_client *i2c,
6195 + const struct i2c_device_id *id)
6196 + {
6197 +@@ -1899,6 +1934,11 @@ static int nau8824_i2c_probe(struct i2c_client *i2c,
6198 + nau8824->irq = i2c->irq;
6199 + sema_init(&nau8824->jd_sem, 1);
6200 +
6201 ++ nau8824_check_quirks();
6202 ++
6203 ++ if (nau8824_quirk & NAU8824_JD_ACTIVE_HIGH)
6204 ++ nau8824->jkdet_polarity = 0;
6205 ++
6206 + nau8824_print_device_properties(nau8824);
6207 +
6208 + ret = regmap_read(nau8824->regmap, NAU8824_REG_I2C_DEVICE_ID, &value);
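
The quirk handling above follows a common ASoC pattern: an explicit module parameter overrides any DMI-derived default, and only the first DMI match supplies driver_data. Presumably the override can be given at load time, along the lines of "modprobe snd-soc-nau8824 quirk=1" (module name assumed here). A small user-space model of the precedence, with illustrative names:

#include <stdio.h>

#define JD_ACTIVE_HIGH	(1 << 0)

static int resolve_quirk(int override, long dmi_data, int dmi_matched)
{
	if (override != -1)
		return override;	/* user asked for it explicitly */
	if (dmi_matched)
		return (int)dmi_data;	/* board-specific default via DMI */
	return 0;			/* no quirks */
}

int main(void)
{
	/* Cyberbook T116 with no override: the DMI data applies */
	printf("%d\n", resolve_quirk(-1, JD_ACTIVE_HIGH, 1));
	/* an explicit quirk=0 beats the DMI match */
	printf("%d\n", resolve_quirk(0, JD_ACTIVE_HIGH, 1));
	return 0;
}
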
6209 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
6210 +index 08960167d34f5..2924d89bf0daf 100644
6211 +--- a/sound/soc/soc-dapm.c
6212 ++++ b/sound/soc/soc-dapm.c
6213 +@@ -2555,8 +2555,13 @@ static struct snd_soc_dapm_widget *dapm_find_widget(
6214 + return NULL;
6215 + }
6216 +
6217 +-static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
6218 +- const char *pin, int status)
6219 ++/*
6220 ++ * set the DAPM pin status:
6221 ++ * returns 1 when the value has been updated, 0 when unchanged, or a negative
6222 ++ * error code; called from kcontrol put callback
6223 ++ */
6224 ++static int __snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
6225 ++ const char *pin, int status)
6226 + {
6227 + struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
6228 + int ret = 0;
6229 +@@ -2582,6 +2587,18 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
6230 + return ret;
6231 + }
6232 +
6233 ++/*
6234 ++ * similar to __snd_soc_dapm_set_pin(), but returns 0 when successful;
6235 ++ * called from several API functions below
6236 ++ */
6237 ++static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
6238 ++ const char *pin, int status)
6239 ++{
6240 ++ int ret = __snd_soc_dapm_set_pin(dapm, pin, status);
6241 ++
6242 ++ return ret < 0 ? ret : 0;
6243 ++}
6244 ++
6245 + /**
6246 + * snd_soc_dapm_sync_unlocked - scan and power dapm paths
6247 + * @dapm: DAPM context
6248 +@@ -3586,10 +3603,10 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
6249 + const char *pin = (const char *)kcontrol->private_value;
6250 + int ret;
6251 +
6252 +- if (ucontrol->value.integer.value[0])
6253 +- ret = snd_soc_dapm_enable_pin(&card->dapm, pin);
6254 +- else
6255 +- ret = snd_soc_dapm_disable_pin(&card->dapm, pin);
6256 ++ mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
6257 ++ ret = __snd_soc_dapm_set_pin(&card->dapm, pin,
6258 ++ !!ucontrol->value.integer.value[0]);
6259 ++ mutex_unlock(&card->dapm_mutex);
6260 +
6261 + snd_soc_dapm_sync(&card->dapm);
6262 + return ret;
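
The split above restores the ALSA kcontrol convention for put() callbacks: return 1 when the value changed (so userspace receives a change notification), 0 when unchanged, and a negative errno on failure, while the public snd_soc_dapm_*_pin() API keeps its 0-on-success contract through the wrapper. A minimal model of that convention, names illustrative:

#include <stdio.h>

static int pin_connected;

static int set_pin(int status)	/* like __snd_soc_dapm_set_pin() */
{
	if (pin_connected == status)
		return 0;	/* unchanged: no event */
	pin_connected = status;
	return 1;		/* changed: caller sends a notification */
}

int main(void)
{
	printf("%d\n", set_pin(1));	/* 1: first enable is a change */
	printf("%d\n", set_pin(1));	/* 0: enabling again is a no-op */
	return 0;
}
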
6263 +diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
6264 +index c6cb8c212eca5..ef316311e959a 100644
6265 +--- a/sound/soc/sof/intel/hda-dai.c
6266 ++++ b/sound/soc/sof/intel/hda-dai.c
6267 +@@ -68,6 +68,7 @@ static struct hdac_ext_stream *
6268 + return NULL;
6269 + }
6270 +
6271 ++ spin_lock_irq(&bus->reg_lock);
6272 + list_for_each_entry(stream, &bus->stream_list, list) {
6273 + struct hdac_ext_stream *hstream =
6274 + stream_to_hdac_ext_stream(stream);
6275 +@@ -107,12 +108,12 @@ static struct hdac_ext_stream *
6276 + * is updated in snd_hdac_ext_stream_decouple().
6277 + */
6278 + if (!res->decoupled)
6279 +- snd_hdac_ext_stream_decouple(bus, res, true);
6280 +- spin_lock_irq(&bus->reg_lock);
6281 ++ snd_hdac_ext_stream_decouple_locked(bus, res, true);
6282 ++
6283 + res->link_locked = 1;
6284 + res->link_substream = substream;
6285 +- spin_unlock_irq(&bus->reg_lock);
6286 + }
6287 ++ spin_unlock_irq(&bus->reg_lock);
6288 +
6289 + return res;
6290 + }
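
Moving spin_lock_irq() above the list walk turns the lookup and the claim of a stream into a single critical section; before, two contexts could scan bus->stream_list concurrently, both find the same free stream, and both mark it link_locked. A user-space pthread sketch of the check-then-act hazard being closed (names illustrative, not the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static int link_locked;

static int claim_stream(void)
{
	int got = 0;

	pthread_mutex_lock(&reg_lock);	/* lock before scanning the list */
	if (!link_locked) {		/* check ... */
		link_locked = 1;	/* ... and act, atomically */
		got = 1;
	}
	pthread_mutex_unlock(&reg_lock);
	return got;
}

int main(void)
{
	printf("%d %d\n", claim_stream(), claim_stream());	/* 1 0 */
	return 0;
}
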
6291 +diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
6292 +index bb25d8beb3b85..159bc89e6a79a 100644
6293 +--- a/tools/perf/bench/futex-lock-pi.c
6294 ++++ b/tools/perf/bench/futex-lock-pi.c
6295 +@@ -226,6 +226,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
6296 + print_summary();
6297 +
6298 + free(worker);
6299 ++ perf_cpu_map__put(cpu);
6300 + return ret;
6301 + err:
6302 + usage_with_options(bench_futex_lock_pi_usage, options);
6303 +diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
6304 +index 7a15c2e610228..105b36cdc42d3 100644
6305 +--- a/tools/perf/bench/futex-requeue.c
6306 ++++ b/tools/perf/bench/futex-requeue.c
6307 +@@ -216,6 +216,7 @@ int bench_futex_requeue(int argc, const char **argv)
6308 + print_summary();
6309 +
6310 + free(worker);
6311 ++ perf_cpu_map__put(cpu);
6312 + return ret;
6313 + err:
6314 + usage_with_options(bench_futex_requeue_usage, options);
6315 +diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
6316 +index cd2b81a845acb..a129c94eb3fe1 100644
6317 +--- a/tools/perf/bench/futex-wake-parallel.c
6318 ++++ b/tools/perf/bench/futex-wake-parallel.c
6319 +@@ -320,6 +320,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
6320 + print_summary();
6321 +
6322 + free(blocked_worker);
6323 ++ perf_cpu_map__put(cpu);
6324 + return ret;
6325 + }
6326 + #endif /* HAVE_PTHREAD_BARRIER */
6327 +diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
6328 +index 2dfcef3e371e4..507ff533612c6 100644
6329 +--- a/tools/perf/bench/futex-wake.c
6330 ++++ b/tools/perf/bench/futex-wake.c
6331 +@@ -210,5 +210,6 @@ int bench_futex_wake(int argc, const char **argv)
6332 + print_summary();
6333 +
6334 + free(worker);
6335 ++ perf_cpu_map__put(cpu);
6336 + return ret;
6337 + }
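
All four futex benchmarks take a reference on the CPU map while parsing options and, until this patch, returned from the success path without dropping it. The hunks enforce the usual refcounting discipline: every get is paired with a put on every exit path. A toy model follows; the real perf_cpu_map is refcounted inside libperf, and these names only mirror it loosely:

#include <stdio.h>
#include <stdlib.h>

struct cpu_map { int refcnt; };

static struct cpu_map *cpu_map_new(void)
{
	struct cpu_map *m = malloc(sizeof(*m));

	m->refcnt = 1;
	return m;
}

static void cpu_map_put(struct cpu_map *m)
{
	if (m && --m->refcnt == 0) {
		free(m);
		puts("map freed");	/* without the put, this leaks */
	}
}

int main(void)
{
	struct cpu_map *cpu = cpu_map_new();

	/* ... benchmark body would run here ... */
	cpu_map_put(cpu);		/* the line each hunk adds */
	return 0;
}
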
6338 +diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
6339 +index 045723b3d9928..c62af807198de 100755
6340 +--- a/tools/perf/tests/shell/record+zstd_comp_decomp.sh
6341 ++++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
6342 +@@ -12,7 +12,7 @@ skip_if_no_z_record() {
6343 +
6344 + collect_z_record() {
6345 + echo "Collecting compressed record file:"
6346 +- [[ "$(uname -m)" != s390x ]] && gflag='-g'
6347 ++ [ "$(uname -m)" != s390x ] && gflag='-g'
6348 + $perf_tool record -o $trace_file $gflag -z -F 5000 -- \
6349 + dd count=500 if=/dev/urandom of=/dev/null
6350 + }
6351 +diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
6352 +index c8101575dbf45..4eb02762104ba 100644
6353 +--- a/tools/perf/util/bpf-event.c
6354 ++++ b/tools/perf/util/bpf-event.c
6355 +@@ -109,7 +109,11 @@ static int perf_env__fetch_btf(struct perf_env *env,
6356 + node->data_size = data_size;
6357 + memcpy(node->data, data, data_size);
6358 +
6359 +- perf_env__insert_btf(env, node);
6360 ++ if (!perf_env__insert_btf(env, node)) {
6361 ++ /* Insertion failed because of a duplicate. */
6362 ++ free(node);
6363 ++ return -1;
6364 ++ }
6365 + return 0;
6366 + }
6367 +
6368 +diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
6369 +index f0dceb527ca38..d81ed1bc14bdc 100644
6370 +--- a/tools/perf/util/env.c
6371 ++++ b/tools/perf/util/env.c
6372 +@@ -71,12 +71,13 @@ out:
6373 + return node;
6374 + }
6375 +
6376 +-void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
6377 ++bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
6378 + {
6379 + struct rb_node *parent = NULL;
6380 + __u32 btf_id = btf_node->id;
6381 + struct btf_node *node;
6382 + struct rb_node **p;
6383 ++ bool ret = true;
6384 +
6385 + down_write(&env->bpf_progs.lock);
6386 + p = &env->bpf_progs.btfs.rb_node;
6387 +@@ -90,6 +91,7 @@ void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
6388 + p = &(*p)->rb_right;
6389 + } else {
6390 + pr_debug("duplicated btf %u\n", btf_id);
6391 ++ ret = false;
6392 + goto out;
6393 + }
6394 + }
6395 +@@ -99,6 +101,7 @@ void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
6396 + env->bpf_progs.btfs_cnt++;
6397 + out:
6398 + up_write(&env->bpf_progs.lock);
6399 ++ return ret;
6400 + }
6401 +
6402 + struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
6403 +diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
6404 +index a129726520064..01378a955dd5e 100644
6405 +--- a/tools/perf/util/env.h
6406 ++++ b/tools/perf/util/env.h
6407 +@@ -143,7 +143,7 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
6408 + struct bpf_prog_info_node *info_node);
6409 + struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
6410 + __u32 prog_id);
6411 +-void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
6412 ++bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
6413 + struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
6414 +
6415 + int perf_env__numa_node(struct perf_env *env, int cpu);
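
Together the env.c, env.h and bpf-event.c hunks fix a silent leak: perf_env__insert_btf() used to drop duplicate BTF nodes without telling the caller, which had already handed over ownership. Returning bool makes the ownership transfer explicit, and the caller now frees a node the tree refused. A compact model of that contract, with a linked list standing in for the rb-tree and illustrative names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node { unsigned id; struct node *next; };

static struct node *list;

static bool insert_node(struct node *n)	/* like perf_env__insert_btf() */
{
	for (struct node *p = list; p; p = p->next)
		if (p->id == n->id)
			return false;	/* duplicate: not inserted */
	n->next = list;
	list = n;
	return true;			/* tree took ownership */
}

int main(void)
{
	struct node *a = calloc(1, sizeof(*a));
	struct node *b = calloc(1, sizeof(*b));	/* same id: a duplicate */

	insert_node(a);
	if (!insert_node(b)) {
		free(b);		/* the fix in bpf-event.c */
		puts("duplicate freed");
	}
	return 0;
}
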