From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.14 commit in: /
Date: Sat, 18 Sep 2021 16:06:46
Message-Id: 1631981185.01156e7380d93bbadbe80280f8d16087e58272e3.mpagano@gentoo
commit: 01156e7380d93bbadbe80280f8d16087e58272e3
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 18 16:06:25 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 18 16:06:25 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=01156e73

Linux patch 5.14.6

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1005_linux-5.14.6.patch | 19126 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 19130 insertions(+)
diff --git a/0000_README b/0000_README
index 3b101ac..df8a957 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch: 1004_linux-5.14.5.patch
From: http://www.kernel.org
Desc: Linux 5.14.5

+Patch: 1005_linux-5.14.6.patch
+From: http://www.kernel.org
+Desc: Linux 5.14.6
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-5.14.6.patch b/1005_linux-5.14.6.patch
new file mode 100644
index 0000000..657c7b3
--- /dev/null
+++ b/1005_linux-5.14.6.patch
@@ -0,0 +1,19126 @@
+diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
+index 9c2be821c2254..922c23bb4372a 100644
+--- a/Documentation/admin-guide/devices.txt
++++ b/Documentation/admin-guide/devices.txt
+@@ -2993,10 +2993,10 @@
+ 65 = /dev/infiniband/issm1 Second InfiniBand IsSM device
+ ...
+ 127 = /dev/infiniband/issm63 63rd InfiniBand IsSM device
+- 128 = /dev/infiniband/uverbs0 First InfiniBand verbs device
+- 129 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
++ 192 = /dev/infiniband/uverbs0 First InfiniBand verbs device
++ 193 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
+ ...
+- 159 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
++ 223 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
+
+ 232 char Biometric Devices
+ 0 = /dev/biometric/sensor0/fingerprint first fingerprint sensor on first device
+diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml b/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
+index 4cb75a5f2e3a2..cd62968426fb5 100644
+--- a/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
++++ b/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
+@@ -33,8 +33,11 @@ properties:
+
+ backlight: true
+
++ spi-cpha: true
++
++ spi-cpol: true
++
+ spi-max-frequency:
+- $ref: /schemas/types.yaml#/definitions/uint32
+ description: inherited as a SPI client node, the datasheet specifies
+ maximum 300 ns minimum cycle which gives around 3 MHz max frequency
+ maximum: 3000000
+@@ -44,6 +47,9 @@ properties:
+ required:
+ - compatible
+ - reg
++ - spi-cpha
++ - spi-cpol
++ - port
+
+ additionalProperties: false
+
+@@ -52,15 +58,23 @@ examples:
+ #include <dt-bindings/gpio/gpio.h>
+
+ spi {
++ compatible = "spi-gpio";
++ sck-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
++ miso-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
++ mosi-gpios = <&gpio 2 GPIO_ACTIVE_HIGH>;
++ cs-gpios = <&gpio 3 GPIO_ACTIVE_HIGH>;
++ num-chipselects = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "samsung,lms397kf04";
+ spi-max-frequency = <3000000>;
++ spi-cpha;
++ spi-cpol;
+ reg = <0>;
+ vci-supply = <&lcd_3v0_reg>;
+ vccio-supply = <&lcd_1v8_reg>;
+- reset-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
++ reset-gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
+ backlight = <&ktd259>;
+
+ port {
+diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
+index 38dc56a577604..ecec514b31550 100644
+--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
+@@ -43,19 +43,19 @@ group emmc_nb
+
+ group pwm0
+ - pin 11 (GPIO1-11)
+- - functions pwm, gpio
++ - functions pwm, led, gpio
+
+ group pwm1
+ - pin 12
+- - functions pwm, gpio
++ - functions pwm, led, gpio
+
+ group pwm2
+ - pin 13
+- - functions pwm, gpio
++ - functions pwm, led, gpio
+
+ group pwm3
+ - pin 14
+- - functions pwm, gpio
++ - functions pwm, led, gpio
+
+ group pmic1
+ - pin 7
+diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
+index ff9e7cc97c65a..b5285599d9725 100644
+--- a/Documentation/filesystems/f2fs.rst
++++ b/Documentation/filesystems/f2fs.rst
+@@ -185,6 +185,7 @@ fault_type=%d Support configuring fault injection type, should be
+ FAULT_KVMALLOC 0x000000002
+ FAULT_PAGE_ALLOC 0x000000004
+ FAULT_PAGE_GET 0x000000008
++ FAULT_ALLOC_BIO 0x000000010 (obsolete)
+ FAULT_ALLOC_NID 0x000000020
+ FAULT_ORPHAN 0x000000040
+ FAULT_BLOCK 0x000000080
+diff --git a/Makefile b/Makefile
+index 0eaa5623f4060..f9c8bbf8cf71e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 14
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Opossums on Parade
+
+@@ -404,6 +404,11 @@ ifeq ($(ARCH),sparc64)
+ SRCARCH := sparc
+ endif
+
++# Additional ARCH settings for parisc
++ifeq ($(ARCH),parisc64)
++ SRCARCH := parisc
++endif
++
+ export cross_compiling :=
+ ifneq ($(SRCARCH),$(SUBARCH))
+ cross_compiling := 1
+@@ -803,6 +808,8 @@ else
+ # Disabled for clang while comment to attribute conversion happens and
+ # https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
+ KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough=5,)
++# gcc inanely warns about local variables called 'main'
++KBUILD_CFLAGS += -Wno-main
+ endif
+
+ # These warnings generated too much noise in a regular build.
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index 9d91ae1091b0b..91265e7ff672f 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -85,6 +85,8 @@ compress-$(CONFIG_KERNEL_LZ4) = lz4
+ libfdt_objs := fdt_rw.o fdt_ro.o fdt_wip.o fdt.o
+
+ ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
++CFLAGS_REMOVE_atags_to_fdt.o += -Wframe-larger-than=${CONFIG_FRAME_WARN}
++CFLAGS_atags_to_fdt.o += -Wframe-larger-than=1280
+ OBJS += $(libfdt_objs) atags_to_fdt.o
+ endif
+ ifeq ($(CONFIG_USE_OF),y)
+diff --git a/arch/arm/boot/dts/at91-kizbox3_common.dtsi b/arch/arm/boot/dts/at91-kizbox3_common.dtsi
+index c4b3750495da8..abe27adfa4d65 100644
+--- a/arch/arm/boot/dts/at91-kizbox3_common.dtsi
++++ b/arch/arm/boot/dts/at91-kizbox3_common.dtsi
+@@ -336,7 +336,7 @@
+ };
+
+ &shutdown_controller {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+ atmel,wakeup-rtc-timer;
+
+ input@0 {
+diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
+index ebbc9b23aef1c..b1068cca42287 100644
+--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
++++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
+@@ -662,7 +662,7 @@
+ };
+
+ &shutdown_controller {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+ status = "okay";
+
+ input@0 {
+diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+index a9e6fee55a2a8..8034e5dacc808 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+@@ -138,7 +138,7 @@
+ };
+
+ shdwc@f8048010 {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+ atmel,wakeup-rtc-timer;
+
+ input@0 {
+diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+index ff83967fd0082..c145c4e5ef582 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+@@ -205,7 +205,7 @@
+ };
+
+ &shutdown_controller {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+ atmel,wakeup-rtc-timer;
+
+ input@0 {
+diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+index bd64721fa23ca..34faca597c352 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+@@ -693,7 +693,7 @@
+ };
+
+ &shutdown_controller {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+ atmel,wakeup-rtc-timer;
+
+ input@0 {
+diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+index dfd150eb0fd86..3f972a4086c37 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+@@ -203,7 +203,7 @@
+ };
+
+ shdwc@f8048010 {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+
+ input@0 {
+ reg = <0>;
+diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+index 509c732a0d8b4..627b7bf88d83b 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+@@ -347,7 +347,7 @@
+ };
+
+ shdwc@f8048010 {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+ atmel,wakeup-rtc-timer;
+
+ input@0 {
+diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
+index 5a5fa6190a528..37d0cffea99c5 100644
+--- a/arch/arm/boot/dts/imx53-ppd.dts
++++ b/arch/arm/boot/dts/imx53-ppd.dts
+@@ -70,6 +70,12 @@
+ clock-frequency = <11289600>;
+ };
+
++ achc_24M: achc-clock {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24000000>;
++ };
++
+ sgtlsound: sound {
+ compatible = "fsl,imx53-cpuvo-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+@@ -314,16 +320,13 @@
+ &gpio4 12 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+- spidev0: spi@0 {
+- compatible = "ge,achc";
+- reg = <0>;
+- spi-max-frequency = <1000000>;
+- };
+-
+- spidev1: spi@1 {
+- compatible = "ge,achc";
+- reg = <1>;
+- spi-max-frequency = <1000000>;
++ spidev0: spi@1 {
++ compatible = "ge,achc", "nxp,kinetis-k20";
++ reg = <1>, <0>;
++ vdd-supply = <&reg_3v3>;
++ vdda-supply = <&reg_3v3>;
++ clocks = <&achc_24M>;
++ reset-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
+ };
+
+ gpioxra0: gpio@2 {
+diff --git a/arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts b/arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts
+index 5b8dcc19deeef..b9a5268fe7ad6 100644
+--- a/arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts
++++ b/arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts
+@@ -124,20 +124,20 @@
+ */
+ interrupt-map =
+ /* IDSEL 1 */
+- <0x0800 0 0 1 &gpio0 11 3>, /* INT A on slot 1 is irq 11 */
+- <0x0800 0 0 2 &gpio0 10 3>, /* INT B on slot 1 is irq 10 */
+- <0x0800 0 0 3 &gpio0 9 3>, /* INT C on slot 1 is irq 9 */
+- <0x0800 0 0 4 &gpio0 8 3>, /* INT D on slot 1 is irq 8 */
++ <0x0800 0 0 1 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 1 is irq 11 */
++ <0x0800 0 0 2 &gpio0 10 IRQ_TYPE_LEVEL_LOW>, /* INT B on slot 1 is irq 10 */
++ <0x0800 0 0 3 &gpio0 9 IRQ_TYPE_LEVEL_LOW>, /* INT C on slot 1 is irq 9 */
++ <0x0800 0 0 4 &gpio0 8 IRQ_TYPE_LEVEL_LOW>, /* INT D on slot 1 is irq 8 */
+ /* IDSEL 2 */
+- <0x1000 0 0 1 &gpio0 10 3>, /* INT A on slot 2 is irq 10 */
+- <0x1000 0 0 2 &gpio0 9 3>, /* INT B on slot 2 is irq 9 */
+- <0x1000 0 0 3 &gpio0 11 3>, /* INT C on slot 2 is irq 11 */
+- <0x1000 0 0 4 &gpio0 8 3>, /* INT D on slot 2 is irq 8 */
++ <0x1000 0 0 1 &gpio0 10 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 2 is irq 10 */
++ <0x1000 0 0 2 &gpio0 9 IRQ_TYPE_LEVEL_LOW>, /* INT B on slot 2 is irq 9 */
++ <0x1000 0 0 3 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT C on slot 2 is irq 11 */
++ <0x1000 0 0 4 &gpio0 8 IRQ_TYPE_LEVEL_LOW>, /* INT D on slot 2 is irq 8 */
+ /* IDSEL 3 */
+- <0x1800 0 0 1 &gpio0 9 3>, /* INT A on slot 3 is irq 9 */
+- <0x1800 0 0 2 &gpio0 11 3>, /* INT B on slot 3 is irq 11 */
+- <0x1800 0 0 3 &gpio0 10 3>, /* INT C on slot 3 is irq 10 */
+- <0x1800 0 0 4 &gpio0 8 3>; /* INT D on slot 3 is irq 8 */
++ <0x1800 0 0 1 &gpio0 9 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 3 is irq 9 */
++ <0x1800 0 0 2 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT B on slot 3 is irq 11 */
++ <0x1800 0 0 3 &gpio0 10 IRQ_TYPE_LEVEL_LOW>, /* INT C on slot 3 is irq 10 */
++ <0x1800 0 0 4 &gpio0 8 IRQ_TYPE_LEVEL_LOW>; /* INT D on slot 3 is irq 8 */
+ };
+
+ ethernet@c8009000 {
+diff --git a/arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts b/arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts
+index 60a1228a970fc..f5fe309f7762d 100644
+--- a/arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts
++++ b/arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts
+@@ -108,35 +108,35 @@
+ */
+ interrupt-map =
+ /* IDSEL 1 */
+- <0x0800 0 0 1 &gpio0 11 3>, /* INT A on slot 1 is irq 11 */
+- <0x0800 0 0 2 &gpio0 10 3>, /* INT B on slot 1 is irq 10 */
+- <0x0800 0 0 3 &gpio0 9 3>, /* INT C on slot 1 is irq 9 */
+- <0x0800 0 0 4 &gpio0 8 3>, /* INT D on slot 1 is irq 8 */
++ <0x0800 0 0 1 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 1 is irq 11 */
++ <0x0800 0 0 2 &gpio0 10 IRQ_TYPE_LEVEL_LOW>, /* INT B on slot 1 is irq 10 */
++ <0x0800 0 0 3 &gpio0 9 IRQ_TYPE_LEVEL_LOW>, /* INT C on slot 1 is irq 9 */
++ <0x0800 0 0 4 &gpio0 8 IRQ_TYPE_LEVEL_LOW>, /* INT D on slot 1 is irq 8 */
+ /* IDSEL 2 */
+- <0x1000 0 0 1 &gpio0 10 3>, /* INT A on slot 2 is irq 10 */
+- <0x1000 0 0 2 &gpio0 9 3>, /* INT B on slot 2 is irq 9 */
+- <0x1000 0 0 3 &gpio0 8 3>, /* INT C on slot 2 is irq 8 */
+- <0x1000 0 0 4 &gpio0 11 3>, /* INT D on slot 2 is irq 11 */
++ <0x1000 0 0 1 &gpio0 10 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 2 is irq 10 */
++ <0x1000 0 0 2 &gpio0 9 IRQ_TYPE_LEVEL_LOW>, /* INT B on slot 2 is irq 9 */
++ <0x1000 0 0 3 &gpio0 8 IRQ_TYPE_LEVEL_LOW>, /* INT C on slot 2 is irq 8 */
++ <0x1000 0 0 4 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT D on slot 2 is irq 11 */
+ /* IDSEL 3 */
+- <0x1800 0 0 1 &gpio0 9 3>, /* INT A on slot 3 is irq 9 */
+- <0x1800 0 0 2 &gpio0 8 3>, /* INT B on slot 3 is irq 8 */
+- <0x1800 0 0 3 &gpio0 11 3>, /* INT C on slot 3 is irq 11 */
+- <0x1800 0 0 4 &gpio0 10 3>, /* INT D on slot 3 is irq 10 */
++ <0x1800 0 0 1 &gpio0 9 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 3 is irq 9 */
++ <0x1800 0 0 2 &gpio0 8 IRQ_TYPE_LEVEL_LOW>, /* INT B on slot 3 is irq 8 */
++ <0x1800 0 0 3 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT C on slot 3 is irq 11 */
++ <0x1800 0 0 4 &gpio0 10 IRQ_TYPE_LEVEL_LOW>, /* INT D on slot 3 is irq 10 */
+ /* IDSEL 4 */
+- <0x2000 0 0 1 &gpio0 8 3>, /* INT A on slot 3 is irq 8 */
+- <0x2000 0 0 2 &gpio0 11 3>, /* INT B on slot 3 is irq 11 */
+- <0x2000 0 0 3 &gpio0 10 3>, /* INT C on slot 3 is irq 10 */
+- <0x2000 0 0 4 &gpio0 9 3>, /* INT D on slot 3 is irq 9 */
++ <0x2000 0 0 1 &gpio0 8 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 3 is irq 8 */
++ <0x2000 0 0 2 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT B on slot 3 is irq 11 */
++ <0x2000 0 0 3 &gpio0 10 IRQ_TYPE_LEVEL_LOW>, /* INT C on slot 3 is irq 10 */
++ <0x2000 0 0 4 &gpio0 9 IRQ_TYPE_LEVEL_LOW>, /* INT D on slot 3 is irq 9 */
+ /* IDSEL 6 */
+- <0x3000 0 0 1 &gpio0 10 3>, /* INT A on slot 3 is irq 10 */
+- <0x3000 0 0 2 &gpio0 9 3>, /* INT B on slot 3 is irq 9 */
+- <0x3000 0 0 3 &gpio0 8 3>, /* INT C on slot 3 is irq 8 */
+- <0x3000 0 0 4 &gpio0 11 3>, /* INT D on slot 3 is irq 11 */
++ <0x3000 0 0 1 &gpio0 10 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 3 is irq 10 */
++ <0x3000 0 0 2 &gpio0 9 IRQ_TYPE_LEVEL_LOW>, /* INT B on slot 3 is irq 9 */
++ <0x3000 0 0 3 &gpio0 8 IRQ_TYPE_LEVEL_LOW>, /* INT C on slot 3 is irq 8 */
++ <0x3000 0 0 4 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT D on slot 3 is irq 11 */
+ /* IDSEL 15 */
+- <0x7800 0 0 1 &gpio0 8 3>, /* INT A on slot 3 is irq 8 */
+- <0x7800 0 0 2 &gpio0 11 3>, /* INT B on slot 3 is irq 11 */
+- <0x7800 0 0 3 &gpio0 10 3>, /* INT C on slot 3 is irq 10 */
+- <0x7800 0 0 4 &gpio0 9 3>; /* INT D on slot 3 is irq 9 */
++ <0x7800 0 0 1 &gpio0 8 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 3 is irq 8 */
++ <0x7800 0 0 2 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT B on slot 3 is irq 11 */
++ <0x7800 0 0 3 &gpio0 10 IRQ_TYPE_LEVEL_LOW>, /* INT C on slot 3 is irq 10 */
++ <0x7800 0 0 4 &gpio0 9 IRQ_TYPE_LEVEL_LOW>; /* INT D on slot 3 is irq 9 */
+ };
+
+ ethernet@c800a000 {
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index 2687c4e890ba8..e36d590e83732 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -1262,9 +1262,9 @@
+ <&mmcc DSI1_BYTE_CLK>,
+ <&mmcc DSI_PIXEL_CLK>,
+ <&mmcc DSI1_ESC_CLK>;
+- clock-names = "iface_clk", "bus_clk", "core_mmss_clk",
+- "src_clk", "byte_clk", "pixel_clk",
+- "core_clk";
++ clock-names = "iface", "bus", "core_mmss",
++ "src", "byte", "pixel",
++ "core";
+
+ assigned-clocks = <&mmcc DSI1_BYTE_SRC>,
+ <&mmcc DSI1_ESC_SRC>,
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+index 6cf1c8b4c6e28..c9577ba2973d3 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+@@ -172,15 +172,15 @@
+ sgtl5000_tx_endpoint: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&sai2a_endpoint>;
+- frame-master;
+- bitclock-master;
++ frame-master = <&sgtl5000_tx_endpoint>;
++ bitclock-master = <&sgtl5000_tx_endpoint>;
+ };
+
+ sgtl5000_rx_endpoint: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&sai2b_endpoint>;
+- frame-master;
+- bitclock-master;
++ frame-master = <&sgtl5000_rx_endpoint>;
++ bitclock-master = <&sgtl5000_rx_endpoint>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+index 64dca5b7f748d..6885948f3024e 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+@@ -220,8 +220,8 @@
+ &i2c4 {
+ hdmi-transmitter@3d {
+ compatible = "adi,adv7513";
+- reg = <0x3d>, <0x2d>, <0x4d>, <0x5d>;
+- reg-names = "main", "cec", "edid", "packet";
++ reg = <0x3d>, <0x4d>, <0x2d>, <0x5d>;
++ reg-names = "main", "edid", "cec", "packet";
+ clocks = <&cec_clock>;
+ clock-names = "cec";
+
+@@ -239,8 +239,6 @@
+ adi,input-depth = <8>;
+ adi,input-colorspace = "rgb";
+ adi,input-clock = "1x";
+- adi,input-style = <1>;
+- adi,input-justification = "evenly";
+
+ ports {
+ #address-cells = <1>;
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+index 59f18846cf5d0..586aac8a998c0 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+@@ -220,15 +220,15 @@
+ cs42l51_tx_endpoint: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&sai2a_endpoint>;
+- frame-master;
+- bitclock-master;
++ frame-master = <&cs42l51_tx_endpoint>;
++ bitclock-master = <&cs42l51_tx_endpoint>;
+ };
+
+ cs42l51_rx_endpoint: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&sai2b_endpoint>;
+- frame-master;
+- bitclock-master;
++ frame-master = <&cs42l51_rx_endpoint>;
++ bitclock-master = <&cs42l51_rx_endpoint>;
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
+index 1976c383912aa..05bd0add258c6 100644
+--- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
++++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
+@@ -719,7 +719,6 @@
+ nvidia,xcvr-setup-use-fuses;
+ nvidia,xcvr-lsfslew = <2>;
+ nvidia,xcvr-lsrslew = <2>;
+- vbus-supply = <&vdd_vbus1>;
+ };
+
+ usb@c5008000 {
+@@ -731,7 +730,7 @@
+ nvidia,xcvr-setup-use-fuses;
+ nvidia,xcvr-lsfslew = <2>;
+ nvidia,xcvr-lsrslew = <2>;
+- vbus-supply = <&vdd_vbus3>;
++ vbus-supply = <&vdd_5v0_sys>;
+ };
+
+ brcm_wifi_pwrseq: wifi-pwrseq {
+@@ -991,28 +990,6 @@
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
+- vdd_vbus1: regulator@4 {
+- compatible = "regulator-fixed";
+- regulator-name = "vdd_usb1_vbus";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- regulator-always-on;
+- gpio = <&gpio TEGRA_GPIO(D, 0) GPIO_ACTIVE_HIGH>;
+- enable-active-high;
+- vin-supply = <&vdd_5v0_sys>;
+- };
+-
+- vdd_vbus3: regulator@5 {
+- compatible = "regulator-fixed";
+- regulator-name = "vdd_usb3_vbus";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- regulator-always-on;
+- gpio = <&gpio TEGRA_GPIO(D, 3) GPIO_ACTIVE_HIGH>;
+- enable-active-high;
+- vin-supply = <&vdd_5v0_sys>;
+- };
+-
+ sound {
+ compatible = "nvidia,tegra-audio-wm8903-picasso",
+ "nvidia,tegra-audio-wm8903";
+diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
+index 95e6bccdb4f6e..dd4d506683de7 100644
+--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
++++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
+@@ -185,8 +185,9 @@
+ nvidia,pins = "ata", "atb", "atc", "atd", "ate",
+ "cdev1", "cdev2", "dap1", "dtb", "gma",
+ "gmb", "gmc", "gmd", "gme", "gpu7",
+- "gpv", "i2cp", "pta", "rm", "slxa",
+- "slxk", "spia", "spib", "uac";
++ "gpv", "i2cp", "irrx", "irtx", "pta",
++ "rm", "slxa", "slxk", "spia", "spib",
++ "uac";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+@@ -211,7 +212,7 @@
+ conf_ddc {
+ nvidia,pins = "ddc", "dta", "dtd", "kbca",
+ "kbcb", "kbcc", "kbcd", "kbce", "kbcf",
+- "sdc";
++ "sdc", "uad", "uca";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+@@ -221,10 +222,9 @@
+ "lvp0", "owc", "sdb";
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ };
+- conf_irrx {
+- nvidia,pins = "irrx", "irtx", "sdd", "spic",
+- "spie", "spih", "uaa", "uab", "uad",
+- "uca", "ucb";
++ conf_sdd {
++ nvidia,pins = "sdd", "spic", "spie", "spih",
++ "uaa", "uab", "ucb";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ };
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
+index be81330db14f6..02641191682e0 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
+@@ -32,14 +32,14 @@
+ };
+ };
+
+- reg_vcc3v3: vcc3v3 {
++ reg_vcc3v3: regulator-vcc3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+- reg_vdd_cpu_gpu: vdd-cpu-gpu {
++ reg_vdd_cpu_gpu: regulator-vdd-cpu-gpu {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-cpu-gpu";
+ regulator-min-microvolt = <1135000>;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
+index db3d303093f61..6d22efbd645cb 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
+@@ -83,15 +83,9 @@
+ };
+
+ eeprom@52 {
+- compatible = "atmel,24c512";
++ compatible = "onnn,cat24c04", "atmel,24c04";
+ reg = <0x52>;
+ };
+-
+- eeprom@53 {
+- compatible = "atmel,24c512";
+- reg = <0x53>;
+- };
+-
+ };
+ };
+ };
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+index 60acdf0b689ee..7025aad8ae897 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+@@ -59,14 +59,9 @@
+ };
+
+ eeprom@52 {
+- compatible = "atmel,24c512";
++ compatible = "onnn,cat24c05", "atmel,24c04";
+ reg = <0x52>;
+ };
+-
+- eeprom@53 {
+- compatible = "atmel,24c512";
+- reg = <0x53>;
+- };
+ };
+
+ &i2c3 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi
+index c769fadbd008f..00f86cada30d2 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi
+@@ -278,70 +278,86 @@
+
+ pmic@69 {
+ compatible = "mps,mp5416";
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_pmic>;
+ reg = <0x69>;
+
+ regulators {
++ /* vdd_0p95: DRAM/GPU/VPU */
+ buck1 {
+- regulator-name = "vdd_0p95";
+- regulator-min-microvolt = <805000>;
++ regulator-name = "buck1";
++ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1000000>;
+- regulator-max-microamp = <2500000>;
++ regulator-min-microamp = <3800000>;
++ regulator-max-microamp = <6800000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+
++ /* vdd_soc */
+ buck2 {
+- regulator-name = "vdd_soc";
+- regulator-min-microvolt = <805000>;
++ regulator-name = "buck2";
++ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <900000>;
+- regulator-max-microamp = <1000000>;
++ regulator-min-microamp = <2200000>;
++ regulator-max-microamp = <5200000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+
++ /* vdd_arm */
+ buck3_reg: buck3 {
+- regulator-name = "vdd_arm";
+- regulator-min-microvolt = <805000>;
++ regulator-name = "buck3";
++ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1000000>;
+- regulator-max-microamp = <2200000>;
+- regulator-boot-on;
++ regulator-min-microamp = <3800000>;
++ regulator-max-microamp = <6800000>;
++ regulator-always-on;
+ };
+
++ /* vdd_1p8 */
+ buck4 {
+- regulator-name = "vdd_1p8";
++ regulator-name = "buck4";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+- regulator-max-microamp = <500000>;
++ regulator-min-microamp = <2200000>;
++ regulator-max-microamp = <5200000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+
++ /* nvcc_snvs_1p8 */
+ ldo1 {
+- regulator-name = "nvcc_snvs_1p8";
++ regulator-name = "ldo1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+- regulator-max-microamp = <300000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+
++ /* vdd_snvs_0p8 */
+ ldo2 {
+- regulator-name = "vdd_snvs_0p8";
++ regulator-name = "ldo2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+
++ /* vdd_0p9 */
+ ldo3 {
+- regulator-name = "vdd_0p95";
+- regulator-min-microvolt = <800000>;
+- regulator-max-microvolt = <800000>;
++ regulator-name = "ldo3";
++ regulator-min-microvolt = <900000>;
++ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+
++ /* vdd_1p8 */
+ ldo4 {
+- regulator-name = "vdd_1p8";
++ regulator-name = "ldo4";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+ };
+ };
+@@ -426,12 +442,6 @@
+ >;
+ };
+
+- pinctrl_pmic: pmicgrp {
+- fsl,pins = <
+- MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x41
+- >;
+- };
+-
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+index 905b68a3daa5a..8e4a0ce99790b 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+@@ -46,7 +46,7 @@
+ pinctrl-0 = <&pinctrl_reg_usb1_en>;
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg1_vbus";
+- gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
++ gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+@@ -156,7 +156,8 @@
+
+ pinctrl_reg_usb1_en: regusb1grp {
+ fsl,pins = <
+- MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x41
++ MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x41
++ MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x141
+ MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41
+ >;
+ };
+diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+index 9928a87f593a5..b0bcda8cc51f4 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+@@ -1227,13 +1227,13 @@
+
+ cpu@0 {
+ device_type = "cpu";
+- compatible = "nvidia,denver";
++ compatible = "nvidia,tegra132-denver";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+- compatible = "nvidia,denver";
++ compatible = "nvidia,tegra132-denver";
+ reg = <1>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+index 5ba7a4519b956..c8250a3f7891f 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+@@ -2122,7 +2122,7 @@
+ };
+
+ pcie_ep@14160000 {
+- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
++ compatible = "nvidia,tegra194-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>;
+ reg = <0x00 0x14160000 0x0 0x00020000>, /* appl registers (128K) */
+ <0x00 0x36040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
+@@ -2162,7 +2162,7 @@
+ };
+
+ pcie_ep@14180000 {
+- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
++ compatible = "nvidia,tegra194-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
+ reg = <0x00 0x14180000 0x0 0x00020000>, /* appl registers (128K) */
+ <0x00 0x38040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
+@@ -2202,7 +2202,7 @@
+ };
+
+ pcie_ep@141a0000 {
+- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
++ compatible = "nvidia,tegra194-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
+ reg = <0x00 0x141a0000 0x0 0x00020000>, /* appl registers (128K) */
+ <0x00 0x3a040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index 9fa5b028e4f39..23ee1bfa43189 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -151,7 +151,7 @@
+ #size-cells = <2>;
+ ranges;
+
+- rpm_msg_ram: memory@0x60000 {
++ rpm_msg_ram: memory@60000 {
+ reg = <0x0 0x60000 0x0 0x6000>;
+ no-map;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+index e8c37a1693d3b..cc08dc4eb56a5 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
++++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+@@ -20,7 +20,7 @@
+ stdout-path = "serial0";
+ };
+
+- memory {
++ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0x0 0x20000000>;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index f39bc10cc5bd7..d64a6e81d1a55 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -583,10 +583,10 @@
+
+ pcie1: pci@10000000 {
+ compatible = "qcom,pcie-ipq8074";
+- reg = <0x10000000 0xf1d
+- 0x10000f20 0xa8
+- 0x00088000 0x2000
+- 0x10100000 0x1000>;
++ reg = <0x10000000 0xf1d>,
++ <0x10000f20 0xa8>,
++ <0x00088000 0x2000>,
++ <0x10100000 0x1000>;
+ reg-names = "dbi", "elbi", "parf", "config";
+ device_type = "pci";
+ linux,pci-domain = <1>;
+@@ -645,10 +645,10 @@
+
+ pcie0: pci@20000000 {
+ compatible = "qcom,pcie-ipq8074";
+- reg = <0x20000000 0xf1d
+- 0x20000f20 0xa8
+- 0x00080000 0x2000
+- 0x20100000 0x1000>;
++ reg = <0x20000000 0xf1d>,
++ <0x20000f20 0xa8>,
++ <0x00080000 0x2000>,
++ <0x20100000 0x1000>;
+ reg-names = "dbi", "elbi", "parf", "config";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+index f9f0b5aa6a266..87a3217e88efa 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+@@ -15,16 +15,18 @@
+ chosen { };
+
+ clocks {
+- xo_board: xo_board {
++ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
++ clock-output-names = "xo_board";
+ };
+
+- sleep_clk: sleep_clk {
++ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
++ clock-output-names = "sleep_clk";
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 78c55ca10ba9b..77bc233f83805 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -19,14 +19,14 @@
+ chosen { };
+
+ clocks {
+- xo_board: xo_board {
++ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+- sleep_clk: sleep_clk {
++ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
+index 0da7a3b8d1bf3..5ae2ddc65f7e4 100644
+--- a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
++++ b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
+@@ -307,10 +307,6 @@
+ status = "okay";
+ };
+
+-&tlmm {
+- gpio-reserved-ranges = <0 4>;
+-};
+-
+ &uart2 {
+ status = "okay";
+ };
+@@ -337,6 +333,16 @@
+ vdda-pll-max-microamp = <18300>;
+ };
+
++&usb_1 {
++ status = "okay";
++};
++
++&usb_1_dwc3 {
++ dr_mode = "host";
++
++ pinctrl-names = "default";
++ pinctrl-0 = <&usb2phy_ac_en1_default>;
++};
+
+ &usb_1_hsphy {
+ status = "okay";
+@@ -346,15 +352,51 @@
+ };
+
+ &usb_1_qmpphy {
++ status = "disabled";
++};
++
++&usb_2 {
+ status = "okay";
+- vdda-phy-supply = <&vreg_l8c_1p2>;
+- vdda-pll-supply = <&vdda_usb_ss_dp_core_1>;
+ };
+
+-&usb_1 {
++&usb_2_dwc3 {
++ dr_mode = "host";
++
++ pinctrl-names = "default";
++ pinctrl-0 = <&usb2phy_ac_en2_default>;
++};
++
++&usb_2_hsphy {
+ status = "okay";
++ vdda-pll-supply = <&vdd_usb_hs_core>;
++ vdda33-supply = <&vdda_usb_hs_3p1>;
++ vdda18-supply = <&vdda_usb_hs_1p8>;
+ };
+
+-&usb_1_dwc3 {
+- dr_mode = "peripheral";
++&usb_2_qmpphy {
++ status = "okay";
++ vdda-phy-supply = <&vreg_l8c_1p2>;
++ vdda-pll-supply = <&vdda_usb_ss_dp_core_1>;
++};
++
++&tlmm {
++ gpio-reserved-ranges = <0 4>;
++
++ usb2phy_ac_en1_default: usb2phy_ac_en1_default {
++ mux {
++ pins = "gpio113";
++ function = "usb2phy_ac";
++ bias-disable;
++ drive-strength = <2>;
++ };
++ };
++
++ usb2phy_ac_en2_default: usb2phy_ac_en2_default {
++ mux {
++ pins = "gpio123";
++ function = "usb2phy_ac";
++ bias-disable;
++ drive-strength = <2>;
++ };
++ };
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+index f91a928466c3b..06a0ae773ad50 100644
+--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+@@ -17,14 +17,14 @@
+ chosen { };
+
+ clocks {
+- xo_board: xo_board {
++ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+- sleep_clk: sleep_clk {
++ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+@@ -343,10 +343,19 @@
+ };
+
+ qhee_code: qhee-code@85800000 {
+- reg = <0x0 0x85800000 0x0 0x3700000>;
++ reg = <0x0 0x85800000 0x0 0x600000>;
+ no-map;
+ };
+
++ rmtfs_mem: memory@85e00000 {
++ compatible = "qcom,rmtfs-mem";
++ reg = <0x0 0x85e00000 0x0 0x200000>;
++ no-map;
++
++ qcom,client-id = <1>;
++ qcom,vmid = <15>;
++ };
++
+ smem_region: smem-mem@86000000 {
+ reg = <0 0x86000000 0 0x200000>;
+ no-map;
+@@ -357,58 +366,44 @@
+ no-map;
+ };
+
+- modem_fw_mem: modem-fw-region@8ac00000 {
++ mpss_region: mpss@8ac00000 {
+ reg = <0x0 0x8ac00000 0x0 0x7e00000>;
+ no-map;
+ };
+
+- adsp_fw_mem: adsp-fw-region@92a00000 {
++ adsp_region: adsp@92a00000 {
+ reg = <0x0 0x92a00000 0x0 0x1e00000>;
+ no-map;
+ };
+
+- pil_mba_mem: pil-mba-region@94800000 {
++ mba_region: mba@94800000 {
+ reg = <0x0 0x94800000 0x0 0x200000>;
+ no-map;
+ };
+
+- buffer_mem: buffer-region@94a00000 {
++ buffer_mem: tzbuffer@94a00000 {
+ reg = <0x0 0x94a00000 0x0 0x100000>;
+ no-map;
+ };
+
+- venus_fw_mem: venus-fw-region@9f800000 {
++ venus_region: venus@9f800000 {
+ reg = <0x0 0x9f800000 0x0 0x800000>;
+ no-map;
+ };
+
+- secure_region2: secure-region2@f7c00000 {
+- reg = <0x0 0xf7c00000 0x0 0x5c00000>;
+- no-map;
+- };
+-
+ adsp_mem: adsp-region@f6000000 {
+ reg = <0x0 0xf6000000 0x0 0x800000>;
+ no-map;
+ };
+
+- qseecom_ta_mem: qseecom-ta-region@fec00000 {
+- reg = <0x0 0xfec00000 0x0 0x1000000>;
+- no-map;
+- };
+-
+ qseecom_mem: qseecom-region@f6800000 {
+ reg = <0x0 0xf6800000 0x0 0x1400000>;
+ no-map;
+ };
+
+- secure_display_memory: secure-region@f5c00000 {
+- reg = <0x0 0xf5c00000 0x0 0x5c00000>;
+- no-map;
+- };
+-
+- cont_splash_mem: cont-splash-region@9d400000 {
+- reg = <0x0 0x9d400000 0x0 0x23ff000>;
++ zap_shader_region: gpu@fed00000 {
++ compatible = "shared-dma-pool";
++ reg = <0x0 0xfed00000 0x0 0xa00000>;
+ no-map;
+ };
+ };
+@@ -527,14 +522,18 @@
+ reg = <0x01f40000 0x20000>;
+ };
+
+- tlmm: pinctrl@3000000 {
++ tlmm: pinctrl@3100000 {
+ compatible = "qcom,sdm630-pinctrl";
+- reg = <0x03000000 0xc00000>;
++ reg = <0x03100000 0x400000>,
++ <0x03500000 0x400000>,
++ <0x03900000 0x400000>;
++ reg-names = "south", "center", "north";
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+- #gpio-cells = <0x2>;
++ gpio-ranges = <&tlmm 0 0 114>;
++ #gpio-cells = <2>;
+ interrupt-controller;
+- #interrupt-cells = <0x2>;
++ #interrupt-cells = <2>;
+
+ blsp1_uart1_default: blsp1-uart1-default {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+@@ -554,40 +553,48 @@
+ bias-disable;
+ };
+
+- blsp2_uart1_tx_active: blsp2-uart1-tx-active {
+- pins = "gpio16";
+- drive-strength = <2>;
+- bias-disable;
+- };
+-
+- blsp2_uart1_tx_sleep: blsp2-uart1-tx-sleep {
+- pins = "gpio16";
+- drive-strength = <2>;
+- bias-pull-up;
+- };
++ blsp2_uart1_default: blsp2-uart1-active {
++ tx-rts {
++ pins = "gpio16", "gpio19";
++ function = "blsp_uart5";
++ drive-strength = <2>;
++ bias-disable;
++ };
+
+- blsp2_uart1_rxcts_active: blsp2-uart1-rxcts-active {
+- pins = "gpio17", "gpio18";
+- drive-strength = <2>;
+- bias-disable;
+- };
++ rx {
++ /*
++ * Avoid garbage data while BT module
++ * is powered off or not driving signal
++ */
++ pins = "gpio17";
++ function = "blsp_uart5";
++ drive-strength = <2>;
++ bias-pull-up;
++ };
+
+- blsp2_uart1_rxcts_sleep: blsp2-uart1-rxcts-sleep {
+- pins = "gpio17", "gpio18";
+- drive-strength = <2>;
+- bias-no-pull;
++ cts {
++ /* Match the pull of the BT module */
++ pins = "gpio18";
++ function = "blsp_uart5";
++ drive-strength = <2>;
++ bias-pull-down;
++ };
+ };
+
+- blsp2_uart1_rfr_active: blsp2-uart1-rfr-active {
+- pins = "gpio19";
+- drive-strength = <2>;
+- bias-disable;
+- };
++ blsp2_uart1_sleep: blsp2-uart1-sleep {
++ tx {
++ pins = "gpio16";
++ function = "gpio";
++ drive-strength = <2>;
++ bias-pull-up;
++ };
+
+- blsp2_uart1_rfr_sleep: blsp2-uart1-rfr-sleep {
+- pins = "gpio19";
+- drive-strength = <2>;
+- bias-no-pull;
++ rx-cts-rts {
++ pins = "gpio17", "gpio18", "gpio19";
++ function = "gpio";
++ drive-strength = <2>;
++ bias-no-pull;
++ };
+ };
+
+ i2c1_default: i2c1-default {
+@@ -686,50 +693,106 @@
+ bias-pull-up;
+ };
+
+- sdc1_clk_on: sdc1-clk-on {
+- pins = "sdc1_clk";
+- bias-disable;
+- drive-strength = <16>;
+- };
++ sdc1_state_on: sdc1-on {
++ clk {
++ pins = "sdc1_clk";
++ bias-disable;
++ drive-strength = <16>;
++ };
+
+- sdc1_clk_off: sdc1-clk-off {
+- pins = "sdc1_clk";
+- bias-disable;
+- drive-strength = <2>;
+- };
++ cmd {
++ pins = "sdc1_cmd";
++ bias-pull-up;
++ drive-strength = <10>;
++ };
+
+- sdc1_cmd_on: sdc1-cmd-on {
+- pins = "sdc1_cmd";
+- bias-pull-up;
+- drive-strength = <10>;
+- };
++ data {
++ pins = "sdc1_data";
++ bias-pull-up;
++ drive-strength = <10>;
++ };
+
+- sdc1_cmd_off: sdc1-cmd-off {
+- pins = "sdc1_cmd";
+- bias-pull-up;
+- drive-strength = <2>;
++ rclk {
++ pins = "sdc1_rclk";
++ bias-pull-down;
++ };
+ };
+
+- sdc1_data_on: sdc1-data-on {
+- pins = "sdc1_data";
+- bias-pull-up;
+- drive-strength = <8>;
+- };
++ sdc1_state_off: sdc1-off {
++ clk {
++ pins = "sdc1_clk";
++ bias-disable;
++ drive-strength = <2>;
++ };
+
+- sdc1_data_off: sdc1-data-off {
+- pins = "sdc1_data";
+- bias-pull-up;
+- drive-strength = <2>;
++ cmd {
++ pins = "sdc1_cmd";
++ bias-pull-up;
++ drive-strength = <2>;
++ };
++
++ data {
++ pins = "sdc1_data";
++ bias-pull-up;
++ drive-strength = <2>;
++ };
++
++ rclk {
++ pins = "sdc1_rclk";
++ bias-pull-down;
++ };
+ };
+
+- sdc1_rclk_on: sdc1-rclk-on {
+- pins = "sdc1_rclk";
+- bias-pull-down;
++ sdc2_state_on: sdc2-on {
++ clk {
++ pins = "sdc2_clk";
++ bias-disable;
++ drive-strength = <16>;
++ };
++
++ cmd {
++ pins = "sdc2_cmd";
++ bias-pull-up;
++ drive-strength = <10>;
++ };
++
++ data {
++ pins = "sdc2_data";
++ bias-pull-up;
++ drive-strength = <10>;
++ };
++
++ sd-cd {
++ pins = "gpio54";
++ bias-pull-up;
++ drive-strength = <2>;
++ };
+ };
+
+- sdc1_rclk_off: sdc1-rclk-off {
+- pins = "sdc1_rclk";
+- bias-pull-down;
++ sdc2_state_off: sdc2-off {
++ clk {
++ pins = "sdc2_clk";
++ bias-disable;
++ drive-strength = <2>;
++ };
++
++ cmd {
++ pins = "sdc2_cmd";
++ bias-pull-up;
++ drive-strength = <2>;
++ };
++
++ data {
++ pins = "sdc2_data";
++ bias-pull-up;
++ drive-strength = <2>;
++ };
++
++ sd-cd {
++ pins = "gpio54";
++ bias-disable;
++ drive-strength = <2>;
++ };
+ };
+ };
+
+@@ -823,8 +886,8 @@
+ clock-names = "core", "iface", "xo", "ice";
+
+ pinctrl-names = "default", "sleep";
+- pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+- pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
++ pinctrl-0 = <&sdc1_state_on>;
++ pinctrl-1 = <&sdc1_state_off>;
+
+ bus-width = <8>;
+ non-removable;
+@@ -969,10 +1032,8 @@
+ dmas = <&blsp2_dma 0>, <&blsp2_dma 1>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+- pinctrl-0 = <&blsp2_uart1_tx_active &blsp2_uart1_rxcts_active
+- &blsp2_uart1_rfr_active>;
+- pinctrl-1 = <&blsp2_uart1_tx_sleep &blsp2_uart1_rxcts_sleep
+- &blsp2_uart1_rfr_sleep>;
++ pinctrl-0 = <&blsp2_uart1_default>;
++ pinctrl-1 = <&blsp2_uart1_sleep>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 9a6eff1813a68..7f7c8f467bfc0 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -3955,7 +3955,7 @@
+ };
+ };
+
+- epss_l3: interconnect@18591000 {
++ epss_l3: interconnect@18590000 {
+ compatible = "qcom,sm8250-epss-l3";
+ reg = <0 0x18590000 0 0x1000>;
+
+diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
+index b83fb24954b77..3198acb2aad8c 100644
+--- a/arch/arm64/include/asm/el2_setup.h
++++ b/arch/arm64/include/asm/el2_setup.h
+@@ -149,8 +149,17 @@
+ ubfx x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
+ cbz x1, .Lskip_fgt_\@
+
+- msr_s SYS_HDFGRTR_EL2, xzr
+- msr_s SYS_HDFGWTR_EL2, xzr
++ mov x0, xzr
++ mrs x1, id_aa64dfr0_el1
++ ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
++ cmp x1, #3
++ b.lt .Lset_fgt_\@
++ /* Disable PMSNEVFR_EL1 read and write traps */
++ orr x0, x0, #(1 << 62)
++
++.Lset_fgt_\@:
++ msr_s SYS_HDFGRTR_EL2, x0
++ msr_s SYS_HDFGWTR_EL2, x0
+ msr_s SYS_HFGRTR_EL2, xzr
+ msr_s SYS_HFGWTR_EL2, xzr
+ msr_s SYS_HFGITR_EL2, xzr
+diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
+index 3512184cfec17..96dc0f7da258d 100644
+--- a/arch/arm64/include/asm/kernel-pgtable.h
++++ b/arch/arm64/include/asm/kernel-pgtable.h
+@@ -65,8 +65,8 @@
+ #define EARLY_KASLR (0)
+ #endif
+
+-#define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \
+- - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
++#define EARLY_ENTRIES(vstart, vend, shift) \
++ ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+
+ #define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
+
+diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
+index 75beffe2ee8a8..e9c30859f80cd 100644
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -27,11 +27,32 @@ typedef struct {
+ } mm_context_t;
+
+ /*
+- * This macro is only used by the TLBI and low-level switch_mm() code,
+- * neither of which can race with an ASID change. We therefore don't
+- * need to reload the counter using atomic64_read().
++ * We use atomic64_read() here because the ASID for an 'mm_struct' can
++ * be reallocated when scheduling one of its threads following a
++ * rollover event (see new_context() and flush_context()). In this case,
++ * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
++ * may use a stale ASID. This is fine in principle as the new ASID is
++ * guaranteed to be clean in the TLB, but the TLBI routines have to take
++ * care to handle the following race:
++ *
++ * CPU 0 CPU 1 CPU 2
++ *
++ * // ptep_clear_flush(mm)
++ * xchg_relaxed(pte, 0)
++ * DSB ISHST
++ * old = ASID(mm)
++ * | <rollover>
++ * | new = new_context(mm)
++ * \-----------------> atomic_set(mm->context.id, new)
++ * cpu_switch_mm(mm)
++ * // Hardware walk of pte using new ASID
++ * TLBI(old)
++ *
++ * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
++ * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
++ * written by CPU 0.
+ */
+-#define ASID(mm) ((mm)->context.id.counter & 0xffff)
++#define ASID(mm) (atomic64_read(&(mm)->context.id) & 0xffff)
+
+ static inline bool arm64_kernel_unmapped_at_el0(void)
+ {
+diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
+index cc3f5a33ff9c5..36f02892e1df8 100644
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -245,9 +245,10 @@ static inline void flush_tlb_all(void)
+
+ static inline void flush_tlb_mm(struct mm_struct *mm)
+ {
+- unsigned long asid = __TLBI_VADDR(0, ASID(mm));
++ unsigned long asid;
+
+ dsb(ishst);
++ asid = __TLBI_VADDR(0, ASID(mm));
+ __tlbi(aside1is, asid);
+ __tlbi_user(aside1is, asid);
+ dsb(ish);
+@@ -256,9 +257,10 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
+ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
+ unsigned long uaddr)
+ {
+- unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
++ unsigned long addr;
+
+ dsb(ishst);
++ addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
+ __tlbi(vale1is, addr);
+ __tlbi_user(vale1is, addr);
+ }
+@@ -283,9 +285,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
+ {
+ int num = 0;
+ int scale = 0;
+- unsigned long asid = ASID(vma->vm_mm);
+- unsigned long addr;
+- unsigned long pages;
++ unsigned long asid, addr, pages;
+
+ start = round_down(start, stride);
+ end = round_up(end, stride);
+@@ -305,6 +305,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
+ }
+
+ dsb(ishst);
++ asid = ASID(vma->vm_mm);
+
+ /*
+ * When the CPU does not support TLB range operations, flush the TLB
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index c5c994a73a645..17962452e31de 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -177,7 +177,7 @@ SYM_CODE_END(preserve_boot_args)
+ * to be composed of multiple pages. (This effectively scales the end index).
+ *
+ * vstart: virtual address of start of range
+- * vend: virtual address of end of range
++ * vend: virtual address of end of range - we map [vstart, vend]
+ * shift: shift used to transform virtual address into index
+ * ptrs: number of entries in page table
+ * istart: index in table corresponding to vstart
+@@ -214,17 +214,18 @@ SYM_CODE_END(preserve_boot_args)
+ *
+ * tbl: location of page table
+ * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
+- * vstart: start address to map
+- * vend: end address to map - we map [vstart, vend]
++ * vstart: virtual address of start of range
++ * vend: virtual address of end of range - we map [vstart, vend - 1]
+ * flags: flags to use to map last level entries
+ * phys: physical address corresponding to vstart - physical memory is contiguous
+ * pgds: the number of pgd entries
+ *
+ * Temporaries: istart, iend, tmp, count, sv - these need to be different registers
+- * Preserves: vstart, vend, flags
+- * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv
++ * Preserves: vstart, flags
++ * Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv
+ */
+ .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
++ sub \vend, \vend, #1
+ add \rtbl, \tbl, #PAGE_SIZE
+ mov \sv, \rtbl
+ mov \count, #0
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index 709d2c433c5e9..f6b1a88245db2 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -181,6 +181,8 @@ SECTIONS
+ /* everything from this point to __init_begin will be marked RO NX */
+ RO_DATA(PAGE_SIZE)
+
++ HYPERVISOR_DATA_SECTIONS
++
+ idmap_pg_dir = .;
+ . += IDMAP_DIR_SIZE;
+ idmap_pg_end = .;
+@@ -260,8 +262,6 @@ SECTIONS
+ _sdata = .;
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
+
+- HYPERVISOR_DATA_SECTIONS
+-
+ /*
+ * Data written with the MMU off but read with the MMU on requires
+ * cache lines to be invalidated, discarding up to a Cache Writeback
+diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
+index f1be832e2b746..d1e93a39cd3bc 100644
+--- a/arch/m68k/Kconfig.bus
++++ b/arch/m68k/Kconfig.bus
+@@ -63,7 +63,7 @@ source "drivers/zorro/Kconfig"
+
+ endif
+
+-if !MMU
++if COLDFIRE
+
+ config ISA_DMA_API
+ def_bool !M5272
+diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c
+index 0ddf03df62688..f451268f6c384 100644
+--- a/arch/mips/mti-malta/malta-dtshim.c
++++ b/arch/mips/mti-malta/malta-dtshim.c
+@@ -22,7 +22,7 @@
+ #define ROCIT_CONFIG_GEN1_MEMMAP_SHIFT 8
+ #define ROCIT_CONFIG_GEN1_MEMMAP_MASK (0xf << 8)
+
+-static unsigned char fdt_buf[16 << 10] __initdata;
++static unsigned char fdt_buf[16 << 10] __initdata __aligned(8);
+
+ /* determined physical memory size, not overridden by command line args */
+ extern unsigned long physical_memsize;
+diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
+index bc657e55c15f8..98e4f97db5159 100644
+--- a/arch/openrisc/kernel/entry.S
++++ b/arch/openrisc/kernel/entry.S
+@@ -547,6 +547,7 @@ EXCEPTION_ENTRY(_external_irq_handler)
+ l.bnf 1f // ext irq enabled, all ok.
+ l.nop
+
++#ifdef CONFIG_PRINTK
+ l.addi r1,r1,-0x8
+ l.movhi r3,hi(42f)
+ l.ori r3,r3,lo(42f)
+@@ -560,6 +561,7 @@ EXCEPTION_ENTRY(_external_irq_handler)
+ .string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r"
+ .align 4
+ .previous
++#endif
+
+ l.ori r4,r4,SPR_SR_IEE // fix the bug
+ // l.sw PT_SR(r1),r4
+diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
+index aed8ea29268bb..2d019aa73b8f0 100644
+--- a/arch/parisc/Makefile
++++ b/arch/parisc/Makefile
+@@ -25,18 +25,18 @@ CHECKFLAGS += -D__hppa__=1
+ ifdef CONFIG_64BIT
+ UTS_MACHINE := parisc64
+ CHECKFLAGS += -D__LP64__=1
+-CC_ARCHES = hppa64
+ LD_BFD := elf64-hppa-linux
+ else # 32-bit
+-CC_ARCHES = hppa hppa2.0 hppa1.1
+ LD_BFD := elf32-hppa-linux
+ endif
+
+ # select defconfig based on actual architecture
+-ifeq ($(shell uname -m),parisc64)
1698 ++ifeq ($(ARCH),parisc64)
1699 + KBUILD_DEFCONFIG := generic-64bit_defconfig
1700 ++ CC_ARCHES := hppa64
1701 + else
1702 + KBUILD_DEFCONFIG := generic-32bit_defconfig
1703 ++ CC_ARCHES := hppa hppa2.0 hppa1.1
1704 + endif
1705 +
1706 + export LD_BFD
1707 +diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
1708 +index fb1e94a3982bc..db1a47cf424dd 100644
1709 +--- a/arch/parisc/kernel/signal.c
1710 ++++ b/arch/parisc/kernel/signal.c
1711 +@@ -237,6 +237,12 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
1712 + #endif
1713 +
1714 + usp = (regs->gr[30] & ~(0x01UL));
1715 ++#ifdef CONFIG_64BIT
1716 ++ if (is_compat_task()) {
1717 ++ /* The gcc alloca implementation leaves garbage in the upper 32 bits of sp */
1718 ++ usp = (compat_uint_t)usp;
1719 ++ }
1720 ++#endif
1721 + /*FIXME: frame_size parameter is unused, remove it. */
1722 + frame = get_sigframe(&ksig->ka, usp, sizeof(*frame));
1723 +
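The parisc fix above truncates the user stack pointer for 32-bit compat tasks, since gcc's alloca can leave garbage in the upper half of the 64-bit register. A small sketch of what the cast does (the sample value is invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t usp = 0xdeadbeef00ff1000ULL; /* upper 32 bits are junk */
            uint32_t compat_usp = (uint32_t)usp;  /* mirrors (compat_uint_t)usp */

            printf("%#x\n", compat_usp);          /* prints 0xff1000 */
            return 0;
    }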
1724 +diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
1725 +index d21f266cea9a5..cd08f9ed2c8dd 100644
1726 +--- a/arch/powerpc/configs/mpc885_ads_defconfig
1727 ++++ b/arch/powerpc/configs/mpc885_ads_defconfig
1728 +@@ -21,7 +21,6 @@ CONFIG_INET=y
1729 + CONFIG_IP_MULTICAST=y
1730 + CONFIG_IP_PNP=y
1731 + CONFIG_SYN_COOKIES=y
1732 +-# CONFIG_IPV6 is not set
1733 + # CONFIG_FW_LOADER is not set
1734 + CONFIG_MTD=y
1735 + CONFIG_MTD_BLOCK=y
1736 +@@ -34,6 +33,7 @@ CONFIG_MTD_CFI_GEOMETRY=y
1737 + # CONFIG_MTD_CFI_I2 is not set
1738 + CONFIG_MTD_CFI_I4=y
1739 + CONFIG_MTD_CFI_AMDSTD=y
1740 ++CONFIG_MTD_PHYSMAP=y
1741 + CONFIG_MTD_PHYSMAP_OF=y
1742 + # CONFIG_BLK_DEV is not set
1743 + CONFIG_NETDEVICES=y
1744 +@@ -76,7 +76,6 @@ CONFIG_PERF_EVENTS=y
1745 + CONFIG_MATH_EMULATION=y
1746 + CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y
1747 + CONFIG_STRICT_KERNEL_RWX=y
1748 +-CONFIG_IPV6=y
1749 + CONFIG_BPF_JIT=y
1750 + CONFIG_DEBUG_VM_PGTABLE=y
1751 + CONFIG_BDI_SWITCH=y
1752 +diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
1753 +index c6bbe9778d3cd..3c09109e708ef 100644
1754 +--- a/arch/powerpc/include/asm/pmc.h
1755 ++++ b/arch/powerpc/include/asm/pmc.h
1756 +@@ -34,6 +34,13 @@ static inline void ppc_set_pmu_inuse(int inuse)
1757 + #endif
1758 + }
1759 +
1760 ++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1761 ++static inline int ppc_get_pmu_inuse(void)
1762 ++{
1763 ++ return get_paca()->pmcregs_in_use;
1764 ++}
1765 ++#endif
1766 ++
1767 + extern void power4_enable_pmcs(void);
1768 +
1769 + #else /* CONFIG_PPC64 */
1770 +diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
1771 +index 447b78a87c8f2..12c75b95646a5 100644
1772 +--- a/arch/powerpc/kernel/smp.c
1773 ++++ b/arch/powerpc/kernel/smp.c
1774 +@@ -1085,7 +1085,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
1775 + }
1776 +
1777 + if (cpu_to_chip_id(boot_cpuid) != -1) {
1778 +- int idx = num_possible_cpus() / threads_per_core;
1779 ++ int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
1780 +
1781 + /*
1782 + * All threads of a core will all belong to the same core,
1783 +@@ -1503,6 +1503,7 @@ static void add_cpu_to_masks(int cpu)
1784 + * add it to its own thread sibling mask.
1785 + */
1786 + cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1787 ++ cpumask_set_cpu(cpu, cpu_core_mask(cpu));
1788 +
1789 + for (i = first_thread; i < first_thread + threads_per_core; i++)
1790 + if (cpu_online(i))
1791 +@@ -1520,11 +1521,6 @@ static void add_cpu_to_masks(int cpu)
1792 + if (chip_id_lookup_table && ret)
1793 + chip_id = cpu_to_chip_id(cpu);
1794 +
1795 +- if (chip_id == -1) {
1796 +- cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
1797 +- goto out;
1798 +- }
1799 +-
1800 + if (shared_caches)
1801 + submask_fn = cpu_l2_cache_mask;
1802 +
1803 +@@ -1534,6 +1530,10 @@ static void add_cpu_to_masks(int cpu)
1804 + /* Skip all CPUs already part of current CPU core mask */
1805 + cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
1806 +
1807 ++ /* If chip_id is -1, limit the cpu_core_mask to within the DIE */
1808 ++ if (chip_id == -1)
1809 ++ cpumask_and(mask, mask, cpu_cpu_mask(cpu));
1810 ++
1811 + for_each_cpu(i, mask) {
1812 + if (chip_id == cpu_to_chip_id(i)) {
1813 + or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
1814 +@@ -1543,7 +1543,6 @@ static void add_cpu_to_masks(int cpu)
1815 + }
1816 + }
1817 +
1818 +-out:
1819 + free_cpumask_var(mask);
1820 + }
1821 +
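The smp.c hunk replaces a plain division with DIV_ROUND_UP so that a partially populated last core still gets an index. A quick sketch of the difference (the CPU counts are invented for the example):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int ncpus = 6, threads_per_core = 4;

            printf("%d vs %d\n",
                   ncpus / threads_per_core,               /* 1: loses a core */
                   DIV_ROUND_UP(ncpus, threads_per_core)); /* 2 */
            return 0;
    }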
1822 +diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
1823 +index 2b0d04a1b7d2d..9e4a4a7af380c 100644
1824 +--- a/arch/powerpc/kernel/stacktrace.c
1825 ++++ b/arch/powerpc/kernel/stacktrace.c
1826 +@@ -8,6 +8,7 @@
1827 + * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
1828 + */
1829 +
1830 ++#include <linux/delay.h>
1831 + #include <linux/export.h>
1832 + #include <linux/kallsyms.h>
1833 + #include <linux/module.h>
1834 +diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
1835 +index b5905ae4377c2..44eb7b1ef289e 100644
1836 +--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
1837 ++++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
1838 +@@ -65,10 +65,12 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
1839 + }
1840 + isync();
1841 +
1842 ++ pagefault_disable();
1843 + if (is_load)
1844 +- ret = copy_from_user_nofault(to, (const void __user *)from, n);
1845 ++ ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
1846 + else
1847 +- ret = copy_to_user_nofault((void __user *)to, from, n);
1848 ++ ret = __copy_to_user_inatomic((void __user *)to, from, n);
1849 ++ pagefault_enable();
1850 +
1851 + /* switch the pid first to avoid running host with unallocated pid */
1852 + if (quadrant == 1 && pid != old_pid)
1853 +diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
1854 +index dc6591548f0cf..636c6ae0939b4 100644
1855 +--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
1856 ++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
1857 +@@ -173,10 +173,13 @@ static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
1858 + idx -= stt->offset;
1859 + page = stt->pages[idx / TCES_PER_PAGE];
1860 + /*
1861 +- * page must not be NULL in real mode,
1862 +- * kvmppc_rm_ioba_validate() must have taken care of this.
1863 ++ * kvmppc_rm_ioba_validate() allows pages not to be allocated if the
1864 ++ * TCE is being cleared; otherwise it returns H_TOO_HARD and we skip this.
1865 + */
1866 +- WARN_ON_ONCE_RM(!page);
1867 ++ if (!page) {
1868 ++ WARN_ON_ONCE_RM(tce != 0);
1869 ++ return;
1870 ++ }
1871 + tbl = kvmppc_page_address(page);
1872 +
1873 + tbl[idx % TCES_PER_PAGE] = tce;
1874 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
1875 +index 085fb8ecbf688..af822f09785ff 100644
1876 +--- a/arch/powerpc/kvm/book3s_hv.c
1877 ++++ b/arch/powerpc/kvm/book3s_hv.c
1878 +@@ -59,6 +59,7 @@
1879 + #include <asm/kvm_book3s.h>
1880 + #include <asm/mmu_context.h>
1881 + #include <asm/lppaca.h>
1882 ++#include <asm/pmc.h>
1883 + #include <asm/processor.h>
1884 + #include <asm/cputhreads.h>
1885 + #include <asm/page.h>
1886 +@@ -3852,6 +3853,18 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
1887 + cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
1888 + kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
1889 +
1890 ++#ifdef CONFIG_PPC_PSERIES
1891 ++ if (kvmhv_on_pseries()) {
1892 ++ barrier();
1893 ++ if (vcpu->arch.vpa.pinned_addr) {
1894 ++ struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
1895 ++ get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
1896 ++ } else {
1897 ++ get_lppaca()->pmcregs_in_use = 1;
1898 ++ }
1899 ++ barrier();
1900 ++ }
1901 ++#endif
1902 + kvmhv_load_guest_pmu(vcpu);
1903 +
1904 + msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
1905 +@@ -3986,6 +3999,13 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
1906 + save_pmu |= nesting_enabled(vcpu->kvm);
1907 +
1908 + kvmhv_save_guest_pmu(vcpu, save_pmu);
1909 ++#ifdef CONFIG_PPC_PSERIES
1910 ++ if (kvmhv_on_pseries()) {
1911 ++ barrier();
1912 ++ get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
1913 ++ barrier();
1914 ++ }
1915 ++#endif
1916 +
1917 + vc->entry_exit_map = 0x101;
1918 + vc->in_guest = 0;
1919 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
1920 +index f2bf98bdcea28..094a1076fd1fe 100644
1921 +--- a/arch/powerpc/mm/numa.c
1922 ++++ b/arch/powerpc/mm/numa.c
1923 +@@ -893,7 +893,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
1924 + static void __init find_possible_nodes(void)
1925 + {
1926 + struct device_node *rtas;
1927 +- const __be32 *domains;
1928 ++ const __be32 *domains = NULL;
1929 + int prop_length, max_nodes;
1930 + u32 i;
1931 +
1932 +@@ -909,9 +909,14 @@ static void __init find_possible_nodes(void)
1933 + * it doesn't exist, then fallback on ibm,max-associativity-domains.
1934 + * Current denotes what the platform can support compared to max
1935 + * which denotes what the Hypervisor can support.
1936 ++ *
1937 ++ * If the LPAR is migratable, new nodes might be activated after an LPM,
1938 ++ * so we should consider the max number in that case.
1939 + */
1940 +- domains = of_get_property(rtas, "ibm,current-associativity-domains",
1941 +- &prop_length);
1942 ++ if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
1943 ++ domains = of_get_property(rtas,
1944 ++ "ibm,current-associativity-domains",
1945 ++ &prop_length);
1946 + if (!domains) {
1947 + domains = of_get_property(rtas, "ibm,max-associativity-domains",
1948 + &prop_length);
1949 +@@ -920,6 +925,8 @@ static void __init find_possible_nodes(void)
1950 + }
1951 +
1952 + max_nodes = of_read_number(&domains[min_common_depth], 1);
1953 ++ pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);
1954 ++
1955 + for (i = 0; i < max_nodes; i++) {
1956 + if (!node_possible(i))
1957 + node_set(i, node_possible_map);
1958 +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
1959 +index bb0ee716de912..b0a5894090391 100644
1960 +--- a/arch/powerpc/perf/core-book3s.c
1961 ++++ b/arch/powerpc/perf/core-book3s.c
1962 +@@ -2251,18 +2251,10 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
1963 + */
1964 + unsigned long perf_instruction_pointer(struct pt_regs *regs)
1965 + {
1966 +- bool use_siar = regs_use_siar(regs);
1967 + unsigned long siar = mfspr(SPRN_SIAR);
1968 +
1969 +- if (ppmu && (ppmu->flags & PPMU_P10_DD1)) {
1970 +- if (siar)
1971 +- return siar;
1972 +- else
1973 +- return regs->nip;
1974 +- } else if (use_siar && siar_valid(regs))
1975 +- return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
1976 +- else if (use_siar)
1977 +- return 0; // no valid instruction pointer
1978 ++ if (regs_use_siar(regs) && siar_valid(regs) && siar)
1979 ++ return siar + perf_ip_adjust(regs);
1980 + else
1981 + return regs->nip;
1982 + }
1983 +diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
1984 +index d48413e28c39e..c756228a081fb 100644
1985 +--- a/arch/powerpc/perf/hv-gpci.c
1986 ++++ b/arch/powerpc/perf/hv-gpci.c
1987 +@@ -175,7 +175,7 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
1988 + */
1989 + count = 0;
1990 + for (i = offset; i < offset + length; i++)
1991 +- count |= arg->bytes[i] << (i - offset);
1992 ++ count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);
1993 +
1994 + *value = count;
1995 + out:
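The hv-gpci fix corrects two bugs in the byte-assembly loop: the old code shifted by the byte index rather than by whole bytes, and it built the value with the wrong byte significance even though, per the new expression, the first byte is the most significant (and without the u64 cast the shift could overflow int). A standalone sketch of the corrected assembly, with invented sample bytes:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t bytes[] = { 0x12, 0x34, 0x56, 0x78 }; /* big-endian counter */
            int length = 4;
            uint64_t count = 0;

            /* first byte lands in the highest-order position */
            for (int i = 0; i < length; i++)
                    count |= (uint64_t)bytes[i] << ((length - 1 - i) * 8);

            printf("%#llx\n", (unsigned long long)count); /* 0x12345678 */
            return 0;
    }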
1996 +diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
1997 +index 3a77aa96d0925..bdb0c77bcfd9f 100644
1998 +--- a/arch/s390/include/asm/setup.h
1999 ++++ b/arch/s390/include/asm/setup.h
2000 +@@ -36,6 +36,7 @@
2001 + #define MACHINE_FLAG_NX BIT(15)
2002 + #define MACHINE_FLAG_GS BIT(16)
2003 + #define MACHINE_FLAG_SCC BIT(17)
2004 ++#define MACHINE_FLAG_PCI_MIO BIT(18)
2005 +
2006 + #define LPP_MAGIC BIT(31)
2007 + #define LPP_PID_MASK _AC(0xffffffff, UL)
2008 +@@ -110,6 +111,7 @@ extern unsigned long mio_wb_bit_mask;
2009 + #define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
2010 + #define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
2011 + #define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
2012 ++#define MACHINE_HAS_PCI_MIO (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
2013 +
2014 + /*
2015 + * Console mode. Override with conmode=
2016 +diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
2017 +index e317fd4866c15..f16f4d054ae25 100644
2018 +--- a/arch/s390/include/asm/smp.h
2019 ++++ b/arch/s390/include/asm/smp.h
2020 +@@ -18,6 +18,7 @@ extern struct mutex smp_cpu_state_mutex;
2021 + extern unsigned int smp_cpu_mt_shift;
2022 + extern unsigned int smp_cpu_mtid;
2023 + extern __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
2024 ++extern cpumask_t cpu_setup_mask;
2025 +
2026 + extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
2027 +
2028 +diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
2029 +index fb84e3fc1686d..9857cb0467268 100644
2030 +--- a/arch/s390/kernel/early.c
2031 ++++ b/arch/s390/kernel/early.c
2032 +@@ -236,6 +236,10 @@ static __init void detect_machine_facilities(void)
2033 + clock_comparator_max = -1ULL >> 1;
2034 + __ctl_set_bit(0, 53);
2035 + }
2036 ++ if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
2037 ++ S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
2038 ++ /* the control bit is set during PCI initialization */
2039 ++ }
2040 + }
2041 +
2042 + static inline void save_vector_registers(void)
2043 +diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
2044 +index ab584e8e35275..9156653b56f69 100644
2045 +--- a/arch/s390/kernel/jump_label.c
2046 ++++ b/arch/s390/kernel/jump_label.c
2047 +@@ -36,7 +36,7 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
2048 + unsigned char *ipe = (unsigned char *)expected;
2049 + unsigned char *ipn = (unsigned char *)new;
2050 +
2051 +- pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
2052 ++ pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc);
2053 + pr_emerg("Found: %6ph\n", ipc);
2054 + pr_emerg("Expected: %6ph\n", ipe);
2055 + pr_emerg("New: %6ph\n", ipn);
2056 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
2057 +index 8e8ace899407c..1909ec99d47d7 100644
2058 +--- a/arch/s390/kernel/smp.c
2059 ++++ b/arch/s390/kernel/smp.c
2060 +@@ -95,6 +95,7 @@ __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
2061 + #endif
2062 +
2063 + static unsigned int smp_max_threads __initdata = -1U;
2064 ++cpumask_t cpu_setup_mask;
2065 +
2066 + static int __init early_nosmt(char *s)
2067 + {
2068 +@@ -894,13 +895,14 @@ static void smp_init_secondary(void)
2069 + vtime_init();
2070 + vdso_getcpu_init();
2071 + pfault_init();
2072 ++ cpumask_set_cpu(cpu, &cpu_setup_mask);
2073 ++ update_cpu_masks();
2074 + notify_cpu_starting(cpu);
2075 + if (topology_cpu_dedicated(cpu))
2076 + set_cpu_flag(CIF_DEDICATED_CPU);
2077 + else
2078 + clear_cpu_flag(CIF_DEDICATED_CPU);
2079 + set_cpu_online(cpu, true);
2080 +- update_cpu_masks();
2081 + inc_irq_stat(CPU_RST);
2082 + local_irq_enable();
2083 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
2084 +@@ -955,10 +957,13 @@ early_param("possible_cpus", _setup_possible_cpus);
2085 + int __cpu_disable(void)
2086 + {
2087 + unsigned long cregs[16];
2088 ++ int cpu;
2089 +
2090 + /* Handle possible pending IPIs */
2091 + smp_handle_ext_call();
2092 +- set_cpu_online(smp_processor_id(), false);
2093 ++ cpu = smp_processor_id();
2094 ++ set_cpu_online(cpu, false);
2095 ++ cpumask_clear_cpu(cpu, &cpu_setup_mask);
2096 + update_cpu_masks();
2097 + /* Disable pseudo page faults on this cpu. */
2098 + pfault_fini();
2099 +diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
2100 +index 26aa2614ee352..eb4047c9da9a3 100644
2101 +--- a/arch/s390/kernel/topology.c
2102 ++++ b/arch/s390/kernel/topology.c
2103 +@@ -67,7 +67,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
2104 + static cpumask_t mask;
2105 +
2106 + cpumask_clear(&mask);
2107 +- if (!cpu_online(cpu))
2108 ++ if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
2109 + goto out;
2110 + cpumask_set_cpu(cpu, &mask);
2111 + switch (topology_mode) {
2112 +@@ -88,7 +88,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
2113 + case TOPOLOGY_MODE_SINGLE:
2114 + break;
2115 + }
2116 +- cpumask_and(&mask, &mask, cpu_online_mask);
2117 ++ cpumask_and(&mask, &mask, &cpu_setup_mask);
2118 + out:
2119 + cpumask_copy(dst, &mask);
2120 + }
2121 +@@ -99,16 +99,16 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
2122 + int i;
2123 +
2124 + cpumask_clear(&mask);
2125 +- if (!cpu_online(cpu))
2126 ++ if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
2127 + goto out;
2128 + cpumask_set_cpu(cpu, &mask);
2129 + if (topology_mode != TOPOLOGY_MODE_HW)
2130 + goto out;
2131 + cpu -= cpu % (smp_cpu_mtid + 1);
2132 +- for (i = 0; i <= smp_cpu_mtid; i++)
2133 +- if (cpu_present(cpu + i))
2134 ++ for (i = 0; i <= smp_cpu_mtid; i++) {
2135 ++ if (cpumask_test_cpu(cpu + i, &cpu_setup_mask))
2136 + cpumask_set_cpu(cpu + i, &mask);
2137 +- cpumask_and(&mask, &mask, cpu_online_mask);
2138 ++ }
2139 + out:
2140 + cpumask_copy(dst, &mask);
2141 + }
2142 +@@ -569,6 +569,7 @@ void __init topology_init_early(void)
2143 + alloc_masks(info, &book_info, 2);
2144 + alloc_masks(info, &drawer_info, 3);
2145 + out:
2146 ++ cpumask_set_cpu(0, &cpu_setup_mask);
2147 + __arch_update_cpu_topology();
2148 + __arch_update_dedicated_flag(NULL);
2149 + }
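The s390 topology rework keys group building off a new cpu_setup_mask that a CPU joins before it is marked online and leaves in __cpu_disable(), closing the window where a booting CPU was invisible to update_cpu_masks(). A toy userspace analogue of the mask bookkeeping (a single word, so CPUs 0..63 only, and no locking):

    #include <stdio.h>

    static unsigned long cpu_setup_mask;

    static void mask_set(int cpu)   { cpu_setup_mask |=  (1UL << cpu); }
    static void mask_clear(int cpu) { cpu_setup_mask &= ~(1UL << cpu); }
    static int  mask_test(int cpu)  { return !!(cpu_setup_mask & (1UL << cpu)); }

    int main(void)
    {
            mask_set(0);   /* boot CPU, as in topology_init_early() */
            mask_set(2);   /* secondary, as in smp_init_secondary() */
            mask_clear(2); /* offlined again, as in __cpu_disable() */

            printf("%d %d\n", mask_test(0), mask_test(2)); /* 1 0 */
            return 0;
    }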
2150 +diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
2151 +index 8ac710de1ab1b..07bbee9b7320d 100644
2152 +--- a/arch/s390/mm/init.c
2153 ++++ b/arch/s390/mm/init.c
2154 +@@ -186,9 +186,9 @@ static void pv_init(void)
2155 + return;
2156 +
2157 + /* make sure bounce buffers are shared */
2158 ++ swiotlb_force = SWIOTLB_FORCE;
2159 + swiotlb_init(1);
2160 + swiotlb_update_mem_attributes();
2161 +- swiotlb_force = SWIOTLB_FORCE;
2162 + }
2163 +
2164 + void __init mem_init(void)
2165 +diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
2166 +index 77cd965cffefa..34839bad33e4d 100644
2167 +--- a/arch/s390/pci/pci.c
2168 ++++ b/arch/s390/pci/pci.c
2169 +@@ -893,7 +893,6 @@ static void zpci_mem_exit(void)
2170 + }
2171 +
2172 + static unsigned int s390_pci_probe __initdata = 1;
2173 +-static unsigned int s390_pci_no_mio __initdata;
2174 + unsigned int s390_pci_force_floating __initdata;
2175 + static unsigned int s390_pci_initialized;
2176 +
2177 +@@ -904,7 +903,7 @@ char * __init pcibios_setup(char *str)
2178 + return NULL;
2179 + }
2180 + if (!strcmp(str, "nomio")) {
2181 +- s390_pci_no_mio = 1;
2182 ++ S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
2183 + return NULL;
2184 + }
2185 + if (!strcmp(str, "force_floating")) {
2186 +@@ -935,7 +934,7 @@ static int __init pci_base_init(void)
2187 + return 0;
2188 + }
2189 +
2190 +- if (test_facility(153) && !s390_pci_no_mio) {
2191 ++ if (MACHINE_HAS_PCI_MIO) {
2192 + static_branch_enable(&have_mio);
2193 + ctl_set_bit(2, 5);
2194 + }
2195 +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
2196 +index c890d67a64ad0..ba54c44a64e2e 100644
2197 +--- a/arch/x86/kernel/cpu/mshyperv.c
2198 ++++ b/arch/x86/kernel/cpu/mshyperv.c
2199 +@@ -375,8 +375,6 @@ static void __init ms_hyperv_init_platform(void)
2200 + if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
2201 + wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, 0x1);
2202 + setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
2203 +- } else {
2204 +- mark_tsc_unstable("running on Hyper-V");
2205 + }
2206 +
2207 + /*
2208 +@@ -437,6 +435,13 @@ static void __init ms_hyperv_init_platform(void)
2209 + /* Register Hyper-V specific clocksource */
2210 + hv_init_clocksource();
2211 + #endif
2212 ++ /*
2213 ++ * TSC should be marked as unstable only after the Hyper-V
2214 ++ * clocksource has been initialized. This ensures that the
2215 ++ * stability of the sched_clock is not altered.
2216 ++ */
2217 ++ if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
2218 ++ mark_tsc_unstable("running on Hyper-V");
2219 + }
2220 +
2221 + static bool __init ms_hyperv_x2apic_available(void)
2222 +diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
2223 +index ac06ca32e9ef7..5e6e236977c75 100644
2224 +--- a/arch/x86/xen/p2m.c
2225 ++++ b/arch/x86/xen/p2m.c
2226 +@@ -618,8 +618,8 @@ int xen_alloc_p2m_entry(unsigned long pfn)
2227 + }
2228 +
2229 + /* Expanded the p2m? */
2230 +- if (pfn > xen_p2m_last_pfn) {
2231 +- xen_p2m_last_pfn = pfn;
2232 ++ if (pfn >= xen_p2m_last_pfn) {
2233 ++ xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
2234 + HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
2235 + }
2236 +
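The p2m change rounds the last-pfn marker up to the next P2M_PER_PAGE boundary, since xen_alloc_p2m_entry() extends the array a whole page of entries at a time. A sketch of the kernel-style power-of-two ALIGN round-up (the sample numbers are assumed):

    #include <stdio.h>

    /* same shape as the kernel's ALIGN() for power-of-two alignments */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long pfn = 1000, per_page = 512;

            printf("%lu\n", ALIGN(pfn + 1, per_page)); /* 1024 */
            return 0;
    }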
2237 +diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
2238 +index 21184488c277f..0108504dfb454 100644
2239 +--- a/arch/xtensa/platforms/iss/console.c
2240 ++++ b/arch/xtensa/platforms/iss/console.c
2241 +@@ -136,9 +136,13 @@ static const struct tty_operations serial_ops = {
2242 +
2243 + static int __init rs_init(void)
2244 + {
2245 +- tty_port_init(&serial_port);
2246 ++ int ret;
2247 +
2248 + serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
2249 ++ if (!serial_driver)
2250 ++ return -ENOMEM;
2251 ++
2252 ++ tty_port_init(&serial_port);
2253 +
2254 + /* Initialize the tty_driver structure */
2255 +
2256 +@@ -156,8 +160,15 @@ static int __init rs_init(void)
2257 + tty_set_operations(serial_driver, &serial_ops);
2258 + tty_port_link_device(&serial_port, serial_driver, 0);
2259 +
2260 +- if (tty_register_driver(serial_driver))
2261 +- panic("Couldn't register serial driver\n");
2262 ++ ret = tty_register_driver(serial_driver);
2263 ++ if (ret) {
2264 ++ pr_err("Couldn't register serial driver\n");
2265 ++ tty_driver_kref_put(serial_driver);
2266 ++ tty_port_destroy(&serial_port);
2267 ++
2268 ++ return ret;
2269 ++ }
2270 ++
2271 + return 0;
2272 + }
2273 +
2274 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
2275 +index 673a634eadd9f..9360c65169ff4 100644
2276 +--- a/block/bfq-iosched.c
2277 ++++ b/block/bfq-iosched.c
2278 +@@ -5296,7 +5296,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
2279 + if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
2280 + pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
2281 + bfqq->new_ioprio);
2282 +- bfqq->new_ioprio = IOPRIO_BE_NR;
2283 ++ bfqq->new_ioprio = IOPRIO_BE_NR - 1;
2284 + }
2285 +
2286 + bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
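The bfq fix is a classic off-by-one clamp: with IOPRIO_BE_NR priority levels, the valid values are 0..IOPRIO_BE_NR-1, so clamping an out-of-range value to IOPRIO_BE_NR itself still indexed one past the table. A minimal sketch (IOPRIO_BE_NR = 8 is an assumption for the example):

    #include <stdio.h>

    #define IOPRIO_BE_NR 8 /* levels 0..7 are valid */

    int main(void)
    {
            int new_ioprio = 11; /* bogus value from userspace */

            if (new_ioprio >= IOPRIO_BE_NR)
                    new_ioprio = IOPRIO_BE_NR - 1; /* highest valid level */

            printf("%d\n", new_ioprio); /* 7 */
            return 0;
    }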
2287 +diff --git a/block/blk-zoned.c b/block/blk-zoned.c
2288 +index 86fce751bb173..1d0c76c18fc52 100644
2289 +--- a/block/blk-zoned.c
2290 ++++ b/block/blk-zoned.c
2291 +@@ -360,9 +360,6 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
2292 + if (!blk_queue_is_zoned(q))
2293 + return -ENOTTY;
2294 +
2295 +- if (!capable(CAP_SYS_ADMIN))
2296 +- return -EACCES;
2297 +-
2298 + if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
2299 + return -EFAULT;
2300 +
2301 +@@ -421,9 +418,6 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
2302 + if (!blk_queue_is_zoned(q))
2303 + return -ENOTTY;
2304 +
2305 +- if (!capable(CAP_SYS_ADMIN))
2306 +- return -EACCES;
2307 +-
2308 + if (!(mode & FMODE_WRITE))
2309 + return -EBADF;
2310 +
2311 +diff --git a/block/bsg.c b/block/bsg.c
2312 +index 1f196563ae6ca..79b42c5cafeb8 100644
2313 +--- a/block/bsg.c
2314 ++++ b/block/bsg.c
2315 +@@ -373,10 +373,13 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2316 + case SG_GET_RESERVED_SIZE:
2317 + case SG_SET_RESERVED_SIZE:
2318 + case SG_EMULATED_HOST:
2319 +- case SCSI_IOCTL_SEND_COMMAND:
2320 + return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
2321 + case SG_IO:
2322 + return bsg_sg_io(bd->queue, file->f_mode, uarg);
2323 ++ case SCSI_IOCTL_SEND_COMMAND:
2324 ++ pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
2325 ++ current->comm);
2326 ++ return -EINVAL;
2327 + default:
2328 + return -ENOTTY;
2329 + }
2330 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
2331 +index 44f434acfce08..0e6e73b8023fc 100644
2332 +--- a/drivers/ata/libata-core.c
2333 ++++ b/drivers/ata/libata-core.c
2334 +@@ -3950,6 +3950,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
2335 + ATA_HORKAGE_ZERO_AFTER_TRIM, },
2336 + { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2337 + ATA_HORKAGE_ZERO_AFTER_TRIM, },
2338 ++ { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2339 ++ ATA_HORKAGE_ZERO_AFTER_TRIM, },
2340 ++ { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2341 ++ ATA_HORKAGE_ZERO_AFTER_TRIM, },
2342 + { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2343 + ATA_HORKAGE_ZERO_AFTER_TRIM, },
2344 +
2345 +diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
2346 +index f0ef844428bb4..338c2e50f7591 100644
2347 +--- a/drivers/ata/sata_dwc_460ex.c
2348 ++++ b/drivers/ata/sata_dwc_460ex.c
2349 +@@ -1259,24 +1259,20 @@ static int sata_dwc_probe(struct platform_device *ofdev)
2350 + irq = irq_of_parse_and_map(np, 0);
2351 + if (irq == NO_IRQ) {
2352 + dev_err(&ofdev->dev, "no SATA DMA irq\n");
2353 +- err = -ENODEV;
2354 +- goto error_out;
2355 ++ return -ENODEV;
2356 + }
2357 +
2358 + #ifdef CONFIG_SATA_DWC_OLD_DMA
2359 + if (!of_find_property(np, "dmas", NULL)) {
2360 + err = sata_dwc_dma_init_old(ofdev, hsdev);
2361 + if (err)
2362 +- goto error_out;
2363 ++ return err;
2364 + }
2365 + #endif
2366 +
2367 + hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
2368 +- if (IS_ERR(hsdev->phy)) {
2369 +- err = PTR_ERR(hsdev->phy);
2370 +- hsdev->phy = NULL;
2371 +- goto error_out;
2372 +- }
2373 ++ if (IS_ERR(hsdev->phy))
2374 ++ return PTR_ERR(hsdev->phy);
2375 +
2376 + err = phy_init(hsdev->phy);
2377 + if (err)
2378 +diff --git a/drivers/base/core.c b/drivers/base/core.c
2379 +index 6c0ef9d55a343..8c77e14987d4b 100644
2380 +--- a/drivers/base/core.c
2381 ++++ b/drivers/base/core.c
2382 +@@ -886,6 +886,8 @@ static void device_link_put_kref(struct device_link *link)
2383 + {
2384 + if (link->flags & DL_FLAG_STATELESS)
2385 + kref_put(&link->kref, __device_link_del);
2386 ++ else if (!device_is_registered(link->consumer))
2387 ++ __device_link_del(&link->kref);
2388 + else
2389 + WARN(1, "Unable to drop a managed device link reference\n");
2390 + }
2391 +diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
2392 +index 09c8ab5e0959e..32b2b6d9bde0b 100644
2393 +--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
2394 ++++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
2395 +@@ -68,6 +68,8 @@ struct fsl_mc_addr_translation_range {
2396 + #define MC_FAPR_PL BIT(18)
2397 + #define MC_FAPR_BMT BIT(17)
2398 +
2399 ++static phys_addr_t mc_portal_base_phys_addr;
2400 ++
2401 + /**
2402 + * fsl_mc_bus_match - device to driver matching callback
2403 + * @dev: the fsl-mc device to match against
2404 +@@ -220,7 +222,7 @@ static int scan_fsl_mc_bus(struct device *dev, void *data)
2405 + root_mc_dev = to_fsl_mc_device(dev);
2406 + root_mc_bus = to_fsl_mc_bus(root_mc_dev);
2407 + mutex_lock(&root_mc_bus->scan_mutex);
2408 +- dprc_scan_objects(root_mc_dev, NULL);
2409 ++ dprc_scan_objects(root_mc_dev, false);
2410 + mutex_unlock(&root_mc_bus->scan_mutex);
2411 +
2412 + exit:
2413 +@@ -703,14 +705,30 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
2414 + * If base address is in the region_desc use it otherwise
2415 + * revert to old mechanism
2416 + */
2417 +- if (region_desc.base_address)
2418 ++ if (region_desc.base_address) {
2419 + regions[i].start = region_desc.base_address +
2420 + region_desc.base_offset;
2421 +- else
2422 ++ } else {
2423 + error = translate_mc_addr(mc_dev, mc_region_type,
2424 + region_desc.base_offset,
2425 + &regions[i].start);
2426 +
2427 ++ /*
2428 ++ * Some versions of the MC firmware wrongly report
2429 ++ * 0 for the register base address of the DPMCP associated
2430 ++ * with child DPRC objects, thus rendering them unusable.
2431 ++ * This is particularly troublesome in ACPI boot
2432 ++ * scenarios, where the legacy way of extracting this
2433 ++ * base address from the device tree does not apply.
2434 ++ * Given that DPMCPs share the same base address,
2435 ++ * work around this by using the base address extracted
2436 ++ * from the root DPRC container.
2437 ++ */
2438 ++ if (is_fsl_mc_bus_dprc(mc_dev) &&
2439 ++ regions[i].start == region_desc.base_offset)
2440 ++ regions[i].start += mc_portal_base_phys_addr;
2441 ++ }
2442 ++
2443 + if (error < 0) {
2444 + dev_err(parent_dev,
2445 + "Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
2446 +@@ -1126,6 +1144,8 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
2447 + plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2448 + mc_portal_phys_addr = plat_res->start;
2449 + mc_portal_size = resource_size(plat_res);
2450 ++ mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
2451 ++
2452 + error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
2453 + mc_portal_size, NULL,
2454 + FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
2455 +diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
2456 +index b4fc8d71daf20..b656d25a97678 100644
2457 +--- a/drivers/clk/at91/clk-generated.c
2458 ++++ b/drivers/clk/at91/clk-generated.c
2459 +@@ -128,6 +128,12 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
2460 + int i;
2461 + u32 div;
2462 +
2463 ++ /* do not look for a rate that is outside of our range */
2464 ++ if (gck->range.max && req->rate > gck->range.max)
2465 ++ req->rate = gck->range.max;
2466 ++ if (gck->range.min && req->rate < gck->range.min)
2467 ++ req->rate = gck->range.min;
2468 ++
2469 + for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
2470 + if (gck->chg_pid == i)
2471 + continue;
2472 +diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
2473 +index 2c309e3dc8e34..04e728538cefe 100644
2474 +--- a/drivers/clk/imx/clk-composite-8m.c
2475 ++++ b/drivers/clk/imx/clk-composite-8m.c
2476 +@@ -216,7 +216,8 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
2477 + div->width = PCG_PREDIV_WIDTH;
2478 + divider_ops = &imx8m_clk_composite_divider_ops;
2479 + mux_ops = &clk_mux_ops;
2480 +- flags |= CLK_SET_PARENT_GATE;
2481 ++ if (!(composite_flags & IMX_COMPOSITE_FW_MANAGED))
2482 ++ flags |= CLK_SET_PARENT_GATE;
2483 + }
2484 +
2485 + div->lock = &imx_ccm_lock;
2486 +diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
2487 +index f1919fafb1247..e92621fa8b9cd 100644
2488 +--- a/drivers/clk/imx/clk-imx8mm.c
2489 ++++ b/drivers/clk/imx/clk-imx8mm.c
2490 +@@ -407,10 +407,10 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
2491 + hws[IMX8MM_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
2492 + hws[IMX8MM_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
2493 +
2494 +- hws[IMX8MM_CLK_CLKOUT1_SEL] = imx_clk_hw_mux("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
2495 ++ hws[IMX8MM_CLK_CLKOUT1_SEL] = imx_clk_hw_mux2("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
2496 + hws[IMX8MM_CLK_CLKOUT1_DIV] = imx_clk_hw_divider("clkout1_div", "clkout1_sel", base + 0x128, 0, 4);
2497 + hws[IMX8MM_CLK_CLKOUT1] = imx_clk_hw_gate("clkout1", "clkout1_div", base + 0x128, 8);
2498 +- hws[IMX8MM_CLK_CLKOUT2_SEL] = imx_clk_hw_mux("clkout2_sel", base + 0x128, 20, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
2499 ++ hws[IMX8MM_CLK_CLKOUT2_SEL] = imx_clk_hw_mux2("clkout2_sel", base + 0x128, 20, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
2500 + hws[IMX8MM_CLK_CLKOUT2_DIV] = imx_clk_hw_divider("clkout2_div", "clkout2_sel", base + 0x128, 16, 4);
2501 + hws[IMX8MM_CLK_CLKOUT2] = imx_clk_hw_gate("clkout2", "clkout2_div", base + 0x128, 24);
2502 +
2503 +@@ -470,10 +470,11 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
2504 +
2505 + /*
2506 + * DRAM clocks are manipulated from TF-A outside clock framework.
2507 +- * Mark with GET_RATE_NOCACHE to always read div value from hardware
2508 ++ * The fw_managed helper sets GET_RATE_NOCACHE and clears SET_PARENT_GATE
2509 ++ * as the div value should always be read from hardware
2510 + */
2511 +- hws[IMX8MM_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
2512 +- hws[IMX8MM_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mm_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
2513 ++ hws[IMX8MM_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000);
2514 ++ hws[IMX8MM_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mm_dram_apb_sels, base + 0xa080);
2515 +
2516 + /* IP */
2517 + hws[IMX8MM_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
2518 +diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
2519 +index 88f6630cd472f..0a76f969b28b3 100644
2520 +--- a/drivers/clk/imx/clk-imx8mn.c
2521 ++++ b/drivers/clk/imx/clk-imx8mn.c
2522 +@@ -453,10 +453,11 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
2523 +
2524 + /*
2525 + * DRAM clocks are manipulated from TF-A outside clock framework.
2526 +- * Mark with GET_RATE_NOCACHE to always read div value from hardware
2527 ++ * The fw_managed helper sets GET_RATE_NOCACHE and clears SET_PARENT_GATE
2528 ++ * as the div value should always be read from hardware
2529 + */
2530 +- hws[IMX8MN_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
2531 +- hws[IMX8MN_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mn_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
2532 ++ hws[IMX8MN_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000);
2533 ++ hws[IMX8MN_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mn_dram_apb_sels, base + 0xa080);
2534 +
2535 + hws[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_hw_composite("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500);
2536 + hws[IMX8MN_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mn_sai2_sels, base + 0xa600);
2537 +diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
2538 +index c491bc9c61ce7..83cc2b1c32947 100644
2539 +--- a/drivers/clk/imx/clk-imx8mq.c
2540 ++++ b/drivers/clk/imx/clk-imx8mq.c
2541 +@@ -449,11 +449,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
2542 +
2543 + /*
2544 + * DRAM clocks are manipulated from TF-A outside clock framework.
2545 +- * Mark with GET_RATE_NOCACHE to always read div value from hardware
2546 ++ * The fw_managed helper sets GET_RATE_NOCACHE and clears SET_PARENT_GATE
2547 ++ * as the div value should always be read from hardware
2548 + */
2549 + hws[IMX8MQ_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mq_dram_core_sels, ARRAY_SIZE(imx8mq_dram_core_sels), CLK_IS_CRITICAL);
2550 +- hws[IMX8MQ_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
2551 +- hws[IMX8MQ_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mq_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
2552 ++ hws[IMX8MQ_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000);
2553 ++ hws[IMX8MQ_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mq_dram_apb_sels, base + 0xa080);
2554 +
2555 + /* IP */
2556 + hws[IMX8MQ_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mq_vpu_g1_sels, base + 0xa100);
2557 +diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
2558 +index 7571603bee23b..e144f983fd8ce 100644
2559 +--- a/drivers/clk/imx/clk.h
2560 ++++ b/drivers/clk/imx/clk.h
2561 +@@ -530,8 +530,9 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
2562 + struct clk *div, struct clk *mux, struct clk *pll,
2563 + struct clk *step);
2564 +
2565 +-#define IMX_COMPOSITE_CORE BIT(0)
2566 +-#define IMX_COMPOSITE_BUS BIT(1)
2567 ++#define IMX_COMPOSITE_CORE BIT(0)
2568 ++#define IMX_COMPOSITE_BUS BIT(1)
2569 ++#define IMX_COMPOSITE_FW_MANAGED BIT(2)
2570 +
2571 + struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
2572 + const char * const *parent_names,
2573 +@@ -567,6 +568,17 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
2574 + ARRAY_SIZE(parent_names), reg, 0, \
2575 + flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
2576 +
2577 ++#define __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, flags) \
2578 ++ imx8m_clk_hw_composite_flags(name, parent_names, \
2579 ++ ARRAY_SIZE(parent_names), reg, IMX_COMPOSITE_FW_MANAGED, \
2580 ++ flags | CLK_GET_RATE_NOCACHE | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
2581 ++
2582 ++#define imx8m_clk_hw_fw_managed_composite(name, parent_names, reg) \
2583 ++ __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, 0)
2584 ++
2585 ++#define imx8m_clk_hw_fw_managed_composite_critical(name, parent_names, reg) \
2586 ++ __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, CLK_IS_CRITICAL)
2587 ++
2588 + #define __imx8m_clk_composite(name, parent_names, reg, flags) \
2589 + to_clk(__imx8m_clk_hw_composite(name, parent_names, reg, flags))
2590 +
2591 +diff --git a/drivers/clk/ralink/clk-mt7621.c b/drivers/clk/ralink/clk-mt7621.c
2592 +index 857da1e274be9..a2c045390f008 100644
2593 +--- a/drivers/clk/ralink/clk-mt7621.c
2594 ++++ b/drivers/clk/ralink/clk-mt7621.c
2595 +@@ -131,14 +131,7 @@ static int mt7621_gate_ops_init(struct device *dev,
2596 + struct mt7621_gate *sclk)
2597 + {
2598 + struct clk_init_data init = {
2599 +- /*
2600 +- * Until now no clock driver existed so
2601 +- * these SoC drivers are not prepared
2602 +- * yet for the clock. We don't want kernel to
2603 +- * disable anything so we add CLK_IS_CRITICAL
2604 +- * flag here.
2605 +- */
2606 +- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
2607 ++ .flags = CLK_SET_RATE_PARENT,
2608 + .num_parents = 1,
2609 + .parent_names = &sclk->parent_name,
2610 + .ops = &mt7621_gate_ops,
2611 +diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.c b/drivers/clk/renesas/renesas-rzg2l-cpg.c
2612 +index e7c59af2a1d85..f894a210de902 100644
2613 +--- a/drivers/clk/renesas/renesas-rzg2l-cpg.c
2614 ++++ b/drivers/clk/renesas/renesas-rzg2l-cpg.c
2615 +@@ -229,7 +229,7 @@ static struct clk
2616 +
2617 + case CPG_MOD:
2618 + type = "module";
2619 +- if (clkidx > priv->num_mod_clks) {
2620 ++ if (clkidx >= priv->num_mod_clks) {
2621 + dev_err(dev, "Invalid %s clock index %u\n", type,
2622 + clkidx);
2623 + return ERR_PTR(-EINVAL);
2624 +diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
2625 +index fe937bcdb4876..f7827b3b7fc1c 100644
2626 +--- a/drivers/clk/rockchip/clk-pll.c
2627 ++++ b/drivers/clk/rockchip/clk-pll.c
2628 +@@ -940,7 +940,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
2629 + switch (pll_type) {
2630 + case pll_rk3036:
2631 + case pll_rk3328:
2632 +- if (!pll->rate_table || IS_ERR(ctx->grf))
2633 ++ if (!pll->rate_table)
2634 + init.ops = &rockchip_rk3036_pll_clk_norate_ops;
2635 + else
2636 + init.ops = &rockchip_rk3036_pll_clk_ops;
2637 +diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
2638 +index 1cb21ea79c640..242e94c0cf8a3 100644
2639 +--- a/drivers/clk/socfpga/clk-agilex.c
2640 ++++ b/drivers/clk/socfpga/clk-agilex.c
2641 +@@ -107,10 +107,10 @@ static const struct clk_parent_data gpio_db_free_mux[] = {
2642 + };
2643 +
2644 + static const struct clk_parent_data psi_ref_free_mux[] = {
2645 +- { .fw_name = "main_pll_c3",
2646 +- .name = "main_pll_c3", },
2647 +- { .fw_name = "peri_pll_c3",
2648 +- .name = "peri_pll_c3", },
2649 ++ { .fw_name = "main_pll_c2",
2650 ++ .name = "main_pll_c2", },
2651 ++ { .fw_name = "peri_pll_c2",
2652 ++ .name = "peri_pll_c2", },
2653 + { .fw_name = "osc1",
2654 + .name = "osc1", },
2655 + { .fw_name = "cb-intosc-hs-div2-clk",
2656 +@@ -195,6 +195,13 @@ static const struct clk_parent_data sdmmc_mux[] = {
2657 + .name = "boot_clk", },
2658 + };
2659 +
2660 ++static const struct clk_parent_data s2f_user0_mux[] = {
2661 ++ { .fw_name = "s2f_user0_free_clk",
2662 ++ .name = "s2f_user0_free_clk", },
2663 ++ { .fw_name = "boot_clk",
2664 ++ .name = "boot_clk", },
2665 ++};
2666 ++
2667 + static const struct clk_parent_data s2f_user1_mux[] = {
2668 + { .fw_name = "s2f_user1_free_clk",
2669 + .name = "s2f_user1_free_clk", },
2670 +@@ -273,7 +280,7 @@ static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = {
2671 + { AGILEX_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
2672 + ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0, 0},
2673 + { AGILEX_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", NULL, s2f_usr0_free_mux,
2674 +- ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0, 0},
2675 ++ ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0x30, 2},
2676 + { AGILEX_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, s2f_usr1_free_mux,
2677 + ARRAY_SIZE(s2f_usr1_free_mux), 0, 0xEC, 0, 0x88, 5},
2678 + { AGILEX_PSI_REF_FREE_CLK, "psi_ref_free_clk", NULL, psi_ref_free_mux,
2679 +@@ -319,6 +326,8 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
2680 + 4, 0x98, 0, 16, 0x88, 3, 0},
2681 + { AGILEX_SDMMC_CLK, "sdmmc_clk", NULL, sdmmc_mux, ARRAY_SIZE(sdmmc_mux), 0, 0x7C,
2682 + 5, 0, 0, 0, 0x88, 4, 4},
2683 ++ { AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_user0_mux, ARRAY_SIZE(s2f_user0_mux), 0, 0x24,
2684 ++ 6, 0, 0, 0, 0x30, 2, 0},
2685 + { AGILEX_S2F_USER1_CLK, "s2f_user1_clk", NULL, s2f_user1_mux, ARRAY_SIZE(s2f_user1_mux), 0, 0x7C,
2686 + 6, 0, 0, 0, 0x88, 5, 0},
2687 + { AGILEX_PSI_REF_CLK, "psi_ref_clk", NULL, psi_mux, ARRAY_SIZE(psi_mux), 0, 0x7C,
2688 +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
2689 +index 005600cef2730..6fbb46b2f6dac 100644
2690 +--- a/drivers/cpufreq/powernv-cpufreq.c
2691 ++++ b/drivers/cpufreq/powernv-cpufreq.c
2692 +@@ -36,6 +36,7 @@
2693 + #define MAX_PSTATE_SHIFT 32
2694 + #define LPSTATE_SHIFT 48
2695 + #define GPSTATE_SHIFT 56
2696 ++#define MAX_NR_CHIPS 32
2697 +
2698 + #define MAX_RAMP_DOWN_TIME 5120
2699 + /*
2700 +@@ -1046,12 +1047,20 @@ static int init_chip_info(void)
2701 + unsigned int *chip;
2702 + unsigned int cpu, i;
2703 + unsigned int prev_chip_id = UINT_MAX;
2704 ++ cpumask_t *chip_cpu_mask;
2705 + int ret = 0;
2706 +
2707 + chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
2708 + if (!chip)
2709 + return -ENOMEM;
2710 +
2711 ++ /* Allocate a chip cpu mask array large enough to fit masks for all chips */
2712 ++ chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
2713 ++ if (!chip_cpu_mask) {
2714 ++ ret = -ENOMEM;
2715 ++ goto free_and_return;
2716 ++ }
2717 ++
2718 + for_each_possible_cpu(cpu) {
2719 + unsigned int id = cpu_to_chip_id(cpu);
2720 +
2721 +@@ -1059,22 +1068,25 @@ static int init_chip_info(void)
2722 + prev_chip_id = id;
2723 + chip[nr_chips++] = id;
2724 + }
2725 ++ cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
2726 + }
2727 +
2728 + chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
2729 + if (!chips) {
2730 + ret = -ENOMEM;
2731 +- goto free_and_return;
2732 ++ goto out_free_chip_cpu_mask;
2733 + }
2734 +
2735 + for (i = 0; i < nr_chips; i++) {
2736 + chips[i].id = chip[i];
2737 +- cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
2738 ++ cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]);
2739 + INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
2740 + for_each_cpu(cpu, &chips[i].mask)
2741 + per_cpu(chip_info, cpu) = &chips[i];
2742 + }
2743 +
2744 ++out_free_chip_cpu_mask:
2745 ++ kfree(chip_cpu_mask);
2746 + free_and_return:
2747 + kfree(chip);
2748 + return ret;
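The cpufreq change builds per-chip CPU masks while scanning, instead of trusting cpumask_of_node(), and the new temporary allocation slots into the existing goto unwind. A compact userspace sketch of that error-handling shape (sizes and the helper name are placeholders, not kernel API):

    #include <errno.h>
    #include <stdlib.h>

    static int init_chip_info_sketch(void)
    {
            int ret = 0;
            int *chip, *chip_cpu_mask;

            chip = calloc(32, sizeof(*chip));
            if (!chip)
                    return -ENOMEM;

            chip_cpu_mask = calloc(32, sizeof(*chip_cpu_mask));
            if (!chip_cpu_mask) {
                    ret = -ENOMEM;
                    goto free_and_return; /* frees only what exists so far */
            }

            /* ... fill the masks, then release the temporary ... */

            free(chip_cpu_mask);
    free_and_return:
            free(chip);
            return ret;
    }

    int main(void)
    {
            return init_chip_info_sketch() ? 1 : 0;
    }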
2749 +diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
2750 +index a2b5c6f60cf0e..ff164dec8422e 100644
2751 +--- a/drivers/cpuidle/cpuidle-pseries.c
2752 ++++ b/drivers/cpuidle/cpuidle-pseries.c
2753 +@@ -402,7 +402,7 @@ static void __init fixup_cede0_latency(void)
2754 + * pseries_idle_probe()
2755 + * Choose state table for shared versus dedicated partition
2756 + */
2757 +-static int pseries_idle_probe(void)
2758 ++static int __init pseries_idle_probe(void)
2759 + {
2760 +
2761 + if (cpuidle_disable != IDLE_NO_OVERRIDE)
2762 +@@ -419,7 +419,21 @@ static int pseries_idle_probe(void)
2763 + cpuidle_state_table = shared_states;
2764 + max_idle_state = ARRAY_SIZE(shared_states);
2765 + } else {
2766 +- fixup_cede0_latency();
2767 ++ /*
2768 ++ * Use firmware-provided latency values
2769 ++ * starting with POWER10 platforms. If we
2770 ++ * are running on a POWER10 platform but in
2771 ++ * an earlier compat mode, we can still use
2772 ++ * the firmware-provided values.
2773 ++ *
2774 ++ * However, on platforms prior to POWER10, we
2775 ++ * cannot rely on the accuracy of the
2776 ++ * firmware-provided latency values. On such platforms,
2777 ++ * go with the conservative default estimate
2778 ++ * of 10us.
2779 ++ */
2780 ++ if (cpu_has_feature(CPU_FTR_ARCH_31) || pvr_version_is(PVR_POWER10))
2781 ++ fixup_cede0_latency();
2782 + cpuidle_state_table = dedicated_states;
2783 + max_idle_state = NR_DEDICATED_STATES;
2784 + }
2785 +diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
2786 +index 91808402e0bf2..2ecb0e1f65d8d 100644
2787 +--- a/drivers/crypto/ccp/sev-dev.c
2788 ++++ b/drivers/crypto/ccp/sev-dev.c
2789 +@@ -300,6 +300,9 @@ static int __sev_platform_shutdown_locked(int *error)
2790 + struct sev_device *sev = psp_master->sev_data;
2791 + int ret;
2792 +
2793 ++ if (sev->state == SEV_STATE_UNINIT)
2794 ++ return 0;
2795 ++
2796 + ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
2797 + if (ret)
2798 + return ret;
2799 +@@ -1019,6 +1022,20 @@ e_err:
2800 + return ret;
2801 + }
2802 +
2803 ++static void sev_firmware_shutdown(struct sev_device *sev)
2804 ++{
2805 ++ sev_platform_shutdown(NULL);
2806 ++
2807 ++ if (sev_es_tmr) {
2808 ++ /* The TMR area was encrypted, flush it from the cache */
2809 ++ wbinvd_on_all_cpus();
2810 ++
2811 ++ free_pages((unsigned long)sev_es_tmr,
2812 ++ get_order(SEV_ES_TMR_SIZE));
2813 ++ sev_es_tmr = NULL;
2814 ++ }
2815 ++}
2816 ++
2817 + void sev_dev_destroy(struct psp_device *psp)
2818 + {
2819 + struct sev_device *sev = psp->sev_data;
2820 +@@ -1026,6 +1043,8 @@ void sev_dev_destroy(struct psp_device *psp)
2821 + if (!sev)
2822 + return;
2823 +
2824 ++ sev_firmware_shutdown(sev);
2825 ++
2826 + if (sev->misc)
2827 + kref_put(&misc_dev->refcount, sev_exit);
2828 +
2829 +@@ -1056,21 +1075,6 @@ void sev_pci_init(void)
2830 + if (sev_get_api_version())
2831 + goto err;
2832 +
2833 +- /*
2834 +- * If platform is not in UNINIT state then firmware upgrade and/or
2835 +- * platform INIT command will fail. These command require UNINIT state.
2836 +- *
2837 +- * In a normal boot we should never run into case where the firmware
2838 +- * is not in UNINIT state on boot. But in case of kexec boot, a reboot
2839 +- * may not go through a typical shutdown sequence and may leave the
2840 +- * firmware in INIT or WORKING state.
2841 +- */
2842 +-
2843 +- if (sev->state != SEV_STATE_UNINIT) {
2844 +- sev_platform_shutdown(NULL);
2845 +- sev->state = SEV_STATE_UNINIT;
2846 +- }
2847 +-
2848 + if (sev_version_greater_or_equal(0, 15) &&
2849 + sev_update_firmware(sev->dev) == 0)
2850 + sev_get_api_version();
2851 +@@ -1115,17 +1119,10 @@ err:
2852 +
2853 + void sev_pci_exit(void)
2854 + {
2855 +- if (!psp_master->sev_data)
2856 +- return;
2857 +-
2858 +- sev_platform_shutdown(NULL);
2859 ++ struct sev_device *sev = psp_master->sev_data;
2860 +
2861 +- if (sev_es_tmr) {
2862 +- /* The TMR area was encrypted, flush it from the cache */
2863 +- wbinvd_on_all_cpus();
2864 ++ if (!sev)
2865 ++ return;
2866 +
2867 +- free_pages((unsigned long)sev_es_tmr,
2868 +- get_order(SEV_ES_TMR_SIZE));
2869 +- sev_es_tmr = NULL;
2870 +- }
2871 ++ sev_firmware_shutdown(sev);
2872 + }
2873 +diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
2874 +index 6fb6ba35f89d4..9bcc1884c06a1 100644
2875 +--- a/drivers/crypto/ccp/sp-pci.c
2876 ++++ b/drivers/crypto/ccp/sp-pci.c
2877 +@@ -241,6 +241,17 @@ e_err:
2878 + return ret;
2879 + }
2880 +
2881 ++static void sp_pci_shutdown(struct pci_dev *pdev)
2882 ++{
2883 ++ struct device *dev = &pdev->dev;
2884 ++ struct sp_device *sp = dev_get_drvdata(dev);
2885 ++
2886 ++ if (!sp)
2887 ++ return;
2888 ++
2889 ++ sp_destroy(sp);
2890 ++}
2891 ++
2892 + static void sp_pci_remove(struct pci_dev *pdev)
2893 + {
2894 + struct device *dev = &pdev->dev;
2895 +@@ -371,6 +382,7 @@ static struct pci_driver sp_pci_driver = {
2896 + .id_table = sp_pci_table,
2897 + .probe = sp_pci_probe,
2898 + .remove = sp_pci_remove,
2899 ++ .shutdown = sp_pci_shutdown,
2900 + .driver.pm = &sp_pci_pm_ops,
2901 + };
2902 +
2903 +diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
2904 +index f397cc5bf1021..d19e5ffb5104b 100644
2905 +--- a/drivers/crypto/mxs-dcp.c
2906 ++++ b/drivers/crypto/mxs-dcp.c
2907 +@@ -300,21 +300,20 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
2908 +
2909 + struct scatterlist *dst = req->dst;
2910 + struct scatterlist *src = req->src;
2911 +- const int nents = sg_nents(req->src);
2912 ++ int dst_nents = sg_nents(dst);
2913 +
2914 + const int out_off = DCP_BUF_SZ;
2915 + uint8_t *in_buf = sdcp->coh->aes_in_buf;
2916 + uint8_t *out_buf = sdcp->coh->aes_out_buf;
2917 +
2918 +- uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
2919 + uint32_t dst_off = 0;
2920 ++ uint8_t *src_buf = NULL;
2921 + uint32_t last_out_len = 0;
2922 +
2923 + uint8_t *key = sdcp->coh->aes_key;
2924 +
2925 + int ret = 0;
2926 +- int split = 0;
2927 +- unsigned int i, len, clen, rem = 0, tlen = 0;
2928 ++ unsigned int i, len, clen, tlen = 0;
2929 + int init = 0;
2930 + bool limit_hit = false;
2931 +
2932 +@@ -332,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
2933 + memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
2934 + }
2935 +
2936 +- for_each_sg(req->src, src, nents, i) {
2937 ++ for_each_sg(req->src, src, sg_nents(src), i) {
2938 + src_buf = sg_virt(src);
2939 + len = sg_dma_len(src);
2940 + tlen += len;
2941 +@@ -357,34 +356,17 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
2942 + * submit the buffer.
2943 + */
2944 + if (actx->fill == out_off || sg_is_last(src) ||
2945 +- limit_hit) {
2946 ++ limit_hit) {
2947 + ret = mxs_dcp_run_aes(actx, req, init);
2948 + if (ret)
2949 + return ret;
2950 + init = 0;
2951 +
2952 +- out_tmp = out_buf;
2953 ++ sg_pcopy_from_buffer(dst, dst_nents, out_buf,
2954 ++ actx->fill, dst_off);
2955 ++ dst_off += actx->fill;
2956 + last_out_len = actx->fill;
2957 +- while (dst && actx->fill) {
2958 +- if (!split) {
2959 +- dst_buf = sg_virt(dst);
2960 +- dst_off = 0;
2961 +- }
2962 +- rem = min(sg_dma_len(dst) - dst_off,
2963 +- actx->fill);
2964 +-
2965 +- memcpy(dst_buf + dst_off, out_tmp, rem);
2966 +- out_tmp += rem;
2967 +- dst_off += rem;
2968 +- actx->fill -= rem;
2969 +-
2970 +- if (dst_off == sg_dma_len(dst)) {
2971 +- dst = sg_next(dst);
2972 +- split = 0;
2973 +- } else {
2974 +- split = 1;
2975 +- }
2976 +- }
2977 ++ actx->fill = 0;
2978 + }
2979 + } while (len);
2980 +
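The mxs-dcp rework drops the hand-rolled walk over the destination scatterlist in favour of sg_pcopy_from_buffer(), which copies out of a linear buffer into the segment list at a running offset. A userspace analogue of that helper's semantics (simplified: plain arrays stand in for scatterlist entries, no DMA):

    #include <stdio.h>
    #include <string.h>

    struct seg { unsigned char *buf; size_t len; };

    /* copy len bytes of src into the segments, skipping 'skip' bytes first */
    static size_t pcopy_from_buffer(struct seg *segs, int nsegs,
                                    const unsigned char *src, size_t len,
                                    size_t skip)
    {
            size_t copied = 0;

            for (int i = 0; i < nsegs && copied < len; i++) {
                    if (skip >= segs[i].len) { /* whole segment skipped */
                            skip -= segs[i].len;
                            continue;
                    }
                    size_t n = segs[i].len - skip;
                    if (n > len - copied)
                            n = len - copied;
                    memcpy(segs[i].buf + skip, src + copied, n);
                    copied += n;
                    skip = 0;
            }
            return copied;
    }

    int main(void)
    {
            unsigned char a[4], b[4];
            struct seg segs[] = { { a, sizeof(a) }, { b, sizeof(b) } };

            memset(a, '.', sizeof(a));
            memset(b, '.', sizeof(b));
            pcopy_from_buffer(segs, 2, (const unsigned char *)"ABCDEF", 6, 1);
            printf("%.4s %.4s\n", a, b); /* .ABC DEF. */
            return 0;
    }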
2981 +diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
2982 +index f26c71747d43a..e744fd87c63c8 100644
2983 +--- a/drivers/dma-buf/dma-resv.c
2984 ++++ b/drivers/dma-buf/dma-resv.c
2985 +@@ -615,25 +615,21 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
2986 + */
2987 + bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
2988 + {
2989 +- unsigned int seq, shared_count;
2990 ++ struct dma_fence *fence;
2991 ++ unsigned int seq;
2992 + int ret;
2993 +
2994 + rcu_read_lock();
2995 + retry:
2996 + ret = true;
2997 +- shared_count = 0;
2998 + seq = read_seqcount_begin(&obj->seq);
2999 +
3000 + if (test_all) {
3001 + struct dma_resv_list *fobj = dma_resv_shared_list(obj);
3002 +- unsigned int i;
3003 +-
3004 +- if (fobj)
3005 +- shared_count = fobj->shared_count;
3006 ++ unsigned int i, shared_count;
3007 +
3008 ++ shared_count = fobj ? fobj->shared_count : 0;
3009 + for (i = 0; i < shared_count; ++i) {
3010 +- struct dma_fence *fence;
3011 +-
3012 + fence = rcu_dereference(fobj->shared[i]);
3013 + ret = dma_resv_test_signaled_single(fence);
3014 + if (ret < 0)
3015 +@@ -641,24 +637,19 @@ retry:
3016 + else if (!ret)
3017 + break;
3018 + }
3019 +-
3020 +- if (read_seqcount_retry(&obj->seq, seq))
3021 +- goto retry;
3022 + }
3023 +
3024 +- if (!shared_count) {
3025 +- struct dma_fence *fence_excl = dma_resv_excl_fence(obj);
3026 +-
3027 +- if (fence_excl) {
3028 +- ret = dma_resv_test_signaled_single(fence_excl);
3029 +- if (ret < 0)
3030 +- goto retry;
3031 ++ fence = dma_resv_excl_fence(obj);
3032 ++ if (ret && fence) {
3033 ++ ret = dma_resv_test_signaled_single(fence);
3034 ++ if (ret < 0)
3035 ++ goto retry;
3036 +
3037 +- if (read_seqcount_retry(&obj->seq, seq))
3038 +- goto retry;
3039 +- }
3040 + }
3041 +
3042 ++ if (read_seqcount_retry(&obj->seq, seq))
3043 ++ goto retry;
3044 ++
3045 + rcu_read_unlock();
3046 + return ret;
3047 + }
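The dma-resv rework keeps the seqcount-retry discipline but moves the retry check after both the shared and the exclusive fence have been examined, so one consistent snapshot covers the whole test. A stripped-down userspace rendering of that read pattern (single-threaded and without the kernel's memory barriers, so purely illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int seq; /* even = stable, odd = write in progress */
    static int shared_state;

    static unsigned int read_begin(void)
    {
            unsigned int s;

            while ((s = atomic_load(&seq)) & 1)
                    ; /* spin while a writer holds the sequence odd */
            return s;
    }

    static int read_retry(unsigned int s)
    {
            return atomic_load(&seq) != s; /* changed => torn read, retry */
    }

    int main(void)
    {
            unsigned int s;
            int snapshot;

            do {
                    s = read_begin();
                    snapshot = shared_state; /* the entire read goes here */
            } while (read_retry(s));

            printf("%d\n", snapshot);
            return 0;
    }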
3048 +diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
3049 +index 8070fd664bfc6..665ccbf2b8be8 100644
3050 +--- a/drivers/dma/imx-sdma.c
3051 ++++ b/drivers/dma/imx-sdma.c
3052 +@@ -433,7 +433,6 @@ struct sdma_channel {
3053 + unsigned long watermark_level;
3054 + u32 shp_addr, per_addr;
3055 + enum dma_status status;
3056 +- bool context_loaded;
3057 + struct imx_dma_data data;
3058 + struct work_struct terminate_worker;
3059 + };
3060 +@@ -1008,9 +1007,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
3061 + int ret;
3062 + unsigned long flags;
3063 +
3064 +- if (sdmac->context_loaded)
3065 +- return 0;
3066 +-
3067 + if (sdmac->direction == DMA_DEV_TO_MEM)
3068 + load_address = sdmac->pc_from_device;
3069 + else if (sdmac->direction == DMA_DEV_TO_DEV)
3070 +@@ -1053,8 +1049,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
3071 +
3072 + spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
3073 +
3074 +- sdmac->context_loaded = true;
3075 +-
3076 + return ret;
3077 + }
3078 +
3079 +@@ -1093,7 +1087,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
3080 + vchan_get_all_descriptors(&sdmac->vc, &head);
3081 + spin_unlock_irqrestore(&sdmac->vc.lock, flags);
3082 + vchan_dma_desc_free_list(&sdmac->vc, &head);
3083 +- sdmac->context_loaded = false;
3084 + }
3085 +
3086 + static int sdma_terminate_all(struct dma_chan *chan)
3087 +@@ -1168,7 +1161,6 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
3088 + static int sdma_config_channel(struct dma_chan *chan)
3089 + {
3090 + struct sdma_channel *sdmac = to_sdma_chan(chan);
3091 +- int ret;
3092 +
3093 + sdma_disable_channel(chan);
3094 +
3095 +@@ -1208,9 +1200,7 @@ static int sdma_config_channel(struct dma_chan *chan)
3096 + sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
3097 + }
3098 +
3099 +- ret = sdma_load_context(sdmac);
3100 +-
3101 +- return ret;
3102 ++ return 0;
3103 + }
3104 +
3105 + static int sdma_set_channel_priority(struct sdma_channel *sdmac,
3106 +@@ -1361,7 +1351,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
3107 +
3108 + sdmac->event_id0 = 0;
3109 + sdmac->event_id1 = 0;
3110 +- sdmac->context_loaded = false;
3111 +
3112 + sdma_set_channel_priority(sdmac, 0);
3113 +
3114 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
3115 +index 8e5a7ac8c36fc..7a73167319116 100644
3116 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
3117 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
3118 +@@ -522,6 +522,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
3119 + break;
3120 + case CHIP_RENOIR:
3121 + case CHIP_VANGOGH:
3122 ++ case CHIP_YELLOW_CARP:
3123 + domain |= AMDGPU_GEM_DOMAIN_GTT;
3124 + break;
3125 +
3126 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
3127 +index 854fc497844b8..9a67746c10edd 100644
3128 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
3129 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
3130 +@@ -341,21 +341,18 @@ retry:
3131 + r = amdgpu_gem_object_create(adev, size, args->in.alignment,
3132 + initial_domain,
3133 + flags, ttm_bo_type_device, resv, &gobj);
3134 +- if (r) {
3135 +- if (r != -ERESTARTSYS) {
3136 +- if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
3137 +- flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
3138 +- goto retry;
3139 +- }
3140 ++ if (r && r != -ERESTARTSYS) {
3141 ++ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
3142 ++ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
3143 ++ goto retry;
3144 ++ }
3145 +
3146 +- if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
3147 +- initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
3148 +- goto retry;
3149 +- }
3150 +- DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
3151 +- size, initial_domain, args->in.alignment, r);
3152 ++ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
3153 ++ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
3154 ++ goto retry;
3155 + }
3156 +- return r;
3157 ++ DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
3158 ++ size, initial_domain, args->in.alignment, r);
3159 + }
3160 +
3161 + if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
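
The amdgpu_gem hunk flattens the nested error handling into one "r && r != -ERESTARTSYS" guard: on failure the allocation is retried first without the CPU-access flag, then with the domain widened from VRAM to VRAM|GTT. The progressive-fallback shape in isolation (FLAG_/DOMAIN_ names are illustrative stand-ins for the AMDGPU_* constants):

    static int alloc_with_fallback(u64 size, u32 domain, u64 flags)
    {
        int r;

    retry:
        r = try_alloc(size, domain, flags);     /* illustrative helper */
        if (r && r != -ERESTARTSYS) {
            if (flags & FLAG_CPU_ACCESS) {
                flags &= ~FLAG_CPU_ACCESS;      /* 1st relaxation */
                goto retry;
            }
            if (domain == DOMAIN_VRAM) {
                domain |= DOMAIN_GTT;           /* 2nd relaxation */
                goto retry;
            }
            /* out of fallbacks: log and report r to the caller */
        }
        return r;
    }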
3162 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
3163 +index bca4dddd5a15b..82608df433964 100644
3164 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
3165 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
3166 +@@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
3167 + void
3168 + amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
3169 + {
3170 +- u8 val;
3171 ++ u8 val = 0;
3172 +
3173 + if (!amdgpu_connector->router.ddc_valid)
3174 + return;
3175 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
3176 +index 92c8e6e7f346b..def812f6231aa 100644
3177 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
3178 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
3179 +@@ -196,7 +196,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
3180 + c++;
3181 + }
3182 +
3183 +- BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
3184 ++ BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
3185 +
3186 + placement->num_placement = c;
3187 + placement->placement = places;
3188 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
3189 +index fc66aca285944..95d5842385b32 100644
3190 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
3191 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
3192 +@@ -1966,11 +1966,20 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
3193 + bool exc_err_limit = false;
3194 + int ret;
3195 +
3196 +- if (adev->ras_enabled && con)
3197 +- data = &con->eh_data;
3198 +- else
3199 ++ if (!con)
3200 ++ return 0;
3201 ++
3202 ++	/* Allow access to the RAS EEPROM via debugfs when the ASIC
3203 ++	 * supports RAS and debugfs is enabled, even when
3204 ++	 * adev->ras_enabled is unset, i.e. when the "ras_enable"
3205 ++	 * module parameter is set to 0.
3206 ++ */
3207 ++ con->adev = adev;
3208 ++
3209 ++ if (!adev->ras_enabled)
3210 + return 0;
3211 +
3212 ++ data = &con->eh_data;
3213 + *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
3214 + if (!*data) {
3215 + ret = -ENOMEM;
3216 +@@ -1980,7 +1989,6 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
3217 + mutex_init(&con->recovery_lock);
3218 + INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3219 + atomic_set(&con->in_recovery, 0);
3220 +- con->adev = adev;
3221 +
3222 + max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
3223 + amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);
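
The amdgpu_ras reordering assigns con->adev before the adev->ras_enabled early return, so the debugfs path to the RAS EEPROM keeps working even with RAS disabled. The shape of the fix, sketched with illustrative demo_* types:

    static int recovery_init(struct demo_dev *adev, struct demo_ras *con)
    {
        if (!con)
            return 0;

        /* fields that side channels (here: debugfs) rely on are set
         * before any early return ... */
        con->adev = adev;

        /* ... so bailing out when RAS is disabled no longer leaves
         * them uninitialised */
        if (!adev->ras_enabled)
            return 0;

        return setup_recovery_state(con);   /* illustrative helper */
    }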
3224 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
3225 +index 38222de921d15..8dd151c9e4591 100644
3226 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
3227 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
3228 +@@ -325,7 +325,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
3229 + return ret;
3230 + }
3231 +
3232 +- __decode_table_header_from_buff(hdr, &buff[2]);
3233 ++ __decode_table_header_from_buff(hdr, buff);
3234 +
3235 + if (hdr->header == EEPROM_TABLE_HDR_VAL) {
3236 + control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
3237 +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
3238 +index 284bb42d6c866..121ee9f2b8d16 100644
3239 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
3240 ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
3241 +@@ -119,7 +119,7 @@ static int vcn_v1_0_sw_init(void *handle)
3242 + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
3243 + adev->firmware.fw_size +=
3244 + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
3245 +- DRM_INFO("PSP loading VCN firmware\n");
3246 ++ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
3247 + }
3248 +
3249 + r = amdgpu_vcn_resume(adev);
3250 +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
3251 +index 8af567c546dbc..f4686e918e0d1 100644
3252 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
3253 ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
3254 +@@ -122,7 +122,7 @@ static int vcn_v2_0_sw_init(void *handle)
3255 + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
3256 + adev->firmware.fw_size +=
3257 + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
3258 +- DRM_INFO("PSP loading VCN firmware\n");
3259 ++ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
3260 + }
3261 +
3262 + r = amdgpu_vcn_resume(adev);
3263 +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
3264 +index 888b17d84691c..e0c0c3734432e 100644
3265 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
3266 ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
3267 +@@ -152,7 +152,7 @@ static int vcn_v2_5_sw_init(void *handle)
3268 + adev->firmware.fw_size +=
3269 + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
3270 + }
3271 +- DRM_INFO("PSP loading VCN firmware\n");
3272 ++ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
3273 + }
3274 +
3275 + r = amdgpu_vcn_resume(adev);
3276 +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
3277 +index 47d4f04cbd69e..2f017560948eb 100644
3278 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
3279 ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
3280 +@@ -160,7 +160,7 @@ static int vcn_v3_0_sw_init(void *handle)
3281 + adev->firmware.fw_size +=
3282 + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
3283 + }
3284 +- DRM_INFO("PSP loading VCN firmware\n");
3285 ++ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
3286 + }
3287 +
3288 + r = amdgpu_vcn_resume(adev);
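
All four VCN variants switch the firmware-loading message from DRM_INFO() to dev_info(), which tags the line with the emitting device and so stays unambiguous on multi-GPU machines. Roughly (the printed prefix is an example, not a verbatim log):

    /* before: no indication which GPU this refers to */
    DRM_INFO("PSP loading VCN firmware\n");

    /* after: e.g. "amdgpu 0000:0b:00.0: Will use PSP to load VCN firmware" */
    dev_info(adev->dev, "Will use PSP to load VCN firmware\n");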
3289 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
3290 +index 88813dad731fa..c021519af8106 100644
3291 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
3292 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
3293 +@@ -98,36 +98,78 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
3294 + uint32_t *se_mask)
3295 + {
3296 + struct kfd_cu_info cu_info;
3297 +- uint32_t cu_per_se[KFD_MAX_NUM_SE] = {0};
3298 +- int i, se, sh, cu = 0;
3299 +-
3300 ++ uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
3301 ++ int i, se, sh, cu;
3302 + amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
3303 +
3304 + if (cu_mask_count > cu_info.cu_active_number)
3305 + cu_mask_count = cu_info.cu_active_number;
3306 +
3307 ++ /* Exceeding these bounds corrupts the stack and indicates a coding error.
3308 ++	 * Returning with no CUs enabled will hang the queue, which should be
3309 ++	 * attention-grabbing.
3310 ++ */
3311 ++ if (cu_info.num_shader_engines > KFD_MAX_NUM_SE) {
3312 ++ pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n", cu_info.num_shader_engines);
3313 ++ return;
3314 ++ }
3315 ++ if (cu_info.num_shader_arrays_per_engine > KFD_MAX_NUM_SH_PER_SE) {
3316 ++ pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
3317 ++ cu_info.num_shader_arrays_per_engine * cu_info.num_shader_engines);
3318 ++ return;
3319 ++ }
3320 ++ /* Count active CUs per SH.
3321 ++ *
3322 ++ * Some CUs in an SH may be disabled. HW expects disabled CUs to be
3323 ++ * represented in the high bits of each SH's enable mask (the upper and lower
3324 ++ * 16 bits of se_mask) and will take care of the actual distribution of
3325 ++ * disabled CUs within each SH automatically.
3326 ++ * Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1.
3327 ++ *
3328 ++ * See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
3329 ++ */
3330 + for (se = 0; se < cu_info.num_shader_engines; se++)
3331 + for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
3332 +- cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
3333 +-
3334 +- /* Symmetrically map cu_mask to all SEs:
3335 +- * cu_mask[0] bit0 -> se_mask[0] bit0;
3336 +- * cu_mask[0] bit1 -> se_mask[1] bit0;
3337 +- * ... (if # SE is 4)
3338 +- * cu_mask[0] bit4 -> se_mask[0] bit1;
3339 ++ cu_per_sh[se][sh] = hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
3340 ++
3341 ++ /* Symmetrically map cu_mask to all SEs & SHs:
3342 ++ * se_mask programs up to 2 SH in the upper and lower 16 bits.
3343 ++ *
3344 ++ * Examples
3345 ++ * Assuming 1 SH/SE, 4 SEs:
3346 ++ * cu_mask[0] bit0 -> se_mask[0] bit0
3347 ++ * cu_mask[0] bit1 -> se_mask[1] bit0
3348 ++ * ...
3349 ++ * cu_mask[0] bit4 -> se_mask[0] bit1
3350 ++ * ...
3351 ++ *
3352 ++ * Assuming 2 SH/SE, 4 SEs
3353 ++ * cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
3354 ++ * cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
3355 ++ * ...
3356 ++ * cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
3357 ++ * cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
3358 ++ * ...
3359 ++ * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
3360 + * ...
3361 ++ *
3362 ++ * First ensure all CUs are disabled, then enable user specified CUs.
3363 + */
3364 +- se = 0;
3365 +- for (i = 0; i < cu_mask_count; i++) {
3366 +- if (cu_mask[i / 32] & (1 << (i % 32)))
3367 +- se_mask[se] |= 1 << cu;
3368 +-
3369 +- do {
3370 +- se++;
3371 +- if (se == cu_info.num_shader_engines) {
3372 +- se = 0;
3373 +- cu++;
3374 ++ for (i = 0; i < cu_info.num_shader_engines; i++)
3375 ++ se_mask[i] = 0;
3376 ++
3377 ++ i = 0;
3378 ++ for (cu = 0; cu < 16; cu++) {
3379 ++ for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
3380 ++ for (se = 0; se < cu_info.num_shader_engines; se++) {
3381 ++ if (cu_per_sh[se][sh] > cu) {
3382 ++ if (cu_mask[i / 32] & (1 << (i % 32)))
3383 ++ se_mask[se] |= 1 << (cu + sh * 16);
3384 ++ i++;
3385 ++ if (i == cu_mask_count)
3386 ++ return;
3387 ++ }
3388 + }
3389 +- } while (cu >= cu_per_se[se] && cu < 32);
3390 ++ }
3391 + }
3392 + }
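
The rewritten mapping walks (cu, sh, se) so user bits land on shader engines first, then shader arrays, then CU slots, with each SH occupying one 16-bit half of its SE's mask, exactly as the comment block describes. Condensed into a standalone sketch (kernel-style u32 types; bounds per the new KFD_MAX_NUM_SE / KFD_MAX_NUM_SH_PER_SE limits):

    /* n_se <= 8 engines, n_sh <= 2 shader arrays per engine */
    static void spread_cu_mask(const u32 *cu_mask, unsigned int n_bits,
                               unsigned int n_se, unsigned int n_sh,
                               u32 cu_per_sh[8][2], u32 *se_mask)
    {
        unsigned int i = 0, cu, sh, se;

        for (se = 0; se < n_se; se++)
            se_mask[se] = 0;                  /* disable all CUs first */

        for (cu = 0; cu < 16; cu++)               /* CU slot in an SH  */
            for (sh = 0; sh < n_sh; sh++)         /* SH = 16-bit half  */
                for (se = 0; se < n_se; se++) {   /* engines first     */
                    if (cu_per_sh[se][sh] <= cu)
                        continue;             /* no such CU on this SH */
                    if (cu_mask[i / 32] & (1u << (i % 32)))
                        se_mask[se] |= 1u << (cu + sh * 16);
                    if (++i == n_bits)
                        return;
                }
    }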
3393 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
3394 +index b5e2ea7550d41..6e6918ccedfdb 100644
3395 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
3396 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
3397 +@@ -27,6 +27,7 @@
3398 + #include "kfd_priv.h"
3399 +
3400 + #define KFD_MAX_NUM_SE 8
3401 ++#define KFD_MAX_NUM_SH_PER_SE 2
3402 +
3403 + /**
3404 + * struct mqd_manager
3405 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3406 +index e883731c3f8ff..0f7f1e5621ea4 100644
3407 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3408 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3409 +@@ -2426,7 +2426,8 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
3410 + }
3411 + if (!p->xnack_enabled) {
3412 + pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
3413 +- return -EFAULT;
3414 ++ r = -EFAULT;
3415 ++ goto out;
3416 + }
3417 + svms = &p->svms;
3418 +
3419 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3420 +index afa96c8f721b7..3f913e4abd49e 100644
3421 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3422 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3423 +@@ -1202,7 +1202,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
3424 + dc_hardware_init(adev->dm.dc);
3425 +
3426 + #if defined(CONFIG_DRM_AMD_DC_DCN)
3427 +- if (adev->apu_flags) {
3428 ++ if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
3429 + struct dc_phy_addr_space_config pa_config;
3430 +
3431 + mmhub_read_system_context(adev, &pa_config);
3432 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
3433 +index f1145086a4688..1d15a9af99560 100644
3434 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
3435 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
3436 +@@ -197,29 +197,29 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
3437 +
3438 + rd_buf_ptr = rd_buf;
3439 +
3440 +- str_len = strlen("Current: %d %d %d ");
3441 +- snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
3442 ++ str_len = strlen("Current: %d 0x%x %d ");
3443 ++ snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ",
3444 + link->cur_link_settings.lane_count,
3445 + link->cur_link_settings.link_rate,
3446 + link->cur_link_settings.link_spread);
3447 + rd_buf_ptr += str_len;
3448 +
3449 +- str_len = strlen("Verified: %d %d %d ");
3450 +- snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
3451 ++ str_len = strlen("Verified: %d 0x%x %d ");
3452 ++ snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ",
3453 + link->verified_link_cap.lane_count,
3454 + link->verified_link_cap.link_rate,
3455 + link->verified_link_cap.link_spread);
3456 + rd_buf_ptr += str_len;
3457 +
3458 +- str_len = strlen("Reported: %d %d %d ");
3459 +- snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
3460 ++ str_len = strlen("Reported: %d 0x%x %d ");
3461 ++ snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ",
3462 + link->reported_link_cap.lane_count,
3463 + link->reported_link_cap.link_rate,
3464 + link->reported_link_cap.link_spread);
3465 + rd_buf_ptr += str_len;
3466 +
3467 +- str_len = strlen("Preferred: %d %d %d ");
3468 +- snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
3469 ++ str_len = strlen("Preferred: %d 0x%x %d ");
3470 ++ snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n",
3471 + link->preferred_link_setting.lane_count,
3472 + link->preferred_link_setting.link_rate,
3473 + link->preferred_link_setting.link_spread);
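
The debugfs hunk switches link_rate from %d to 0x%x so rates print in the hex form DC uses elsewhere. The surrounding sizing idiom bounds each snprintf() by strlen() of its format string, which relies on the rendered numbers fitting their specifiers; a more defensive sketch would bound by remaining space and advance by the return value (variable names assumed from context):

    size_t off = 0;

    off += scnprintf(rd_buf + off, rd_buf_size - off, "Current: %d 0x%x %d ",
                     lane_count, link_rate, link_spread);
    /* scnprintf() returns the bytes actually stored, so off can never
     * run past rd_buf_size even if a number is wider than expected */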
3474 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
3475 +index 10d42ae0cffef..3428334c6c575 100644
3476 +--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
3477 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
3478 +@@ -207,7 +207,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
3479 + cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL;
3480 + cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
3481 + cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
3482 +- cmd.psr_set_level.psr_set_level_data.cmd_version = PSR_VERSION_1;
3483 ++ cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3484 + cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst;
3485 + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
3486 + dc_dmub_srv_cmd_execute(dc->dmub_srv);
3487 +@@ -293,7 +293,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
3488 + copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
3489 + copy_settings_data->fec_enable_status = (link->fec_state == dc_link_fec_enabled);
3490 + copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us;
3491 +- copy_settings_data->cmd_version = PSR_VERSION_1;
3492 ++ copy_settings_data->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3493 + copy_settings_data->panel_inst = panel_inst;
3494 +
3495 + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
3496 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
3497 +index c545eddabdcca..75fa4adcf5f40 100644
3498 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
3499 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
3500 +@@ -1502,25 +1502,22 @@ void dcn10_init_hw(struct dc *dc)
3501 + void dcn10_power_down_on_boot(struct dc *dc)
3502 + {
3503 + struct dc_link *edp_links[MAX_NUM_EDP];
3504 +- struct dc_link *edp_link;
3505 ++ struct dc_link *edp_link = NULL;
3506 + int edp_num;
3507 + int i = 0;
3508 +
3509 + get_edp_links(dc, edp_links, &edp_num);
3510 +-
3511 +- if (edp_num) {
3512 +- for (i = 0; i < edp_num; i++) {
3513 +- edp_link = edp_links[i];
3514 +- if (edp_link->link_enc->funcs->is_dig_enabled &&
3515 +- edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
3516 +- dc->hwseq->funcs.edp_backlight_control &&
3517 +- dc->hwss.power_down &&
3518 +- dc->hwss.edp_power_control) {
3519 +- dc->hwseq->funcs.edp_backlight_control(edp_link, false);
3520 +- dc->hwss.power_down(dc);
3521 +- dc->hwss.edp_power_control(edp_link, false);
3522 +- }
3523 +- }
3524 ++ if (edp_num)
3525 ++ edp_link = edp_links[0];
3526 ++
3527 ++ if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
3528 ++ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
3529 ++ dc->hwseq->funcs.edp_backlight_control &&
3530 ++ dc->hwss.power_down &&
3531 ++ dc->hwss.edp_power_control) {
3532 ++ dc->hwseq->funcs.edp_backlight_control(edp_link, false);
3533 ++ dc->hwss.power_down(dc);
3534 ++ dc->hwss.edp_power_control(edp_link, false);
3535 + } else {
3536 + for (i = 0; i < dc->link_count; i++) {
3537 + struct dc_link *link = dc->links[i];
3538 +@@ -3631,13 +3628,12 @@ enum dc_status dcn10_set_clock(struct dc *dc,
3539 + struct dc_clock_config clock_cfg = {0};
3540 + struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3541 +
3542 +- if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3543 +- dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3544 +- context, clock_type, &clock_cfg);
3545 +-
3546 +- if (!dc->clk_mgr->funcs->get_clock)
3547 ++ if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3548 + return DC_FAIL_UNSUPPORTED_1;
3549 +
3550 ++ dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3551 ++ context, clock_type, &clock_cfg);
3552 ++
3553 + if (clk_khz > clock_cfg.max_clock_khz)
3554 + return DC_FAIL_CLK_EXCEED_MAX;
3555 +
3556 +@@ -3655,7 +3651,7 @@ enum dc_status dcn10_set_clock(struct dc *dc,
3557 + else
3558 + return DC_ERROR_UNEXPECTED;
3559 +
3560 +- if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3561 ++ if (dc->clk_mgr->funcs->update_clocks)
3562 + dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3563 + context, true);
3564 + return DC_OK;
3565 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
3566 +index 5c2853654ccad..a47ba1d45be92 100644
3567 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
3568 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
3569 +@@ -1723,13 +1723,15 @@ void dcn20_program_front_end_for_ctx(
3570 +
3571 + pipe = pipe->bottom_pipe;
3572 + }
3573 +- /* Program secondary blending tree and writeback pipes */
3574 +- pipe = &context->res_ctx.pipe_ctx[i];
3575 +- if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
3576 +- && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
3577 +- && hws->funcs.program_all_writeback_pipes_in_tree)
3578 +- hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
3579 + }
3580 ++ /* Program secondary blending tree and writeback pipes */
3581 ++ pipe = &context->res_ctx.pipe_ctx[i];
3582 ++ if (!pipe->top_pipe && !pipe->prev_odm_pipe
3583 ++ && pipe->stream && pipe->stream->num_wb_info > 0
3584 ++ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
3585 ++ || pipe->stream->update_flags.raw)
3586 ++ && hws->funcs.program_all_writeback_pipes_in_tree)
3587 ++ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
3588 + }
3589 + }
3590 +
3591 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
3592 +index b173fa3653b55..c78933a9d31c1 100644
3593 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
3594 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
3595 +@@ -2462,7 +2462,7 @@ void dcn20_set_mcif_arb_params(
3596 + wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
3597 + wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
3598 + }
3599 +- wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */
3600 ++ wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */
3601 + wb_arb_params->slice_lines = 32;
3602 + wb_arb_params->arbitration_slice = 2;
3603 + wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
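
The time_per_pixel change is a rescaling fix: with phy_pix_clk assumed to be in kHz, the old quotient is tiny and collapses to zero once truncated into the 4-bit-fraction register field, while the new form stays in a usable range. A worked example at 594000 kHz (594 MHz):

    double clk_khz = 594000.0;                       /* example value */
    double old_val = 16.0 / clk_khz;                 /* ~0.0000269    */
    double new_val = 16.0 * 1000 / (clk_khz / 1000); /* ~26.9         */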
3604 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
3605 +index 3fe9e41e4dbd7..6a3d3a0ec0a36 100644
3606 +--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
3607 ++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
3608 +@@ -49,6 +49,11 @@
3609 + static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
3610 + struct dcn3_xfer_func_reg *reg)
3611 + {
3612 ++ reg->shifts.field_region_start_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
3613 ++ reg->masks.field_region_start_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
3614 ++ reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
3615 ++ reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
3616 ++
3617 + reg->shifts.exp_region0_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
3618 + reg->masks.exp_region0_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
3619 + reg->shifts.exp_region0_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
3620 +@@ -66,8 +71,6 @@ static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
3621 + reg->masks.field_region_end_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;
3622 + reg->shifts.field_region_linear_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
3623 + reg->masks.field_region_linear_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
3624 +- reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
3625 +- reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
3626 + reg->shifts.exp_region_start = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_B;
3627 + reg->masks.exp_region_start = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_B;
3628 + reg->shifts.exp_resion_start_segment = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
3629 +@@ -147,18 +150,19 @@ static enum dc_lut_mode dwb3_get_ogam_current(
3630 + uint32_t state_mode;
3631 + uint32_t ram_select;
3632 +
3633 +- REG_GET(DWB_OGAM_CONTROL,
3634 +- DWB_OGAM_MODE, &state_mode);
3635 +- REG_GET(DWB_OGAM_CONTROL,
3636 +- DWB_OGAM_SELECT, &ram_select);
3637 ++ REG_GET_2(DWB_OGAM_CONTROL,
3638 ++ DWB_OGAM_MODE_CURRENT, &state_mode,
3639 ++ DWB_OGAM_SELECT_CURRENT, &ram_select);
3640 +
3641 + if (state_mode == 0) {
3642 + mode = LUT_BYPASS;
3643 + } else if (state_mode == 2) {
3644 + if (ram_select == 0)
3645 + mode = LUT_RAM_A;
3646 +- else
3647 ++ else if (ram_select == 1)
3648 + mode = LUT_RAM_B;
3649 ++ else
3650 ++ mode = LUT_BYPASS;
3651 + } else {
3652 + // Reserved value
3653 + mode = LUT_BYPASS;
3654 +@@ -172,10 +176,10 @@ static void dwb3_configure_ogam_lut(
3655 + struct dcn30_dwbc *dwbc30,
3656 + bool is_ram_a)
3657 + {
3658 +- REG_UPDATE(DWB_OGAM_LUT_CONTROL,
3659 +- DWB_OGAM_LUT_READ_COLOR_SEL, 7);
3660 +- REG_UPDATE(DWB_OGAM_CONTROL,
3661 +- DWB_OGAM_SELECT, is_ram_a == true ? 0 : 1);
3662 ++ REG_UPDATE_2(DWB_OGAM_LUT_CONTROL,
3663 ++ DWB_OGAM_LUT_WRITE_COLOR_MASK, 7,
3664 ++ DWB_OGAM_LUT_HOST_SEL, (is_ram_a == true) ? 0 : 1);
3665 ++
3666 + REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
3667 + }
3668 +
3669 +@@ -185,17 +189,45 @@ static void dwb3_program_ogam_pwl(struct dcn30_dwbc *dwbc30,
3670 + {
3671 + uint32_t i;
3672 +
3673 +- // triple base implementation
3674 +- for (i = 0; i < num/2; i++) {
3675 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].red_reg);
3676 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].green_reg);
3677 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].blue_reg);
3678 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].red_reg);
3679 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].green_reg);
3680 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].blue_reg);
3681 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].red_reg);
3682 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].green_reg);
3683 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].blue_reg);
3684 ++ uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
3685 ++ uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
3686 ++ uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
3687 ++
3688 ++ if (is_rgb_equal(rgb, num)) {
3689 ++ for (i = 0 ; i < num; i++)
3690 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);
3691 ++
3692 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);
3693 ++
3694 ++ } else {
3695 ++
3696 ++ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
3697 ++ DWB_OGAM_LUT_WRITE_COLOR_MASK, 4);
3698 ++
3699 ++ for (i = 0 ; i < num; i++)
3700 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);
3701 ++
3702 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);
3703 ++
3704 ++ REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
3705 ++
3706 ++ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
3707 ++ DWB_OGAM_LUT_WRITE_COLOR_MASK, 2);
3708 ++
3709 ++ for (i = 0 ; i < num; i++)
3710 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].green_reg);
3711 ++
3712 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_green);
3713 ++
3714 ++ REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
3715 ++
3716 ++ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
3717 ++ DWB_OGAM_LUT_WRITE_COLOR_MASK, 1);
3718 ++
3719 ++ for (i = 0 ; i < num; i++)
3720 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].blue_reg);
3721 ++
3722 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_blue);
3723 + }
3724 + }
3725 +
3726 +@@ -211,6 +243,8 @@ static bool dwb3_program_ogam_lut(
3727 + return false;
3728 + }
3729 +
3730 ++ REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
3731 ++
3732 + current_mode = dwb3_get_ogam_current(dwbc30);
3733 + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
3734 + next_mode = LUT_RAM_B;
3735 +@@ -227,8 +261,7 @@ static bool dwb3_program_ogam_lut(
3736 + dwb3_program_ogam_pwl(
3737 + dwbc30, params->rgb_resulted, params->hw_points_num);
3738 +
3739 +- REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
3740 +- REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
3741 ++ REG_UPDATE(DWB_OGAM_CONTROL, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
3742 +
3743 + return true;
3744 + }
3745 +@@ -271,14 +304,19 @@ static void dwb3_program_gamut_remap(
3746 +
3747 + struct color_matrices_reg gam_regs;
3748 +
3749 +- REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
3750 +-
3751 + if (regval == NULL || select == CM_GAMUT_REMAP_MODE_BYPASS) {
3752 + REG_SET(DWB_GAMUT_REMAP_MODE, 0,
3753 + DWB_GAMUT_REMAP_MODE, 0);
3754 + return;
3755 + }
3756 +
3757 ++ REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
3758 ++
3759 ++ gam_regs.shifts.csc_c11 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C11;
3760 ++ gam_regs.masks.csc_c11 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C11;
3761 ++ gam_regs.shifts.csc_c12 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C12;
3762 ++ gam_regs.masks.csc_c12 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C12;
3763 ++
3764 + switch (select) {
3765 + case CM_GAMUT_REMAP_MODE_RAMA_COEFF:
3766 + gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPA_C11_C12);
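
When the R/G/B curves differ, the reworked dwb3 code programs the output gamma LUT one colour channel at a time: select the channel via the write-colour mask, stream all base values plus a final extrapolated end point, rewind the index, repeat. Schematically (register helpers are illustrative):

    static void program_channel(u32 color_mask, const u32 *base,
                                unsigned int num, u32 last)
    {
        unsigned int i;

        select_write_color(color_mask);   /* 4=R, 2=G, 1=B, 7=all     */
        for (i = 0; i < num; i++)
            write_lut_data(base[i]);
        write_lut_data(last);             /* end point = base + delta */
        reset_lut_index();                /* rewind for next channel  */
    }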
3767 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
3768 +index c68e3a708a335..fafed1e4a998d 100644
3769 +--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
3770 ++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
3771 +@@ -398,12 +398,22 @@ void dcn30_program_all_writeback_pipes_in_tree(
3772 + for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
3773 + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
3774 +
3775 ++ if (!pipe_ctx->plane_state)
3776 ++ continue;
3777 ++
3778 + if (pipe_ctx->plane_state == wb_info.writeback_source_plane) {
3779 + wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
3780 + break;
3781 + }
3782 + }
3783 +- ASSERT(wb_info.mpcc_inst != -1);
3784 ++
3785 ++ if (wb_info.mpcc_inst == -1) {
3786 ++ /* Disable writeback pipe and disconnect from MPCC
3787 ++ * if source plane has been removed
3788 ++ */
3789 ++ dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
3790 ++ continue;
3791 ++ }
3792 +
3793 + ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
3794 + dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst];
3795 +@@ -580,22 +590,19 @@ void dcn30_init_hw(struct dc *dc)
3796 + */
3797 + if (dc->config.power_down_display_on_boot) {
3798 + struct dc_link *edp_links[MAX_NUM_EDP];
3799 +- struct dc_link *edp_link;
3800 ++ struct dc_link *edp_link = NULL;
3801 +
3802 + get_edp_links(dc, edp_links, &edp_num);
3803 +- if (edp_num) {
3804 +- for (i = 0; i < edp_num; i++) {
3805 +- edp_link = edp_links[i];
3806 +- if (edp_link->link_enc->funcs->is_dig_enabled &&
3807 +- edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
3808 +- dc->hwss.edp_backlight_control &&
3809 +- dc->hwss.power_down &&
3810 +- dc->hwss.edp_power_control) {
3811 +- dc->hwss.edp_backlight_control(edp_link, false);
3812 +- dc->hwss.power_down(dc);
3813 +- dc->hwss.edp_power_control(edp_link, false);
3814 +- }
3815 +- }
3816 ++ if (edp_num)
3817 ++ edp_link = edp_links[0];
3818 ++ if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
3819 ++ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
3820 ++ dc->hwss.edp_backlight_control &&
3821 ++ dc->hwss.power_down &&
3822 ++ dc->hwss.edp_power_control) {
3823 ++ dc->hwss.edp_backlight_control(edp_link, false);
3824 ++ dc->hwss.power_down(dc);
3825 ++ dc->hwss.edp_power_control(edp_link, false);
3826 + } else {
3827 + for (i = 0; i < dc->link_count; i++) {
3828 + struct dc_link *link = dc->links[i];
3829 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
3830 +index 28e15ebf2f431..23a246b62d5d7 100644
3831 +--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
3832 ++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
3833 +@@ -2398,16 +2398,37 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
3834 + dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
3835 +
3836 + if (bw_params->clk_table.entries[0].memclk_mhz) {
3837 ++ int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
3838 ++
3839 ++ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
3840 ++ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
3841 ++ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
3842 ++ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
3843 ++ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
3844 ++ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
3845 ++ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
3846 ++ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
3847 ++ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
3848 ++ }
3849 ++
3850 ++ if (!max_dcfclk_mhz)
3851 ++ max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
3852 ++ if (!max_dispclk_mhz)
3853 ++ max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
3854 ++ if (!max_dppclk_mhz)
3855 ++ max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
3856 ++ if (!max_phyclk_mhz)
3857 ++ max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
3858 +
3859 +- if (bw_params->clk_table.entries[1].dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
3860 ++ if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
3861 + // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
3862 +- dcfclk_sta_targets[num_dcfclk_sta_targets] = bw_params->clk_table.entries[1].dcfclk_mhz;
3863 ++ dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
3864 + num_dcfclk_sta_targets++;
3865 +- } else if (bw_params->clk_table.entries[1].dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
3866 ++ } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
3867 + // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
3868 + for (i = 0; i < num_dcfclk_sta_targets; i++) {
3869 +- if (dcfclk_sta_targets[i] > bw_params->clk_table.entries[1].dcfclk_mhz) {
3870 +- dcfclk_sta_targets[i] = bw_params->clk_table.entries[1].dcfclk_mhz;
3871 ++ if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
3872 ++ dcfclk_sta_targets[i] = max_dcfclk_mhz;
3873 + break;
3874 + }
3875 + }
3876 +@@ -2447,7 +2468,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
3877 + dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
3878 + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
3879 + } else {
3880 +- if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
3881 ++ if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
3882 + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
3883 + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
3884 + } else {
3885 +@@ -2462,11 +2483,12 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
3886 + }
3887 +
3888 + while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
3889 +- optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
3890 ++ optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
3891 + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
3892 + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
3893 + }
3894 +
3895 ++ dcn3_0_soc.num_states = num_states;
3896 + for (i = 0; i < dcn3_0_soc.num_states; i++) {
3897 + dcn3_0_soc.clock_limits[i].state = i;
3898 + dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
3899 +@@ -2474,9 +2496,9 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
3900 + dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
3901 +
3902 + /* Fill all states with max values of all other clocks */
3903 +- dcn3_0_soc.clock_limits[i].dispclk_mhz = bw_params->clk_table.entries[1].dispclk_mhz;
3904 +- dcn3_0_soc.clock_limits[i].dppclk_mhz = bw_params->clk_table.entries[1].dppclk_mhz;
3905 +- dcn3_0_soc.clock_limits[i].phyclk_mhz = bw_params->clk_table.entries[1].phyclk_mhz;
3906 ++ dcn3_0_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
3907 ++ dcn3_0_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
3908 ++ dcn3_0_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
3909 + dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
3910 + /* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
3911 + /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
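
dcn30_update_bw_bounding_box previously trusted clk_table entry [1] to hold every clock's maximum; the fix scans all DPM levels and falls back to the SoC defaults when a column is empty. Reduced to a single clock:

    int max_dcfclk_mhz = 0;

    for (i = 0; i < MAX_NUM_DPM_LVL; i++)
        if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
            max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;

    if (!max_dcfclk_mhz)    /* column not populated by firmware */
        max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;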
3912 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
3913 +index 8a2119d8ca0de..8189606537c5a 100644
3914 +--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
3915 ++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
3916 +@@ -226,6 +226,7 @@ void dcn31_init_hw(struct dc *dc)
3917 + if (dc->config.power_down_display_on_boot) {
3918 + struct dc_link *edp_links[MAX_NUM_EDP];
3919 + struct dc_link *edp_link;
3920 ++ bool power_down = false;
3921 +
3922 + get_edp_links(dc, edp_links, &edp_num);
3923 + if (edp_num) {
3924 +@@ -239,9 +240,11 @@ void dcn31_init_hw(struct dc *dc)
3925 + dc->hwss.edp_backlight_control(edp_link, false);
3926 + dc->hwss.power_down(dc);
3927 + dc->hwss.edp_power_control(edp_link, false);
3928 ++ power_down = true;
3929 + }
3930 + }
3931 +- } else {
3932 ++ }
3933 ++ if (!power_down) {
3934 + for (i = 0; i < dc->link_count; i++) {
3935 + struct dc_link *link = dc->links[i];
3936 +
3937 +diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
3938 +index 911f9f4147741..39ca338eb80b3 100644
3939 +--- a/drivers/gpu/drm/ast/ast_drv.h
3940 ++++ b/drivers/gpu/drm/ast/ast_drv.h
3941 +@@ -337,6 +337,11 @@ int ast_mode_config_init(struct ast_private *ast);
3942 + #define AST_DP501_LINKRATE 0xf014
3943 + #define AST_DP501_EDID_DATA 0xf020
3944 +
3945 ++/* Define for Soc scratched reg */
3946 ++#define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6)
3947 ++//#define AST_VRAM_INIT_BY_BMC BIT(7)
3948 ++//#define AST_VRAM_INIT_READY BIT(6)
3949 ++
3950 + int ast_mm_init(struct ast_private *ast);
3951 +
3952 + /* ast post */
3953 +@@ -346,6 +351,7 @@ bool ast_is_vga_enabled(struct drm_device *dev);
3954 + void ast_post_gpu(struct drm_device *dev);
3955 + u32 ast_mindwm(struct ast_private *ast, u32 r);
3956 + void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
3957 ++void ast_patch_ahb_2500(struct ast_private *ast);
3958 + /* ast dp501 */
3959 + void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
3960 + bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
3961 +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
3962 +index 2aff2e6cf450c..79a3618679554 100644
3963 +--- a/drivers/gpu/drm/ast/ast_main.c
3964 ++++ b/drivers/gpu/drm/ast/ast_main.c
3965 +@@ -97,6 +97,11 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
3966 + jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
3967 + jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
3968 + if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
3969 ++ /* Patch AST2500 */
3970 ++ if (((pdev->revision & 0xF0) == 0x40)
3971 ++ && ((jregd0 & AST_VRAM_INIT_STATUS_MASK) == 0))
3972 ++ ast_patch_ahb_2500(ast);
3973 ++
3974 + /* Double check it's actually working */
3975 + data = ast_read32(ast, 0xf004);
3976 + if ((data != 0xFFFFFFFF) && (data != 0x00)) {
3977 +diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
3978 +index 0607658dde51b..b5d92f652fd85 100644
3979 +--- a/drivers/gpu/drm/ast/ast_post.c
3980 ++++ b/drivers/gpu/drm/ast/ast_post.c
3981 +@@ -2028,6 +2028,40 @@ static bool ast_dram_init_2500(struct ast_private *ast)
3982 + return true;
3983 + }
3984 +
3985 ++void ast_patch_ahb_2500(struct ast_private *ast)
3986 ++{
3987 ++ u32 data;
3988 ++
3989 ++ /* Clear bus lock condition */
3990 ++ ast_moutdwm(ast, 0x1e600000, 0xAEED1A03);
3991 ++ ast_moutdwm(ast, 0x1e600084, 0x00010000);
3992 ++ ast_moutdwm(ast, 0x1e600088, 0x00000000);
3993 ++ ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
3994 ++ data = ast_mindwm(ast, 0x1e6e2070);
3995 ++ if (data & 0x08000000) { /* check fast reset */
3996 ++ /*
3997 ++ * If "Fast restet" is enabled for ARM-ICE debugger,
3998 ++	 * then the WDT needs to be enabled, where
3999 ++ * WDT04 is WDT#1 Reload reg.
4000 ++ * WDT08 is WDT#1 counter restart reg to avoid system deadlock
4001 ++ * WDT0C is WDT#1 control reg
4002 ++ * [6:5]:= 01:Full chip
4003 ++ * [4]:= 1:1MHz clock source
4004 ++	 * [1]:= 1:WDT will be cleared and disabled after timeout occurs
4005 ++ * [0]:= 1:WDT enable
4006 ++ */
4007 ++ ast_moutdwm(ast, 0x1E785004, 0x00000010);
4008 ++ ast_moutdwm(ast, 0x1E785008, 0x00004755);
4009 ++ ast_moutdwm(ast, 0x1E78500c, 0x00000033);
4010 ++ udelay(1000);
4011 ++ }
4012 ++ do {
4013 ++ ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
4014 ++ data = ast_mindwm(ast, 0x1e6e2000);
4015 ++ } while (data != 1);
4016 ++ ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */
4017 ++}
4018 ++
4019 + void ast_post_chip_2500(struct drm_device *dev)
4020 + {
4021 + struct ast_private *ast = to_ast_private(dev);
4022 +@@ -2035,39 +2069,44 @@ void ast_post_chip_2500(struct drm_device *dev)
4023 + u8 reg;
4024 +
4025 + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
4026 +- if ((reg & 0x80) == 0) {/* vga only */
4027 ++ if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
4028 + /* Clear bus lock condition */
4029 +- ast_moutdwm(ast, 0x1e600000, 0xAEED1A03);
4030 +- ast_moutdwm(ast, 0x1e600084, 0x00010000);
4031 +- ast_moutdwm(ast, 0x1e600088, 0x00000000);
4032 +- ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
4033 +- ast_write32(ast, 0xf004, 0x1e6e0000);
4034 +- ast_write32(ast, 0xf000, 0x1);
4035 +- ast_write32(ast, 0x12000, 0x1688a8a8);
4036 +- while (ast_read32(ast, 0x12000) != 0x1)
4037 +- ;
4038 +-
4039 +- ast_write32(ast, 0x10000, 0xfc600309);
4040 +- while (ast_read32(ast, 0x10000) != 0x1)
4041 +- ;
4042 ++ ast_patch_ahb_2500(ast);
4043 ++
4044 ++ /* Disable watchdog */
4045 ++ ast_moutdwm(ast, 0x1E78502C, 0x00000000);
4046 ++ ast_moutdwm(ast, 0x1E78504C, 0x00000000);
4047 ++
4048 ++ /*
4049 ++	 * Reset the USB port to work around the USB unknown-device issue
4050 ++ * SCU90 is Multi-function Pin Control #5
4051 ++	 * [29]:= 1:Enable USB2.0 Host port#1 (the port mutually shared with
4052 ++	 * the USB2.0 Hub).
4053 ++ * SCU94 is Multi-function Pin Control #6
4054 ++ * [14:13]:= 1x:USB2.0 Host2 controller
4055 ++ * SCU70 is Hardware Strap reg
4056 ++ * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by
4057 ++ * [18]: 0(24)/1(48) MHz)
4058 ++ * SCU7C is Write clear reg to SCU70
4059 ++ * [23]:= write 1 and then SCU70[23] will be clear as 0b.
4060 ++ */
4061 ++ ast_moutdwm(ast, 0x1E6E2090, 0x20000000);
4062 ++ ast_moutdwm(ast, 0x1E6E2094, 0x00004000);
4063 ++ if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) {
4064 ++ ast_moutdwm(ast, 0x1E6E207C, 0x00800000);
4065 ++ mdelay(100);
4066 ++ ast_moutdwm(ast, 0x1E6E2070, 0x00800000);
4067 ++ }
4068 ++ /* Modify eSPI reset pin */
4069 ++ temp = ast_mindwm(ast, 0x1E6E2070);
4070 ++ if (temp & 0x02000000)
4071 ++ ast_moutdwm(ast, 0x1E6E207C, 0x00004000);
4072 +
4073 + /* Slow down CPU/AHB CLK in VGA only mode */
4074 + temp = ast_read32(ast, 0x12008);
4075 + temp |= 0x73;
4076 + ast_write32(ast, 0x12008, temp);
4077 +
4078 +- /* Reset USB port to patch USB unknown device issue */
4079 +- ast_moutdwm(ast, 0x1e6e2090, 0x20000000);
4080 +- temp = ast_mindwm(ast, 0x1e6e2094);
4081 +- temp |= 0x00004000;
4082 +- ast_moutdwm(ast, 0x1e6e2094, temp);
4083 +- temp = ast_mindwm(ast, 0x1e6e2070);
4084 +- if (temp & 0x00800000) {
4085 +- ast_moutdwm(ast, 0x1e6e207c, 0x00800000);
4086 +- mdelay(100);
4087 +- ast_moutdwm(ast, 0x1e6e2070, 0x00800000);
4088 +- }
4089 +-
4090 + if (!ast_dram_init_2500(ast))
4091 + drm_err(dev, "DRAM init failed !\n");
4092 +
4093 +diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
4094 +index 873995f0a7416..6002404ffcb9d 100644
4095 +--- a/drivers/gpu/drm/bridge/nwl-dsi.c
4096 ++++ b/drivers/gpu/drm/bridge/nwl-dsi.c
4097 +@@ -196,7 +196,7 @@ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
4098 + u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
4099 +
4100 + return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp,
4101 +- dsi->lanes * 8 * NSEC_PER_SEC);
4102 ++ dsi->lanes * 8ULL * NSEC_PER_SEC);
4103 + }
4104 +
4105 + /*
4106 +diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
4107 +index b59b26a71ad5d..3a298df00901d 100644
4108 +--- a/drivers/gpu/drm/drm_auth.c
4109 ++++ b/drivers/gpu/drm/drm_auth.c
4110 +@@ -135,16 +135,18 @@ static void drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
4111 + static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
4112 + {
4113 + struct drm_master *old_master;
4114 ++ struct drm_master *new_master;
4115 +
4116 + lockdep_assert_held_once(&dev->master_mutex);
4117 +
4118 + WARN_ON(fpriv->is_master);
4119 + old_master = fpriv->master;
4120 +- fpriv->master = drm_master_create(dev);
4121 +- if (!fpriv->master) {
4122 +- fpriv->master = old_master;
4123 ++ new_master = drm_master_create(dev);
4124 ++ if (!new_master)
4125 + return -ENOMEM;
4126 +- }
4127 ++ spin_lock(&fpriv->master_lookup_lock);
4128 ++ fpriv->master = new_master;
4129 ++ spin_unlock(&fpriv->master_lookup_lock);
4130 +
4131 + fpriv->is_master = 1;
4132 + fpriv->authenticated = 1;
4133 +@@ -303,10 +305,13 @@ int drm_master_open(struct drm_file *file_priv)
4134 + * any master object for render clients
4135 + */
4136 + mutex_lock(&dev->master_mutex);
4137 +- if (!dev->master)
4138 ++ if (!dev->master) {
4139 + ret = drm_new_set_master(dev, file_priv);
4140 +- else
4141 ++ } else {
4142 ++ spin_lock(&file_priv->master_lookup_lock);
4143 + file_priv->master = drm_master_get(dev->master);
4144 ++ spin_unlock(&file_priv->master_lookup_lock);
4145 ++ }
4146 + mutex_unlock(&dev->master_mutex);
4147 +
4148 + return ret;
4149 +@@ -372,6 +377,31 @@ struct drm_master *drm_master_get(struct drm_master *master)
4150 + }
4151 + EXPORT_SYMBOL(drm_master_get);
4152 +
4153 ++/**
4154 ++ * drm_file_get_master - reference &drm_file.master of @file_priv
4155 ++ * @file_priv: DRM file private
4156 ++ *
4157 ++ * Increments the reference count of @file_priv's &drm_file.master and returns
4158 ++ * the &drm_file.master. If @file_priv has no &drm_file.master, returns NULL.
4159 ++ *
4160 ++ * Master pointers returned from this function should be unreferenced using
4161 ++ * drm_master_put().
4162 ++ */
4163 ++struct drm_master *drm_file_get_master(struct drm_file *file_priv)
4164 ++{
4165 ++ struct drm_master *master = NULL;
4166 ++
4167 ++ spin_lock(&file_priv->master_lookup_lock);
4168 ++ if (!file_priv->master)
4169 ++ goto unlock;
4170 ++ master = drm_master_get(file_priv->master);
4171 ++
4172 ++unlock:
4173 ++ spin_unlock(&file_priv->master_lookup_lock);
4174 ++ return master;
4175 ++}
4176 ++EXPORT_SYMBOL(drm_file_get_master);
4177 ++
4178 + static void drm_master_destroy(struct kref *kref)
4179 + {
4180 + struct drm_master *master = container_of(kref, struct drm_master, refcount);
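
drm_file_get_master(), added above, pairs the lookup under master_lookup_lock with a reference grab so the master stays valid after the lock is dropped. Typical caller shape, as the drm_lease.c rework below adopts (default_value is illustrative):

    struct drm_master *master = drm_file_get_master(file_priv);

    if (!master)
        return default_value;   /* this file has no master */
    /* ... use master without holding master_lookup_lock ... */
    drm_master_put(&master);    /* drop the reference taken above */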
4181 +diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
4182 +index 3d7182001004d..b0a8264894885 100644
4183 +--- a/drivers/gpu/drm/drm_debugfs.c
4184 ++++ b/drivers/gpu/drm/drm_debugfs.c
4185 +@@ -91,6 +91,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
4186 + mutex_lock(&dev->filelist_mutex);
4187 + list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
4188 + struct task_struct *task;
4189 ++ bool is_current_master = drm_is_current_master(priv);
4190 +
4191 + rcu_read_lock(); /* locks pid_task()->comm */
4192 + task = pid_task(priv->pid, PIDTYPE_PID);
4193 +@@ -99,7 +100,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
4194 + task ? task->comm : "<unknown>",
4195 + pid_vnr(priv->pid),
4196 + priv->minor->index,
4197 +- drm_is_current_master(priv) ? 'y' : 'n',
4198 ++ is_current_master ? 'y' : 'n',
4199 + priv->authenticated ? 'y' : 'n',
4200 + from_kuid_munged(seq_user_ns(m), uid),
4201 + priv->magic);
4202 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
4203 +index ad0795afc21cf..86d13d6bc4631 100644
4204 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
4205 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
4206 +@@ -2872,11 +2872,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
4207 + idx += tosend + 1;
4208 +
4209 + ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
4210 +- if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
4211 +- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
4212 ++ if (ret) {
4213 ++ if (drm_debug_enabled(DRM_UT_DP)) {
4214 ++ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
4215 +
4216 +- drm_printf(&p, "sideband msg failed to send\n");
4217 +- drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
4218 ++ drm_printf(&p, "sideband msg failed to send\n");
4219 ++ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
4220 ++ }
4221 + return ret;
4222 + }
4223 +
4224 +diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
4225 +index d4f0bac6f8f8a..ceb1a9723855f 100644
4226 +--- a/drivers/gpu/drm/drm_file.c
4227 ++++ b/drivers/gpu/drm/drm_file.c
4228 +@@ -176,6 +176,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
4229 + init_waitqueue_head(&file->event_wait);
4230 + file->event_space = 4096; /* set aside 4k for event buffer */
4231 +
4232 ++ spin_lock_init(&file->master_lookup_lock);
4233 + mutex_init(&file->event_read_lock);
4234 +
4235 + if (drm_core_check_feature(dev, DRIVER_GEM))
4236 +diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
4237 +index 00fb433bcef1a..92eac73d9001f 100644
4238 +--- a/drivers/gpu/drm/drm_lease.c
4239 ++++ b/drivers/gpu/drm/drm_lease.c
4240 +@@ -106,10 +106,19 @@ static bool _drm_has_leased(struct drm_master *master, int id)
4241 + */
4242 + bool _drm_lease_held(struct drm_file *file_priv, int id)
4243 + {
4244 +- if (!file_priv || !file_priv->master)
4245 ++ bool ret;
4246 ++ struct drm_master *master;
4247 ++
4248 ++ if (!file_priv)
4249 + return true;
4250 +
4251 +- return _drm_lease_held_master(file_priv->master, id);
4252 ++ master = drm_file_get_master(file_priv);
4253 ++ if (!master)
4254 ++ return true;
4255 ++ ret = _drm_lease_held_master(master, id);
4256 ++ drm_master_put(&master);
4257 ++
4258 ++ return ret;
4259 + }
4260 +
4261 + /**
4262 +@@ -128,13 +137,22 @@ bool drm_lease_held(struct drm_file *file_priv, int id)
4263 + struct drm_master *master;
4264 + bool ret;
4265 +
4266 +- if (!file_priv || !file_priv->master || !file_priv->master->lessor)
4267 ++ if (!file_priv)
4268 + return true;
4269 +
4270 +- master = file_priv->master;
4271 ++ master = drm_file_get_master(file_priv);
4272 ++ if (!master)
4273 ++ return true;
4274 ++ if (!master->lessor) {
4275 ++ ret = true;
4276 ++ goto out;
4277 ++ }
4278 + mutex_lock(&master->dev->mode_config.idr_mutex);
4279 + ret = _drm_lease_held_master(master, id);
4280 + mutex_unlock(&master->dev->mode_config.idr_mutex);
4281 ++
4282 ++out:
4283 ++ drm_master_put(&master);
4284 + return ret;
4285 + }
4286 +
4287 +@@ -154,10 +172,16 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
4288 + int count_in, count_out;
4289 + uint32_t crtcs_out = 0;
4290 +
4291 +- if (!file_priv || !file_priv->master || !file_priv->master->lessor)
4292 ++ if (!file_priv)
4293 + return crtcs_in;
4294 +
4295 +- master = file_priv->master;
4296 ++ master = drm_file_get_master(file_priv);
4297 ++ if (!master)
4298 ++ return crtcs_in;
4299 ++ if (!master->lessor) {
4300 ++ crtcs_out = crtcs_in;
4301 ++ goto out;
4302 ++ }
4303 + dev = master->dev;
4304 +
4305 + count_in = count_out = 0;
4306 +@@ -176,6 +200,9 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
4307 + count_in++;
4308 + }
4309 + mutex_unlock(&master->dev->mode_config.idr_mutex);
4310 ++
4311 ++out:
4312 ++ drm_master_put(&master);
4313 + return crtcs_out;
4314 + }
4315 +
4316 +@@ -489,7 +516,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
4317 + size_t object_count;
4318 + int ret = 0;
4319 + struct idr leases;
4320 +- struct drm_master *lessor = lessor_priv->master;
4321 ++ struct drm_master *lessor;
4322 + struct drm_master *lessee = NULL;
4323 + struct file *lessee_file = NULL;
4324 + struct file *lessor_file = lessor_priv->filp;
4325 +@@ -501,12 +528,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
4326 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
4327 + return -EOPNOTSUPP;
4328 +
4329 +- /* Do not allow sub-leases */
4330 +- if (lessor->lessor) {
4331 +- DRM_DEBUG_LEASE("recursive leasing not allowed\n");
4332 +- return -EINVAL;
4333 +- }
4334 +-
4335 + /* need some objects */
4336 + if (cl->object_count == 0) {
4337 + DRM_DEBUG_LEASE("no objects in lease\n");
4338 +@@ -518,12 +539,22 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
4339 + return -EINVAL;
4340 + }
4341 +
4342 ++ lessor = drm_file_get_master(lessor_priv);
4343 ++ /* Do not allow sub-leases */
4344 ++ if (lessor->lessor) {
4345 ++ DRM_DEBUG_LEASE("recursive leasing not allowed\n");
4346 ++ ret = -EINVAL;
4347 ++ goto out_lessor;
4348 ++ }
4349 ++
4350 + object_count = cl->object_count;
4351 +
4352 + object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
4353 + array_size(object_count, sizeof(__u32)));
4354 +- if (IS_ERR(object_ids))
4355 +- return PTR_ERR(object_ids);
4356 ++ if (IS_ERR(object_ids)) {
4357 ++ ret = PTR_ERR(object_ids);
4358 ++ goto out_lessor;
4359 ++ }
4360 +
4361 + idr_init(&leases);
4362 +
4363 +@@ -534,14 +565,15 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
4364 + if (ret) {
4365 + DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
4366 + idr_destroy(&leases);
4367 +- return ret;
4368 ++ goto out_lessor;
4369 + }
4370 +
4371 + /* Allocate a file descriptor for the lease */
4372 + fd = get_unused_fd_flags(cl->flags & (O_CLOEXEC | O_NONBLOCK));
4373 + if (fd < 0) {
4374 + idr_destroy(&leases);
4375 +- return fd;
4376 ++ ret = fd;
4377 ++ goto out_lessor;
4378 + }
4379 +
4380 + DRM_DEBUG_LEASE("Creating lease\n");
4381 +@@ -577,6 +609,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
4382 + /* Hook up the fd */
4383 + fd_install(fd, lessee_file);
4384 +
4385 ++ drm_master_put(&lessor);
4386 + DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
4387 + return 0;
4388 +
4389 +@@ -586,6 +619,8 @@ out_lessee:
4390 + out_leases:
4391 + put_unused_fd(fd);
4392 +
4393 ++out_lessor:
4394 ++ drm_master_put(&lessor);
4395 + DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
4396 + return ret;
4397 + }
4398 +@@ -608,7 +643,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
4399 + struct drm_mode_list_lessees *arg = data;
4400 + __u32 __user *lessee_ids = (__u32 __user *) (uintptr_t) (arg->lessees_ptr);
4401 + __u32 count_lessees = arg->count_lessees;
4402 +- struct drm_master *lessor = lessor_priv->master, *lessee;
4403 ++ struct drm_master *lessor, *lessee;
4404 + int count;
4405 + int ret = 0;
4406 +
4407 +@@ -619,6 +654,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
4408 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
4409 + return -EOPNOTSUPP;
4410 +
4411 ++ lessor = drm_file_get_master(lessor_priv);
4412 + DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id);
4413 +
4414 + mutex_lock(&dev->mode_config.idr_mutex);
4415 +@@ -642,6 +678,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
4416 + arg->count_lessees = count;
4417 +
4418 + mutex_unlock(&dev->mode_config.idr_mutex);
4419 ++ drm_master_put(&lessor);
4420 +
4421 + return ret;
4422 + }
4423 +@@ -661,7 +698,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
4424 + struct drm_mode_get_lease *arg = data;
4425 + __u32 __user *object_ids = (__u32 __user *) (uintptr_t) (arg->objects_ptr);
4426 + __u32 count_objects = arg->count_objects;
4427 +- struct drm_master *lessee = lessee_priv->master;
4428 ++ struct drm_master *lessee;
4429 + struct idr *object_idr;
4430 + int count;
4431 + void *entry;
4432 +@@ -675,6 +712,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
4433 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
4434 + return -EOPNOTSUPP;
4435 +
4436 ++ lessee = drm_file_get_master(lessee_priv);
4437 + DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id);
4438 +
4439 + mutex_lock(&dev->mode_config.idr_mutex);
4440 +@@ -702,6 +740,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
4441 + arg->count_objects = count;
4442 +
4443 + mutex_unlock(&dev->mode_config.idr_mutex);
4444 ++ drm_master_put(&lessee);
4445 +
4446 + return ret;
4447 + }
4448 +@@ -720,7 +759,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
4449 + void *data, struct drm_file *lessor_priv)
4450 + {
4451 + struct drm_mode_revoke_lease *arg = data;
4452 +- struct drm_master *lessor = lessor_priv->master;
4453 ++ struct drm_master *lessor;
4454 + struct drm_master *lessee;
4455 + int ret = 0;
4456 +
4457 +@@ -730,6 +769,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
4458 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
4459 + return -EOPNOTSUPP;
4460 +
4461 ++ lessor = drm_file_get_master(lessor_priv);
4462 + mutex_lock(&dev->mode_config.idr_mutex);
4463 +
4464 + lessee = _drm_find_lessee(lessor, arg->lessee_id);
4465 +@@ -750,6 +790,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
4466 +
4467 + fail:
4468 + mutex_unlock(&dev->mode_config.idr_mutex);
4469 ++ drm_master_put(&lessor);
4470 +
4471 + return ret;
4472 + }
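Every drm_lease ioctl touched above follows the same discipline: take a counted reference on the file's master at entry and drop it on every exit path, so a racing DROP_MASTER cannot free the lessor mid-ioctl. A minimal sketch of the pattern, using the drm_file_get_master()/drm_master_put() helpers from the hunks (do_lease_work() is a hypothetical stand-in for the ioctl body):

	struct drm_master *lessor;
	int ret;

	lessor = drm_file_get_master(lessor_priv);	/* take a reference */
	ret = do_lease_work(lessor);			/* no bare returns in between */
	drm_master_put(&lessor);			/* every path drops it */
	return ret;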
4473 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
4474 +index 0644936afee26..bf33c3084cb41 100644
4475 +--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
4476 ++++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
4477 +@@ -115,6 +115,8 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
4478 + EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
4479 + else if (IS_ENABLED(CONFIG_IOMMU_DMA))
4480 + mapping = iommu_get_domain_for_dev(priv->dma_dev);
4481 ++ else
4482 ++ mapping = ERR_PTR(-ENODEV);
4483 +
4484 + if (IS_ERR(mapping))
4485 + return PTR_ERR(mapping);
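The exynos hunk closes a hole where `mapping` was left uninitialized when neither IOMMU configuration is enabled, making the following IS_ERR() check read garbage. The added else arm guarantees every branch assigns; a condensed sketch of the idiom (setup_a()/setup_b() are hypothetical stand-ins):

	void *mapping;

	if (IS_ENABLED(CONFIG_A))
		mapping = setup_a();
	else if (IS_ENABLED(CONFIG_B))
		mapping = setup_b();
	else
		mapping = ERR_PTR(-ENODEV);	/* every branch now assigns */

	if (IS_ERR(mapping))
		return PTR_ERR(mapping);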
4486 +diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
4487 +index 749a075fe9e4c..d1b51c133e27a 100644
4488 +--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
4489 ++++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
4490 +@@ -43,6 +43,22 @@
4491 + #define ATTR_INDEX 0x1fc0
4492 + #define ATTR_DATA 0x1fc1
4493 +
4494 ++#define WREG_MISC(v) \
4495 ++ WREG8(MGA_MISC_OUT, v)
4496 ++
4497 ++#define RREG_MISC(v) \
4498 ++ ((v) = RREG8(MGA_MISC_IN))
4499 ++
4500 ++#define WREG_MISC_MASKED(v, mask) \
4501 ++ do { \
4502 ++ u8 misc_; \
4503 ++ u8 mask_ = (mask); \
4504 ++ RREG_MISC(misc_); \
4505 ++ misc_ &= ~mask_; \
4506 ++ misc_ |= ((v) & mask_); \
4507 ++ WREG_MISC(misc_); \
4508 ++ } while (0)
4509 ++
4510 + #define WREG_ATTR(reg, v) \
4511 + do { \
4512 + RREG8(0x1fda); \
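WREG_MISC_MASKED is a read-modify-write helper: it latches the current MISC value, clears the masked field, ORs in the new bits and writes the result back, using local temporaries so each argument is evaluated only once. The mgag200_mode.c hunks below use it to select the MGA pixel PLL inside each per-model set_plls() routine, before the PLL registers are programmed rather than after:

	/* route the pixel clock to the MGA PLL, leaving other MISC bits alone */
	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);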
4513 +diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
4514 +index 9d576240faedd..555e3181e52b0 100644
4515 +--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
4516 ++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
4517 +@@ -174,6 +174,8 @@ static int mgag200_g200_set_plls(struct mga_device *mdev, long clock)
4518 + drm_dbg_kms(dev, "clock: %ld vco: %ld m: %d n: %d p: %d s: %d\n",
4519 + clock, f_vco, m, n, p, s);
4520 +
4521 ++ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
4522 ++
4523 + WREG_DAC(MGA1064_PIX_PLLC_M, m);
4524 + WREG_DAC(MGA1064_PIX_PLLC_N, n);
4525 + WREG_DAC(MGA1064_PIX_PLLC_P, (p | (s << 3)));
4526 +@@ -289,6 +291,8 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
4527 + return 1;
4528 + }
4529 +
4530 ++ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
4531 ++
4532 + WREG_DAC(MGA1064_PIX_PLLC_M, m);
4533 + WREG_DAC(MGA1064_PIX_PLLC_N, n);
4534 + WREG_DAC(MGA1064_PIX_PLLC_P, p);
4535 +@@ -385,6 +389,8 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
4536 + }
4537 + }
4538 +
4539 ++ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
4540 ++
4541 + for (i = 0; i <= 32 && pll_locked == false; i++) {
4542 + if (i > 0) {
4543 + WREG8(MGAREG_CRTC_INDEX, 0x1e);
4544 +@@ -522,6 +528,8 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
4545 + }
4546 + }
4547 +
4548 ++ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
4549 ++
4550 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
4551 + tmp = RREG8(DAC_DATA);
4552 + tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
4553 +@@ -654,6 +662,9 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
4554 + }
4555 + }
4556 + }
4557 ++
4558 ++ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
4559 ++
4560 + for (i = 0; i <= 32 && pll_locked == false; i++) {
4561 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
4562 + tmp = RREG8(DAC_DATA);
4563 +@@ -754,6 +765,8 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
4564 + }
4565 + }
4566 +
4567 ++ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
4568 ++
4569 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
4570 + tmp = RREG8(DAC_DATA);
4571 + tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
4572 +@@ -787,8 +800,6 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
4573 +
4574 + static int mgag200_crtc_set_plls(struct mga_device *mdev, long clock)
4575 + {
4576 +- u8 misc;
4577 +-
4578 + switch(mdev->type) {
4579 + case G200_PCI:
4580 + case G200_AGP:
4581 +@@ -808,11 +819,6 @@ static int mgag200_crtc_set_plls(struct mga_device *mdev, long clock)
4582 + return mga_g200er_set_plls(mdev, clock);
4583 + }
4584 +
4585 +- misc = RREG8(MGA_MISC_IN);
4586 +- misc &= ~MGAREG_MISC_CLK_SEL_MASK;
4587 +- misc |= MGAREG_MISC_CLK_SEL_MGA_MSK;
4588 +- WREG8(MGA_MISC_OUT, misc);
4589 +-
4590 + return 0;
4591 + }
4592 +
4593 +diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
4594 +index 977be0565c061..60e705283fe84 100644
4595 +--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
4596 ++++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
4597 +@@ -222,11 +222,10 @@
4598 +
4599 + #define MGAREG_MISC_IOADSEL (0x1 << 0)
4600 + #define MGAREG_MISC_RAMMAPEN (0x1 << 1)
4601 +-#define MGAREG_MISC_CLK_SEL_MASK GENMASK(3, 2)
4602 +-#define MGAREG_MISC_CLK_SEL_VGA25 (0x0 << 2)
4603 +-#define MGAREG_MISC_CLK_SEL_VGA28 (0x1 << 2)
4604 +-#define MGAREG_MISC_CLK_SEL_MGA_PIX (0x2 << 2)
4605 +-#define MGAREG_MISC_CLK_SEL_MGA_MSK (0x3 << 2)
4606 ++#define MGAREG_MISC_CLKSEL_MASK GENMASK(3, 2)
4607 ++#define MGAREG_MISC_CLKSEL_VGA25 (0x0 << 2)
4608 ++#define MGAREG_MISC_CLKSEL_VGA28 (0x1 << 2)
4609 ++#define MGAREG_MISC_CLKSEL_MGA (0x3 << 2)
4610 + #define MGAREG_MISC_VIDEO_DIS (0x1 << 4)
4611 + #define MGAREG_MISC_HIGH_PG_SEL (0x1 << 5)
4612 + #define MGAREG_MISC_HSYNCPOL BIT(6)
4613 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
4614 +index 9c5e4618aa0ae..183b9f9c1b315 100644
4615 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
4616 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
4617 +@@ -1383,13 +1383,13 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
4618 + {
4619 + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
4620 + struct msm_gpu *gpu = &adreno_gpu->base;
4621 +- u32 cntl1_regval = 0;
4622 ++ u32 gpu_scid, cntl1_regval = 0;
4623 +
4624 + if (IS_ERR(a6xx_gpu->llc_mmio))
4625 + return;
4626 +
4627 + if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
4628 +- u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
4629 ++ gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
4630 +
4631 + gpu_scid &= 0x1f;
4632 + cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
4633 +@@ -1409,26 +1409,34 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
4634 + }
4635 + }
4636 +
4637 +- if (cntl1_regval) {
4638 ++ if (!cntl1_regval)
4639 ++ return;
4640 ++
4641 ++ /*
4642 ++ * Program the slice IDs for the various GPU blocks and GPU MMU
4643 ++ * pagetables
4644 ++ */
4645 ++ if (!a6xx_gpu->have_mmu500) {
4646 ++ a6xx_llc_write(a6xx_gpu,
4647 ++ REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
4648 ++
4649 + /*
4650 +- * Program the slice IDs for the various GPU blocks and GPU MMU
4651 +- * pagetables
4652 ++ * Program cacheability overrides to not allocate cache
4653 ++ * lines on a write miss
4654 + */
4655 +- if (a6xx_gpu->have_mmu500)
4656 +- gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0),
4657 +- cntl1_regval);
4658 +- else {
4659 +- a6xx_llc_write(a6xx_gpu,
4660 +- REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
4661 +-
4662 +- /*
4663 +- * Program cacheability overrides to not allocate cache
4664 +- * lines on a write miss
4665 +- */
4666 +- a6xx_llc_rmw(a6xx_gpu,
4667 +- REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
4668 +- }
4669 ++ a6xx_llc_rmw(a6xx_gpu,
4670 ++ REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
4671 ++ return;
4672 + }
4673 ++
4674 ++ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
4675 ++
4676 ++ /* On A660, the SCID programming for UCHE traffic is done in
4677 ++ * A6XX_GBIF_SCACHE_CNTL0[14:10]
4678 ++ */
4679 ++ if (adreno_is_a660(adreno_gpu))
4680 ++ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
4681 ++ (1 << 8), (gpu_scid << 10) | (1 << 8));
4682 + }
4683 +
4684 + static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
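The restructured a6xx_llc_activate() hoists gpu_scid out of the inner block so the new A660 branch can reuse it. The 5-bit slice ID is replicated across the SCACHE_CNTL1 fields (the continuation of that assignment is elided above; the GENMASK(24, 0) mask implies five 5-bit fields), and on A660 it is additionally written to bits [14:10] of SCACHE_CNTL0 with bit 8 as an enable. A worked example of the packing, assuming a slice ID of 1:

	u32 gpu_scid = 1 & 0x1f;
	u32 cntl1 = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
		    (gpu_scid << 15) | (gpu_scid << 20);	/* 0x00108421 */
	u32 cntl0 = (gpu_scid << 10) | (1 << 8);		/* 0x00000500, A660 only */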
4685 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
4686 +index 704dace895cbe..b131fd376192b 100644
4687 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
4688 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
4689 +@@ -974,6 +974,7 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
4690 + .amortizable_threshold = 25,
4691 + .min_prefill_lines = 24,
4692 + .danger_lut_tbl = {0xf, 0xffff, 0x0},
4693 ++ .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
4694 + .qos_lut_tbl = {
4695 + {.nentry = ARRAY_SIZE(sdm845_qos_linear),
4696 + .entries = sdm845_qos_linear
4697 +@@ -1001,6 +1002,7 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
4698 + .min_dram_ib = 1600000,
4699 + .min_prefill_lines = 24,
4700 + .danger_lut_tbl = {0xff, 0xffff, 0x0},
4701 ++ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
4702 + .qos_lut_tbl = {
4703 + {.nentry = ARRAY_SIZE(sc7180_qos_linear),
4704 + .entries = sc7180_qos_linear
4705 +@@ -1028,6 +1030,7 @@ static const struct dpu_perf_cfg sm8150_perf_data = {
4706 + .min_dram_ib = 800000,
4707 + .min_prefill_lines = 24,
4708 + .danger_lut_tbl = {0xf, 0xffff, 0x0},
4709 ++ .safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
4710 + .qos_lut_tbl = {
4711 + {.nentry = ARRAY_SIZE(sm8150_qos_linear),
4712 + .entries = sm8150_qos_linear
4713 +@@ -1056,6 +1059,7 @@ static const struct dpu_perf_cfg sm8250_perf_data = {
4714 + .min_dram_ib = 800000,
4715 + .min_prefill_lines = 35,
4716 + .danger_lut_tbl = {0xf, 0xffff, 0x0},
4717 ++ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
4718 + .qos_lut_tbl = {
4719 + {.nentry = ARRAY_SIZE(sc7180_qos_linear),
4720 + .entries = sc7180_qos_linear
4721 +@@ -1084,6 +1088,7 @@ static const struct dpu_perf_cfg sc7280_perf_data = {
4722 + .min_dram_ib = 1600000,
4723 + .min_prefill_lines = 24,
4724 + .danger_lut_tbl = {0xffff, 0xffff, 0x0},
4725 ++ .safe_lut_tbl = {0xff00, 0xff00, 0xffff},
4726 + .qos_lut_tbl = {
4727 + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
4728 + .entries = sc7180_qos_macrotile
4729 +diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
4730 +index 0712752742f4f..cdcaf470f1480 100644
4731 +--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
4732 ++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
4733 +@@ -89,13 +89,6 @@ static void mdp4_disable_commit(struct msm_kms *kms)
4734 +
4735 + static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
4736 + {
4737 +- int i;
4738 +- struct drm_crtc *crtc;
4739 +- struct drm_crtc_state *crtc_state;
4740 +-
4741 +- /* see 119ecb7fd */
4742 +- for_each_new_crtc_in_state(state, crtc, crtc_state, i)
4743 +- drm_crtc_vblank_get(crtc);
4744 + }
4745 +
4746 + static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
4747 +@@ -114,12 +107,6 @@ static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
4748 +
4749 + static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
4750 + {
4751 +- struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
4752 +- struct drm_crtc *crtc;
4753 +-
4754 +- /* see 119ecb7fd */
4755 +- for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
4756 +- drm_crtc_vblank_put(crtc);
4757 + }
4758 +
4759 + static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
4760 +@@ -412,6 +399,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
4761 + {
4762 + struct platform_device *pdev = to_platform_device(dev->dev);
4763 + struct mdp4_platform_config *config = mdp4_get_config(pdev);
4764 ++ struct msm_drm_private *priv = dev->dev_private;
4765 + struct mdp4_kms *mdp4_kms;
4766 + struct msm_kms *kms = NULL;
4767 + struct msm_gem_address_space *aspace;
4768 +@@ -431,7 +419,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
4769 + goto fail;
4770 + }
4771 +
4772 +- kms = &mdp4_kms->base.base;
4773 ++ priv->kms = &mdp4_kms->base.base;
4774 ++ kms = priv->kms;
4775 +
4776 + mdp4_kms->dev = dev;
4777 +
4778 +diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
4779 +index 4a3293b590b05..eb40d8413bca9 100644
4780 +--- a/drivers/gpu/drm/msm/dp/dp_aux.c
4781 ++++ b/drivers/gpu/drm/msm/dp/dp_aux.c
4782 +@@ -353,6 +353,9 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
4783 + if (!(aux->retry_cnt % MAX_AUX_RETRIES))
4784 + dp_catalog_aux_update_cfg(aux->catalog);
4785 + }
4786 ++ /* reset aux if link is in connected state */
4787 ++ if (dp_catalog_link_is_connected(aux->catalog))
4788 ++ dp_catalog_aux_reset(aux->catalog);
4789 + } else {
4790 + aux->retry_cnt = 0;
4791 + switch (aux->aux_error_num) {
4792 +diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
4793 +index eaddfd7398850..6f5e45d54b268 100644
4794 +--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
4795 ++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
4796 +@@ -81,13 +81,6 @@ struct dp_ctrl_private {
4797 + struct completion video_comp;
4798 + };
4799 +
4800 +-struct dp_cr_status {
4801 +- u8 lane_0_1;
4802 +- u8 lane_2_3;
4803 +-};
4804 +-
4805 +-#define DP_LANE0_1_CR_DONE 0x11
4806 +-
4807 + static int dp_aux_link_configure(struct drm_dp_aux *aux,
4808 + struct dp_link_info *link)
4809 + {
4810 +@@ -1078,7 +1071,7 @@ static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
4811 + }
4812 +
4813 + static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
4814 +- struct dp_cr_status *cr, int *training_step)
4815 ++ int *training_step)
4816 + {
4817 + int tries, old_v_level, ret = 0;
4818 + u8 link_status[DP_LINK_STATUS_SIZE];
4819 +@@ -1107,9 +1100,6 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
4820 + if (ret)
4821 + return ret;
4822 +
4823 +- cr->lane_0_1 = link_status[0];
4824 +- cr->lane_2_3 = link_status[1];
4825 +-
4826 + if (drm_dp_clock_recovery_ok(link_status,
4827 + ctrl->link->link_params.num_lanes)) {
4828 + return 0;
4829 +@@ -1186,7 +1176,7 @@ static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
4830 + }
4831 +
4832 + static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
4833 +- struct dp_cr_status *cr, int *training_step)
4834 ++ int *training_step)
4835 + {
4836 + int tries = 0, ret = 0;
4837 + char pattern;
4838 +@@ -1202,10 +1192,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
4839 + else
4840 + pattern = DP_TRAINING_PATTERN_2;
4841 +
4842 +- ret = dp_ctrl_update_vx_px(ctrl);
4843 +- if (ret)
4844 +- return ret;
4845 +-
4846 + ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern);
4847 + if (ret)
4848 + return ret;
4849 +@@ -1218,8 +1204,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
4850 + ret = dp_ctrl_read_link_status(ctrl, link_status);
4851 + if (ret)
4852 + return ret;
4853 +- cr->lane_0_1 = link_status[0];
4854 +- cr->lane_2_3 = link_status[1];
4855 +
4856 + if (drm_dp_channel_eq_ok(link_status,
4857 + ctrl->link->link_params.num_lanes)) {
4858 +@@ -1239,7 +1223,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
4859 + static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl);
4860 +
4861 + static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
4862 +- struct dp_cr_status *cr, int *training_step)
4863 ++ int *training_step)
4864 + {
4865 + int ret = 0;
4866 + u8 encoding = DP_SET_ANSI_8B10B;
4867 +@@ -1255,7 +1239,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
4868 + drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
4869 + &encoding, 1);
4870 +
4871 +- ret = dp_ctrl_link_train_1(ctrl, cr, training_step);
4872 ++ ret = dp_ctrl_link_train_1(ctrl, training_step);
4873 + if (ret) {
4874 + DRM_ERROR("link training #1 failed. ret=%d\n", ret);
4875 + goto end;
4876 +@@ -1264,7 +1248,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
4877 + /* print success info as this is a result of user initiated action */
4878 + DRM_DEBUG_DP("link training #1 successful\n");
4879 +
4880 +- ret = dp_ctrl_link_train_2(ctrl, cr, training_step);
4881 ++ ret = dp_ctrl_link_train_2(ctrl, training_step);
4882 + if (ret) {
4883 + DRM_ERROR("link training #2 failed. ret=%d\n", ret);
4884 + goto end;
4885 +@@ -1280,7 +1264,7 @@ end:
4886 + }
4887 +
4888 + static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
4889 +- struct dp_cr_status *cr, int *training_step)
4890 ++ int *training_step)
4891 + {
4892 + int ret = 0;
4893 +
4894 +@@ -1295,7 +1279,7 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
4895 + * a link training pattern, we have to first do soft reset.
4896 + */
4897 +
4898 +- ret = dp_ctrl_link_train(ctrl, cr, training_step);
4899 ++ ret = dp_ctrl_link_train(ctrl, training_step);
4900 +
4901 + return ret;
4902 + }
4903 +@@ -1492,14 +1476,16 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
4904 + static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
4905 + {
4906 + int ret = 0;
4907 +- struct dp_cr_status cr;
4908 + int training_step = DP_TRAINING_NONE;
4909 +
4910 + dp_ctrl_push_idle(&ctrl->dp_ctrl);
4911 +
4912 ++ ctrl->link->phy_params.p_level = 0;
4913 ++ ctrl->link->phy_params.v_level = 0;
4914 ++
4915 + ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
4916 +
4917 +- ret = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
4918 ++ ret = dp_ctrl_setup_main_link(ctrl, &training_step);
4919 + if (ret)
4920 + goto end;
4921 +
4922 +@@ -1630,6 +1616,35 @@ void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
4923 + }
4924 + }
4925 +
4926 ++static bool dp_ctrl_clock_recovery_any_ok(
4927 ++ const u8 link_status[DP_LINK_STATUS_SIZE],
4928 ++ int lane_count)
4929 ++{
4930 ++ int reduced_cnt;
4931 ++
4932 ++ if (lane_count <= 1)
4933 ++ return false;
4934 ++
4935 ++ /*
4936 ++ * only check the lanes that remain after the lane count is reduced:
4937 ++ * lane_count = 4 -> check 2 lanes
4938 ++ * lane_count = 2 -> check 1 lane
4939 ++ */
4940 ++ reduced_cnt = lane_count >> 1;
4941 ++
4942 ++ return drm_dp_clock_recovery_ok(link_status, reduced_cnt);
4943 ++}
4944 ++
4945 ++static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl)
4946 ++{
4947 ++ u8 link_status[DP_LINK_STATUS_SIZE];
4948 ++ int num_lanes = ctrl->link->link_params.num_lanes;
4949 ++
4950 ++ dp_ctrl_read_link_status(ctrl, link_status);
4951 ++
4952 ++ return drm_dp_channel_eq_ok(link_status, num_lanes);
4953 ++}
4954 ++
4955 + int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
4956 + {
4957 + int rc = 0;
4958 +@@ -1637,7 +1652,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
4959 + u32 rate = 0;
4960 + int link_train_max_retries = 5;
4961 + u32 const phy_cts_pixel_clk_khz = 148500;
4962 +- struct dp_cr_status cr;
4963 ++ u8 link_status[DP_LINK_STATUS_SIZE];
4964 + unsigned int training_step;
4965 +
4966 + if (!dp_ctrl)
4967 +@@ -1664,6 +1679,9 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
4968 + ctrl->link->link_params.rate,
4969 + ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
4970 +
4971 ++ ctrl->link->phy_params.p_level = 0;
4972 ++ ctrl->link->phy_params.v_level = 0;
4973 ++
4974 + rc = dp_ctrl_enable_mainlink_clocks(ctrl);
4975 + if (rc)
4976 + return rc;
4977 +@@ -1677,19 +1695,21 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
4978 + }
4979 +
4980 + training_step = DP_TRAINING_NONE;
4981 +- rc = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
4982 ++ rc = dp_ctrl_setup_main_link(ctrl, &training_step);
4983 + if (rc == 0) {
4984 + /* training completed successfully */
4985 + break;
4986 + } else if (training_step == DP_TRAINING_1) {
4987 + /* link train_1 failed */
4988 +- if (!dp_catalog_link_is_connected(ctrl->catalog)) {
4989 ++ if (!dp_catalog_link_is_connected(ctrl->catalog))
4990 + break;
4991 +- }
4992 ++
4993 ++ dp_ctrl_read_link_status(ctrl, link_status);
4994 +
4995 + rc = dp_ctrl_link_rate_down_shift(ctrl);
4996 + if (rc < 0) { /* already in RBR = 1.6G */
4997 +- if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) {
4998 ++ if (dp_ctrl_clock_recovery_any_ok(link_status,
4999 ++ ctrl->link->link_params.num_lanes)) {
5000 + /*
5001 + * some lanes are ready,
5002 + * reduce lane number
5003 +@@ -1705,12 +1725,18 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
5004 + }
5005 + }
5006 + } else if (training_step == DP_TRAINING_2) {
5007 +- /* link train_2 failed, lower lane rate */
5008 +- if (!dp_catalog_link_is_connected(ctrl->catalog)) {
5009 ++ /* link train_2 failed */
5010 ++ if (!dp_catalog_link_is_connected(ctrl->catalog))
5011 + break;
5012 +- }
5013 +
5014 +- rc = dp_ctrl_link_lane_down_shift(ctrl);
5015 ++ dp_ctrl_read_link_status(ctrl, link_status);
5016 ++
5017 ++ if (!drm_dp_clock_recovery_ok(link_status,
5018 ++ ctrl->link->link_params.num_lanes))
5019 ++ rc = dp_ctrl_link_rate_down_shift(ctrl);
5020 ++ else
5021 ++ rc = dp_ctrl_link_lane_down_shift(ctrl);
5022 ++
5023 + if (rc < 0) {
5024 + /* end with failure */
5025 + break; /* lane == 1 already */
5026 +@@ -1721,17 +1747,19 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
5027 + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
5028 + return rc;
5029 +
5030 +- /* stop txing train pattern */
5031 +- dp_ctrl_clear_training_pattern(ctrl);
5032 ++ if (rc == 0) { /* link trained successfully */
5033 ++ /*
5034 ++ * do not stop the training pattern here;
5035 ++ * it is cleared later, in on_stream, so the
5036 ++ * compliance test can still observe it
5037 ++ */
5038 ++ } else {
5039 ++ /*
5040 ++ * link training failed
5041 ++ * stop transmitting the training pattern here
5042 ++ */
5043 ++ dp_ctrl_clear_training_pattern(ctrl);
5044 +
5045 +- /*
5046 +- * keep transmitting idle pattern until video ready
5047 +- * to avoid main link from loss of sync
5048 +- */
5049 +- if (rc == 0) /* link train successfully */
5050 +- dp_ctrl_push_idle(dp_ctrl);
5051 +- else {
5052 +- /* link training failed */
5053 + dp_ctrl_deinitialize_mainlink(ctrl);
5054 + rc = -ECONNRESET;
5055 + }
5056 +@@ -1739,9 +1767,15 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
5057 + return rc;
5058 + }
5059 +
5060 ++static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
5061 ++{
5062 ++ int training_step = DP_TRAINING_NONE;
5063 ++
5064 ++ return dp_ctrl_setup_main_link(ctrl, &training_step);
5065 ++}
5066 ++
5067 + int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
5068 + {
5069 +- u32 rate = 0;
5070 + int ret = 0;
5071 + bool mainlink_ready = false;
5072 + struct dp_ctrl_private *ctrl;
5073 +@@ -1751,10 +1785,6 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
5074 +
5075 + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
5076 +
5077 +- rate = ctrl->panel->link_info.rate;
5078 +-
5079 +- ctrl->link->link_params.rate = rate;
5080 +- ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes;
5081 + ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
5082 +
5083 + DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
5084 +@@ -1769,6 +1799,12 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
5085 + }
5086 + }
5087 +
5088 ++ if (!dp_ctrl_channel_eq_ok(ctrl))
5089 ++ dp_ctrl_link_retrain(ctrl);
5090 ++
5091 ++ /* stop transmitting the training pattern to end link training */
5092 ++ dp_ctrl_clear_training_pattern(ctrl);
5093 ++
5094 + ret = dp_ctrl_enable_stream_clocks(ctrl);
5095 + if (ret) {
5096 + DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
5097 +diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
5098 +index 440b327534302..2181b60e1d1d8 100644
5099 +--- a/drivers/gpu/drm/msm/dp/dp_panel.c
5100 ++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
5101 +@@ -271,7 +271,7 @@ static u8 dp_panel_get_edid_checksum(struct edid *edid)
5102 + {
5103 + struct edid *last_block;
5104 + u8 *raw_edid;
5105 +- bool is_edid_corrupt;
5106 ++ bool is_edid_corrupt = false;
5107 +
5108 + if (!edid) {
5109 + DRM_ERROR("invalid edid input\n");
5110 +@@ -303,7 +303,12 @@ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
5111 + panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
5112 +
5113 + if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
5114 +- u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid);
5115 ++ u8 checksum;
5116 ++
5117 ++ if (dp_panel->edid)
5118 ++ checksum = dp_panel_get_edid_checksum(dp_panel->edid);
5119 ++ else
5120 ++ checksum = dp_panel->connector->real_edid_checksum;
5121 +
5122 + dp_link_send_edid_checksum(panel->link, checksum);
5123 + dp_link_send_test_response(panel->link);
5124 +diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
5125 +index f3f1c03c7db95..763f127e46213 100644
5126 +--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
5127 ++++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
5128 +@@ -154,7 +154,6 @@ static const struct msm_dsi_config sdm660_dsi_cfg = {
5129 + .reg_cfg = {
5130 + .num = 2,
5131 + .regs = {
5132 +- {"vdd", 73400, 32 }, /* 0.9 V */
5133 + {"vdda", 12560, 4 }, /* 1.2 V */
5134 + },
5135 + },
5136 +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
5137 +index a34cf151c5170..bb31230721bdd 100644
5138 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
5139 ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
5140 +@@ -1050,7 +1050,7 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
5141 + .reg_cfg = {
5142 + .num = 1,
5143 + .regs = {
5144 +- {"vcca", 17000, 32},
5145 ++ {"vcca", 73400, 32},
5146 + },
5147 + },
5148 + .ops = {
5149 +diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
5150 +index 801da917507d5..512af976b7e90 100644
5151 +--- a/drivers/gpu/drm/omapdrm/omap_plane.c
5152 ++++ b/drivers/gpu/drm/omapdrm/omap_plane.c
5153 +@@ -6,6 +6,7 @@
5154 +
5155 + #include <drm/drm_atomic.h>
5156 + #include <drm/drm_atomic_helper.h>
5157 ++#include <drm/drm_gem_atomic_helper.h>
5158 + #include <drm/drm_plane_helper.h>
5159 +
5160 + #include "omap_dmm_tiler.h"
5161 +@@ -29,6 +30,8 @@ static int omap_plane_prepare_fb(struct drm_plane *plane,
5162 + if (!new_state->fb)
5163 + return 0;
5164 +
5165 ++ drm_gem_plane_helper_prepare_fb(plane, new_state);
5166 ++
5167 + return omap_framebuffer_pin(new_state->fb);
5168 + }
5169 +
5170 +diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
5171 +index f614e98771e49..8b2cdb8c701d8 100644
5172 +--- a/drivers/gpu/drm/panfrost/panfrost_device.h
5173 ++++ b/drivers/gpu/drm/panfrost/panfrost_device.h
5174 +@@ -121,8 +121,12 @@ struct panfrost_device {
5175 + };
5176 +
5177 + struct panfrost_mmu {
5178 ++ struct panfrost_device *pfdev;
5179 ++ struct kref refcount;
5180 + struct io_pgtable_cfg pgtbl_cfg;
5181 + struct io_pgtable_ops *pgtbl_ops;
5182 ++ struct drm_mm mm;
5183 ++ spinlock_t mm_lock;
5184 + int as;
5185 + atomic_t as_count;
5186 + struct list_head list;
5187 +@@ -133,9 +137,7 @@ struct panfrost_file_priv {
5188 +
5189 + struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
5190 +
5191 +- struct panfrost_mmu mmu;
5192 +- struct drm_mm mm;
5193 +- spinlock_t mm_lock;
5194 ++ struct panfrost_mmu *mmu;
5195 + };
5196 +
5197 + static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
5198 +diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
5199 +index 075ec0ef746cf..945133db1857f 100644
5200 +--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
5201 ++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
5202 +@@ -417,7 +417,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
5203 + * anyway, so let's not bother.
5204 + */
5205 + if (!list_is_singular(&bo->mappings.list) ||
5206 +- WARN_ON_ONCE(first->mmu != &priv->mmu)) {
5207 ++ WARN_ON_ONCE(first->mmu != priv->mmu)) {
5208 + ret = -EINVAL;
5209 + goto out_unlock_mappings;
5210 + }
5211 +@@ -449,32 +449,6 @@ int panfrost_unstable_ioctl_check(void)
5212 + return 0;
5213 + }
5214 +
5215 +-#define PFN_4G (SZ_4G >> PAGE_SHIFT)
5216 +-#define PFN_4G_MASK (PFN_4G - 1)
5217 +-#define PFN_16M (SZ_16M >> PAGE_SHIFT)
5218 +-
5219 +-static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
5220 +- unsigned long color,
5221 +- u64 *start, u64 *end)
5222 +-{
5223 +- /* Executable buffers can't start or end on a 4GB boundary */
5224 +- if (!(color & PANFROST_BO_NOEXEC)) {
5225 +- u64 next_seg;
5226 +-
5227 +- if ((*start & PFN_4G_MASK) == 0)
5228 +- (*start)++;
5229 +-
5230 +- if ((*end & PFN_4G_MASK) == 0)
5231 +- (*end)--;
5232 +-
5233 +- next_seg = ALIGN(*start, PFN_4G);
5234 +- if (next_seg - *start <= PFN_16M)
5235 +- *start = next_seg + 1;
5236 +-
5237 +- *end = min(*end, ALIGN(*start, PFN_4G) - 1);
5238 +- }
5239 +-}
5240 +-
5241 + static int
5242 + panfrost_open(struct drm_device *dev, struct drm_file *file)
5243 + {
5244 +@@ -489,15 +463,11 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
5245 + panfrost_priv->pfdev = pfdev;
5246 + file->driver_priv = panfrost_priv;
5247 +
5248 +- spin_lock_init(&panfrost_priv->mm_lock);
5249 +-
5250 +- /* 4G enough for now. can be 48-bit */
5251 +- drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
5252 +- panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust;
5253 +-
5254 +- ret = panfrost_mmu_pgtable_alloc(panfrost_priv);
5255 +- if (ret)
5256 +- goto err_pgtable;
5257 ++ panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
5258 ++ if (IS_ERR(panfrost_priv->mmu)) {
5259 ++ ret = PTR_ERR(panfrost_priv->mmu);
5260 ++ goto err_free;
5261 ++ }
5262 +
5263 + ret = panfrost_job_open(panfrost_priv);
5264 + if (ret)
5265 +@@ -506,9 +476,8 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
5266 + return 0;
5267 +
5268 + err_job:
5269 +- panfrost_mmu_pgtable_free(panfrost_priv);
5270 +-err_pgtable:
5271 +- drm_mm_takedown(&panfrost_priv->mm);
5272 ++ panfrost_mmu_ctx_put(panfrost_priv->mmu);
5273 ++err_free:
5274 + kfree(panfrost_priv);
5275 + return ret;
5276 + }
5277 +@@ -521,8 +490,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
5278 + panfrost_perfcnt_close(file);
5279 + panfrost_job_close(panfrost_priv);
5280 +
5281 +- panfrost_mmu_pgtable_free(panfrost_priv);
5282 +- drm_mm_takedown(&panfrost_priv->mm);
5283 ++ panfrost_mmu_ctx_put(panfrost_priv->mmu);
5284 + kfree(panfrost_priv);
5285 + }
5286 +
5287 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
5288 +index 3e0723bc36bda..23377481f4e31 100644
5289 +--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
5290 ++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
5291 +@@ -60,7 +60,7 @@ panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
5292 +
5293 + mutex_lock(&bo->mappings.lock);
5294 + list_for_each_entry(iter, &bo->mappings.list, node) {
5295 +- if (iter->mmu == &priv->mmu) {
5296 ++ if (iter->mmu == priv->mmu) {
5297 + kref_get(&iter->refcount);
5298 + mapping = iter;
5299 + break;
5300 +@@ -74,16 +74,13 @@ panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
5301 + static void
5302 + panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
5303 + {
5304 +- struct panfrost_file_priv *priv;
5305 +-
5306 + if (mapping->active)
5307 + panfrost_mmu_unmap(mapping);
5308 +
5309 +- priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
5310 +- spin_lock(&priv->mm_lock);
5311 ++ spin_lock(&mapping->mmu->mm_lock);
5312 + if (drm_mm_node_allocated(&mapping->mmnode))
5313 + drm_mm_remove_node(&mapping->mmnode);
5314 +- spin_unlock(&priv->mm_lock);
5315 ++ spin_unlock(&mapping->mmu->mm_lock);
5316 + }
5317 +
5318 + static void panfrost_gem_mapping_release(struct kref *kref)
5319 +@@ -94,6 +91,7 @@ static void panfrost_gem_mapping_release(struct kref *kref)
5320 +
5321 + panfrost_gem_teardown_mapping(mapping);
5322 + drm_gem_object_put(&mapping->obj->base.base);
5323 ++ panfrost_mmu_ctx_put(mapping->mmu);
5324 + kfree(mapping);
5325 + }
5326 +
5327 +@@ -143,11 +141,11 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
5328 + else
5329 + align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
5330 +
5331 +- mapping->mmu = &priv->mmu;
5332 +- spin_lock(&priv->mm_lock);
5333 +- ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
5334 ++ mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
5335 ++ spin_lock(&mapping->mmu->mm_lock);
5336 ++ ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
5337 + size >> PAGE_SHIFT, align, color, 0);
5338 +- spin_unlock(&priv->mm_lock);
5339 ++ spin_unlock(&mapping->mmu->mm_lock);
5340 + if (ret)
5341 + goto err;
5342 +
5343 +@@ -176,7 +174,7 @@ void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
5344 +
5345 + mutex_lock(&bo->mappings.lock);
5346 + list_for_each_entry(iter, &bo->mappings.list, node) {
5347 +- if (iter->mmu == &priv->mmu) {
5348 ++ if (iter->mmu == priv->mmu) {
5349 + mapping = iter;
5350 + list_del(&iter->node);
5351 + break;
5352 +diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
5353 +index 2df3e999a38d0..3757c6eb30238 100644
5354 +--- a/drivers/gpu/drm/panfrost/panfrost_job.c
5355 ++++ b/drivers/gpu/drm/panfrost/panfrost_job.c
5356 +@@ -165,7 +165,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
5357 + return;
5358 + }
5359 +
5360 +- cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
5361 ++ cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
5362 +
5363 + job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
5364 + job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
5365 +@@ -527,7 +527,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
5366 + if (job) {
5367 + pfdev->jobs[j] = NULL;
5368 +
5369 +- panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
5370 ++ panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
5371 + panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
5372 +
5373 + dma_fence_signal_locked(job->done_fence);
5374 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
5375 +index 0581186ebfb3a..eea6ade902cb4 100644
5376 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
5377 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
5378 +@@ -1,5 +1,8 @@
5379 + // SPDX-License-Identifier: GPL-2.0
5380 + /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@××××××.org> */
5381 ++
5382 ++#include <drm/panfrost_drm.h>
5383 ++
5384 + #include <linux/atomic.h>
5385 + #include <linux/bitfield.h>
5386 + #include <linux/delay.h>
5387 +@@ -52,25 +55,16 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
5388 + }
5389 +
5390 + static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
5391 +- u64 iova, size_t size)
5392 ++ u64 iova, u64 size)
5393 + {
5394 + u8 region_width;
5395 + u64 region = iova & PAGE_MASK;
5396 +- /*
5397 +- * fls returns:
5398 +- * 1 .. 32
5399 +- *
5400 +- * 10 + fls(num_pages)
5401 +- * results in the range (11 .. 42)
5402 +- */
5403 +-
5404 +- size = round_up(size, PAGE_SIZE);
5405 +
5406 +- region_width = 10 + fls(size >> PAGE_SHIFT);
5407 +- if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
5408 +- /* not pow2, so must go up to the next pow2 */
5409 +- region_width += 1;
5410 +- }
5411 ++ /* The size is encoded as ceil(log2(size)) - 1, computed here as
5412 ++ * fls64(size - 1) - 1. The size must be clamped to hardware bounds.
5413 ++ */
5414 ++ size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
5415 ++ region_width = fls64(size - 1) - 1;
5416 + region |= region_width;
5417 +
5418 + /* Lock the region that needs to be updated */
5419 +@@ -81,7 +75,7 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
5420 +
5421 +
5422 + static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
5423 +- u64 iova, size_t size, u32 op)
5424 ++ u64 iova, u64 size, u32 op)
5425 + {
5426 + if (as_nr < 0)
5427 + return 0;
5428 +@@ -98,7 +92,7 @@ static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
5429 +
5430 + static int mmu_hw_do_operation(struct panfrost_device *pfdev,
5431 + struct panfrost_mmu *mmu,
5432 +- u64 iova, size_t size, u32 op)
5433 ++ u64 iova, u64 size, u32 op)
5434 + {
5435 + int ret;
5436 +
5437 +@@ -115,7 +109,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
5438 + u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
5439 + u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
5440 +
5441 +- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
5442 ++ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
5443 +
5444 + mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
5445 + mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
5446 +@@ -131,7 +125,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
5447 +
5448 + static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
5449 + {
5450 +- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
5451 ++ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
5452 +
5453 + mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
5454 + mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
5455 +@@ -231,7 +225,7 @@ static size_t get_pgsize(u64 addr, size_t size)
5456 +
5457 + static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
5458 + struct panfrost_mmu *mmu,
5459 +- u64 iova, size_t size)
5460 ++ u64 iova, u64 size)
5461 + {
5462 + if (mmu->as < 0)
5463 + return;
5464 +@@ -337,7 +331,7 @@ static void mmu_tlb_inv_context_s1(void *cookie)
5465 +
5466 + static void mmu_tlb_sync_context(void *cookie)
5467 + {
5468 +- //struct panfrost_device *pfdev = cookie;
5469 ++ //struct panfrost_mmu *mmu = cookie;
5470 + // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
5471 + }
5472 +
5473 +@@ -352,57 +346,10 @@ static const struct iommu_flush_ops mmu_tlb_ops = {
5474 + .tlb_flush_walk = mmu_tlb_flush_walk,
5475 + };
5476 +
5477 +-int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
5478 +-{
5479 +- struct panfrost_mmu *mmu = &priv->mmu;
5480 +- struct panfrost_device *pfdev = priv->pfdev;
5481 +-
5482 +- INIT_LIST_HEAD(&mmu->list);
5483 +- mmu->as = -1;
5484 +-
5485 +- mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
5486 +- .pgsize_bitmap = SZ_4K | SZ_2M,
5487 +- .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
5488 +- .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
5489 +- .coherent_walk = pfdev->coherent,
5490 +- .tlb = &mmu_tlb_ops,
5491 +- .iommu_dev = pfdev->dev,
5492 +- };
5493 +-
5494 +- mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
5495 +- priv);
5496 +- if (!mmu->pgtbl_ops)
5497 +- return -EINVAL;
5498 +-
5499 +- return 0;
5500 +-}
5501 +-
5502 +-void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
5503 +-{
5504 +- struct panfrost_device *pfdev = priv->pfdev;
5505 +- struct panfrost_mmu *mmu = &priv->mmu;
5506 +-
5507 +- spin_lock(&pfdev->as_lock);
5508 +- if (mmu->as >= 0) {
5509 +- pm_runtime_get_noresume(pfdev->dev);
5510 +- if (pm_runtime_active(pfdev->dev))
5511 +- panfrost_mmu_disable(pfdev, mmu->as);
5512 +- pm_runtime_put_autosuspend(pfdev->dev);
5513 +-
5514 +- clear_bit(mmu->as, &pfdev->as_alloc_mask);
5515 +- clear_bit(mmu->as, &pfdev->as_in_use_mask);
5516 +- list_del(&mmu->list);
5517 +- }
5518 +- spin_unlock(&pfdev->as_lock);
5519 +-
5520 +- free_io_pgtable_ops(mmu->pgtbl_ops);
5521 +-}
5522 +-
5523 + static struct panfrost_gem_mapping *
5524 + addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
5525 + {
5526 + struct panfrost_gem_mapping *mapping = NULL;
5527 +- struct panfrost_file_priv *priv;
5528 + struct drm_mm_node *node;
5529 + u64 offset = addr >> PAGE_SHIFT;
5530 + struct panfrost_mmu *mmu;
5531 +@@ -415,11 +362,10 @@ addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
5532 + goto out;
5533 +
5534 + found_mmu:
5535 +- priv = container_of(mmu, struct panfrost_file_priv, mmu);
5536 +
5537 +- spin_lock(&priv->mm_lock);
5538 ++ spin_lock(&mmu->mm_lock);
5539 +
5540 +- drm_mm_for_each_node(node, &priv->mm) {
5541 ++ drm_mm_for_each_node(node, &mmu->mm) {
5542 + if (offset >= node->start &&
5543 + offset < (node->start + node->size)) {
5544 + mapping = drm_mm_node_to_panfrost_mapping(node);
5545 +@@ -429,7 +375,7 @@ found_mmu:
5546 + }
5547 + }
5548 +
5549 +- spin_unlock(&priv->mm_lock);
5550 ++ spin_unlock(&mmu->mm_lock);
5551 + out:
5552 + spin_unlock(&pfdev->as_lock);
5553 + return mapping;
5554 +@@ -542,6 +488,107 @@ err_bo:
5555 + return ret;
5556 + }
5557 +
5558 ++static void panfrost_mmu_release_ctx(struct kref *kref)
5559 ++{
5560 ++ struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
5561 ++ refcount);
5562 ++ struct panfrost_device *pfdev = mmu->pfdev;
5563 ++
5564 ++ spin_lock(&pfdev->as_lock);
5565 ++ if (mmu->as >= 0) {
5566 ++ pm_runtime_get_noresume(pfdev->dev);
5567 ++ if (pm_runtime_active(pfdev->dev))
5568 ++ panfrost_mmu_disable(pfdev, mmu->as);
5569 ++ pm_runtime_put_autosuspend(pfdev->dev);
5570 ++
5571 ++ clear_bit(mmu->as, &pfdev->as_alloc_mask);
5572 ++ clear_bit(mmu->as, &pfdev->as_in_use_mask);
5573 ++ list_del(&mmu->list);
5574 ++ }
5575 ++ spin_unlock(&pfdev->as_lock);
5576 ++
5577 ++ free_io_pgtable_ops(mmu->pgtbl_ops);
5578 ++ drm_mm_takedown(&mmu->mm);
5579 ++ kfree(mmu);
5580 ++}
5581 ++
5582 ++void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
5583 ++{
5584 ++ kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
5585 ++}
5586 ++
5587 ++struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
5588 ++{
5589 ++ kref_get(&mmu->refcount);
5590 ++
5591 ++ return mmu;
5592 ++}
5593 ++
5594 ++#define PFN_4G (SZ_4G >> PAGE_SHIFT)
5595 ++#define PFN_4G_MASK (PFN_4G - 1)
5596 ++#define PFN_16M (SZ_16M >> PAGE_SHIFT)
5597 ++
5598 ++static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
5599 ++ unsigned long color,
5600 ++ u64 *start, u64 *end)
5601 ++{
5602 ++ /* Executable buffers can't start or end on a 4GB boundary */
5603 ++ if (!(color & PANFROST_BO_NOEXEC)) {
5604 ++ u64 next_seg;
5605 ++
5606 ++ if ((*start & PFN_4G_MASK) == 0)
5607 ++ (*start)++;
5608 ++
5609 ++ if ((*end & PFN_4G_MASK) == 0)
5610 ++ (*end)--;
5611 ++
5612 ++ next_seg = ALIGN(*start, PFN_4G);
5613 ++ if (next_seg - *start <= PFN_16M)
5614 ++ *start = next_seg + 1;
5615 ++
5616 ++ *end = min(*end, ALIGN(*start, PFN_4G) - 1);
5617 ++ }
5618 ++}
5619 ++
5620 ++struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
5621 ++{
5622 ++ struct panfrost_mmu *mmu;
5623 ++
5624 ++ mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
5625 ++ if (!mmu)
5626 ++ return ERR_PTR(-ENOMEM);
5627 ++
5628 ++ mmu->pfdev = pfdev;
5629 ++ spin_lock_init(&mmu->mm_lock);
5630 ++
5631 ++ /* 4G enough for now. can be 48-bit */
5632 ++ drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
5633 ++ mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;
5634 ++
5635 ++ INIT_LIST_HEAD(&mmu->list);
5636 ++ mmu->as = -1;
5637 ++
5638 ++ mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
5639 ++ .pgsize_bitmap = SZ_4K | SZ_2M,
5640 ++ .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
5641 ++ .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
5642 ++ .coherent_walk = pfdev->coherent,
5643 ++ .tlb = &mmu_tlb_ops,
5644 ++ .iommu_dev = pfdev->dev,
5645 ++ };
5646 ++
5647 ++ mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
5648 ++ mmu);
5649 ++ if (!mmu->pgtbl_ops) {
5650 ++ kfree(mmu);
5651 ++ return ERR_PTR(-EINVAL);
5652 ++ }
5653 ++
5654 ++ kref_init(&mmu->refcount);
5655 ++
5656 ++ return mmu;
5657 ++}
5658 ++
5659 + static const char *access_type_name(struct panfrost_device *pfdev,
5660 + u32 fault_status)
5661 + {
5662 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
5663 +index 44fc2edf63ce6..cc2a0d307febc 100644
5664 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
5665 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
5666 +@@ -18,7 +18,8 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev);
5667 + u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
5668 + void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
5669 +
5670 +-int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv);
5671 +-void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv);
5672 ++struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu);
5673 ++void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu);
5674 ++struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev);
5675 +
5676 + #endif
5677 +diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
5678 +index dc9df5457f1c3..db3d9930b19c1 100644
5679 +--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
5680 ++++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
5681 +@@ -319,6 +319,8 @@
5682 + #define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
5683 + #define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
5684 +
5685 ++#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)
5686 ++
5687 + #define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
5688 + #define gpu_read(dev, reg) readl(dev->iomem + reg)
5689 +
5690 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
5691 +index c22551c2facb1..2a06ec1cbefb0 100644
5692 +--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
5693 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
5694 +@@ -559,6 +559,13 @@ static int rcar_du_remove(struct platform_device *pdev)
5695 + return 0;
5696 + }
5697 +
5698 ++static void rcar_du_shutdown(struct platform_device *pdev)
5699 ++{
5700 ++ struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
5701 ++
5702 ++ drm_atomic_helper_shutdown(&rcdu->ddev);
5703 ++}
5704 ++
5705 + static int rcar_du_probe(struct platform_device *pdev)
5706 + {
5707 + struct rcar_du_device *rcdu;
5708 +@@ -615,6 +622,7 @@ error:
5709 + static struct platform_driver rcar_du_platform_driver = {
5710 + .probe = rcar_du_probe,
5711 + .remove = rcar_du_remove,
5712 ++ .shutdown = rcar_du_shutdown,
5713 + .driver = {
5714 + .name = "rcar-du",
5715 + .pm = &rcar_du_pm_ops,
5716 +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
5717 +index 8d7fd65ccced3..32202385073a2 100644
5718 +--- a/drivers/gpu/drm/ttm/ttm_bo.c
5719 ++++ b/drivers/gpu/drm/ttm/ttm_bo.c
5720 +@@ -488,6 +488,31 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
5721 + }
5722 + EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
5723 +
5724 ++static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
5725 ++ struct ttm_resource **mem,
5726 ++ struct ttm_operation_ctx *ctx,
5727 ++ struct ttm_place *hop)
5728 ++{
5729 ++ struct ttm_placement hop_placement;
5730 ++ struct ttm_resource *hop_mem;
5731 ++ int ret;
5732 ++
5733 ++ hop_placement.num_placement = hop_placement.num_busy_placement = 1;
5734 ++ hop_placement.placement = hop_placement.busy_placement = hop;
5735 ++
5736 ++ /* find space in the bounce domain */
5737 ++ ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
5738 ++ if (ret)
5739 ++ return ret;
5740 ++ /* move to the bounce domain */
5741 ++ ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
5742 ++ if (ret) {
5743 ++ ttm_resource_free(bo, &hop_mem);
5744 ++ return ret;
5745 ++ }
5746 ++ return 0;
5747 ++}
5748 ++
5749 + static int ttm_bo_evict(struct ttm_buffer_object *bo,
5750 + struct ttm_operation_ctx *ctx)
5751 + {
5752 +@@ -527,12 +552,17 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
5753 + goto out;
5754 + }
5755 +
5756 ++bounce:
5757 + ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
5758 +- if (unlikely(ret)) {
5759 +- WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
5760 +- if (ret != -ERESTARTSYS)
5761 ++ if (ret == -EMULTIHOP) {
5762 ++ ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
5763 ++ if (ret) {
5764 + pr_err("Buffer eviction failed\n");
5765 +- ttm_resource_free(bo, &evict_mem);
5766 ++ ttm_resource_free(bo, &evict_mem);
5767 ++ goto out;
5768 ++ }
5769 ++ /* try and move to final place now. */
5770 ++ goto bounce;
5771 + }
5772 + out:
5773 + return ret;
5774 +@@ -847,31 +877,6 @@ error:
5775 + }
5776 + EXPORT_SYMBOL(ttm_bo_mem_space);
5777 +
5778 +-static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
5779 +- struct ttm_resource **mem,
5780 +- struct ttm_operation_ctx *ctx,
5781 +- struct ttm_place *hop)
5782 +-{
5783 +- struct ttm_placement hop_placement;
5784 +- struct ttm_resource *hop_mem;
5785 +- int ret;
5786 +-
5787 +- hop_placement.num_placement = hop_placement.num_busy_placement = 1;
5788 +- hop_placement.placement = hop_placement.busy_placement = hop;
5789 +-
5790 +- /* find space in the bounce domain */
5791 +- ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
5792 +- if (ret)
5793 +- return ret;
5794 +- /* move to the bounce domain */
5795 +- ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
5796 +- if (ret) {
5797 +- ttm_resource_free(bo, &hop_mem);
5798 +- return ret;
5799 +- }
5800 +- return 0;
5801 +-}
5802 +-
5803 + static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
5804 + struct ttm_placement *placement,
5805 + struct ttm_operation_ctx *ctx)
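Eviction now handles -EMULTIHOP the same way a normal move does: when the driver reports that the move needs an intermediate placement, the buffer is first bounced into the domain described by hop, then the final move is retried, looping until no further hop is requested. Condensed from the hunk:

	bounce:
		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret == -EMULTIHOP) {
			/* park the buffer in the intermediate (bounce) domain */
			ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
			if (!ret)
				goto bounce;	/* retry the final placement */
		}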
5806 +diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
5807 +index 763fa6f4e07de..1c5ffe2935af5 100644
5808 +--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
5809 ++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
5810 +@@ -143,7 +143,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
5811 + struct ttm_resource *src_mem = bo->resource;
5812 + struct ttm_resource_manager *src_man =
5813 + ttm_manager_type(bdev, src_mem->mem_type);
5814 +- struct ttm_resource src_copy = *src_mem;
5815 + union {
5816 + struct ttm_kmap_iter_tt tt;
5817 + struct ttm_kmap_iter_linear_io io;
5818 +@@ -173,11 +172,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
5819 + }
5820 +
5821 + ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
5822 +- src_copy = *src_mem;
5823 +- ttm_bo_move_sync_cleanup(bo, dst_mem);
5824 +
5825 + if (!src_iter->ops->maps_tt)
5826 +- ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, &src_copy);
5827 ++ ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
5828 ++ ttm_bo_move_sync_cleanup(bo, dst_mem);
5829 ++
5830 + out_src_iter:
5831 + if (!dst_iter->ops->maps_tt)
5832 + ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
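The memcpy-move fix is purely an ordering one: the source linear-io iterator must be torn down against the still-valid src_mem before ttm_bo_move_sync_cleanup() releases the old resource, which also removes the stack copy that only papered over the use-after-free. The fixed sequence from the hunk:

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
	ttm_bo_move_sync_cleanup(bo, dst_mem);	/* old resource goes away here */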
5833 +diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
5834 +index c2876731ee2dc..f91d37beb1133 100644
5835 +--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
5836 ++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
5837 +@@ -613,12 +613,12 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
5838 +
5839 + HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0);
5840 +
5841 +- HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) |
5842 +- VC4_HD_VID_CTL_CLRRGB | VC4_HD_VID_CTL_CLRSYNC);
5843 ++ HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_CLRRGB);
5844 +
5845 +- HDMI_WRITE(HDMI_VID_CTL,
5846 +- HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
5847 ++ mdelay(1);
5848 +
5849 ++ HDMI_WRITE(HDMI_VID_CTL,
5850 ++ HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
5851 + vc4_hdmi_disable_scrambling(encoder);
5852 + }
5853 +
5854 +@@ -628,12 +628,12 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
5855 + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
5856 + int ret;
5857 +
5858 ++ HDMI_WRITE(HDMI_VID_CTL,
5859 ++ HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
5860 ++
5861 + if (vc4_hdmi->variant->phy_disable)
5862 + vc4_hdmi->variant->phy_disable(vc4_hdmi);
5863 +
5864 +- HDMI_WRITE(HDMI_VID_CTL,
5865 +- HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
5866 +-
5867 + clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
5868 + clk_disable_unprepare(vc4_hdmi->pixel_clock);
5869 +
5870 +@@ -1015,6 +1015,7 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
5871 +
5872 + HDMI_WRITE(HDMI_VID_CTL,
5873 + VC4_HD_VID_CTL_ENABLE |
5874 ++ VC4_HD_VID_CTL_CLRRGB |
5875 + VC4_HD_VID_CTL_UNDERFLOW_ENABLE |
5876 + VC4_HD_VID_CTL_FRAME_COUNTER_RESET |
5877 + (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) |
5878 +@@ -1372,7 +1373,9 @@ static int vc4_hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
5879 + HDMI_WRITE(HDMI_MAI_CTL,
5880 + VC4_SET_FIELD(vc4_hdmi->audio.channels,
5881 + VC4_HD_MAI_CTL_CHNUM) |
5882 +- VC4_HD_MAI_CTL_ENABLE);
5883 ++ VC4_HD_MAI_CTL_WHOLSMP |
5884 ++ VC4_HD_MAI_CTL_CHALIGN |
5885 ++ VC4_HD_MAI_CTL_ENABLE);
5886 + break;
5887 + case SNDRV_PCM_TRIGGER_STOP:
5888 + HDMI_WRITE(HDMI_MAI_CTL,
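The vc4 disable path is re-sequenced: blank the RGB output first, give the hardware roughly a millisecond to scan out a frame of blank pixels, and only then clear VC4_HD_VID_CTL_ENABLE, with BLANKPIX moved ahead of the PHY shutdown in the powerdown hook. Condensed from the hunks:

	HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_CLRRGB);
	mdelay(1);	/* let a blanked frame scan out */
	HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);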
5889 +diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
5890 +index 107521ace597a..092514a2155fe 100644
5891 +--- a/drivers/gpu/drm/vkms/vkms_plane.c
5892 ++++ b/drivers/gpu/drm/vkms/vkms_plane.c
5893 +@@ -8,7 +8,6 @@
5894 + #include <drm/drm_gem_atomic_helper.h>
5895 + #include <drm/drm_gem_framebuffer_helper.h>
5896 + #include <drm/drm_plane_helper.h>
5897 +-#include <drm/drm_gem_shmem_helper.h>
5898 +
5899 + #include "vkms_drv.h"
5900 +
5901 +@@ -150,45 +149,10 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
5902 + return 0;
5903 + }
5904 +
5905 +-static int vkms_prepare_fb(struct drm_plane *plane,
5906 +- struct drm_plane_state *state)
5907 +-{
5908 +- struct drm_gem_object *gem_obj;
5909 +- struct dma_buf_map map;
5910 +- int ret;
5911 +-
5912 +- if (!state->fb)
5913 +- return 0;
5914 +-
5915 +- gem_obj = drm_gem_fb_get_obj(state->fb, 0);
5916 +- ret = drm_gem_shmem_vmap(gem_obj, &map);
5917 +- if (ret)
5918 +- DRM_ERROR("vmap failed: %d\n", ret);
5919 +-
5920 +- return drm_gem_plane_helper_prepare_fb(plane, state);
5921 +-}
5922 +-
5923 +-static void vkms_cleanup_fb(struct drm_plane *plane,
5924 +- struct drm_plane_state *old_state)
5925 +-{
5926 +- struct drm_gem_object *gem_obj;
5927 +- struct drm_gem_shmem_object *shmem_obj;
5928 +- struct dma_buf_map map;
5929 +-
5930 +- if (!old_state->fb)
5931 +- return;
5932 +-
5933 +- gem_obj = drm_gem_fb_get_obj(old_state->fb, 0);
5934 +- shmem_obj = to_drm_gem_shmem_obj(drm_gem_fb_get_obj(old_state->fb, 0));
5935 +- dma_buf_map_set_vaddr(&map, shmem_obj->vaddr);
5936 +- drm_gem_shmem_vunmap(gem_obj, &map);
5937 +-}
5938 +-
5939 + static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
5940 + .atomic_update = vkms_plane_atomic_update,
5941 + .atomic_check = vkms_plane_atomic_check,
5942 +- .prepare_fb = vkms_prepare_fb,
5943 +- .cleanup_fb = vkms_cleanup_fb,
5944 ++ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
5945 + };
5946 +
5947 + struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
5948 +diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c
5949 +index aeb0a22a2c347..edd17c30d5a51 100644
5950 +--- a/drivers/gpu/drm/vmwgfx/ttm_memory.c
5951 ++++ b/drivers/gpu/drm/vmwgfx/ttm_memory.c
5952 +@@ -435,8 +435,10 @@ int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev)
5953 +
5954 + si_meminfo(&si);
5955 +
5956 ++ spin_lock(&glob->lock);
5957 + /* set it as 0 by default to keep original behavior of OOM */
5958 + glob->lower_mem_limit = 0;
5959 ++ spin_unlock(&glob->lock);
5960 +
5961 + ret = ttm_mem_init_kernel_zone(glob, &si);
5962 + if (unlikely(ret != 0))
5963 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
5964 +index 05b3248259007..ea6d8c86985f6 100644
5965 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
5966 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
5967 +@@ -715,7 +715,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
5968 + * without checking which bindings actually need to be emitted
5969 + *
5970 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state
5971 +- * @bi: Pointer to where the binding info array is stored in @cbs
5972 ++ * @biv: Pointer to where the binding info array is stored in @cbs
5973 + * @max_num: Maximum number of entries in the @bi array.
5974 + *
5975 + * Scans the @bi array for bindings and builds a buffer of view id data.
5976 +@@ -725,11 +725,9 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
5977 + * contains the command data.
5978 + */
5979 + static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
5980 +- const struct vmw_ctx_bindinfo *bi,
5981 ++ const struct vmw_ctx_bindinfo_view *biv,
5982 + u32 max_num)
5983 + {
5984 +- const struct vmw_ctx_bindinfo_view *biv =
5985 +- container_of(bi, struct vmw_ctx_bindinfo_view, bi);
5986 + unsigned long i;
5987 +
5988 + cbs->bind_cmd_count = 0;
5989 +@@ -838,7 +836,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
5990 + */
5991 + static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
5992 + {
5993 +- const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
5994 ++ const struct vmw_ctx_bindinfo_view *loc = &cbs->render_targets[0];
5995 + struct {
5996 + SVGA3dCmdHeader header;
5997 + SVGA3dCmdDXSetRenderTargets body;
5998 +@@ -874,7 +872,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
5999 + * without checking which bindings actually need to be emitted
6000 + *
6001 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state
6002 +- * @bi: Pointer to where the binding info array is stored in @cbs
6003 ++ * @biso: Pointer to where the binding info array is stored in @cbs
6004 + * @max_num: Maximum number of entries in the @bi array.
6005 + *
6006 + * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
6007 +@@ -884,11 +882,9 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
6008 + * contains the command data.
6009 + */
6010 + static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
6011 +- const struct vmw_ctx_bindinfo *bi,
6012 ++ const struct vmw_ctx_bindinfo_so_target *biso,
6013 + u32 max_num)
6014 + {
6015 +- const struct vmw_ctx_bindinfo_so_target *biso =
6016 +- container_of(bi, struct vmw_ctx_bindinfo_so_target, bi);
6017 + unsigned long i;
6018 + SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
6019 +
6020 +@@ -919,7 +915,7 @@ static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
6021 + */
6022 + static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
6023 + {
6024 +- const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
6025 ++ const struct vmw_ctx_bindinfo_so_target *loc = &cbs->so_targets[0];
6026 + struct {
6027 + SVGA3dCmdHeader header;
6028 + SVGA3dCmdDXSetSOTargets body;
6029 +@@ -1066,7 +1062,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
6030 +
6031 + static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
6032 + {
6033 +- const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[0].views[0].bi;
6034 ++ const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[0].views[0];
6035 + struct {
6036 + SVGA3dCmdHeader header;
6037 + SVGA3dCmdDXSetUAViews body;
6038 +@@ -1096,7 +1092,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
6039 +
6040 + static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
6041 + {
6042 +- const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[1].views[0].bi;
6043 ++ const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[1].views[0];
6044 + struct {
6045 + SVGA3dCmdHeader header;
6046 + SVGA3dCmdDXSetCSUAViews body;
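
The vmwgfx hunks above push the derived binding type into the collector signatures, so callers pass &cbs->render_targets[0] (or so_targets, ua_views) directly and the container_of() round-trip from the embedded base struct disappears. The pattern in isolation, with illustrative types rather than the vmwgfx ones:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bindinfo { int res_id; };
    struct bindinfo_view { struct bindinfo bi; int view_id; };

    /* Before: callers pass &view->bi, and the callee recovers the wrapper. */
    static int collect_before(struct bindinfo *bi)
    {
            struct bindinfo_view *biv =
                    container_of(bi, struct bindinfo_view, bi);

            return biv->view_id;
    }

    /* After: the callee takes the wrapper it actually needs. */
    static int collect_after(struct bindinfo_view *biv)
    {
            return biv->view_id;
    }
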
6047 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
6048 +index 6bb4961e64a57..9656d4a2abff8 100644
6049 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
6050 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
6051 +@@ -516,7 +516,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
6052 + struct vmw_cmdbuf_man *man =
6053 + container_of(work, struct vmw_cmdbuf_man, work);
6054 + struct vmw_cmdbuf_header *entry, *next;
6055 +- uint32_t dummy;
6056 ++ uint32_t dummy = 0;
6057 + bool send_fence = false;
6058 + struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
6059 + int i;
6060 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
6061 +index b262d61d839d5..9487faff52293 100644
6062 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
6063 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
6064 +@@ -159,6 +159,7 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
6065 + void vmw_cmdbuf_res_revert(struct list_head *list)
6066 + {
6067 + struct vmw_cmdbuf_res *entry, *next;
6068 ++ int ret;
6069 +
6070 + list_for_each_entry_safe(entry, next, list, head) {
6071 + switch (entry->state) {
6072 +@@ -166,7 +167,8 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
6073 + vmw_cmdbuf_res_free(entry->man, entry);
6074 + break;
6075 + case VMW_CMDBUF_RES_DEL:
6076 +- drm_ht_insert_item(&entry->man->resources, &entry->hash);
6077 ++ ret = drm_ht_insert_item(&entry->man->resources, &entry->hash);
6078 ++ BUG_ON(ret);
6079 + list_del(&entry->head);
6080 + list_add_tail(&entry->head, &entry->man->list);
6081 + entry->state = VMW_CMDBUF_RES_COMMITTED;
6082 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
6083 +index a2b8464b3f566..06e8332682c5e 100644
6084 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
6085 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
6086 +@@ -2546,6 +2546,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
6087 +
6088 + so_type = vmw_so_cmd_to_type(header->id);
6089 + res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
6090 ++ if (IS_ERR(res))
6091 ++ return PTR_ERR(res);
6092 + cmd = container_of(header, typeof(*cmd), header);
6093 + ret = vmw_cotable_notify(res, cmd->defined_id);
6094 +
6095 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
6096 +index f2d6254154585..2d8caf09f1727 100644
6097 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
6098 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
6099 +@@ -506,11 +506,13 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
6100 + {
6101 + unsigned long num_pt_pages = 0;
6102 + struct ttm_buffer_object *bo = mob->pt_bo;
6103 +- struct vmw_piter save_pt_iter;
6104 ++ struct vmw_piter save_pt_iter = {0};
6105 + struct vmw_piter pt_iter;
6106 + const struct vmw_sg_table *vsgt;
6107 + int ret;
6108 +
6109 ++ BUG_ON(num_data_pages == 0);
6110 ++
6111 + ret = ttm_bo_reserve(bo, false, true, NULL);
6112 + BUG_ON(ret != 0);
6113 +
6114 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
6115 +index 3d08f5700bdb4..7e3f99722d026 100644
6116 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
6117 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
6118 +@@ -155,6 +155,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
6119 + /* HB port can't access encrypted memory. */
6120 + if (hb && !mem_encrypt_active()) {
6121 + unsigned long bp = channel->cookie_high;
6122 ++ u32 channel_id = (channel->channel_id << 16);
6123 +
6124 + si = (uintptr_t) msg;
6125 + di = channel->cookie_low;
6126 +@@ -162,7 +163,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
6127 + VMW_PORT_HB_OUT(
6128 + (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
6129 + msg_len, si, di,
6130 +- VMWARE_HYPERVISOR_HB | (channel->channel_id << 16) |
6131 ++ VMWARE_HYPERVISOR_HB | channel_id |
6132 + VMWARE_HYPERVISOR_OUT,
6133 + VMW_HYPERVISOR_MAGIC, bp,
6134 + eax, ebx, ecx, edx, si, di);
6135 +@@ -210,6 +211,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
6136 + /* HB port can't access encrypted memory */
6137 + if (hb && !mem_encrypt_active()) {
6138 + unsigned long bp = channel->cookie_low;
6139 ++ u32 channel_id = (channel->channel_id << 16);
6140 +
6141 + si = channel->cookie_high;
6142 + di = (uintptr_t) reply;
6143 +@@ -217,7 +219,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
6144 + VMW_PORT_HB_IN(
6145 + (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
6146 + reply_len, si, di,
6147 +- VMWARE_HYPERVISOR_HB | (channel->channel_id << 16),
6148 ++ VMWARE_HYPERVISOR_HB | channel_id,
6149 + VMW_HYPERVISOR_MAGIC, bp,
6150 + eax, ebx, ecx, edx, si, di);
6151 +
6152 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
6153 +index 7b45393ad98e9..3b6f6044c3259 100644
6154 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
6155 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
6156 +@@ -114,6 +114,7 @@ static void vmw_resource_release(struct kref *kref)
6157 + container_of(kref, struct vmw_resource, kref);
6158 + struct vmw_private *dev_priv = res->dev_priv;
6159 + int id;
6160 ++ int ret;
6161 + struct idr *idr = &dev_priv->res_idr[res->func->res_type];
6162 +
6163 + spin_lock(&dev_priv->resource_lock);
6164 +@@ -122,7 +123,8 @@ static void vmw_resource_release(struct kref *kref)
6165 + if (res->backup) {
6166 + struct ttm_buffer_object *bo = &res->backup->base;
6167 +
6168 +- ttm_bo_reserve(bo, false, false, NULL);
6169 ++ ret = ttm_bo_reserve(bo, false, false, NULL);
6170 ++ BUG_ON(ret);
6171 + if (vmw_resource_mob_attached(res) &&
6172 + res->func->unbind != NULL) {
6173 + struct ttm_validate_buffer val_buf;
6174 +@@ -1001,7 +1003,9 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
6175 + if (res->backup) {
6176 + vbo = res->backup;
6177 +
6178 +- ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
6179 ++ ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
6180 ++ if (ret)
6181 ++ goto out_no_validate;
6182 + if (!vbo->base.pin_count) {
6183 + ret = ttm_bo_validate
6184 + (&vbo->base,
6185 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
6186 +index c3a8d6e8380e4..9efb4463ce997 100644
6187 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
6188 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
6189 +@@ -539,7 +539,8 @@ const SVGACOTableType vmw_so_cotables[] = {
6190 + [vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
6191 + [vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
6192 + [vmw_so_ss] = SVGA_COTABLE_SAMPLER,
6193 +- [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
6194 ++ [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT,
6195 ++ [vmw_so_max] = SVGA_COTABLE_MAX
6196 + };
6197 +
6198 +
6199 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
6200 +index 0835468bb2eed..a04ad7812960c 100644
6201 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
6202 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
6203 +@@ -865,7 +865,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
6204 + user_srf->prime.base.shareable = false;
6205 + user_srf->prime.base.tfile = NULL;
6206 + if (drm_is_primary_client(file_priv))
6207 +- user_srf->master = drm_master_get(file_priv->master);
6208 ++ user_srf->master = drm_file_get_master(file_priv);
6209 +
6210 + /**
6211 + * From this point, the generic resource management functions
6212 +@@ -1534,7 +1534,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
6213 +
6214 + user_srf = container_of(srf, struct vmw_user_surface, srf);
6215 + if (drm_is_primary_client(file_priv))
6216 +- user_srf->master = drm_master_get(file_priv->master);
6217 ++ user_srf->master = drm_file_get_master(file_priv);
6218 +
6219 + res = &user_srf->srf.res;
6220 +
6221 +@@ -1872,7 +1872,6 @@ static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
6222 + static int vmw_surface_dirty_sync(struct vmw_resource *res)
6223 + {
6224 + struct vmw_private *dev_priv = res->dev_priv;
6225 +- bool has_dx = 0;
6226 + u32 i, num_dirty;
6227 + struct vmw_surface_dirty *dirty =
6228 + (struct vmw_surface_dirty *) res->dirty;
6229 +@@ -1899,7 +1898,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
6230 + if (!num_dirty)
6231 + goto out;
6232 +
6233 +- alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2));
6234 ++ alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2));
6235 + cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
6236 + if (!cmd)
6237 + return -ENOMEM;
6238 +@@ -1917,7 +1916,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
6239 + * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
6240 + * UPDATE_GB_IMAGE is not.
6241 + */
6242 +- if (has_dx) {
6243 ++ if (has_sm4_context(dev_priv)) {
6244 + cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
6245 + cmd1->header.size = sizeof(cmd1->body);
6246 + cmd1->body.sid = res->id;
6247 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
6248 +index 8338b1d20f2a3..b09094b50c5d0 100644
6249 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
6250 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
6251 +@@ -586,13 +586,13 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
6252 + container_of(entry->base.bo, typeof(*vbo), base);
6253 +
6254 + if (entry->cpu_blit) {
6255 +- struct ttm_operation_ctx ctx = {
6256 ++ struct ttm_operation_ctx ttm_ctx = {
6257 + .interruptible = intr,
6258 + .no_wait_gpu = false
6259 + };
6260 +
6261 + ret = ttm_bo_validate(entry->base.bo,
6262 +- &vmw_nonfixed_placement, &ctx);
6263 ++ &vmw_nonfixed_placement, &ttm_ctx);
6264 + } else {
6265 + ret = vmw_validation_bo_validate_single
6266 + (entry->base.bo, intr, entry->as_mob);
6267 +diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
6268 +index 109d627968ac0..01c6ce7784ddb 100644
6269 +--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
6270 ++++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
6271 +@@ -1452,9 +1452,10 @@ zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
6272 + struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
6273 + int ret, vrefresh;
6274 +
6275 ++ pm_runtime_get_sync(disp->dev);
6276 ++
6277 + zynqmp_disp_crtc_setup_clock(crtc, adjusted_mode);
6278 +
6279 +- pm_runtime_get_sync(disp->dev);
6280 + ret = clk_prepare_enable(disp->pclk);
6281 + if (ret) {
6282 + dev_err(disp->dev, "failed to enable a pixel clock\n");
6283 +diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
6284 +index 82430ca9b9133..6f588dc09ba63 100644
6285 +--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
6286 ++++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
6287 +@@ -402,10 +402,6 @@ static int zynqmp_dp_phy_init(struct zynqmp_dp *dp)
6288 + }
6289 + }
6290 +
6291 +- ret = zynqmp_dp_reset(dp, false);
6292 +- if (ret < 0)
6293 +- return ret;
6294 +-
6295 + zynqmp_dp_clr(dp, ZYNQMP_DP_PHY_RESET, ZYNQMP_DP_PHY_RESET_ALL_RESET);
6296 +
6297 + /*
6298 +@@ -441,8 +437,6 @@ static void zynqmp_dp_phy_exit(struct zynqmp_dp *dp)
6299 + ret);
6300 + }
6301 +
6302 +- zynqmp_dp_reset(dp, true);
6303 +-
6304 + for (i = 0; i < dp->num_lanes; i++) {
6305 + ret = phy_exit(dp->phy[i]);
6306 + if (ret)
6307 +@@ -1683,9 +1677,13 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
6308 + return PTR_ERR(dp->reset);
6309 + }
6310 +
6311 ++ ret = zynqmp_dp_reset(dp, false);
6312 ++ if (ret < 0)
6313 ++ return ret;
6314 ++
6315 + ret = zynqmp_dp_phy_probe(dp);
6316 + if (ret)
6317 +- return ret;
6318 ++ goto err_reset;
6319 +
6320 + /* Initialize the hardware. */
6321 + zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN,
6322 +@@ -1697,7 +1695,7 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
6323 +
6324 + ret = zynqmp_dp_phy_init(dp);
6325 + if (ret)
6326 +- return ret;
6327 ++ goto err_reset;
6328 +
6329 + zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 1);
6330 +
6331 +@@ -1709,15 +1707,18 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
6332 + zynqmp_dp_irq_handler, IRQF_ONESHOT,
6333 + dev_name(dp->dev), dp);
6334 + if (ret < 0)
6335 +- goto error;
6336 ++ goto err_phy_exit;
6337 +
6338 + dev_dbg(dp->dev, "ZynqMP DisplayPort Tx probed with %u lanes\n",
6339 + dp->num_lanes);
6340 +
6341 + return 0;
6342 +
6343 +-error:
6344 ++err_phy_exit:
6345 + zynqmp_dp_phy_exit(dp);
6346 ++err_reset:
6347 ++ zynqmp_dp_reset(dp, true);
6348 ++
6349 + return ret;
6350 + }
6351 +
6352 +@@ -1735,4 +1736,5 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub)
6353 + zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, 0xffffffff);
6354 +
6355 + zynqmp_dp_phy_exit(dp);
6356 ++ zynqmp_dp_reset(dp, true);
6357 + }
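
The zynqmp_dp changes hoist the one-shot controller reset out of phy_init()/phy_exit() into probe/remove and rename the error labels so each failure path undoes exactly the steps that already succeeded, in reverse order. The shape of that goto unwind, with placeholder step_*() functions standing in for the driver calls:

    extern int step_reset_deassert(void);
    extern void step_reset_assert(void);
    extern int step_phy_init(void);
    extern void step_phy_exit(void);
    extern int step_request_irq(void);

    static int demo_probe(void)
    {
            int ret;

            ret = step_reset_deassert();    /* first thing done ... */
            if (ret)
                    return ret;

            ret = step_phy_init();
            if (ret)
                    goto err_reset;         /* ... is the last undone */

            ret = step_request_irq();
            if (ret)
                    goto err_phy_exit;

            return 0;

    err_phy_exit:
            step_phy_exit();
    err_reset:
            step_reset_assert();
            return ret;
    }
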
6358 +diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
6359 +index 1ea1a7c0b20fe..e29efcb1c0402 100644
6360 +--- a/drivers/hid/Makefile
6361 ++++ b/drivers/hid/Makefile
6362 +@@ -115,7 +115,6 @@ obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
6363 + obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
6364 + obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
6365 + obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o hid-thrustmaster.o
6366 +-obj-$(CONFIG_HID_TMINIT) += hid-tminit.o
6367 + obj-$(CONFIG_HID_TIVO) += hid-tivo.o
6368 + obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
6369 + obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
6370 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
6371 +index efb849411d254..4710b9aa24a57 100644
6372 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
6373 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
6374 +@@ -184,7 +184,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
6375 + rc = -ENOMEM;
6376 + goto cleanup;
6377 + }
6378 +- info.period = msecs_to_jiffies(AMD_SFH_IDLE_LOOP);
6379 ++ info.period = AMD_SFH_IDLE_LOOP;
6380 + info.sensor_idx = cl_idx;
6381 + info.dma_address = cl_data->sensor_dma_addr[i];
6382 +
6383 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
6384 +index 4286a51f7f169..4b5ebeacd2836 100644
6385 +--- a/drivers/hid/hid-input.c
6386 ++++ b/drivers/hid/hid-input.c
6387 +@@ -419,8 +419,6 @@ static int hidinput_get_battery_property(struct power_supply *psy,
6388 +
6389 + if (dev->battery_status == HID_BATTERY_UNKNOWN)
6390 + val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
6391 +- else if (dev->battery_capacity == 100)
6392 +- val->intval = POWER_SUPPLY_STATUS_FULL;
6393 + else
6394 + val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
6395 + break;
6396 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
6397 +index 51b39bda9a9d2..2e104682c22b9 100644
6398 +--- a/drivers/hid/hid-quirks.c
6399 ++++ b/drivers/hid/hid-quirks.c
6400 +@@ -662,8 +662,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
6401 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
6402 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
6403 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
6404 +-#endif
6405 +-#if IS_ENABLED(CONFIG_HID_TMINIT)
6406 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65d) },
6407 + #endif
6408 + #if IS_ENABLED(CONFIG_HID_TIVO)
6409 +diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
6410 +index 46474612e73c6..517141138b007 100644
6411 +--- a/drivers/hid/i2c-hid/i2c-hid-core.c
6412 ++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
6413 +@@ -171,8 +171,6 @@ static const struct i2c_hid_quirks {
6414 + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
6415 + { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
6416 + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
6417 +- { USB_VENDOR_ID_ELAN, HID_ANY_ID,
6418 +- I2C_HID_QUIRK_BOGUS_IRQ },
6419 + { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
6420 + I2C_HID_QUIRK_RESET_ON_RESUME },
6421 + { I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393,
6422 +@@ -183,7 +181,8 @@ static const struct i2c_hid_quirks {
6423 + * Sending the wakeup after reset actually break ELAN touchscreen controller
6424 + */
6425 + { USB_VENDOR_ID_ELAN, HID_ANY_ID,
6426 +- I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
6427 ++ I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET |
6428 ++ I2C_HID_QUIRK_BOGUS_IRQ },
6429 + { 0, 0 }
6430 + };
6431 +
6432 +diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
6433 +index 5668d8305b78e..df712ce4b164d 100644
6434 +--- a/drivers/hwmon/pmbus/ibm-cffps.c
6435 ++++ b/drivers/hwmon/pmbus/ibm-cffps.c
6436 +@@ -50,9 +50,9 @@
6437 + #define CFFPS_MFR_VAUX_FAULT BIT(6)
6438 + #define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
6439 +
6440 +-#define CFFPS_LED_BLINK BIT(0)
6441 +-#define CFFPS_LED_ON BIT(1)
6442 +-#define CFFPS_LED_OFF BIT(2)
6443 ++#define CFFPS_LED_BLINK (BIT(0) | BIT(6))
6444 ++#define CFFPS_LED_ON (BIT(1) | BIT(6))
6445 ++#define CFFPS_LED_OFF (BIT(2) | BIT(6))
6446 + #define CFFPS_BLINK_RATE_MS 250
6447 +
6448 + enum {
6449 +diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
6450 +index 9bde869829121..530529feebb51 100644
6451 +--- a/drivers/iio/dac/ad5624r_spi.c
6452 ++++ b/drivers/iio/dac/ad5624r_spi.c
6453 +@@ -229,7 +229,7 @@ static int ad5624r_probe(struct spi_device *spi)
6454 + if (!indio_dev)
6455 + return -ENOMEM;
6456 + st = iio_priv(indio_dev);
6457 +- st->reg = devm_regulator_get(&spi->dev, "vcc");
6458 ++ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
6459 + if (!IS_ERR(st->reg)) {
6460 + ret = regulator_enable(st->reg);
6461 + if (ret)
6462 +@@ -240,6 +240,22 @@ static int ad5624r_probe(struct spi_device *spi)
6463 + goto error_disable_reg;
6464 +
6465 + voltage_uv = ret;
6466 ++ } else {
6467 ++ if (PTR_ERR(st->reg) != -ENODEV)
6468 ++ return PTR_ERR(st->reg);
6469 ++ /* Backwards compatibility. This naming is not correct */
6470 ++ st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
6471 ++ if (!IS_ERR(st->reg)) {
6472 ++ ret = regulator_enable(st->reg);
6473 ++ if (ret)
6474 ++ return ret;
6475 ++
6476 ++ ret = regulator_get_voltage(st->reg);
6477 ++ if (ret < 0)
6478 ++ goto error_disable_reg;
6479 ++
6480 ++ voltage_uv = ret;
6481 ++ }
6482 + }
6483 +
6484 + spi_set_drvdata(spi, indio_dev);
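
The ad5624r hunk asks for the supply under its datasheet name "vref" first and only retries the historical, incorrect "vcc" name when the lookup reports -ENODEV, i.e. the supply is genuinely unwired rather than failing for a real reason such as -EPROBE_DEFER. Condensed into one helper (demo_get_vref() is illustrative, not driver API):

    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    static int demo_get_vref(struct device *dev, struct regulator **reg)
    {
            *reg = devm_regulator_get_optional(dev, "vref");
            if (!IS_ERR(*reg))
                    return 0;
            if (PTR_ERR(*reg) != -ENODEV)
                    return PTR_ERR(*reg);   /* real failure, propagate */

            /* Backwards compatibility: older device trees used "vcc". */
            *reg = devm_regulator_get_optional(dev, "vcc");
            return 0;   /* a remaining ERR_PTR means "no external reference" */
    }
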
6485 +diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
6486 +index 3b5ba26d7d867..3b4a0e60e6059 100644
6487 +--- a/drivers/iio/temperature/ltc2983.c
6488 ++++ b/drivers/iio/temperature/ltc2983.c
6489 +@@ -89,6 +89,8 @@
6490 +
6491 + #define LTC2983_STATUS_START_MASK BIT(7)
6492 + #define LTC2983_STATUS_START(x) FIELD_PREP(LTC2983_STATUS_START_MASK, x)
6493 ++#define LTC2983_STATUS_UP_MASK GENMASK(7, 6)
6494 ++#define LTC2983_STATUS_UP(reg) FIELD_GET(LTC2983_STATUS_UP_MASK, reg)
6495 +
6496 + #define LTC2983_STATUS_CHAN_SEL_MASK GENMASK(4, 0)
6497 + #define LTC2983_STATUS_CHAN_SEL(x) \
6498 +@@ -1362,17 +1364,16 @@ put_child:
6499 +
6500 + static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio)
6501 + {
6502 +- u32 iio_chan_t = 0, iio_chan_v = 0, chan, iio_idx = 0;
6503 ++ u32 iio_chan_t = 0, iio_chan_v = 0, chan, iio_idx = 0, status;
6504 + int ret;
6505 +- unsigned long time;
6506 +-
6507 +- /* make sure the device is up */
6508 +- time = wait_for_completion_timeout(&st->completion,
6509 +- msecs_to_jiffies(250));
6510 +
6511 +- if (!time) {
6512 ++ /* make sure the device is up: start bit (7) is 0 and done bit (6) is 1 */
6513 ++ ret = regmap_read_poll_timeout(st->regmap, LTC2983_STATUS_REG, status,
6514 ++ LTC2983_STATUS_UP(status) == 1, 25000,
6515 ++ 25000 * 10);
6516 ++ if (ret) {
6517 + dev_err(&st->spi->dev, "Device startup timed out\n");
6518 +- return -ETIMEDOUT;
6519 ++ return ret;
6520 + }
6521 +
6522 + st->iio_chan = devm_kzalloc(&st->spi->dev,
6523 +@@ -1492,10 +1493,11 @@ static int ltc2983_probe(struct spi_device *spi)
6524 + ret = ltc2983_parse_dt(st);
6525 + if (ret)
6526 + return ret;
6527 +- /*
6528 +- * let's request the irq now so it is used to sync the device
6529 +- * startup in ltc2983_setup()
6530 +- */
6531 ++
6532 ++ ret = ltc2983_setup(st, true);
6533 ++ if (ret)
6534 ++ return ret;
6535 ++
6536 + ret = devm_request_irq(&spi->dev, spi->irq, ltc2983_irq_handler,
6537 + IRQF_TRIGGER_RISING, name, st);
6538 + if (ret) {
6539 +@@ -1503,10 +1505,6 @@ static int ltc2983_probe(struct spi_device *spi)
6540 + return ret;
6541 + }
6542 +
6543 +- ret = ltc2983_setup(st, true);
6544 +- if (ret)
6545 +- return ret;
6546 +-
6547 + indio_dev->name = name;
6548 + indio_dev->num_channels = st->iio_channels;
6549 + indio_dev->channels = st->iio_chan;
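
The ltc2983 rework stops relying on the interrupt to signal startup and instead polls the status register until the start bit (7) reads 0 and the done bit (6) reads 1, which is why probe can now run ltc2983_setup() before requesting the IRQ. A sketch of that poll with illustrative register names; regmap_read_poll_timeout() takes the condition, a sleep interval and a total timeout in microseconds:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/regmap.h>

    #define DEMO_STATUS_REG     0x00
    #define DEMO_STATUS_UP_MASK GENMASK(7, 6)  /* start bit 7, done bit 6 */

    static int demo_wait_ready(struct regmap *map)
    {
            u32 status;

            /* poll every 25 ms, time out after 10 tries (250 ms total) */
            return regmap_read_poll_timeout(map, DEMO_STATUS_REG, status,
                            FIELD_GET(DEMO_STATUS_UP_MASK, status) == 1,
                            25000, 25000 * 10);
    }
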
6550 +diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
6551 +index 42261152b4892..2b47073c61a65 100644
6552 +--- a/drivers/infiniband/core/iwcm.c
6553 ++++ b/drivers/infiniband/core/iwcm.c
6554 +@@ -1186,29 +1186,34 @@ static int __init iw_cm_init(void)
6555 +
6556 + ret = iwpm_init(RDMA_NL_IWCM);
6557 + if (ret)
6558 +- pr_err("iw_cm: couldn't init iwpm\n");
6559 +- else
6560 +- rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
6561 ++ return ret;
6562 ++
6563 + iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
6564 + if (!iwcm_wq)
6565 +- return -ENOMEM;
6566 ++ goto err_alloc;
6567 +
6568 + iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
6569 + iwcm_ctl_table);
6570 + if (!iwcm_ctl_table_hdr) {
6571 + pr_err("iw_cm: couldn't register sysctl paths\n");
6572 +- destroy_workqueue(iwcm_wq);
6573 +- return -ENOMEM;
6574 ++ goto err_sysctl;
6575 + }
6576 +
6577 ++ rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
6578 + return 0;
6579 ++
6580 ++err_sysctl:
6581 ++ destroy_workqueue(iwcm_wq);
6582 ++err_alloc:
6583 ++ iwpm_exit(RDMA_NL_IWCM);
6584 ++ return -ENOMEM;
6585 + }
6586 +
6587 + static void __exit iw_cm_cleanup(void)
6588 + {
6589 ++ rdma_nl_unregister(RDMA_NL_IWCM);
6590 + unregister_net_sysctl_table(iwcm_ctl_table_hdr);
6591 + destroy_workqueue(iwcm_wq);
6592 +- rdma_nl_unregister(RDMA_NL_IWCM);
6593 + iwpm_exit(RDMA_NL_IWCM);
6594 + }
6595 +
6596 +diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
6597 +index be6d3ff0f1be2..29c9df9f25aa3 100644
6598 +--- a/drivers/infiniband/hw/efa/efa_verbs.c
6599 ++++ b/drivers/infiniband/hw/efa/efa_verbs.c
6600 +@@ -717,7 +717,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
6601 +
6602 + qp->qp_handle = create_qp_resp.qp_handle;
6603 + qp->ibqp.qp_num = create_qp_resp.qp_num;
6604 +- qp->ibqp.qp_type = init_attr->qp_type;
6605 + qp->max_send_wr = init_attr->cap.max_send_wr;
6606 + qp->max_recv_wr = init_attr->cap.max_recv_wr;
6607 + qp->max_send_sge = init_attr->cap.max_send_sge;
6608 +diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
6609 +index 0986aa065418a..34106e5be6794 100644
6610 +--- a/drivers/infiniband/hw/hfi1/init.c
6611 ++++ b/drivers/infiniband/hw/hfi1/init.c
6612 +@@ -650,12 +650,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
6613 +
6614 + ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
6615 + ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
6616 +-
6617 +- if (loopback) {
6618 +- dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
6619 +- !default_pkey_idx);
6620 +- ppd->pkeys[!default_pkey_idx] = 0x8001;
6621 +- }
6622 ++ ppd->pkeys[0] = 0x8001;
6623 +
6624 + INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
6625 + INIT_WORK(&ppd->link_up_work, handle_link_up);
6626 +diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
6627 +index 991f65269fa61..8518b1571f2c6 100644
6628 +--- a/drivers/infiniband/hw/hns/hns_roce_device.h
6629 ++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
6630 +@@ -496,6 +496,12 @@ struct hns_roce_bank {
6631 + u32 next; /* Next ID to allocate. */
6632 + };
6633 +
6634 ++struct hns_roce_idx_table {
6635 ++ u32 *spare_idx;
6636 ++ u32 head;
6637 ++ u32 tail;
6638 ++};
6639 ++
6640 + struct hns_roce_qp_table {
6641 + struct hns_roce_hem_table qp_table;
6642 + struct hns_roce_hem_table irrl_table;
6643 +@@ -504,6 +510,7 @@ struct hns_roce_qp_table {
6644 + struct mutex scc_mutex;
6645 + struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
6646 + struct mutex bank_mutex;
6647 ++ struct hns_roce_idx_table idx_table;
6648 + };
6649 +
6650 + struct hns_roce_cq_table {
6651 +@@ -1146,7 +1153,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
6652 + void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
6653 + void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
6654 + void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
6655 +-void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
6656 ++int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
6657 + int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
6658 + void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
6659 +
6660 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6661 +index 594d4cef31b36..bf4d9f6658ff9 100644
6662 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6663 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6664 +@@ -4114,6 +4114,9 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
6665 + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
6666 + hr_reg_enable(context, QPC_RQ_RECORD_EN);
6667 +
6668 ++ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
6669 ++ hr_reg_enable(context, QPC_OWNER_MODE);
6670 ++
6671 + hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_L,
6672 + lower_32_bits(hr_qp->rdb.dma) >> 1);
6673 + hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
6674 +@@ -4486,9 +4489,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
6675 +
6676 + hr_reg_clear(qpc_mask, QPC_CHECK_FLG);
6677 +
6678 +- hr_reg_write(context, QPC_LSN, 0x100);
6679 +- hr_reg_clear(qpc_mask, QPC_LSN);
6680 +-
6681 + hr_reg_clear(qpc_mask, QPC_V2_IRRL_HEAD);
6682 +
6683 + return 0;
6684 +@@ -4507,15 +4507,23 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
6685 + {
6686 + const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
6687 + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
6688 ++ u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx;
6689 ++ u32 *head = &hr_dev->qp_table.idx_table.head;
6690 ++ u32 *tail = &hr_dev->qp_table.idx_table.tail;
6691 + struct hns_roce_dip *hr_dip;
6692 + unsigned long flags;
6693 + int ret = 0;
6694 +
6695 + spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
6696 +
6697 ++ spare_idx[*tail] = ibqp->qp_num;
6698 ++ *tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);
6699 ++
6700 + list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
6701 +- if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16))
6702 ++ if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
6703 ++ *dip_idx = hr_dip->dip_idx;
6704 + goto out;
6705 ++ }
6706 + }
6707 +
6708 + /* If no dgid is found, a new dip and a mapping between dgid and
6709 +@@ -4528,7 +4536,8 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
6710 + }
6711 +
6712 + memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
6713 +- hr_dip->dip_idx = *dip_idx = ibqp->qp_num;
6714 ++ hr_dip->dip_idx = *dip_idx = spare_idx[*head];
6715 ++ *head = (*head == hr_dev->caps.num_qps - 1) ? 0 : (*head + 1);
6716 + list_add_tail(&hr_dip->node, &hr_dev->dip_list);
6717 +
6718 + out:
6719 +@@ -5127,7 +5136,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
6720 +
6721 + qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN);
6722 + qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN);
6723 +- qp_attr->dest_qp_num = (u8)hr_reg_read(&context, QPC_DQPN);
6724 ++ qp_attr->dest_qp_num = hr_reg_read(&context, QPC_DQPN);
6725 + qp_attr->qp_access_flags =
6726 + ((hr_reg_read(&context, QPC_RRE)) << V2_QP_RRE_S) |
6727 + ((hr_reg_read(&context, QPC_RWE)) << V2_QP_RWE_S) |
6728 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
6729 +index b8a09d411e2e5..68c8c4b225ca3 100644
6730 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
6731 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
6732 +@@ -1447,7 +1447,7 @@ struct hns_roce_v2_priv {
6733 +
6734 + struct hns_roce_dip {
6735 + u8 dgid[GID_LEN_V2];
6736 +- u8 dip_idx;
6737 ++ u32 dip_idx;
6738 + struct list_head node; /* all dips are on a list */
6739 + };
6740 +
6741 +diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
6742 +index cc6eab14a2220..217aad8d9bd93 100644
6743 +--- a/drivers/infiniband/hw/hns/hns_roce_main.c
6744 ++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
6745 +@@ -748,6 +748,12 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
6746 + goto err_uar_table_free;
6747 + }
6748 +
6749 ++ ret = hns_roce_init_qp_table(hr_dev);
6750 ++ if (ret) {
6751 ++ dev_err(dev, "Failed to init qp_table.\n");
6752 ++ goto err_uar_table_free;
6753 ++ }
6754 ++
6755 + hns_roce_init_pd_table(hr_dev);
6756 +
6757 + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
6758 +@@ -757,8 +763,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
6759 +
6760 + hns_roce_init_cq_table(hr_dev);
6761 +
6762 +- hns_roce_init_qp_table(hr_dev);
6763 +-
6764 + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
6765 + ret = hns_roce_init_srq_table(hr_dev);
6766 + if (ret) {
6767 +diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
6768 +index 006c84bb3f9fd..7089ac7802913 100644
6769 +--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
6770 ++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
6771 +@@ -352,7 +352,9 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
6772 + free_cmd_mbox:
6773 + hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6774 +
6775 +- return ERR_PTR(ret);
6776 ++ if (ret)
6777 ++ return ERR_PTR(ret);
6778 ++ return NULL;
6779 + }
6780 +
6781 + int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
6782 +diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
6783 +index b101b7e578f25..a6d1e44b75cf7 100644
6784 +--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
6785 ++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
6786 +@@ -848,7 +848,6 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
6787 + goto err_out;
6788 + }
6789 + hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
6790 +- resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
6791 + }
6792 +
6793 + if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
6794 +@@ -861,7 +860,6 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
6795 + goto err_sdb;
6796 + }
6797 + hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
6798 +- resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
6799 + }
6800 + } else {
6801 + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
6802 +@@ -1073,6 +1071,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
6803 + }
6804 +
6805 + if (udata) {
6806 ++ resp.cap_flags = hr_qp->en_flags;
6807 + ret = ib_copy_to_udata(udata, &resp,
6808 + min(udata->outlen, sizeof(resp)));
6809 + if (ret) {
6810 +@@ -1171,14 +1170,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
6811 + if (!hr_qp)
6812 + return ERR_PTR(-ENOMEM);
6813 +
6814 +- if (init_attr->qp_type == IB_QPT_XRC_INI)
6815 +- init_attr->recv_cq = NULL;
6816 +-
6817 +- if (init_attr->qp_type == IB_QPT_XRC_TGT) {
6818 ++ if (init_attr->qp_type == IB_QPT_XRC_TGT)
6819 + hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
6820 +- init_attr->recv_cq = NULL;
6821 +- init_attr->send_cq = NULL;
6822 +- }
6823 +
6824 + if (init_attr->qp_type == IB_QPT_GSI) {
6825 + hr_qp->port = init_attr->port_num - 1;
6826 +@@ -1429,12 +1422,17 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
6827 + return cur + nreq >= hr_wq->wqe_cnt;
6828 + }
6829 +
6830 +-void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
6831 ++int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
6832 + {
6833 + struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
6834 + unsigned int reserved_from_bot;
6835 + unsigned int i;
6836 +
6837 ++ qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps,
6838 ++ sizeof(u32), GFP_KERNEL);
6839 ++ if (!qp_table->idx_table.spare_idx)
6840 ++ return -ENOMEM;
6841 ++
6842 + mutex_init(&qp_table->scc_mutex);
6843 + mutex_init(&qp_table->bank_mutex);
6844 + xa_init(&hr_dev->qp_table_xa);
6845 +@@ -1452,6 +1450,8 @@ void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
6846 + HNS_ROCE_QP_BANK_NUM - 1;
6847 + hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
6848 + }
6849 ++
6850 ++ return 0;
6851 + }
6852 +
6853 + void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
6854 +@@ -1460,4 +1460,5 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
6855 +
6856 + for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
6857 + ida_destroy(&hr_dev->qp_table.bank[i].ida);
6858 ++ kfree(hr_dev->qp_table.idx_table.spare_idx);
6859 + }
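
The hns_roce spare-index table above is a ring: every DIP lookup records the caller's QP number at the tail, but a number is only consumed from the head when a brand-new dgid mapping is allocated, so a freshly destroyed QP number is not immediately handed back out as a DIP index. A userspace model of the head/tail arithmetic (names and types are illustrative):

    struct idx_table {
            unsigned int *spare_idx;
            unsigned int head, tail, size;  /* size == num_qps in the driver */
    };

    /* every lookup pushes the requesting qpn at the tail ... */
    static void idx_push(struct idx_table *t, unsigned int qpn)
    {
            t->spare_idx[t->tail] = qpn;
            t->tail = (t->tail == t->size - 1) ? 0 : t->tail + 1;
    }

    /* ... but an index is popped from the head only for a new mapping,
     * which delays reuse of recently freed QP numbers */
    static unsigned int idx_pop(struct idx_table *t)
    {
            unsigned int idx = t->spare_idx[t->head];

            t->head = (t->head == t->size - 1) ? 0 : t->head + 1;
            return idx;
    }
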
6860 +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
6861 +index a77db29f83914..fd88b9ae96fe8 100644
6862 +--- a/drivers/infiniband/hw/mlx5/qp.c
6863 ++++ b/drivers/infiniband/hw/mlx5/qp.c
6864 +@@ -1906,7 +1906,6 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
6865 + static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
6866 + struct mlx5_create_qp_params *params)
6867 + {
6868 +- struct mlx5_ib_create_qp *ucmd = params->ucmd;
6869 + struct ib_qp_init_attr *attr = params->attr;
6870 + u32 uidx = params->uidx;
6871 + struct mlx5_ib_resources *devr = &dev->devr;
6872 +@@ -1926,8 +1925,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
6873 + if (!in)
6874 + return -ENOMEM;
6875 +
6876 +- if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
6877 +- MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
6878 + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
6879 +
6880 + MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
6881 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
6882 +index f2c40e50f25ea..ece3205531b8e 100644
6883 +--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
6884 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
6885 +@@ -478,7 +478,7 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
6886 + * From time to time we have to post signalled sends,
6887 + * or send queue will fill up and only QP reset can help.
6888 + */
6889 +- flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
6890 ++ flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
6891 + 0 : IB_SEND_SIGNALED;
6892 +
6893 + ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
6894 +@@ -680,6 +680,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
6895 + case IB_WC_RDMA_WRITE:
6896 + /*
6897 + * post_send() RDMA write completions of IO reqs (read/write)
6898 ++ * and hb.
6899 + */
6900 + break;
6901 +
6902 +@@ -1043,7 +1044,7 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
6903 + * From time to time we have to post signalled sends,
6904 + * or send queue will fill up and only QP reset can help.
6905 + */
6906 +- flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
6907 ++ flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
6908 + 0 : IB_SEND_SIGNALED;
6909 +
6910 + ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
6911 +@@ -1601,7 +1602,8 @@ static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
6912 + con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
6913 + con->c.cid = cid;
6914 + con->c.sess = &sess->s;
6915 +- atomic_set(&con->io_cnt, 0);
6916 ++ /* Align with srv, init as 1 */
6917 ++ atomic_set(&con->c.wr_cnt, 1);
6918 + mutex_init(&con->con_mutex);
6919 +
6920 + sess->s.con[cid] = &con->c;
6921 +@@ -1678,6 +1680,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
6922 + sess->queue_depth * 3 + 1);
6923 + max_send_sge = 2;
6924 + }
6925 ++ atomic_set(&con->c.sq_wr_avail, max_send_wr);
6926 + cq_num = max_send_wr + max_recv_wr;
6927 + /* alloc iu to recv new rkey reply when server reports flags set */
6928 + if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
6929 +@@ -1848,6 +1851,8 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
6930 + return -ENOMEM;
6931 + }
6932 + sess->queue_depth = queue_depth;
6933 ++ sess->s.signal_interval = min_not_zero(queue_depth,
6934 ++ (unsigned short) SERVICE_CON_QUEUE_DEPTH);
6935 + sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
6936 + sess->max_io_size = le32_to_cpu(msg->max_io_size);
6937 + sess->flags = le32_to_cpu(msg->flags);
6938 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
6939 +index e276a2dfcf7c7..3c3ff094588cb 100644
6940 +--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
6941 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
6942 +@@ -74,7 +74,6 @@ struct rtrs_clt_con {
6943 + u32 queue_num;
6944 + unsigned int cpu;
6945 + struct mutex con_mutex;
6946 +- atomic_t io_cnt;
6947 + int cm_err;
6948 + };
6949 +
6950 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
6951 +index 36f184a3b6761..119aa3f7eafe2 100644
6952 +--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
6953 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
6954 +@@ -96,6 +96,8 @@ struct rtrs_con {
6955 + struct rdma_cm_id *cm_id;
6956 + unsigned int cid;
6957 + int nr_cqe;
6958 ++ atomic_t wr_cnt;
6959 ++ atomic_t sq_wr_avail;
6960 + };
6961 +
6962 + struct rtrs_sess {
6963 +@@ -108,6 +110,7 @@ struct rtrs_sess {
6964 + unsigned int con_num;
6965 + unsigned int irq_con_num;
6966 + unsigned int recon_cnt;
6967 ++ unsigned int signal_interval;
6968 + struct rtrs_ib_dev *dev;
6969 + int dev_ref;
6970 + struct ib_cqe *hb_cqe;
6971 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
6972 +index 3df2900861697..cd9a4ccf4c289 100644
6973 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
6974 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
6975 +@@ -201,7 +201,6 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
6976 + struct rtrs_srv_sess *sess = to_srv_sess(s);
6977 + dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
6978 + struct rtrs_srv_mr *srv_mr;
6979 +- struct rtrs_srv *srv = sess->srv;
6980 + struct ib_send_wr inv_wr;
6981 + struct ib_rdma_wr imm_wr;
6982 + struct ib_rdma_wr *wr = NULL;
6983 +@@ -269,7 +268,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
6984 + * From time to time we have to post signaled sends,
6985 + * or send queue will fill up and only QP reset can help.
6986 + */
6987 +- flags = (atomic_inc_return(&id->con->wr_cnt) % srv->queue_depth) ?
6988 ++ flags = (atomic_inc_return(&id->con->c.wr_cnt) % s->signal_interval) ?
6989 + 0 : IB_SEND_SIGNALED;
6990 +
6991 + if (need_inval) {
6992 +@@ -347,7 +346,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
6993 + struct ib_send_wr inv_wr, *wr = NULL;
6994 + struct ib_rdma_wr imm_wr;
6995 + struct ib_reg_wr rwr;
6996 +- struct rtrs_srv *srv = sess->srv;
6997 + struct rtrs_srv_mr *srv_mr;
6998 + bool need_inval = false;
6999 + enum ib_send_flags flags;
7000 +@@ -396,7 +394,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
7001 + * From time to time we have to post signalled sends,
7002 + * or send queue will fill up and only QP reset can help.
7003 + */
7004 +- flags = (atomic_inc_return(&con->wr_cnt) % srv->queue_depth) ?
7005 ++ flags = (atomic_inc_return(&con->c.wr_cnt) % s->signal_interval) ?
7006 + 0 : IB_SEND_SIGNALED;
7007 + imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
7008 + imm_wr.wr.next = NULL;
7009 +@@ -509,11 +507,11 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
7010 + ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
7011 + }
7012 + if (unlikely(atomic_sub_return(1,
7013 +- &con->sq_wr_avail) < 0)) {
7014 ++ &con->c.sq_wr_avail) < 0)) {
7015 + rtrs_err(s, "IB send queue full: sess=%s cid=%d\n",
7016 + kobject_name(&sess->kobj),
7017 + con->c.cid);
7018 +- atomic_add(1, &con->sq_wr_avail);
7019 ++ atomic_add(1, &con->c.sq_wr_avail);
7020 + spin_lock(&con->rsp_wr_wait_lock);
7021 + list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
7022 + spin_unlock(&con->rsp_wr_wait_lock);
7023 +@@ -1268,8 +1266,9 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
7024 + case IB_WC_SEND:
7025 + /*
7026 + * post_send() RDMA write completions of IO reqs (read/write)
7027 ++ * and hb.
7028 + */
7029 +- atomic_add(srv->queue_depth, &con->sq_wr_avail);
7030 ++ atomic_add(s->signal_interval, &con->c.sq_wr_avail);
7031 +
7032 + if (unlikely(!list_empty_careful(&con->rsp_wr_wait_list)))
7033 + rtrs_rdma_process_wr_wait_list(con);
7034 +@@ -1648,7 +1647,7 @@ static int create_con(struct rtrs_srv_sess *sess,
7035 + con->c.cm_id = cm_id;
7036 + con->c.sess = &sess->s;
7037 + con->c.cid = cid;
7038 +- atomic_set(&con->wr_cnt, 1);
7039 ++ atomic_set(&con->c.wr_cnt, 1);
7040 + wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
7041 +
7042 + if (con->c.cid == 0) {
7043 +@@ -1659,6 +1658,8 @@ static int create_con(struct rtrs_srv_sess *sess,
7044 + max_send_wr = min_t(int, wr_limit,
7045 + SERVICE_CON_QUEUE_DEPTH * 2 + 2);
7046 + max_recv_wr = max_send_wr;
7047 ++ s->signal_interval = min_not_zero(srv->queue_depth,
7048 ++ (size_t)SERVICE_CON_QUEUE_DEPTH);
7049 + } else {
7050 + /* when always_invalidate enabled, we need linv+rinv+mr+imm */
7051 + if (always_invalidate)
7052 +@@ -1679,7 +1680,7 @@ static int create_con(struct rtrs_srv_sess *sess,
7053 + */
7054 + }
7055 + cq_num = max_send_wr + max_recv_wr;
7056 +- atomic_set(&con->sq_wr_avail, max_send_wr);
7057 ++ atomic_set(&con->c.sq_wr_avail, max_send_wr);
7058 + cq_vector = rtrs_srv_get_next_cq_vector(sess);
7059 +
7060 + /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
7061 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
7062 +index f8da2e3f0bdac..e81774f5acd33 100644
7063 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
7064 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
7065 +@@ -42,8 +42,6 @@ struct rtrs_srv_stats {
7066 +
7067 + struct rtrs_srv_con {
7068 + struct rtrs_con c;
7069 +- atomic_t wr_cnt;
7070 +- atomic_t sq_wr_avail;
7071 + struct list_head rsp_wr_wait_list;
7072 + spinlock_t rsp_wr_wait_lock;
7073 + };
7074 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
7075 +index 61919ebd92b2d..0a4b4e1b5e5ff 100644
7076 +--- a/drivers/infiniband/ulp/rtrs/rtrs.c
7077 ++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
7078 +@@ -187,10 +187,16 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
7079 + struct ib_send_wr *head)
7080 + {
7081 + struct ib_rdma_wr wr;
7082 ++ struct rtrs_sess *sess = con->sess;
7083 ++ enum ib_send_flags sflags;
7084 ++
7085 ++ atomic_dec_if_positive(&con->sq_wr_avail);
7086 ++ sflags = (atomic_inc_return(&con->wr_cnt) % sess->signal_interval) ?
7087 ++ 0 : IB_SEND_SIGNALED;
7088 +
7089 + wr = (struct ib_rdma_wr) {
7090 + .wr.wr_cqe = cqe,
7091 +- .wr.send_flags = flags,
7092 ++ .wr.send_flags = sflags,
7093 + .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
7094 + .wr.ex.imm_data = cpu_to_be32(imm_data),
7095 + };
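
The rtrs series moves wr_cnt and sq_wr_avail into the common rtrs_con and signals a completion every signal_interval-th work request (min_not_zero of queue_depth and SERVICE_CON_QUEUE_DEPTH) on both client and server, heartbeats included, instead of every queue_depth-th. Both sides start the counter at 1 so they stay aligned. The selection reduced to a userspace model; in the kernel the counter is an atomic_t and the flag is IB_SEND_SIGNALED:

    #include <stdatomic.h>

    #define DEMO_SEND_SIGNALED 0x1u

    static unsigned int demo_send_flags(atomic_uint *wr_cnt,
                                        unsigned int signal_interval)
    {
            /* atomic_fetch_add returns the old value; +1 mimics inc_return */
            return (atomic_fetch_add(wr_cnt, 1) + 1) % signal_interval
                    ? 0 : DEMO_SEND_SIGNALED;
    }
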
7096 +diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
7097 +index dc4a240f44895..3c84deefa327d 100644
7098 +--- a/drivers/input/mouse/elan_i2c.h
7099 ++++ b/drivers/input/mouse/elan_i2c.h
7100 +@@ -55,8 +55,9 @@
7101 + #define ETP_FW_PAGE_SIZE_512 512
7102 + #define ETP_FW_SIGNATURE_SIZE 6
7103 +
7104 +-#define ETP_PRODUCT_ID_DELBIN 0x00C2
7105 ++#define ETP_PRODUCT_ID_WHITEBOX 0x00B8
7106 + #define ETP_PRODUCT_ID_VOXEL 0x00BF
7107 ++#define ETP_PRODUCT_ID_DELBIN 0x00C2
7108 + #define ETP_PRODUCT_ID_MAGPIE 0x0120
7109 + #define ETP_PRODUCT_ID_BOBBA 0x0121
7110 +
7111 +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
7112 +index dad22c1ea6a0f..47af62c122672 100644
7113 +--- a/drivers/input/mouse/elan_i2c_core.c
7114 ++++ b/drivers/input/mouse/elan_i2c_core.c
7115 +@@ -105,6 +105,7 @@ static u32 elan_i2c_lookup_quirks(u16 ic_type, u16 product_id)
7116 + u32 quirks;
7117 + } elan_i2c_quirks[] = {
7118 + { 0x0D, ETP_PRODUCT_ID_DELBIN, ETP_QUIRK_QUICK_WAKEUP },
7119 ++ { 0x0D, ETP_PRODUCT_ID_WHITEBOX, ETP_QUIRK_QUICK_WAKEUP },
7120 + { 0x10, ETP_PRODUCT_ID_VOXEL, ETP_QUIRK_QUICK_WAKEUP },
7121 + { 0x14, ETP_PRODUCT_ID_MAGPIE, ETP_QUIRK_QUICK_WAKEUP },
7122 + { 0x14, ETP_PRODUCT_ID_BOBBA, ETP_QUIRK_QUICK_WAKEUP },
7123 +diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
7124 +index c11bc8b833b8e..d5552e2c160d2 100644
7125 +--- a/drivers/iommu/intel/pasid.h
7126 ++++ b/drivers/iommu/intel/pasid.h
7127 +@@ -28,12 +28,12 @@
7128 + #define VCMD_CMD_ALLOC 0x1
7129 + #define VCMD_CMD_FREE 0x2
7130 + #define VCMD_VRSP_IP 0x1
7131 +-#define VCMD_VRSP_SC(e) (((e) >> 1) & 0x3)
7132 ++#define VCMD_VRSP_SC(e) (((e) & 0xff) >> 1)
7133 + #define VCMD_VRSP_SC_SUCCESS 0
7134 +-#define VCMD_VRSP_SC_NO_PASID_AVAIL 2
7135 +-#define VCMD_VRSP_SC_INVALID_PASID 2
7136 +-#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 8) & 0xfffff)
7137 +-#define VCMD_CMD_OPERAND(e) ((e) << 8)
7138 ++#define VCMD_VRSP_SC_NO_PASID_AVAIL 16
7139 ++#define VCMD_VRSP_SC_INVALID_PASID 16
7140 ++#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 16) & 0xfffff)
7141 ++#define VCMD_CMD_OPERAND(e) ((e) << 16)
7142 + /*
7143 + * Domain ID reserved for pasid entries programmed for first-level
7144 + * only and pass-through transfer modes.
7145 +diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
7146 +index 67a42b514429e..4f907e8f3894b 100644
7147 +--- a/drivers/mailbox/mtk-cmdq-mailbox.c
7148 ++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
7149 +@@ -168,7 +168,8 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
7150 + dma_sync_single_for_cpu(dev, prev_task->pa_base,
7151 + prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
7152 + prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
7153 +- (u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base;
7154 ++ (u64)CMDQ_JUMP_BY_PA << 32 |
7155 ++ (task->pa_base >> task->cmdq->shift_pa);
7156 + dma_sync_single_for_device(dev, prev_task->pa_base,
7157 + prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
7158 +
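
The cmdq fix scales the jump target by the controller's shift_pa before packing it next to the jump opcode, matching GCE variants that address command buffers in shifted physical-address units. The packing, sketched with an illustrative opcode value (the driver's real CMDQ_JUMP_BY_PA constant differs):

    #include <stdint.h>

    #define DEMO_JUMP_BY_PA 0x10u   /* illustrative, not the driver's value */

    /* 64-bit command word: opcode in the high half, shifted PA in the low */
    static uint64_t pack_jump(uint32_t pa_base, unsigned int shift_pa)
    {
            return (uint64_t)DEMO_JUMP_BY_PA << 32 | (pa_base >> shift_pa);
    }
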
7159 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
7160 +index 50f4cbd600d58..8c0c3d1f54bba 100644
7161 +--- a/drivers/md/dm-crypt.c
7162 ++++ b/drivers/md/dm-crypt.c
7163 +@@ -2661,7 +2661,12 @@ static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
7164 + struct crypt_config *cc = pool_data;
7165 + struct page *page;
7166 +
7167 +- if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
7168 ++ /*
7169 ++ * Note, percpu_counter_read_positive() may over (and under) estimate
7170 ++ * the current usage by at most (batch - 1) * num_online_cpus() pages,
7171 ++ * but avoids potential spinlock contention of an exact result.
7172 ++ */
7173 ++ if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
7174 + likely(gfp_mask & __GFP_NORETRY))
7175 + return NULL;
7176 +
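
As the new dm-crypt comment explains, percpu_counter_compare() may have to take the counter spinlock and fold in every CPU's pending delta to produce an exact answer, while percpu_counter_read_positive() is an unlocked read of the cached total that can be stale by at most (batch - 1) * num_online_cpus() pages. For a soft allocation cap that error is acceptable. The two checks side by side, as a sketch:

    #include <linux/percpu_counter.h>

    static bool over_limit_exact(struct percpu_counter *c, s64 limit)
    {
            /* may contend on c->lock to sum all per-CPU deltas */
            return percpu_counter_compare(c, limit) >= 0;
    }

    static bool over_limit_approx(struct percpu_counter *c, s64 limit)
    {
            /* lock-free; bounded error, fine for a best-effort cap */
            return percpu_counter_read_positive(c) >= limit;
    }
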
7177 +diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c
7178 +index ea4b1ebfca991..0ffd89712536b 100644
7179 +--- a/drivers/media/cec/platform/stm32/stm32-cec.c
7180 ++++ b/drivers/media/cec/platform/stm32/stm32-cec.c
7181 +@@ -305,14 +305,16 @@ static int stm32_cec_probe(struct platform_device *pdev)
7182 +
7183 + cec->clk_hdmi_cec = devm_clk_get(&pdev->dev, "hdmi-cec");
7184 + if (IS_ERR(cec->clk_hdmi_cec) &&
7185 +- PTR_ERR(cec->clk_hdmi_cec) == -EPROBE_DEFER)
7186 +- return -EPROBE_DEFER;
7187 ++ PTR_ERR(cec->clk_hdmi_cec) == -EPROBE_DEFER) {
7188 ++ ret = -EPROBE_DEFER;
7189 ++ goto err_unprepare_cec_clk;
7190 ++ }
7191 +
7192 + if (!IS_ERR(cec->clk_hdmi_cec)) {
7193 + ret = clk_prepare(cec->clk_hdmi_cec);
7194 + if (ret) {
7195 + dev_err(&pdev->dev, "Can't prepare hdmi-cec clock\n");
7196 +- return ret;
7197 ++ goto err_unprepare_cec_clk;
7198 + }
7199 + }
7200 +
7201 +@@ -324,19 +326,27 @@ static int stm32_cec_probe(struct platform_device *pdev)
7202 + CEC_NAME, caps, CEC_MAX_LOG_ADDRS);
7203 + ret = PTR_ERR_OR_ZERO(cec->adap);
7204 + if (ret)
7205 +- return ret;
7206 ++ goto err_unprepare_hdmi_cec_clk;
7207 +
7208 + ret = cec_register_adapter(cec->adap, &pdev->dev);
7209 +- if (ret) {
7210 +- cec_delete_adapter(cec->adap);
7211 +- return ret;
7212 +- }
7213 ++ if (ret)
7214 ++ goto err_delete_adapter;
7215 +
7216 + cec_hw_init(cec);
7217 +
7218 + platform_set_drvdata(pdev, cec);
7219 +
7220 + return 0;
7221 ++
7222 ++err_delete_adapter:
7223 ++ cec_delete_adapter(cec->adap);
7224 ++
7225 ++err_unprepare_hdmi_cec_clk:
7226 ++ clk_unprepare(cec->clk_hdmi_cec);
7227 ++
7228 ++err_unprepare_cec_clk:
7229 ++ clk_unprepare(cec->clk_cec);
7230 ++ return ret;
7231 + }
7232 +
7233 + static int stm32_cec_remove(struct platform_device *pdev)
7234 +diff --git a/drivers/media/cec/platform/tegra/tegra_cec.c b/drivers/media/cec/platform/tegra/tegra_cec.c
7235 +index 1ac0c70a59818..5e907395ca2e5 100644
7236 +--- a/drivers/media/cec/platform/tegra/tegra_cec.c
7237 ++++ b/drivers/media/cec/platform/tegra/tegra_cec.c
7238 +@@ -366,7 +366,11 @@ static int tegra_cec_probe(struct platform_device *pdev)
7239 + return -ENOENT;
7240 + }
7241 +
7242 +- clk_prepare_enable(cec->clk);
7243 ++ ret = clk_prepare_enable(cec->clk);
7244 ++ if (ret) {
7245 ++ dev_err(&pdev->dev, "Unable to prepare clock for CEC\n");
7246 ++ return ret;
7247 ++ }
7248 +
7249 + /* set context info. */
7250 + cec->dev = &pdev->dev;
7251 +@@ -446,9 +450,7 @@ static int tegra_cec_resume(struct platform_device *pdev)
7252 +
7253 + dev_notice(&pdev->dev, "Resuming\n");
7254 +
7255 +- clk_prepare_enable(cec->clk);
7256 +-
7257 +- return 0;
7258 ++ return clk_prepare_enable(cec->clk);
7259 + }
7260 + #endif
7261 +
7262 +diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
7263 +index 082796534b0ae..bb02354a48b81 100644
7264 +--- a/drivers/media/dvb-frontends/dib8000.c
7265 ++++ b/drivers/media/dvb-frontends/dib8000.c
7266 +@@ -2107,32 +2107,55 @@ static void dib8000_load_ana_fe_coefs(struct dib8000_state *state, const s16 *an
7267 + dib8000_write_word(state, 117 + mode, ana_fe[mode]);
7268 + }
7269 +
7270 +-static const u16 lut_prbs_2k[14] = {
7271 +- 0, 0x423, 0x009, 0x5C7, 0x7A6, 0x3D8, 0x527, 0x7FF, 0x79B, 0x3D6, 0x3A2, 0x53B, 0x2F4, 0x213
7272 ++static const u16 lut_prbs_2k[13] = {
7273 ++ 0x423, 0x009, 0x5C7,
7274 ++ 0x7A6, 0x3D8, 0x527,
7275 ++ 0x7FF, 0x79B, 0x3D6,
7276 ++ 0x3A2, 0x53B, 0x2F4,
7277 ++ 0x213
7278 + };
7279 +-static const u16 lut_prbs_4k[14] = {
7280 +- 0, 0x208, 0x0C3, 0x7B9, 0x423, 0x5C7, 0x3D8, 0x7FF, 0x3D6, 0x53B, 0x213, 0x029, 0x0D0, 0x48E
7281 ++
7282 ++static const u16 lut_prbs_4k[13] = {
7283 ++ 0x208, 0x0C3, 0x7B9,
7284 ++ 0x423, 0x5C7, 0x3D8,
7285 ++ 0x7FF, 0x3D6, 0x53B,
7286 ++ 0x213, 0x029, 0x0D0,
7287 ++ 0x48E
7288 + };
7289 +-static const u16 lut_prbs_8k[14] = {
7290 +- 0, 0x740, 0x069, 0x7DD, 0x208, 0x7B9, 0x5C7, 0x7FF, 0x53B, 0x029, 0x48E, 0x4C4, 0x367, 0x684
7291 ++
7292 ++static const u16 lut_prbs_8k[13] = {
7293 ++ 0x740, 0x069, 0x7DD,
7294 ++ 0x208, 0x7B9, 0x5C7,
7295 ++ 0x7FF, 0x53B, 0x029,
7296 ++ 0x48E, 0x4C4, 0x367,
7297 ++ 0x684
7298 + };
7299 +
7300 + static u16 dib8000_get_init_prbs(struct dib8000_state *state, u16 subchannel)
7301 + {
7302 + int sub_channel_prbs_group = 0;
7303 ++ int prbs_group;
7304 +
7305 +- sub_channel_prbs_group = (subchannel / 3) + 1;
7306 +- dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]);
7307 ++ sub_channel_prbs_group = subchannel / 3;
7308 ++ if (sub_channel_prbs_group >= ARRAY_SIZE(lut_prbs_2k))
7309 ++ return 0;
7310 +
7311 + switch (state->fe[0]->dtv_property_cache.transmission_mode) {
7312 + case TRANSMISSION_MODE_2K:
7313 +- return lut_prbs_2k[sub_channel_prbs_group];
7314 ++ prbs_group = lut_prbs_2k[sub_channel_prbs_group];
7315 ++ break;
7316 + case TRANSMISSION_MODE_4K:
7317 +- return lut_prbs_4k[sub_channel_prbs_group];
7318 ++ prbs_group = lut_prbs_4k[sub_channel_prbs_group];
7319 ++ break;
7320 + default:
7321 + case TRANSMISSION_MODE_8K:
7322 +- return lut_prbs_8k[sub_channel_prbs_group];
7323 ++ prbs_group = lut_prbs_8k[sub_channel_prbs_group];
7324 + }
7325 ++
7326 ++ dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n",
7327 ++ sub_channel_prbs_group, subchannel, prbs_group);
7328 ++
7329 ++ return prbs_group;
7330 + }
7331 +
7332 + static void dib8000_set_13seg_channel(struct dib8000_state *state)
7333 +@@ -2409,10 +2432,8 @@ static void dib8000_set_isdbt_common_channel(struct dib8000_state *state, u8 seq
7334 + /* TSB or ISDBT ? apply it now */
7335 + if (c->isdbt_sb_mode) {
7336 + dib8000_set_sb_channel(state);
7337 +- if (c->isdbt_sb_subchannel < 14)
7338 +- init_prbs = dib8000_get_init_prbs(state, c->isdbt_sb_subchannel);
7339 +- else
7340 +- init_prbs = 0;
7341 ++ init_prbs = dib8000_get_init_prbs(state,
7342 ++ c->isdbt_sb_subchannel);
7343 + } else {
7344 + dib8000_set_13seg_channel(state);
7345 + init_prbs = 0xfff;
7346 +@@ -3004,6 +3025,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
7347 +
7348 + unsigned long *timeout = &state->timeout;
7349 + unsigned long now = jiffies;
7350 ++ u16 init_prbs;
7351 + #ifdef DIB8000_AGC_FREEZE
7352 + u16 agc1, agc2;
7353 + #endif
7354 +@@ -3302,8 +3324,10 @@ static int dib8000_tune(struct dvb_frontend *fe)
7355 + break;
7356 +
7357 + case CT_DEMOD_STEP_11: /* 41 : init prbs autosearch */
7358 +- if (state->subchannel <= 41) {
7359 +- dib8000_set_subchannel_prbs(state, dib8000_get_init_prbs(state, state->subchannel));
7360 ++ init_prbs = dib8000_get_init_prbs(state, state->subchannel);
7361 ++
7362 ++ if (init_prbs) {
7363 ++ dib8000_set_subchannel_prbs(state, init_prbs);
7364 + *tune_state = CT_DEMOD_STEP_9;
7365 + } else {
7366 + *tune_state = CT_DEMOD_STOP;
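The old tables carried a dummy leading 0 and were indexed with subchannel / 3 + 1, which for subchannels 39 to 41 in the autosearch path indexed one past the end of the 14-entry arrays. The rework drops the dummy entry, indexes from 0, and bounds-checks against ARRAY_SIZE(), returning 0 so the CT_DEMOD_STEP_11 caller can stop cleanly. A standalone sketch of the reworked lookup, using illustrative values from the 2k table:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const unsigned short lut[13] = {
        0x423, 0x009, 0x5C7, 0x7A6, 0x3D8, 0x527, 0x7FF,
        0x79B, 0x3D6, 0x3A2, 0x53B, 0x2F4, 0x213
    };

    static unsigned short get_init_prbs(unsigned int subchannel)
    {
        unsigned int group = subchannel / 3;

        if (group >= ARRAY_SIZE(lut))
            return 0;   /* out of range: caller treats 0 as "stop" */
        return lut[group];
    }

    int main(void)
    {
        /* prints 0x423 0: subchannel 42 is rejected, not read OOB */
        printf("%#x %#x\n", get_init_prbs(0), get_init_prbs(42));
        return 0;
    }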
7367 +diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
7368 +index 7ab9e5f9f2676..81cdf37216ca7 100644
7369 +--- a/drivers/media/i2c/imx258.c
7370 ++++ b/drivers/media/i2c/imx258.c
7371 +@@ -23,7 +23,7 @@
7372 + #define IMX258_CHIP_ID 0x0258
7373 +
7374 + /* V_TIMING internal */
7375 +-#define IMX258_VTS_30FPS 0x0c98
7376 ++#define IMX258_VTS_30FPS 0x0c50
7377 + #define IMX258_VTS_30FPS_2K 0x0638
7378 + #define IMX258_VTS_30FPS_VGA 0x034c
7379 + #define IMX258_VTS_MAX 0xffff
7380 +@@ -47,7 +47,7 @@
7381 + /* Analog gain control */
7382 + #define IMX258_REG_ANALOG_GAIN 0x0204
7383 + #define IMX258_ANA_GAIN_MIN 0
7384 +-#define IMX258_ANA_GAIN_MAX 0x1fff
7385 ++#define IMX258_ANA_GAIN_MAX 480
7386 + #define IMX258_ANA_GAIN_STEP 1
7387 + #define IMX258_ANA_GAIN_DEFAULT 0x0
7388 +
7389 +diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
7390 +index 3a191e257fad0..ef726faee2a4c 100644
7391 +--- a/drivers/media/i2c/tda1997x.c
7392 ++++ b/drivers/media/i2c/tda1997x.c
7393 +@@ -1695,14 +1695,15 @@ static int tda1997x_query_dv_timings(struct v4l2_subdev *sd,
7394 + struct v4l2_dv_timings *timings)
7395 + {
7396 + struct tda1997x_state *state = to_state(sd);
7397 ++ int ret;
7398 +
7399 + v4l_dbg(1, debug, state->client, "%s\n", __func__);
7400 + memset(timings, 0, sizeof(struct v4l2_dv_timings));
7401 + mutex_lock(&state->lock);
7402 +- tda1997x_detect_std(state, timings);
7403 ++ ret = tda1997x_detect_std(state, timings);
7404 + mutex_unlock(&state->lock);
7405 +
7406 +- return 0;
7407 ++ return ret;
7408 + }
7409 +
7410 + static const struct v4l2_subdev_video_ops tda1997x_video_ops = {
7411 +diff --git a/drivers/media/platform/ti-vpe/cal-camerarx.c b/drivers/media/platform/ti-vpe/cal-camerarx.c
7412 +index 124a4e2bdefe0..e2e384a887ac2 100644
7413 +--- a/drivers/media/platform/ti-vpe/cal-camerarx.c
7414 ++++ b/drivers/media/platform/ti-vpe/cal-camerarx.c
7415 +@@ -845,7 +845,9 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
7416 + if (ret)
7417 + goto error;
7418 +
7419 +- cal_camerarx_sd_init_cfg(sd, NULL);
7420 ++ ret = cal_camerarx_sd_init_cfg(sd, NULL);
7421 ++ if (ret)
7422 ++ goto error;
7423 +
7424 + ret = v4l2_device_register_subdev(&cal->v4l2_dev, sd);
7425 + if (ret)
7426 +diff --git a/drivers/media/platform/ti-vpe/cal-video.c b/drivers/media/platform/ti-vpe/cal-video.c
7427 +index 15fb5360cf13c..552619cb81a81 100644
7428 +--- a/drivers/media/platform/ti-vpe/cal-video.c
7429 ++++ b/drivers/media/platform/ti-vpe/cal-video.c
7430 +@@ -694,7 +694,7 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
7431 +
7432 + spin_lock_irq(&ctx->dma.lock);
7433 + buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, list);
7434 +- ctx->dma.pending = buf;
7435 ++ ctx->dma.active = buf;
7436 + list_del(&buf->list);
7437 + spin_unlock_irq(&ctx->dma.lock);
7438 +
7439 +diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
7440 +index 1ba3f96ffa7dc..40ab66c850f23 100644
7441 +--- a/drivers/media/rc/rc-loopback.c
7442 ++++ b/drivers/media/rc/rc-loopback.c
7443 +@@ -42,7 +42,7 @@ static int loop_set_tx_mask(struct rc_dev *dev, u32 mask)
7444 +
7445 + if ((mask & (RXMASK_REGULAR | RXMASK_LEARNING)) != mask) {
7446 + dprintk("invalid tx mask: %u\n", mask);
7447 +- return -EINVAL;
7448 ++ return 2;
7449 + }
7450 +
7451 + dprintk("setting tx mask: %u\n", mask);
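The change from -EINVAL to 2 looks like a regression in isolation, but it matches the LIRC convention for transmitter-mask setters: when handed an invalid mask, the callback is expected to report how many transmitters the device has (the loopback device models two) so userspace can retry with a valid mask. A standalone sketch of that convention, with hypothetical names:

    #include <stdio.h>

    static int set_tx_mask(unsigned int mask, unsigned int ntx)
    {
        if (mask & ~((1u << ntx) - 1))
            return (int)ntx;    /* invalid: report how many exist */
        return 0;
    }

    int main(void)
    {
        /* prints "0 2": 0x3 is valid for two emitters, 0x8 is not */
        printf("%d %d\n", set_tx_mask(0x3, 2), set_tx_mask(0x8, 2));
        return 0;
    }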
7452 +diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
7453 +index 252136cc885ce..6acb8013de08b 100644
7454 +--- a/drivers/media/usb/uvc/uvc_v4l2.c
7455 ++++ b/drivers/media/usb/uvc/uvc_v4l2.c
7456 +@@ -899,8 +899,8 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
7457 + {
7458 + struct uvc_fh *handle = fh;
7459 + struct uvc_video_chain *chain = handle->chain;
7460 ++ u8 *buf;
7461 + int ret;
7462 +- u8 i;
7463 +
7464 + if (chain->selector == NULL ||
7465 + (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
7466 +@@ -908,22 +908,27 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
7467 + return 0;
7468 + }
7469 +
7470 ++ buf = kmalloc(1, GFP_KERNEL);
7471 ++ if (!buf)
7472 ++ return -ENOMEM;
7473 ++
7474 + ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, chain->selector->id,
7475 + chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
7476 +- &i, 1);
7477 +- if (ret < 0)
7478 +- return ret;
7479 ++ buf, 1);
7480 ++ if (!ret)
7481 ++ *input = *buf - 1;
7482 +
7483 +- *input = i - 1;
7484 +- return 0;
7485 ++ kfree(buf);
7486 ++
7487 ++ return ret;
7488 + }
7489 +
7490 + static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
7491 + {
7492 + struct uvc_fh *handle = fh;
7493 + struct uvc_video_chain *chain = handle->chain;
7494 ++ u8 *buf;
7495 + int ret;
7496 +- u32 i;
7497 +
7498 + ret = uvc_acquire_privileges(handle);
7499 + if (ret < 0)
7500 +@@ -939,10 +944,17 @@ static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
7501 + if (input >= chain->selector->bNrInPins)
7502 + return -EINVAL;
7503 +
7504 +- i = input + 1;
7505 +- return uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
7506 +- chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
7507 +- &i, 1);
7508 ++ buf = kmalloc(1, GFP_KERNEL);
7509 ++ if (!buf)
7510 ++ return -ENOMEM;
7511 ++
7512 ++ *buf = input + 1;
7513 ++ ret = uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
7514 ++ chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
7515 ++ buf, 1);
7516 ++ kfree(buf);
7517 ++
7518 ++ return ret;
7519 + }
7520 +
7521 + static int uvc_ioctl_queryctrl(struct file *file, void *fh,
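Both ioctl paths above swap a stack variable for a one-byte kmalloc() buffer: uvc_query_ctrl() hands the pointer to the USB layer, which may DMA into it, and with VMAP_STACK the stack can live in vmalloc space where DMA is not permitted. A standalone sketch of the bounce-buffer shape, with a stubbed transfer (hypothetical names, not the UVC API):

    #include <stdio.h>
    #include <stdlib.h>

    static int do_transfer(unsigned char *buf, size_t len)
    {
        (void)len;
        buf[0] = 1;     /* stub: pretend the device reported input 1 */
        return 0;
    }

    static int get_input(unsigned int *input)
    {
        unsigned char *buf = malloc(1);
        int ret;

        if (!buf)
            return -12; /* -ENOMEM */
        ret = do_transfer(buf, 1);
        if (!ret)
            *input = buf[0] - 1;    /* device numbers inputs from 1 */
        free(buf);
        return ret;
    }

    int main(void)
    {
        unsigned int input;

        if (!get_input(&input))
            printf("input %u\n", input);
        return 0;
    }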
7522 +diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
7523 +index 230d65a642178..af48705c704f8 100644
7524 +--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
7525 ++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
7526 +@@ -196,7 +196,7 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
7527 + if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
7528 + return false;
7529 +
7530 +- for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
7531 ++ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
7532 + if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
7533 + fnc, fnc_handle) &&
7534 + v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
7535 +@@ -218,7 +218,7 @@ bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
7536 + {
7537 + unsigned int i;
7538 +
7539 +- for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
7540 ++ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
7541 + const struct v4l2_bt_timings *bt =
7542 + &v4l2_dv_timings_presets[i].bt;
7543 +
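Both loops walk a sentinel-terminated preset table whose final entry has bt.width == 0. The old condition i < v4l2_dv_timings_presets[i].bt.width only terminated because every real width dwarfs the index; the fix tests the current entry directly, which is the test the sentinel calls for. Standalone sketch:

    #include <stdio.h>

    struct preset {
        unsigned int width;
        const char *name;
    };

    /* sentinel-terminated table: iteration stops when width == 0 */
    static const struct preset presets[] = {
        { 640,  "640x480"   },
        { 1920, "1920x1080" },
        { 0,    NULL        },  /* sentinel */
    };

    int main(void)
    {
        for (unsigned int i = 0; presets[i].width; i++)
            printf("%s\n", presets[i].name);
        return 0;
    }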
7544 +diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
7545 +index a43c401017ae2..741116b3d9958 100644
7546 +--- a/drivers/misc/pvpanic/pvpanic-pci.c
7547 ++++ b/drivers/misc/pvpanic/pvpanic-pci.c
7548 +@@ -108,4 +108,6 @@ static struct pci_driver pvpanic_pci_driver = {
7549 + },
7550 + };
7551 +
7552 ++MODULE_DEVICE_TABLE(pci, pvpanic_pci_id_tbl);
7553 ++
7554 + module_pci_driver(pvpanic_pci_driver);
7555 +diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
7556 +index 93638ae2753af..4c26b19f5154a 100644
7557 +--- a/drivers/misc/sram.c
7558 ++++ b/drivers/misc/sram.c
7559 +@@ -97,7 +97,24 @@ static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
7560 + struct sram_partition *part = &sram->partition[sram->partitions];
7561 +
7562 + mutex_init(&part->lock);
7563 +- part->base = sram->virt_base + block->start;
7564 ++
7565 ++ if (sram->config && sram->config->map_only_reserved) {
7566 ++ void __iomem *virt_base;
7567 ++
7568 ++ if (sram->no_memory_wc)
7569 ++ virt_base = devm_ioremap_resource(sram->dev, &block->res);
7570 ++ else
7571 ++ virt_base = devm_ioremap_resource_wc(sram->dev, &block->res);
7572 ++
7573 ++ if (IS_ERR(virt_base)) {
7574 ++ dev_err(sram->dev, "could not map SRAM at %pr\n", &block->res);
7575 ++ return PTR_ERR(virt_base);
7576 ++ }
7577 ++
7578 ++ part->base = virt_base;
7579 ++ } else {
7580 ++ part->base = sram->virt_base + block->start;
7581 ++ }
7582 +
7583 + if (block->pool) {
7584 + ret = sram_add_pool(sram, block, start, part);
7585 +@@ -198,6 +215,7 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
7586 +
7587 + block->start = child_res.start - res->start;
7588 + block->size = resource_size(&child_res);
7589 ++ block->res = child_res;
7590 + list_add_tail(&block->list, &reserve_list);
7591 +
7592 + if (of_find_property(child, "export", NULL))
7593 +@@ -295,15 +313,17 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
7594 + */
7595 + cur_size = block->start - cur_start;
7596 +
7597 +- dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
7598 +- cur_start, cur_start + cur_size);
7599 ++ if (sram->pool) {
7600 ++ dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
7601 ++ cur_start, cur_start + cur_size);
7602 +
7603 +- ret = gen_pool_add_virt(sram->pool,
7604 +- (unsigned long)sram->virt_base + cur_start,
7605 +- res->start + cur_start, cur_size, -1);
7606 +- if (ret < 0) {
7607 +- sram_free_partitions(sram);
7608 +- goto err_chunks;
7609 ++ ret = gen_pool_add_virt(sram->pool,
7610 ++ (unsigned long)sram->virt_base + cur_start,
7611 ++ res->start + cur_start, cur_size, -1);
7612 ++ if (ret < 0) {
7613 ++ sram_free_partitions(sram);
7614 ++ goto err_chunks;
7615 ++ }
7616 + }
7617 +
7618 + /* next allocation after this reserved block */
7619 +@@ -331,40 +351,63 @@ static int atmel_securam_wait(void)
7620 + 10000, 500000);
7621 + }
7622 +
7623 ++static const struct sram_config atmel_securam_config = {
7624 ++ .init = atmel_securam_wait,
7625 ++};
7626 ++
7627 ++/*
7628 ++ * SYSRAM contains areas that are not accessible by the
7629 ++ * kernel, such as the first 256K that is reserved for TZ.
7630 ++ * Accesses to those areas (including speculative accesses)
7631 ++ * trigger SErrors. As such we must map only the areas of
7632 ++ * SYSRAM specified in the device tree.
7633 ++ */
7634 ++static const struct sram_config tegra_sysram_config = {
7635 ++ .map_only_reserved = true,
7636 ++};
7637 ++
7638 + static const struct of_device_id sram_dt_ids[] = {
7639 + { .compatible = "mmio-sram" },
7640 +- { .compatible = "atmel,sama5d2-securam", .data = atmel_securam_wait },
7641 ++ { .compatible = "atmel,sama5d2-securam", .data = &atmel_securam_config },
7642 ++ { .compatible = "nvidia,tegra186-sysram", .data = &tegra_sysram_config },
7643 ++ { .compatible = "nvidia,tegra194-sysram", .data = &tegra_sysram_config },
7644 + {}
7645 + };
7646 +
7647 + static int sram_probe(struct platform_device *pdev)
7648 + {
7649 ++ const struct sram_config *config;
7650 + struct sram_dev *sram;
7651 + int ret;
7652 + struct resource *res;
7653 +- int (*init_func)(void);
7654 ++
7655 ++ config = of_device_get_match_data(&pdev->dev);
7656 +
7657 + sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
7658 + if (!sram)
7659 + return -ENOMEM;
7660 +
7661 + sram->dev = &pdev->dev;
7662 ++ sram->no_memory_wc = of_property_read_bool(pdev->dev.of_node, "no-memory-wc");
7663 ++ sram->config = config;
7664 ++
7665 ++ if (!config || !config->map_only_reserved) {
7666 ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
7667 ++ if (sram->no_memory_wc)
7668 ++ sram->virt_base = devm_ioremap_resource(&pdev->dev, res);
7669 ++ else
7670 ++ sram->virt_base = devm_ioremap_resource_wc(&pdev->dev, res);
7671 ++ if (IS_ERR(sram->virt_base)) {
7672 ++ dev_err(&pdev->dev, "could not map SRAM registers\n");
7673 ++ return PTR_ERR(sram->virt_base);
7674 ++ }
7675 +
7676 +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
7677 +- if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc"))
7678 +- sram->virt_base = devm_ioremap_resource(&pdev->dev, res);
7679 +- else
7680 +- sram->virt_base = devm_ioremap_resource_wc(&pdev->dev, res);
7681 +- if (IS_ERR(sram->virt_base)) {
7682 +- dev_err(&pdev->dev, "could not map SRAM registers\n");
7683 +- return PTR_ERR(sram->virt_base);
7684 ++ sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
7685 ++ NUMA_NO_NODE, NULL);
7686 ++ if (IS_ERR(sram->pool))
7687 ++ return PTR_ERR(sram->pool);
7688 + }
7689 +
7690 +- sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
7691 +- NUMA_NO_NODE, NULL);
7692 +- if (IS_ERR(sram->pool))
7693 +- return PTR_ERR(sram->pool);
7694 +-
7695 + sram->clk = devm_clk_get(sram->dev, NULL);
7696 + if (IS_ERR(sram->clk))
7697 + sram->clk = NULL;
7698 +@@ -378,15 +421,15 @@ static int sram_probe(struct platform_device *pdev)
7699 +
7700 + platform_set_drvdata(pdev, sram);
7701 +
7702 +- init_func = of_device_get_match_data(&pdev->dev);
7703 +- if (init_func) {
7704 +- ret = init_func();
7705 ++ if (config && config->init) {
7706 ++ ret = config->init();
7707 + if (ret)
7708 + goto err_free_partitions;
7709 + }
7710 +
7711 +- dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
7712 +- gen_pool_size(sram->pool) / 1024, sram->virt_base);
7713 ++ if (sram->pool)
7714 ++ dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
7715 ++ gen_pool_size(sram->pool) / 1024, sram->virt_base);
7716 +
7717 + return 0;
7718 +
7719 +@@ -405,7 +448,7 @@ static int sram_remove(struct platform_device *pdev)
7720 +
7721 + sram_free_partitions(sram);
7722 +
7723 +- if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
7724 ++ if (sram->pool && gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
7725 + dev_err(sram->dev, "removed while SRAM allocated\n");
7726 +
7727 + if (sram->clk)
7728 +diff --git a/drivers/misc/sram.h b/drivers/misc/sram.h
7729 +index 9c1d21ff73476..d2058d8c8f1d2 100644
7730 +--- a/drivers/misc/sram.h
7731 ++++ b/drivers/misc/sram.h
7732 +@@ -5,6 +5,11 @@
7733 + #ifndef __SRAM_H
7734 + #define __SRAM_H
7735 +
7736 ++struct sram_config {
7737 ++ int (*init)(void);
7738 ++ bool map_only_reserved;
7739 ++};
7740 ++
7741 + struct sram_partition {
7742 + void __iomem *base;
7743 +
7744 +@@ -15,8 +20,11 @@ struct sram_partition {
7745 + };
7746 +
7747 + struct sram_dev {
7748 ++ const struct sram_config *config;
7749 ++
7750 + struct device *dev;
7751 + void __iomem *virt_base;
7752 ++ bool no_memory_wc;
7753 +
7754 + struct gen_pool *pool;
7755 + struct clk *clk;
7756 +@@ -29,6 +37,7 @@ struct sram_reserve {
7757 + struct list_head list;
7758 + u32 start;
7759 + u32 size;
7760 ++ struct resource res;
7761 + bool export;
7762 + bool pool;
7763 + bool protect_exec;
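The sram rework above replaces the bare init-function pointer in of_device_id .data with a struct sram_config, so per-compatible flags such as map_only_reserved can travel with the match data. A standalone sketch of that match-data shape, with hypothetical names:

    #include <stdio.h>
    #include <string.h>

    struct cfg {
        int (*init)(void);
        int map_only_reserved;
    };

    static int securam_wait(void) { return 0; }

    static const struct cfg securam_cfg = { .init = securam_wait };
    static const struct cfg sysram_cfg  = { .map_only_reserved = 1 };

    struct match {
        const char *compatible;
        const struct cfg *data;
    };

    static const struct match table[] = {
        { "mmio-sram",              NULL },
        { "atmel,sama5d2-securam",  &securam_cfg },
        { "nvidia,tegra186-sysram", &sysram_cfg },
        { NULL,                     NULL },
    };

    static const struct cfg *match_data(const char *compat)
    {
        for (const struct match *m = table; m->compatible; m++)
            if (!strcmp(m->compatible, compat))
                return m->data;
        return NULL;
    }

    int main(void)
    {
        const struct cfg *c = match_data("nvidia,tegra186-sysram");

        printf("map_only_reserved=%d\n", c ? c->map_only_reserved : 0);
        return 0;
    }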
7764 +diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
7765 +index 880c33ab9f47b..94ebf7f3fd58a 100644
7766 +--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
7767 ++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
7768 +@@ -2243,7 +2243,8 @@ int vmci_qp_broker_map(struct vmci_handle handle,
7769 +
7770 + result = VMCI_SUCCESS;
7771 +
7772 +- if (context_id != VMCI_HOST_CONTEXT_ID) {
7773 ++ if (context_id != VMCI_HOST_CONTEXT_ID &&
7774 ++ !QPBROKERSTATE_HAS_MEM(entry)) {
7775 + struct vmci_qp_page_store page_store;
7776 +
7777 + page_store.pages = guest_mem;
7778 +@@ -2350,7 +2351,8 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
7779 + goto out;
7780 + }
7781 +
7782 +- if (context_id != VMCI_HOST_CONTEXT_ID) {
7783 ++ if (context_id != VMCI_HOST_CONTEXT_ID &&
7784 ++ QPBROKERSTATE_HAS_MEM(entry)) {
7785 + qp_acquire_queue_mutex(entry->produce_q);
7786 + result = qp_save_headers(entry);
7787 + if (result < VMCI_SUCCESS)
7788 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
7789 +index ce8aed5629295..c3ecec3f6ddc6 100644
7790 +--- a/drivers/mmc/core/block.c
7791 ++++ b/drivers/mmc/core/block.c
7792 +@@ -98,6 +98,11 @@ static int max_devices;
7793 + static DEFINE_IDA(mmc_blk_ida);
7794 + static DEFINE_IDA(mmc_rpmb_ida);
7795 +
7796 ++struct mmc_blk_busy_data {
7797 ++ struct mmc_card *card;
7798 ++ u32 status;
7799 ++};
7800 ++
7801 + /*
7802 + * There is one mmc_blk_data per slot.
7803 + */
7804 +@@ -417,42 +422,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
7805 + return 0;
7806 + }
7807 +
7808 +-static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
7809 +- u32 *resp_errs)
7810 +-{
7811 +- unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
7812 +- int err = 0;
7813 +- u32 status;
7814 +-
7815 +- do {
7816 +- bool done = time_after(jiffies, timeout);
7817 +-
7818 +- err = __mmc_send_status(card, &status, 5);
7819 +- if (err) {
7820 +- dev_err(mmc_dev(card->host),
7821 +- "error %d requesting status\n", err);
7822 +- return err;
7823 +- }
7824 +-
7825 +- /* Accumulate any response error bits seen */
7826 +- if (resp_errs)
7827 +- *resp_errs |= status;
7828 +-
7829 +- /*
7830 +- * Timeout if the device never becomes ready for data and never
7831 +- * leaves the program state.
7832 +- */
7833 +- if (done) {
7834 +- dev_err(mmc_dev(card->host),
7835 +- "Card stuck in wrong state! %s status: %#x\n",
7836 +- __func__, status);
7837 +- return -ETIMEDOUT;
7838 +- }
7839 +- } while (!mmc_ready_for_data(status));
7840 +-
7841 +- return err;
7842 +-}
7843 +-
7844 + static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
7845 + struct mmc_blk_ioc_data *idata)
7846 + {
7847 +@@ -549,6 +518,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
7848 + return mmc_sanitize(card, idata->ic.cmd_timeout_ms);
7849 +
7850 + mmc_wait_for_req(card->host, &mrq);
7851 ++ memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
7852 +
7853 + if (cmd.error) {
7854 + dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
7855 +@@ -598,14 +568,13 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
7856 + if (idata->ic.postsleep_min_us)
7857 + usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
7858 +
7859 +- memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
7860 +-
7861 + if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
7862 + /*
7863 + * Ensure RPMB/R1B command has completed by polling CMD13
7864 + * "Send Status".
7865 + */
7866 +- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL);
7867 ++ err = mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, false,
7868 ++ MMC_BUSY_IO);
7869 + }
7870 +
7871 + return err;
7872 +@@ -1636,7 +1605,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
7873 +
7874 + mmc_blk_send_stop(card, timeout);
7875 +
7876 +- err = card_busy_detect(card, timeout, NULL);
7877 ++ err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO);
7878 +
7879 + mmc_retune_release(card->host);
7880 +
7881 +@@ -1851,28 +1820,48 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
7882 + brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
7883 + }
7884 +
7885 ++static int mmc_blk_busy_cb(void *cb_data, bool *busy)
7886 ++{
7887 ++ struct mmc_blk_busy_data *data = cb_data;
7888 ++ u32 status = 0;
7889 ++ int err;
7890 ++
7891 ++ err = mmc_send_status(data->card, &status);
7892 ++ if (err)
7893 ++ return err;
7894 ++
7895 ++ /* Accumulate response error bits. */
7896 ++ data->status |= status;
7897 ++
7898 ++ *busy = !mmc_ready_for_data(status);
7899 ++ return 0;
7900 ++}
7901 ++
7902 + static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
7903 + {
7904 + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
7905 +- u32 status = 0;
7906 ++ struct mmc_blk_busy_data cb_data;
7907 + int err;
7908 +
7909 + if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
7910 + return 0;
7911 +
7912 +- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
7913 ++ cb_data.card = card;
7914 ++ cb_data.status = 0;
7915 ++ err = __mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, &mmc_blk_busy_cb,
7916 ++ &cb_data);
7917 +
7918 + /*
7919 + * Do not assume data transferred correctly if there are any error bits
7920 + * set.
7921 + */
7922 +- if (status & mmc_blk_stop_err_bits(&mqrq->brq)) {
7923 ++ if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) {
7924 + mqrq->brq.data.bytes_xfered = 0;
7925 + err = err ? err : -EIO;
7926 + }
7927 +
7928 + /* Copy the exception bit so it will be seen later on */
7929 +- if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT)
7930 ++ if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT)
7931 + mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
7932 +
7933 + return err;
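The block driver's open-coded card_busy_detect() is folded into the generic __mmc_poll_for_busy() machinery: the caller supplies a callback plus a small context struct, the callback accumulates response bits and reports busy or ready, and the core loop owns the timeout policy. A standalone sketch of that callback-polling shape:

    #include <stdio.h>

    struct busy_data {
        int polls;
        unsigned int status;
    };

    static int busy_cb(void *cb_data, int *busy)
    {
        struct busy_data *d = cb_data;

        d->status |= 0x100;         /* accumulate response bits */
        *busy = ++d->polls < 3;     /* pretend ready on the 3rd poll */
        return 0;
    }

    static int poll_for_busy(int (*cb)(void *, int *), void *cb_data)
    {
        int busy = 1;

        while (busy) {
            int err = cb(cb_data, &busy);

            if (err)
                return err;
            /* a real loop would also check a timeout here */
        }
        return 0;
    }

    int main(void)
    {
        struct busy_data d = { 0, 0 };
        int err = poll_for_busy(busy_cb, &d);

        printf("err=%d polls=%d status=%#x\n", err, d.polls, d.status);
        return 0;
    }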
7934 +diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
7935 +index 973756ed4016f..90d213a2203f4 100644
7936 +--- a/drivers/mmc/core/mmc_ops.c
7937 ++++ b/drivers/mmc/core/mmc_ops.c
7938 +@@ -435,7 +435,7 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
7939 + u32 status = 0;
7940 + int err;
7941 +
7942 +- if (host->ops->card_busy) {
7943 ++ if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
7944 + *busy = host->ops->card_busy(host);
7945 + return 0;
7946 + }
7947 +@@ -457,6 +457,7 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
7948 + break;
7949 + case MMC_BUSY_HPI:
7950 + case MMC_BUSY_EXTR_SINGLE:
7951 ++ case MMC_BUSY_IO:
7952 + break;
7953 + default:
7954 + err = -EINVAL;
7955 +@@ -509,6 +510,7 @@ int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
7956 +
7957 + return 0;
7958 + }
7959 ++EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
7960 +
7961 + int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
7962 + bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
7963 +@@ -521,6 +523,7 @@ int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
7964 +
7965 + return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
7966 + }
7967 ++EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
7968 +
7969 + bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
7970 + unsigned int timeout_ms)
7971 +diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
7972 +index 41ab4f573a310..ae25ffc2e8704 100644
7973 +--- a/drivers/mmc/core/mmc_ops.h
7974 ++++ b/drivers/mmc/core/mmc_ops.h
7975 +@@ -15,6 +15,7 @@ enum mmc_busy_cmd {
7976 + MMC_BUSY_ERASE,
7977 + MMC_BUSY_HPI,
7978 + MMC_BUSY_EXTR_SINGLE,
7979 ++ MMC_BUSY_IO,
7980 + };
7981 +
7982 + struct mmc_host;
7983 +diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
7984 +index 4ca9374157348..58cfaffa3c2d8 100644
7985 +--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
7986 ++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
7987 +@@ -542,9 +542,22 @@ static int sd_write_long_data(struct realtek_pci_sdmmc *host,
7988 + return 0;
7989 + }
7990 +
7991 ++static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
7992 ++{
7993 ++ rtsx_pci_write_register(host->pcr, SD_CFG1,
7994 ++ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
7995 ++}
7996 ++
7997 ++static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
7998 ++{
7999 ++ rtsx_pci_write_register(host->pcr, SD_CFG1,
8000 ++ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
8001 ++}
8002 ++
8003 + static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
8004 + {
8005 + struct mmc_data *data = mrq->data;
8006 ++ int err;
8007 +
8008 + if (host->sg_count < 0) {
8009 + data->error = host->sg_count;
8010 +@@ -553,22 +566,19 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
8011 + return data->error;
8012 + }
8013 +
8014 +- if (data->flags & MMC_DATA_READ)
8015 +- return sd_read_long_data(host, mrq);
8016 ++ if (data->flags & MMC_DATA_READ) {
8017 ++ if (host->initial_mode)
8018 ++ sd_disable_initial_mode(host);
8019 +
8020 +- return sd_write_long_data(host, mrq);
8021 +-}
8022 ++ err = sd_read_long_data(host, mrq);
8023 +
8024 +-static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
8025 +-{
8026 +- rtsx_pci_write_register(host->pcr, SD_CFG1,
8027 +- SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
8028 +-}
8029 ++ if (host->initial_mode)
8030 ++ sd_enable_initial_mode(host);
8031 +
8032 +-static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
8033 +-{
8034 +- rtsx_pci_write_register(host->pcr, SD_CFG1,
8035 +- SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
8036 ++ return err;
8037 ++ }
8038 ++
8039 ++ return sd_write_long_data(host, mrq);
8040 + }
8041 +
8042 + static void sd_normal_rw(struct realtek_pci_sdmmc *host,
8043 +diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
8044 +index 0e7c07ed96904..b6902447d7797 100644
8045 +--- a/drivers/mmc/host/sdhci-of-arasan.c
8046 ++++ b/drivers/mmc/host/sdhci-of-arasan.c
8047 +@@ -159,6 +159,12 @@ struct sdhci_arasan_data {
8048 + /* Controller immediately reports SDHCI_CLOCK_INT_STABLE after enabling the
8049 + * internal clock even when the clock isn't stable */
8050 + #define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1)
8051 ++/*
8052 ++ * Some of the Arasan variations might not have timing requirements
8053 ++ * met at 25MHz for Default Speed mode; those controllers work at
8054 ++ * 19MHz instead.
8055 ++ */
8056 ++#define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2)
8057 + };
8058 +
8059 + struct sdhci_arasan_of_data {
8060 +@@ -267,7 +273,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
8061 + * through low speeds without power cycling.
8062 + */
8063 + sdhci_set_clock(host, host->max_clk);
8064 +- phy_power_on(sdhci_arasan->phy);
8065 ++ if (phy_power_on(sdhci_arasan->phy)) {
8066 ++ pr_err("%s: Cannot power on phy.\n",
8067 ++ mmc_hostname(host->mmc));
8068 ++ return;
8069 ++ }
8070 ++
8071 + sdhci_arasan->is_phy_on = true;
8072 +
8073 + /*
8074 +@@ -290,6 +301,16 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
8075 + sdhci_arasan->is_phy_on = false;
8076 + }
8077 +
8078 ++ if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN) {
8079 ++ /*
8080 ++ * Some of the Arasan variations might not have timing
8081 ++ * requirements met at 25MHz for Default Speed mode,
8082 ++ * those controllers work at 19MHz instead.
8083 ++ */
8084 ++ if (clock == DEFAULT_SPEED_MAX_DTR)
8085 ++ clock = (DEFAULT_SPEED_MAX_DTR * 19) / 25;
8086 ++ }
8087 ++
8088 + /* Set the Input and Output Clock Phase Delays */
8089 + if (clk_data->set_clk_delays)
8090 + clk_data->set_clk_delays(host);
8091 +@@ -307,7 +328,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
8092 + msleep(20);
8093 +
8094 + if (ctrl_phy) {
8095 +- phy_power_on(sdhci_arasan->phy);
8096 ++ if (phy_power_on(sdhci_arasan->phy)) {
8097 ++ pr_err("%s: Cannot power on phy.\n",
8098 ++ mmc_hostname(host->mmc));
8099 ++ return;
8100 ++ }
8101 ++
8102 + sdhci_arasan->is_phy_on = true;
8103 + }
8104 + }
8105 +@@ -463,7 +489,9 @@ static int sdhci_arasan_suspend(struct device *dev)
8106 + ret = phy_power_off(sdhci_arasan->phy);
8107 + if (ret) {
8108 + dev_err(dev, "Cannot power off phy.\n");
8109 +- sdhci_resume_host(host);
8110 ++ if (sdhci_resume_host(host))
8111 ++ dev_err(dev, "Cannot resume host.\n");
8112 ++
8113 + return ret;
8114 + }
8115 + sdhci_arasan->is_phy_on = false;
8116 +@@ -1608,6 +1636,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
8117 + if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
8118 + host->mmc_host_ops.execute_tuning =
8119 + arasan_zynqmp_execute_tuning;
8120 ++
8121 ++ sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN;
8122 + }
8123 +
8124 + arasan_dt_parse_clk_phases(dev, &sdhci_arasan->clk_data);
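For the ZynqMP variant the new quirk rescales a Default Speed clock request in integer arithmetic; assuming DEFAULT_SPEED_MAX_DTR is the usual 25 MHz, as the name suggests, the result is (25 MHz * 19) / 25 = 19 MHz. Standalone check:

    #include <stdio.h>

    #define DEFAULT_SPEED_MAX_DTR 25000000  /* assumed value, per the name */

    int main(void)
    {
        unsigned int clock = DEFAULT_SPEED_MAX_DTR;

        if (clock == DEFAULT_SPEED_MAX_DTR)
            clock = (DEFAULT_SPEED_MAX_DTR * 19) / 25;
        printf("%u\n", clock);  /* 19000000 */
        return 0;
    }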
8125 +diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
8126 +index 8b49fd56cf964..29e8a546dcd60 100644
8127 +--- a/drivers/mtd/nand/raw/intel-nand-controller.c
8128 ++++ b/drivers/mtd/nand/raw/intel-nand-controller.c
8129 +@@ -631,19 +631,26 @@ static int ebu_nand_probe(struct platform_device *pdev)
8130 + ebu_host->clk_rate = clk_get_rate(ebu_host->clk);
8131 +
8132 + ebu_host->dma_tx = dma_request_chan(dev, "tx");
8133 +- if (IS_ERR(ebu_host->dma_tx))
8134 +- return dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
8135 +- "failed to request DMA tx chan!.\n");
8136 ++ if (IS_ERR(ebu_host->dma_tx)) {
8137 ++ ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
8138 ++ "failed to request DMA tx chan!.\n");
8139 ++ goto err_disable_unprepare_clk;
8140 ++ }
8141 +
8142 + ebu_host->dma_rx = dma_request_chan(dev, "rx");
8143 +- if (IS_ERR(ebu_host->dma_rx))
8144 +- return dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx),
8145 +- "failed to request DMA rx chan!.\n");
8146 ++ if (IS_ERR(ebu_host->dma_rx)) {
8147 ++ ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx),
8148 ++ "failed to request DMA rx chan!.\n");
8149 ++ ebu_host->dma_rx = NULL;
8150 ++ goto err_cleanup_dma;
8151 ++ }
8152 +
8153 + resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
8154 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
8155 +- if (!res)
8156 +- return -EINVAL;
8157 ++ if (!res) {
8158 ++ ret = -EINVAL;
8159 ++ goto err_cleanup_dma;
8160 ++ }
8161 + ebu_host->cs[cs].addr_sel = res->start;
8162 + writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN,
8163 + ebu_host->ebu + EBU_ADDR_SEL(cs));
8164 +@@ -653,7 +660,8 @@ static int ebu_nand_probe(struct platform_device *pdev)
8165 + mtd = nand_to_mtd(&ebu_host->chip);
8166 + if (!mtd->name) {
8167 + dev_err(ebu_host->dev, "NAND label property is mandatory\n");
8168 +- return -EINVAL;
8169 ++ ret = -EINVAL;
8170 ++ goto err_cleanup_dma;
8171 + }
8172 +
8173 + mtd->dev.parent = dev;
8174 +@@ -681,6 +689,7 @@ err_clean_nand:
8175 + nand_cleanup(&ebu_host->chip);
8176 + err_cleanup_dma:
8177 + ebu_dma_cleanup(ebu_host);
8178 ++err_disable_unprepare_clk:
8179 + clk_disable_unprepare(ebu_host->clk);
8180 +
8181 + return ret;
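The probe fix converts early returns into jumps to an ordered unwind ladder, so the clock prepared earlier is always released and a half-acquired DMA channel is not leaked. A standalone sketch of the ladder, using malloc/free as stand-ins for the clock and DMA resources:

    #include <stdlib.h>

    static int probe(void)
    {
        void *clk, *dma, *res;
        int ret = -1;

        clk = malloc(1);        /* stands in for clk_prepare_enable() */
        if (!clk)
            return ret;
        dma = malloc(1);        /* stands in for dma_request_chan() */
        if (!dma)
            goto err_disable_clk;
        res = malloc(1);        /* stands in for the addr_sel resource */
        if (!res)
            goto err_free_dma;

        free(res);
        free(dma);
        free(clk);
        return 0;

    err_free_dma:
        free(dma);
    err_disable_clk:
        free(clk);
        return ret;
    }

    int main(void)
    {
        return probe();
    }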
8182 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
8183 +index 31730efa75382..8aef6005bfee1 100644
8184 +--- a/drivers/net/bonding/bond_main.c
8185 ++++ b/drivers/net/bonding/bond_main.c
8186 +@@ -2252,7 +2252,6 @@ static int __bond_release_one(struct net_device *bond_dev,
8187 + /* recompute stats just before removing the slave */
8188 + bond_get_stats(bond->dev, &bond->bond_stats);
8189 +
8190 +- bond_upper_dev_unlink(bond, slave);
8191 + /* unregister rx_handler early so bond_handle_frame wouldn't be called
8192 + * for this slave anymore.
8193 + */
8194 +@@ -2261,6 +2260,8 @@ static int __bond_release_one(struct net_device *bond_dev,
8195 + if (BOND_MODE(bond) == BOND_MODE_8023AD)
8196 + bond_3ad_unbind_slave(slave);
8197 +
8198 ++ bond_upper_dev_unlink(bond, slave);
8199 ++
8200 + if (bond_mode_can_use_xmit_hash(bond))
8201 + bond_update_slave_arr(bond, slave);
8202 +
8203 +diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
8204 +index e78026ef6d8cc..64d6dfa831220 100644
8205 +--- a/drivers/net/dsa/lantiq_gswip.c
8206 ++++ b/drivers/net/dsa/lantiq_gswip.c
8207 +@@ -843,7 +843,8 @@ static int gswip_setup(struct dsa_switch *ds)
8208 +
8209 + gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
8210 + GSWIP_MAC_CTRL_2p(cpu_port));
8211 +- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
8212 ++ gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
8213 ++ GSWIP_MAC_FLEN);
8214 + gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
8215 + GSWIP_BM_QUEUE_GCTRL);
8216 +
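The switch's maximum frame length register must also cover the 4-byte FCS that trails the payload; with the usual if_vlan.h value of 1518 for VLAN_ETH_FRAME_LEN and the existing 8-byte allowance for the switch's special tag, the programmed limit becomes 1530. Standalone arithmetic check:

    #include <stdio.h>

    #define VLAN_ETH_FRAME_LEN 1518 /* value from include/linux/if_vlan.h */
    #define ETH_FCS_LEN        4

    int main(void)
    {
        printf("%d\n", VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN);   /* 1530 */
        return 0;
    }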
8217 +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
8218 +index 98cc0133c3437..5ad5419e8be36 100644
8219 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
8220 ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
8221 +@@ -3231,12 +3231,6 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
8222 + &ethsw->fq[i].napi, dpaa2_switch_poll,
8223 + NAPI_POLL_WEIGHT);
8224 +
8225 +- err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
8226 +- if (err) {
8227 +- dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
8228 +- goto err_free_netdev;
8229 +- }
8230 +-
8231 + /* Setup IRQs */
8232 + err = dpaa2_switch_setup_irqs(sw_dev);
8233 + if (err)
8234 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
8235 +index c0a478ae95834..0dbed35645eda 100644
8236 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
8237 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
8238 +@@ -10,7 +10,14 @@
8239 +
8240 + static u16 hclge_errno_to_resp(int errno)
8241 + {
8242 +- return abs(errno);
8243 ++ int resp = abs(errno);
8244 ++
8245 ++ /* The status for pf to vf msg cmd is u16, constrained by HW.
8246 ++ * We need to keep the same type as it.
8247 ++ * The input errno is a standard error code, so it is safe to
8248 ++ * use a u16 to store the abs(errno).
8249 ++ */
8250 ++ return (u16)resp;
8251 + }
8252 +
8253 + /* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
8254 +diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
8255 +index 90793b36126e6..68c80f04113c8 100644
8256 +--- a/drivers/net/ethernet/intel/iavf/iavf.h
8257 ++++ b/drivers/net/ethernet/intel/iavf/iavf.h
8258 +@@ -186,12 +186,6 @@ enum iavf_state_t {
8259 + __IAVF_RUNNING, /* opened, working */
8260 + };
8261 +
8262 +-enum iavf_critical_section_t {
8263 +- __IAVF_IN_CRITICAL_TASK, /* cannot be interrupted */
8264 +- __IAVF_IN_CLIENT_TASK,
8265 +- __IAVF_IN_REMOVE_TASK, /* device being removed */
8266 +-};
8267 +-
8268 + #define IAVF_CLOUD_FIELD_OMAC 0x01
8269 + #define IAVF_CLOUD_FIELD_IMAC 0x02
8270 + #define IAVF_CLOUD_FIELD_IVLAN 0x04
8271 +@@ -236,6 +230,9 @@ struct iavf_adapter {
8272 + struct iavf_q_vector *q_vectors;
8273 + struct list_head vlan_filter_list;
8274 + struct list_head mac_filter_list;
8275 ++ struct mutex crit_lock;
8276 ++ struct mutex client_lock;
8277 ++ struct mutex remove_lock;
8278 + /* Lock to protect accesses to MAC and VLAN lists */
8279 + spinlock_t mac_vlan_list_lock;
8280 + char misc_vector_name[IFNAMSIZ + 9];
8281 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
8282 +index af43fbd8cb75e..edbeb27213f83 100644
8283 +--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
8284 ++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
8285 +@@ -1352,8 +1352,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
8286 + if (!fltr)
8287 + return -ENOMEM;
8288 +
8289 +- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
8290 +- &adapter->crit_section)) {
8291 ++ while (!mutex_trylock(&adapter->crit_lock)) {
8292 + if (--count == 0) {
8293 + kfree(fltr);
8294 + return -EINVAL;
8295 +@@ -1378,7 +1377,7 @@ ret:
8296 + if (err && fltr)
8297 + kfree(fltr);
8298 +
8299 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8300 ++ mutex_unlock(&adapter->crit_lock);
8301 + return err;
8302 + }
8303 +
8304 +@@ -1563,8 +1562,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
8305 + return -EINVAL;
8306 + }
8307 +
8308 +- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
8309 +- &adapter->crit_section)) {
8310 ++ while (!mutex_trylock(&adapter->crit_lock)) {
8311 + if (--count == 0) {
8312 + kfree(rss_new);
8313 + return -EINVAL;
8314 +@@ -1600,7 +1598,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
8315 + if (!err)
8316 + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
8317 +
8318 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8319 ++ mutex_unlock(&adapter->crit_lock);
8320 +
8321 + if (!rss_new_add)
8322 + kfree(rss_new);
8323 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
8324 +index 606a01ce40739..23762a7ef740b 100644
8325 +--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
8326 ++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
8327 +@@ -131,6 +131,27 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
8328 + return 0;
8329 + }
8330 +
8331 ++/**
8332 ++ * iavf_lock_timeout - try to lock mutex but give up after timeout
8333 ++ * @lock: mutex that should be locked
8334 ++ * @msecs: timeout in msecs
8335 ++ *
8336 ++ * Returns 0 on success, negative on failure
8337 ++ **/
8338 ++static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
8339 ++{
8340 ++ unsigned int wait, delay = 10;
8341 ++
8342 ++ for (wait = 0; wait < msecs; wait += delay) {
8343 ++ if (mutex_trylock(lock))
8344 ++ return 0;
8345 ++
8346 ++ msleep(delay);
8347 ++ }
8348 ++
8349 ++ return -1;
8350 ++}
8351 ++
8352 + /**
8353 + * iavf_schedule_reset - Set the flags and schedule a reset event
8354 + * @adapter: board private structure
8355 +@@ -1916,7 +1937,7 @@ static void iavf_watchdog_task(struct work_struct *work)
8356 + struct iavf_hw *hw = &adapter->hw;
8357 + u32 reg_val;
8358 +
8359 +- if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
8360 ++ if (!mutex_trylock(&adapter->crit_lock))
8361 + goto restart_watchdog;
8362 +
8363 + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
8364 +@@ -1934,8 +1955,7 @@ static void iavf_watchdog_task(struct work_struct *work)
8365 + adapter->state = __IAVF_STARTUP;
8366 + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
8367 + queue_delayed_work(iavf_wq, &adapter->init_task, 10);
8368 +- clear_bit(__IAVF_IN_CRITICAL_TASK,
8369 +- &adapter->crit_section);
8370 ++ mutex_unlock(&adapter->crit_lock);
8371 + /* Don't reschedule the watchdog, since we've restarted
8372 + * the init task. When init_task contacts the PF and
8373 + * gets everything set up again, it'll restart the
8374 +@@ -1945,14 +1965,13 @@ static void iavf_watchdog_task(struct work_struct *work)
8375 + }
8376 + adapter->aq_required = 0;
8377 + adapter->current_op = VIRTCHNL_OP_UNKNOWN;
8378 +- clear_bit(__IAVF_IN_CRITICAL_TASK,
8379 +- &adapter->crit_section);
8380 ++ mutex_unlock(&adapter->crit_lock);
8381 + queue_delayed_work(iavf_wq,
8382 + &adapter->watchdog_task,
8383 + msecs_to_jiffies(10));
8384 + goto watchdog_done;
8385 + case __IAVF_RESETTING:
8386 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8387 ++ mutex_unlock(&adapter->crit_lock);
8388 + queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
8389 + return;
8390 + case __IAVF_DOWN:
8391 +@@ -1975,7 +1994,7 @@ static void iavf_watchdog_task(struct work_struct *work)
8392 + }
8393 + break;
8394 + case __IAVF_REMOVE:
8395 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8396 ++ mutex_unlock(&adapter->crit_lock);
8397 + return;
8398 + default:
8399 + goto restart_watchdog;
8400 +@@ -1984,7 +2003,6 @@ static void iavf_watchdog_task(struct work_struct *work)
8401 + /* check for hw reset */
8402 + reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
8403 + if (!reg_val) {
8404 +- adapter->state = __IAVF_RESETTING;
8405 + adapter->flags |= IAVF_FLAG_RESET_PENDING;
8406 + adapter->aq_required = 0;
8407 + adapter->current_op = VIRTCHNL_OP_UNKNOWN;
8408 +@@ -1998,7 +2016,7 @@ watchdog_done:
8409 + if (adapter->state == __IAVF_RUNNING ||
8410 + adapter->state == __IAVF_COMM_FAILED)
8411 + iavf_detect_recover_hung(&adapter->vsi);
8412 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8413 ++ mutex_unlock(&adapter->crit_lock);
8414 + restart_watchdog:
8415 + if (adapter->aq_required)
8416 + queue_delayed_work(iavf_wq, &adapter->watchdog_task,
8417 +@@ -2062,7 +2080,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
8418 + memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
8419 + iavf_shutdown_adminq(&adapter->hw);
8420 + adapter->netdev->flags &= ~IFF_UP;
8421 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8422 ++ mutex_unlock(&adapter->crit_lock);
8423 + adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
8424 + adapter->state = __IAVF_DOWN;
8425 + wake_up(&adapter->down_waitqueue);
8426 +@@ -2095,11 +2113,14 @@ static void iavf_reset_task(struct work_struct *work)
8427 + /* When device is being removed it doesn't make sense to run the reset
8428 + * task, just return in such a case.
8429 + */
8430 +- if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
8431 ++ if (mutex_is_locked(&adapter->remove_lock))
8432 + return;
8433 +
8434 +- while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
8435 +- &adapter->crit_section))
8436 ++ if (iavf_lock_timeout(&adapter->crit_lock, 200)) {
8437 ++ schedule_work(&adapter->reset_task);
8438 ++ return;
8439 ++ }
8440 ++ while (!mutex_trylock(&adapter->client_lock))
8441 + usleep_range(500, 1000);
8442 + if (CLIENT_ENABLED(adapter)) {
8443 + adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
8444 +@@ -2151,7 +2172,7 @@ static void iavf_reset_task(struct work_struct *work)
8445 + dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
8446 + reg_val);
8447 + iavf_disable_vf(adapter);
8448 +- clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
8449 ++ mutex_unlock(&adapter->client_lock);
8450 + return; /* Do not attempt to reinit. It's dead, Jim. */
8451 + }
8452 +
8453 +@@ -2278,13 +2299,13 @@ continue_reset:
8454 + adapter->state = __IAVF_DOWN;
8455 + wake_up(&adapter->down_waitqueue);
8456 + }
8457 +- clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
8458 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8459 ++ mutex_unlock(&adapter->client_lock);
8460 ++ mutex_unlock(&adapter->crit_lock);
8461 +
8462 + return;
8463 + reset_err:
8464 +- clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
8465 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8466 ++ mutex_unlock(&adapter->client_lock);
8467 ++ mutex_unlock(&adapter->crit_lock);
8468 + dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
8469 + iavf_close(netdev);
8470 + }
8471 +@@ -2312,6 +2333,8 @@ static void iavf_adminq_task(struct work_struct *work)
8472 + if (!event.msg_buf)
8473 + goto out;
8474 +
8475 ++ if (iavf_lock_timeout(&adapter->crit_lock, 200))
8476 ++ goto freedom;
8477 + do {
8478 + ret = iavf_clean_arq_element(hw, &event, &pending);
8479 + v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
8480 +@@ -2325,6 +2348,7 @@ static void iavf_adminq_task(struct work_struct *work)
8481 + if (pending != 0)
8482 + memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
8483 + } while (pending);
8484 ++ mutex_unlock(&adapter->crit_lock);
8485 +
8486 + if ((adapter->flags &
8487 + (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
8488 +@@ -2391,7 +2415,7 @@ static void iavf_client_task(struct work_struct *work)
8489 + * later.
8490 + */
8491 +
8492 +- if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
8493 ++ if (!mutex_trylock(&adapter->client_lock))
8494 + return;
8495 +
8496 + if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
8497 +@@ -2414,7 +2438,7 @@ static void iavf_client_task(struct work_struct *work)
8498 + adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
8499 + }
8500 + out:
8501 +- clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
8502 ++ mutex_unlock(&adapter->client_lock);
8503 + }
8504 +
8505 + /**
8506 +@@ -3017,8 +3041,7 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
8507 + if (!filter)
8508 + return -ENOMEM;
8509 +
8510 +- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
8511 +- &adapter->crit_section)) {
8512 ++ while (!mutex_trylock(&adapter->crit_lock)) {
8513 + if (--count == 0)
8514 + goto err;
8515 + udelay(1);
8516 +@@ -3049,7 +3072,7 @@ err:
8517 + if (err)
8518 + kfree(filter);
8519 +
8520 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8521 ++ mutex_unlock(&adapter->crit_lock);
8522 + return err;
8523 + }
8524 +
8525 +@@ -3196,8 +3219,7 @@ static int iavf_open(struct net_device *netdev)
8526 + return -EIO;
8527 + }
8528 +
8529 +- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
8530 +- &adapter->crit_section))
8531 ++ while (!mutex_trylock(&adapter->crit_lock))
8532 + usleep_range(500, 1000);
8533 +
8534 + if (adapter->state != __IAVF_DOWN) {
8535 +@@ -3232,7 +3254,7 @@ static int iavf_open(struct net_device *netdev)
8536 +
8537 + iavf_irq_enable(adapter, true);
8538 +
8539 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8540 ++ mutex_unlock(&adapter->crit_lock);
8541 +
8542 + return 0;
8543 +
8544 +@@ -3244,7 +3266,7 @@ err_setup_rx:
8545 + err_setup_tx:
8546 + iavf_free_all_tx_resources(adapter);
8547 + err_unlock:
8548 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8549 ++ mutex_unlock(&adapter->crit_lock);
8550 +
8551 + return err;
8552 + }
8553 +@@ -3268,8 +3290,7 @@ static int iavf_close(struct net_device *netdev)
8554 + if (adapter->state <= __IAVF_DOWN_PENDING)
8555 + return 0;
8556 +
8557 +- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
8558 +- &adapter->crit_section))
8559 ++ while (!mutex_trylock(&adapter->crit_lock))
8560 + usleep_range(500, 1000);
8561 +
8562 + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
8563 +@@ -3280,7 +3301,7 @@ static int iavf_close(struct net_device *netdev)
8564 + adapter->state = __IAVF_DOWN_PENDING;
8565 + iavf_free_traffic_irqs(adapter);
8566 +
8567 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8568 ++ mutex_unlock(&adapter->crit_lock);
8569 +
8570 + /* We explicitly don't free resources here because the hardware is
8571 + * still active and can DMA into memory. Resources are cleared in
8572 +@@ -3629,6 +3650,10 @@ static void iavf_init_task(struct work_struct *work)
8573 + init_task.work);
8574 + struct iavf_hw *hw = &adapter->hw;
8575 +
8576 ++ if (iavf_lock_timeout(&adapter->crit_lock, 5000)) {
8577 ++ dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
8578 ++ return;
8579 ++ }
8580 + switch (adapter->state) {
8581 + case __IAVF_STARTUP:
8582 + if (iavf_startup(adapter) < 0)
8583 +@@ -3641,14 +3666,14 @@ static void iavf_init_task(struct work_struct *work)
8584 + case __IAVF_INIT_GET_RESOURCES:
8585 + if (iavf_init_get_resources(adapter) < 0)
8586 + goto init_failed;
8587 +- return;
8588 ++ goto out;
8589 + default:
8590 + goto init_failed;
8591 + }
8592 +
8593 + queue_delayed_work(iavf_wq, &adapter->init_task,
8594 + msecs_to_jiffies(30));
8595 +- return;
8596 ++ goto out;
8597 + init_failed:
8598 + if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
8599 + dev_err(&adapter->pdev->dev,
8600 +@@ -3657,9 +3682,11 @@ init_failed:
8601 + iavf_shutdown_adminq(hw);
8602 + adapter->state = __IAVF_STARTUP;
8603 + queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
8604 +- return;
8605 ++ goto out;
8606 + }
8607 + queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
8608 ++out:
8609 ++ mutex_unlock(&adapter->crit_lock);
8610 + }
8611 +
8612 + /**
8613 +@@ -3676,9 +3703,12 @@ static void iavf_shutdown(struct pci_dev *pdev)
8614 + if (netif_running(netdev))
8615 + iavf_close(netdev);
8616 +
8617 ++ if (iavf_lock_timeout(&adapter->crit_lock, 5000))
8618 ++ dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
8619 + /* Prevent the watchdog from running. */
8620 + adapter->state = __IAVF_REMOVE;
8621 + adapter->aq_required = 0;
8622 ++ mutex_unlock(&adapter->crit_lock);
8623 +
8624 + #ifdef CONFIG_PM
8625 + pci_save_state(pdev);
8626 +@@ -3772,6 +3802,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8627 + /* set up the locks for the AQ, do this only once in probe
8628 + * and destroy them only once in remove
8629 + */
8630 ++ mutex_init(&adapter->crit_lock);
8631 ++ mutex_init(&adapter->client_lock);
8632 ++ mutex_init(&adapter->remove_lock);
8633 + mutex_init(&hw->aq.asq_mutex);
8634 + mutex_init(&hw->aq.arq_mutex);
8635 +
8636 +@@ -3823,8 +3856,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
8637 +
8638 + netif_device_detach(netdev);
8639 +
8640 +- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
8641 +- &adapter->crit_section))
8642 ++ while (!mutex_trylock(&adapter->crit_lock))
8643 + usleep_range(500, 1000);
8644 +
8645 + if (netif_running(netdev)) {
8646 +@@ -3835,7 +3867,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
8647 + iavf_free_misc_irq(adapter);
8648 + iavf_reset_interrupt_capability(adapter);
8649 +
8650 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
8651 ++ mutex_unlock(&adapter->crit_lock);
8652 +
8653 + return 0;
8654 + }
8655 +@@ -3897,7 +3929,7 @@ static void iavf_remove(struct pci_dev *pdev)
8656 + struct iavf_hw *hw = &adapter->hw;
8657 + int err;
8658 + /* Indicate we are in remove and not to run reset_task */
8659 +- set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
8660 ++ mutex_lock(&adapter->remove_lock);
8661 + cancel_delayed_work_sync(&adapter->init_task);
8662 + cancel_work_sync(&adapter->reset_task);
8663 + cancel_delayed_work_sync(&adapter->client_task);
8664 +@@ -3912,10 +3944,6 @@ static void iavf_remove(struct pci_dev *pdev)
8665 + err);
8666 + }
8667 +
8668 +- /* Shut down all the garbage mashers on the detention level */
8669 +- adapter->state = __IAVF_REMOVE;
8670 +- adapter->aq_required = 0;
8671 +- adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
8672 + iavf_request_reset(adapter);
8673 + msleep(50);
8674 + /* If the FW isn't responding, kick it once, but only once. */
8675 +@@ -3923,6 +3951,13 @@ static void iavf_remove(struct pci_dev *pdev)
8676 + iavf_request_reset(adapter);
8677 + msleep(50);
8678 + }
8679 ++ if (iavf_lock_timeout(&adapter->crit_lock, 5000))
8680 ++ dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
8681 ++
8682 ++ /* Shut down all the garbage mashers on the detention level */
8683 ++ adapter->state = __IAVF_REMOVE;
8684 ++ adapter->aq_required = 0;
8685 ++ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
8686 + iavf_free_all_tx_resources(adapter);
8687 + iavf_free_all_rx_resources(adapter);
8688 + iavf_misc_irq_disable(adapter);
8689 +@@ -3942,6 +3977,11 @@ static void iavf_remove(struct pci_dev *pdev)
8690 + /* destroy the locks only once, here */
8691 + mutex_destroy(&hw->aq.arq_mutex);
8692 + mutex_destroy(&hw->aq.asq_mutex);
8693 ++ mutex_destroy(&adapter->client_lock);
8694 ++ mutex_unlock(&adapter->crit_lock);
8695 ++ mutex_destroy(&adapter->crit_lock);
8696 ++ mutex_unlock(&adapter->remove_lock);
8697 ++ mutex_destroy(&adapter->remove_lock);
8698 +
8699 + iounmap(hw->hw_addr);
8700 + pci_release_regions(pdev);
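The iavf conversion replaces the __IAVF_IN_CRITICAL_TASK, __IAVF_IN_CLIENT_TASK and __IAVF_IN_REMOVE_TASK bit flags with real mutexes, and iavf_lock_timeout() gives callers a bounded trylock loop instead of spinning forever. A userspace analogue of that helper, sketched with pthreads:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* retry trylock with a short sleep until the deadline expires */
    static int lock_timeout(pthread_mutex_t *lock, unsigned int msecs)
    {
        unsigned int wait, delay = 10;

        for (wait = 0; wait < msecs; wait += delay) {
            if (pthread_mutex_trylock(lock) == 0)
                return 0;
            usleep(delay * 1000);
        }
        return -1;
    }

    int main(void)
    {
        pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

        if (lock_timeout(&m, 200) == 0) {
            puts("acquired");
            pthread_mutex_unlock(&m);
        }
        return 0;
    }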
8701 +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
8702 +index ed2d66bc2d6c3..f62982c4d933d 100644
8703 +--- a/drivers/net/ethernet/intel/igc/igc_main.c
8704 ++++ b/drivers/net/ethernet/intel/igc/igc_main.c
8705 +@@ -4817,6 +4817,7 @@ static irqreturn_t igc_msix_ring(int irq, void *data)
8706 + */
8707 + static int igc_request_msix(struct igc_adapter *adapter)
8708 + {
8709 ++ unsigned int num_q_vectors = adapter->num_q_vectors;
8710 + int i = 0, err = 0, vector = 0, free_vector = 0;
8711 + struct net_device *netdev = adapter->netdev;
8712 +
8713 +@@ -4825,7 +4826,13 @@ static int igc_request_msix(struct igc_adapter *adapter)
8714 + if (err)
8715 + goto err_out;
8716 +
8717 +- for (i = 0; i < adapter->num_q_vectors; i++) {
8718 ++ if (num_q_vectors > MAX_Q_VECTORS) {
8719 ++ num_q_vectors = MAX_Q_VECTORS;
8720 ++ dev_warn(&adapter->pdev->dev,
8721 ++ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
8722 ++ adapter->num_q_vectors, MAX_Q_VECTORS);
8723 ++ }
8724 ++ for (i = 0; i < num_q_vectors; i++) {
8725 + struct igc_q_vector *q_vector = adapter->q_vector[i];
8726 +
8727 + vector++;
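igc_request_msix() now clamps the loop bound so it can never index q_vector[] past MAX_Q_VECTORS, warning once when the configured count is higher. The clamp is the standard guard-then-iterate shape:

    #include <stdio.h>

    #define MAX_Q_VECTORS 8 /* illustrative limit, not necessarily igc's */

    int main(void)
    {
        unsigned int num_q_vectors = 12;    /* pretend an oversized config */

        if (num_q_vectors > MAX_Q_VECTORS) {
            fprintf(stderr, "clamping %u queue vectors to %d\n",
                    num_q_vectors, MAX_Q_VECTORS);
            num_q_vectors = MAX_Q_VECTORS;
        }
        for (unsigned int i = 0; i < num_q_vectors; i++)
            ;   /* one IRQ request per vector, never past the array */
        printf("%u vectors requested\n", num_q_vectors);
        return 0;
    }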
8728 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
8729 +index f5ec39de026a5..05f4334700e90 100644
8730 +--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
8731 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
8732 +@@ -717,6 +717,7 @@ struct nix_lf_alloc_rsp {
8733 + u8 cgx_links; /* No. of CGX links present in HW */
8734 + u8 lbk_links; /* No. of LBK links present in HW */
8735 + u8 sdp_links; /* No. of SDP links present in HW */
8736 ++ u8 tx_link; /* Transmit channel link number */
8737 + };
8738 +
8739 + struct nix_lf_free_req {
8740 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
8741 +index c32195073e8a5..87af164951eae 100644
8742 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
8743 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
8744 +@@ -249,9 +249,11 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
8745 + return true;
8746 + }
8747 +
8748 +-static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
8749 ++static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
8750 ++ struct nix_lf_alloc_rsp *rsp)
8751 + {
8752 + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
8753 ++ struct rvu_hwinfo *hw = rvu->hw;
8754 + struct mac_ops *mac_ops;
8755 + int pkind, pf, vf, lbkid;
8756 + u8 cgx_id, lmac_id;
8757 +@@ -276,6 +278,8 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
8758 + pfvf->tx_chan_base = pfvf->rx_chan_base;
8759 + pfvf->rx_chan_cnt = 1;
8760 + pfvf->tx_chan_cnt = 1;
8761 ++ rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
8762 ++
8763 + cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
8764 + rvu_npc_set_pkind(rvu, pkind, pfvf);
8765 +
8766 +@@ -309,6 +313,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
8767 + rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
8768 + pfvf->rx_chan_cnt = 1;
8769 + pfvf->tx_chan_cnt = 1;
8770 ++ rsp->tx_link = hw->cgx_links + lbkid;
8771 + rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
8772 + rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
8773 + pfvf->rx_chan_base,
8774 +@@ -1258,7 +1263,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
8775 + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
8776 +
8777 + intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
8778 +- err = nix_interface_init(rvu, pcifunc, intf, nixlf);
8779 ++ err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp);
8780 + if (err)
8781 + goto free_mem;
8782 +
8783 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
8784 +index 94dfd64f526fa..124465b3987c4 100644
8785 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
8786 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
8787 +@@ -586,25 +586,6 @@ void otx2_get_mac_from_af(struct net_device *netdev)
8788 + }
8789 + EXPORT_SYMBOL(otx2_get_mac_from_af);
8790 +
8791 +-static int otx2_get_link(struct otx2_nic *pfvf)
8792 +-{
8793 +- int link = 0;
8794 +- u16 map;
8795 +-
8796 +- /* cgx lmac link */
8797 +- if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) {
8798 +- map = pfvf->hw.tx_chan_base & 0x7FF;
8799 +- link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
8800 +- }
8801 +- /* LBK channel */
8802 +- if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE) {
8803 +- map = pfvf->hw.tx_chan_base & 0x7FF;
8804 +- link = pfvf->hw.cgx_links | ((map >> 8) & 0xF);
8805 +- }
8806 +-
8807 +- return link;
8808 +-}
8809 +-
8810 + int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
8811 + {
8812 + struct otx2_hw *hw = &pfvf->hw;
8813 +@@ -660,8 +641,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
8814 + req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;
8815 +
8816 + req->num_regs++;
8817 +- req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
8818 +- otx2_get_link(pfvf));
8819 ++ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
8820 + /* Enable this queue and backpressure */
8821 + req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
8822 +
8823 +@@ -1204,7 +1184,22 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
8824 + /* Enable backpressure for RQ aura */
8825 + if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
8826 + aq->aura.bp_ena = 0;
8827 ++ /* If NIX1 LF is attached then specify NIX1_RX.
8828 ++ *
8829 ++ * Below NPA_AURA_S[BP_ENA] is set according to the
8830 ++ * NPA_BPINTF_E enumeration given as:
8831 ++ * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
8832 ++ * NIX0_RX is 0x0 + 0*0x1 = 0
8833 ++ * NIX1_RX is 0x0 + 1*0x1 = 1
8834 ++ * But in HRM it is given that
8835 ++ * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
8836 ++ * NIX-RX based on [BP] level. One bit per NIX-RX; index
8837 ++ * enumerated by NPA_BPINTF_E."
8838 ++ */
8839 ++ if (pfvf->nix_blkaddr == BLKADDR_NIX1)
8840 ++ aq->aura.bp_ena = 1;
8841 + aq->aura.nix0_bpid = pfvf->bpid[0];
8842 ++
8843 + /* Set backpressure level for RQ's Aura */
8844 + aq->aura.bp = RQ_BP_LVL_AURA;
8845 + }
8846 +@@ -1591,6 +1586,7 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
8847 + pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
8848 + pfvf->hw.cgx_links = rsp->cgx_links;
8849 + pfvf->hw.lbk_links = rsp->lbk_links;
8850 ++ pfvf->hw.tx_link = rsp->tx_link;
8851 + }
8852 + EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);
8853 +
8854 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
8855 +index 8c602d27108a7..11686c5cf45bd 100644
8856 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
8857 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
8858 +@@ -215,6 +215,7 @@ struct otx2_hw {
8859 + u64 cgx_fec_uncorr_blks;
8860 + u8 cgx_links; /* No. of CGX links present in HW */
8861 + u8 lbk_links; /* No. of LBK links present in HW */
8862 ++ u8 tx_link; /* Transmit channel link number */
8863 + #define HW_TSO 0
8864 + #define CN10K_MBOX 1
8865 + #define CN10K_LMTST 2
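
The two rsp->tx_link assignments above encode a simple numbering scheme: CGX links come first, ordered by CGX block and LMAC, and LBK links follow. A worked example, a sketch only, with per-board counts assumed purely for illustration:

    /* Assume hw->lmac_per_cgx == 4 and hw->cgx_links == 12 (3 CGX x 4 LMAC). */
    tx_link = cgx_id * hw->lmac_per_cgx + lmac_id; /* CGX2/LMAC1 -> 2*4+1 = 9  */
    tx_link = hw->cgx_links + lbkid;               /* LBK id 0   -> 12+0  = 12 */

The AF now returns this value in nix_lf_alloc_rsp, which is what lets otx2_common.c drop otx2_get_link() and its channel-number arithmetic.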
8866 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
8867 +index 9d79c5ec31e9f..db5dfff585c99 100644
8868 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
8869 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
8870 +@@ -877,7 +877,7 @@ static void cb_timeout_handler(struct work_struct *work)
8871 + ent->ret = -ETIMEDOUT;
8872 + mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
8873 + ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
8874 +- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
8875 ++ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
8876 +
8877 + out:
8878 + cmd_ent_put(ent); /* for the cmd_ent_get() taken when scheduling the delayed work */
8879 +@@ -994,7 +994,7 @@ static void cmd_work_handler(struct work_struct *work)
8880 + MLX5_SET(mbox_out, ent->out, status, status);
8881 + MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
8882 +
8883 +- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
8884 ++ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
8885 + return;
8886 + }
8887 +
8888 +@@ -1008,7 +1008,7 @@ static void cmd_work_handler(struct work_struct *work)
8889 + poll_timeout(ent);
8890 + /* make sure we read the descriptor after ownership is SW */
8891 + rmb();
8892 +- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
8893 ++ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
8894 + }
8895 + }
8896 +
8897 +@@ -1068,7 +1068,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
8898 + mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
8899 +
8900 + ent->ret = -ETIMEDOUT;
8901 +- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
8902 ++ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
8903 + }
8904 +
8905 + static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
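
The 1UL-to-1ULL changes above all guard the same pitfall: mlx5_cmd_comp_handler() takes a 64-bit vector of completed command slots, but on 32-bit kernels unsigned long is only 32 bits wide. A minimal sketch of the failure mode:

    /* Sketch only: ent->idx can exceed 31 on deep command queues. */
    u64 bad  = 1UL  << 40; /* shift exceeds a 32-bit unsigned long:
                            * undefined behavior, wrong completion mask */
    u64 good = 1ULL << 40; /* always 64-bit arithmetic, i.e. BIT_ULL(40) */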
8906 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
8907 +index 43356fad53deb..ffdfb5a94b14b 100644
8908 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
8909 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
8910 +@@ -846,9 +846,9 @@ again:
8911 + new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
8912 + ste_location, send_ste_list);
8913 + if (!new_htbl) {
8914 +- mlx5dr_htbl_put(cur_htbl);
8915 + mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
8916 + cur_htbl->chunk_size);
8917 ++ mlx5dr_htbl_put(cur_htbl);
8918 + } else {
8919 + cur_htbl = new_htbl;
8920 + }
8921 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
8922 +index 9df0e73d1c358..69b49deb66b22 100644
8923 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
8924 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
8925 +@@ -620,6 +620,7 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
8926 +
8927 + MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
8928 + MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
8929 ++ MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */
8930 +
8931 + MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
8932 + MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
8933 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
8934 +index 5dfa4799c34f2..ed2ade2a4f043 100644
8935 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
8936 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
8937 +@@ -1697,7 +1697,7 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
8938 + case NFP_NET_META_RESYNC_INFO:
8939 + if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
8940 + pkt_len))
8941 +- return NULL;
8942 ++ return false;
8943 + data += sizeof(struct nfp_net_tls_resync_req);
8944 + break;
8945 + default:
8946 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
8947 +index 28dd0ed85a824..f7dc8458cde86 100644
8948 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
8949 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
8950 +@@ -289,10 +289,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
8951 + val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
8952 + break;
8953 + default:
8954 +- dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
8955 +- phy_modes(gmac->phy_mode));
8956 +- err = -EINVAL;
8957 +- goto err_remove_config_dt;
8958 ++ goto err_unsupported_phy;
8959 + }
8960 + regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
8961 +
8962 +@@ -309,10 +306,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
8963 + NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
8964 + break;
8965 + default:
8966 +- dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
8967 +- phy_modes(gmac->phy_mode));
8968 +- err = -EINVAL;
8969 +- goto err_remove_config_dt;
8970 ++ goto err_unsupported_phy;
8971 + }
8972 + regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
8973 +
8974 +@@ -329,8 +323,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
8975 + NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
8976 + break;
8977 + default:
8978 +- /* We don't get here; the switch above will have errored out */
8979 +- unreachable();
8980 ++ goto err_unsupported_phy;
8981 + }
8982 + regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
8983 +
8984 +@@ -361,6 +354,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
8985 +
8986 + return 0;
8987 +
8988 ++err_unsupported_phy:
8989 ++ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
8990 ++ phy_modes(gmac->phy_mode));
8991 ++ err = -EINVAL;
8992 ++
8993 + err_remove_config_dt:
8994 + stmmac_remove_config_dt(pdev, plat_dat);
8995 +
8996 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
8997 +index fa90bcdf4e455..8a150cc462dcf 100644
8998 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
8999 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
9000 +@@ -5342,7 +5342,7 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
9001 + struct stmmac_channel *ch =
9002 + container_of(napi, struct stmmac_channel, rxtx_napi);
9003 + struct stmmac_priv *priv = ch->priv_data;
9004 +- int rx_done, tx_done;
9005 ++ int rx_done, tx_done, rxtx_done;
9006 + u32 chan = ch->index;
9007 +
9008 + priv->xstats.napi_poll++;
9009 +@@ -5352,14 +5352,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
9010 +
9011 + rx_done = stmmac_rx_zc(priv, budget, chan);
9012 +
9013 ++ rxtx_done = max(tx_done, rx_done);
9014 ++
9015 + /* If either TX or RX work is not complete, return budget
9016 + * and keep polling
9017 + */
9018 +- if (tx_done >= budget || rx_done >= budget)
9019 ++ if (rxtx_done >= budget)
9020 + return budget;
9021 +
9022 + /* all work done, exit the polling mode */
9023 +- if (napi_complete_done(napi, rx_done)) {
9024 ++ if (napi_complete_done(napi, rxtx_done)) {
9025 + unsigned long flags;
9026 +
9027 + spin_lock_irqsave(&ch->lock, flags);
9028 +@@ -5370,7 +5372,7 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
9029 + spin_unlock_irqrestore(&ch->lock, flags);
9030 + }
9031 +
9032 +- return min(rx_done, budget - 1);
9033 ++ return min(rxtx_done, budget - 1);
9034 + }
9035 +
9036 + /**
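
The rxtx_done fix above follows from the NAPI contract: a combined rx/tx poller must report one work figure, and napi_complete_done() must see the real amount of work done, not just the rx share. A condensed sketch of the rules (stmmac_enable_irq() is a made-up stand-in for the locked DMA-interrupt re-enable in the hunk):

    int work = max(tx_done, rx_done);  /* either side still busy counts */

    if (work >= budget)
            return budget;             /* stay in polled mode, IRQs off */

    if (napi_complete_done(napi, work))
            stmmac_enable_irq(priv, chan);  /* hypothetical helper */

    return min(work, budget - 1);      /* completing: never report budget */

Reporting only rx_done, as before, could tell the core that polling was finished while the tx side had in fact consumed the whole budget.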
9037 +diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
9038 +index 811815f8cd3bb..f974e70a82e8b 100644
9039 +--- a/drivers/net/ethernet/wiznet/w5100.c
9040 ++++ b/drivers/net/ethernet/wiznet/w5100.c
9041 +@@ -1047,6 +1047,8 @@ static int w5100_mmio_probe(struct platform_device *pdev)
9042 + mac_addr = data->mac_addr;
9043 +
9044 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
9045 ++ if (!mem)
9046 ++ return -EINVAL;
9047 + if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
9048 + ops = &w5100_mmio_indirect_ops;
9049 + else
9050 +diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
9051 +index af44ca41189e3..bda8677eae88d 100644
9052 +--- a/drivers/net/ipa/ipa_cmd.c
9053 ++++ b/drivers/net/ipa/ipa_cmd.c
9054 +@@ -159,35 +159,45 @@ static void ipa_cmd_validate_build(void)
9055 + BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
9056 + #undef TABLE_COUNT_MAX
9057 + #undef TABLE_SIZE
9058 +-}
9059 +
9060 +-#ifdef IPA_VALIDATE
9061 ++ /* Hashed and non-hashed fields are assumed to be the same size */
9062 ++ BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
9063 ++ field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
9064 ++ BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
9065 ++ field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));
9066 ++}
9067 +
9068 + /* Validate a memory region holding a table */
9069 +-bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
9070 +- bool route, bool ipv6, bool hashed)
9071 ++bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
9072 + {
9073 ++ u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
9074 ++ u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
9075 ++ const char *table = route ? "route" : "filter";
9076 + struct device *dev = &ipa->pdev->dev;
9077 +- u32 offset_max;
9078 +
9079 +- offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
9080 +- : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
9081 ++ /* Size must fit in the immediate command field that holds it */
9082 ++ if (mem->size > size_max) {
9083 ++ dev_err(dev, "%s table region size too large\n", table);
9084 ++ dev_err(dev, " (0x%04x > 0x%04x)\n",
9085 ++ mem->size, size_max);
9086 ++
9087 ++ return false;
9088 ++ }
9089 ++
9090 ++ /* Offset must fit in the immediate command field that holds it */
9091 + if (mem->offset > offset_max ||
9092 + ipa->mem_offset > offset_max - mem->offset) {
9093 +- dev_err(dev, "IPv%c %s%s table region offset too large\n",
9094 +- ipv6 ? '6' : '4', hashed ? "hashed " : "",
9095 +- route ? "route" : "filter");
9096 ++ dev_err(dev, "%s table region offset too large\n", table);
9097 + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
9098 + ipa->mem_offset, mem->offset, offset_max);
9099 +
9100 + return false;
9101 + }
9102 +
9103 ++ /* Entire memory range must fit within IPA-local memory */
9104 + if (mem->offset > ipa->mem_size ||
9105 + mem->size > ipa->mem_size - mem->offset) {
9106 +- dev_err(dev, "IPv%c %s%s table region out of range\n",
9107 +- ipv6 ? '6' : '4', hashed ? "hashed " : "",
9108 +- route ? "route" : "filter");
9109 ++ dev_err(dev, "%s table region out of range\n", table);
9110 + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
9111 + mem->offset, mem->size, ipa->mem_size);
9112 +
9113 +@@ -197,6 +207,8 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
9114 + return true;
9115 + }
9116 +
9117 ++#ifdef IPA_VALIDATE
9118 ++
9119 + /* Validate the memory region that holds headers */
9120 + static bool ipa_cmd_header_valid(struct ipa *ipa)
9121 + {
9122 +diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
9123 +index b99262281f41c..ea723419c826b 100644
9124 +--- a/drivers/net/ipa/ipa_cmd.h
9125 ++++ b/drivers/net/ipa/ipa_cmd.h
9126 +@@ -57,20 +57,18 @@ struct ipa_cmd_info {
9127 + enum dma_data_direction direction;
9128 + };
9129 +
9130 +-#ifdef IPA_VALIDATE
9131 +-
9132 + /**
9133 + * ipa_cmd_table_valid() - Validate a memory region holding a table
9134 + * @ipa: - IPA pointer
9135 + * @mem: - IPA memory region descriptor
9136 + * @route: - Whether the region holds a route or filter table
9137 +- * @ipv6: - Whether the table is for IPv6 or IPv4
9138 +- * @hashed: - Whether the table is hashed or non-hashed
9139 + *
9140 + * Return: true if region is valid, false otherwise
9141 + */
9142 + bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
9143 +- bool route, bool ipv6, bool hashed);
9144 ++ bool route);
9145 ++
9146 ++#ifdef IPA_VALIDATE
9147 +
9148 + /**
9149 + * ipa_cmd_data_valid() - Validate command-related configuration is valid
9150 +@@ -82,13 +80,6 @@ bool ipa_cmd_data_valid(struct ipa *ipa);
9151 +
9152 + #else /* !IPA_VALIDATE */
9153 +
9154 +-static inline bool ipa_cmd_table_valid(struct ipa *ipa,
9155 +- const struct ipa_mem *mem, bool route,
9156 +- bool ipv6, bool hashed)
9157 +-{
9158 +- return true;
9159 +-}
9160 +-
9161 + static inline bool ipa_cmd_data_valid(struct ipa *ipa)
9162 + {
9163 + return true;
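
Dropping the ipv6/hashed parameters from ipa_cmd_table_valid() is safe only because the hashed and non-hashed immediate-command fields are the same width, and the new BUILD_BUG_ONs in ipa_cmd.c pin that down at compile time. A sketch with illustrative mask values (the real GENMASK ranges are defined in ipa_cmd.c):

    /* field_max() of a mask is the largest value the field can hold, so
     * comparing two fields' field_max() fails the build if the layouts
     * ever diverge.
     */
    #define HASH_SIZE_FMASK   GENMASK(11, 0)   /* illustrative */
    #define NHASH_SIZE_FMASK  GENMASK(27, 16)  /* illustrative */

    BUILD_BUG_ON(field_max(HASH_SIZE_FMASK) !=
                 field_max(NHASH_SIZE_FMASK)); /* both 0xfff: passes */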
9164 +diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c
9165 +index 9353efbd504fb..598b410cd7ab4 100644
9166 +--- a/drivers/net/ipa/ipa_data-v4.11.c
9167 ++++ b/drivers/net/ipa/ipa_data-v4.11.c
9168 +@@ -368,18 +368,13 @@ static const struct ipa_mem_data ipa_mem_data = {
9169 + static const struct ipa_interconnect_data ipa_interconnect_data[] = {
9170 + {
9171 + .name = "memory",
9172 +- .peak_bandwidth = 465000, /* 465 MBps */
9173 +- .average_bandwidth = 80000, /* 80 MBps */
9174 +- },
9175 +- /* Average rate is unused for the next two interconnects */
9176 +- {
9177 +- .name = "imem",
9178 +- .peak_bandwidth = 68570, /* 68.57 MBps */
9179 +- .average_bandwidth = 80000, /* 80 MBps (unused?) */
9180 ++ .peak_bandwidth = 600000, /* 600 MBps */
9181 ++ .average_bandwidth = 150000, /* 150 MBps */
9182 + },
9183 ++ /* Average rate is unused for the next interconnect */
9184 + {
9185 + .name = "config",
9186 +- .peak_bandwidth = 30000, /* 30 MBps */
9187 ++ .peak_bandwidth = 74000, /* 74 MBps */
9188 + .average_bandwidth = 0, /* unused */
9189 + },
9190 + };
9191 +diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c
9192 +index 798d43e1eb133..4cce5dce92158 100644
9193 +--- a/drivers/net/ipa/ipa_data-v4.9.c
9194 ++++ b/drivers/net/ipa/ipa_data-v4.9.c
9195 +@@ -416,18 +416,13 @@ static const struct ipa_mem_data ipa_mem_data = {
9196 + /* Interconnect rates are in 1000 byte/second units */
9197 + static const struct ipa_interconnect_data ipa_interconnect_data[] = {
9198 + {
9199 +- .name = "ipa_to_llcc",
9200 ++ .name = "memory",
9201 + .peak_bandwidth = 600000, /* 600 MBps */
9202 + .average_bandwidth = 150000, /* 150 MBps */
9203 + },
9204 +- {
9205 +- .name = "llcc_to_ebi1",
9206 +- .peak_bandwidth = 1804000, /* 1.804 GBps */
9207 +- .average_bandwidth = 150000, /* 150 MBps */
9208 +- },
9209 + /* Average rate is unused for the next interconnect */
9210 + {
9211 +- .name = "appss_to_ipa",
9212 ++ .name = "config",
9213 + .peak_bandwidth = 74000, /* 74 MBps */
9214 + .average_bandwidth = 0, /* unused */
9215 + },
9216 +diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
9217 +index c617a9156f26d..c607ebec74567 100644
9218 +--- a/drivers/net/ipa/ipa_table.c
9219 ++++ b/drivers/net/ipa/ipa_table.c
9220 +@@ -120,8 +120,6 @@
9221 + */
9222 + #define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32))
9223 +
9224 +-#ifdef IPA_VALIDATE
9225 +-
9226 + /* Check things that can be validated at build time. */
9227 + static void ipa_table_validate_build(void)
9228 + {
9229 +@@ -161,7 +159,7 @@ ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
9230 + else
9231 + size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
9232 +
9233 +- if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
9234 ++ if (!ipa_cmd_table_valid(ipa, mem, route))
9235 + return false;
9236 +
9237 + /* mem->size >= size is sufficient, but we'll demand more */
9238 +@@ -169,7 +167,7 @@ ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
9239 + return true;
9240 +
9241 + /* Hashed table regions can be zero size if hashing is not supported */
9242 +- if (hashed && !mem->size)
9243 ++ if (ipa_table_hash_support(ipa) && !mem->size)
9244 + return true;
9245 +
9246 + dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n",
9247 +@@ -183,14 +181,22 @@ bool ipa_table_valid(struct ipa *ipa)
9248 + {
9249 + bool valid;
9250 +
9251 +- valid = ipa_table_valid_one(IPA_MEM_V4_FILTER, false);
9252 +- valid = valid && ipa_table_valid_one(IPA_MEM_V4_FILTER_HASHED, false);
9253 +- valid = valid && ipa_table_valid_one(IPA_MEM_V6_FILTER, false);
9254 +- valid = valid && ipa_table_valid_one(IPA_MEM_V6_FILTER_HASHED, false);
9255 +- valid = valid && ipa_table_valid_one(IPA_MEM_V4_ROUTE, true);
9256 +- valid = valid && ipa_table_valid_one(IPA_MEM_V4_ROUTE_HASHED, true);
9257 +- valid = valid && ipa_table_valid_one(IPA_MEM_V6_ROUTE, true);
9258 +- valid = valid && ipa_table_valid_one(IPA_MEM_V6_ROUTE_HASHED, true);
9259 ++ valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false);
9260 ++ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false);
9261 ++ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true);
9262 ++ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true);
9263 ++
9264 ++ if (!ipa_table_hash_support(ipa))
9265 ++ return valid;
9266 ++
9267 ++ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED,
9268 ++ false);
9269 ++ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED,
9270 ++ false);
9271 ++ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED,
9272 ++ true);
9273 ++ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED,
9274 ++ true);
9275 +
9276 + return valid;
9277 + }
9278 +@@ -217,14 +223,6 @@ bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
9279 + return true;
9280 + }
9281 +
9282 +-#else /* !IPA_VALIDATE */
9283 +-static void ipa_table_validate_build(void)
9284 +-
9285 +-{
9286 +-}
9287 +-
9288 +-#endif /* !IPA_VALIDATE */
9289 +-
9290 + /* Zero entry count means no table, so just return a 0 address */
9291 + static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
9292 + {
9293 +diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
9294 +index 1e2be9fce2f81..b6a9a0d79d68e 100644
9295 +--- a/drivers/net/ipa/ipa_table.h
9296 ++++ b/drivers/net/ipa/ipa_table.h
9297 +@@ -16,8 +16,6 @@ struct ipa;
9298 + /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
9299 + #define IPA_ROUTE_COUNT_MAX 15
9300 +
9301 +-#ifdef IPA_VALIDATE
9302 +-
9303 + /**
9304 + * ipa_table_valid() - Validate route and filter table memory regions
9305 + * @ipa: IPA pointer
9306 +@@ -35,20 +33,6 @@ bool ipa_table_valid(struct ipa *ipa);
9307 + */
9308 + bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask);
9309 +
9310 +-#else /* !IPA_VALIDATE */
9311 +-
9312 +-static inline bool ipa_table_valid(struct ipa *ipa)
9313 +-{
9314 +- return true;
9315 +-}
9316 +-
9317 +-static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask)
9318 +-{
9319 +- return true;
9320 +-}
9321 +-
9322 +-#endif /* !IPA_VALIDATE */
9323 +-
9324 + /**
9325 + * ipa_table_hash_support() - Return true if hashed tables are supported
9326 + * @ipa: IPA pointer
9327 +diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
9328 +index f7a2ec150e542..211b5476a6f51 100644
9329 +--- a/drivers/net/phy/dp83822.c
9330 ++++ b/drivers/net/phy/dp83822.c
9331 +@@ -326,11 +326,9 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
9332 +
9333 + static int dp8382x_disable_wol(struct phy_device *phydev)
9334 + {
9335 +- int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
9336 +- DP83822_WOL_SECURE_ON;
9337 +-
9338 +- return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
9339 +- MII_DP83822_WOL_CFG, value);
9340 ++ return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
9341 ++ DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
9342 ++ DP83822_WOL_SECURE_ON);
9343 + }
9344 +
9345 + static int dp83822_read_status(struct phy_device *phydev)
9346 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
9347 +index b4885a700296e..b0a4ca3559fd8 100644
9348 +--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
9349 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
9350 +@@ -3351,7 +3351,8 @@ found:
9351 + "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
9352 + cptr, code, reference, length, major, minor);
9353 + if ((!AR_SREV_9485(ah) && length >= 1024) ||
9354 +- (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
9355 ++ (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485) ||
9356 ++ (length > cptr)) {
9357 + ath_dbg(common, EEPROM, "Skipping bad header\n");
9358 + cptr -= COMP_HDR_LEN;
9359 + continue;
9360 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
9361 +index 2ca3b86714a9d..172081ffe4774 100644
9362 +--- a/drivers/net/wireless/ath/ath9k/hw.c
9363 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
9364 +@@ -1621,7 +1621,6 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
9365 + ath9k_hw_gpio_request_out(ah, i, NULL,
9366 + AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
9367 + ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
9368 +- ath9k_hw_gpio_free(ah, i);
9369 + }
9370 + }
9371 +
9372 +@@ -2728,14 +2727,17 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
9373 + static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
9374 + const char *label)
9375 + {
9376 ++ int err;
9377 ++
9378 + if (ah->caps.gpio_requested & BIT(gpio))
9379 + return;
9380 +
9381 +- /* may be requested by BSP, free anyway */
9382 +- gpio_free(gpio);
9383 +-
9384 +- if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label))
9385 ++ err = gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label);
9386 ++ if (err) {
9387 ++ ath_err(ath9k_hw_common(ah), "request GPIO%d failed:%d\n",
9388 ++ gpio, err);
9389 + return;
9390 ++ }
9391 +
9392 + ah->caps.gpio_requested |= BIT(gpio);
9393 + }
9394 +diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
9395 +index d202f2128df23..67f4db662402b 100644
9396 +--- a/drivers/net/wireless/ath/wcn36xx/main.c
9397 ++++ b/drivers/net/wireless/ath/wcn36xx/main.c
9398 +@@ -408,13 +408,14 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
9399 + wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
9400 + ch);
9401 +
9402 +- if (wcn->sw_scan_opchannel == ch) {
9403 ++ if (wcn->sw_scan_opchannel == ch && wcn->sw_scan_channel) {
9404 + /* If channel is the initial operating channel, we may
9405 + * want to receive/transmit regular data packets, then
9406 + * simply stop the scan session and exit PS mode.
9407 + */
9408 + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
9409 + wcn->sw_scan_vif);
9410 ++ wcn->sw_scan_channel = 0;
9411 + } else if (wcn->sw_scan) {
9412 + /* A scan is ongoing, do not change the operating
9413 + * channel, but start a scan session on the channel.
9414 +@@ -422,6 +423,7 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
9415 + wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN,
9416 + wcn->sw_scan_vif);
9417 + wcn36xx_smd_start_scan(wcn, ch);
9418 ++ wcn->sw_scan_channel = ch;
9419 + } else {
9420 + wcn36xx_change_opchannel(wcn, ch);
9421 + }
9422 +@@ -702,6 +704,7 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
9423 +
9424 + wcn->sw_scan = true;
9425 + wcn->sw_scan_vif = vif;
9426 ++ wcn->sw_scan_channel = 0;
9427 + if (vif_priv->sta_assoc)
9428 + wcn->sw_scan_opchannel = WCN36XX_HW_CHANNEL(wcn);
9429 + else
9430 +diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
9431 +index 1b831157ede17..cab196bb38cd4 100644
9432 +--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
9433 ++++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
9434 +@@ -287,6 +287,10 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
9435 + status.rate_idx = 0;
9436 + }
9437 +
9438 ++ if (ieee80211_is_beacon(hdr->frame_control) ||
9439 ++ ieee80211_is_probe_resp(hdr->frame_control))
9440 ++ status.boottime_ns = ktime_get_boottime_ns();
9441 ++
9442 + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
9443 +
9444 + if (ieee80211_is_beacon(hdr->frame_control)) {
9445 +diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
9446 +index 6121d8a5641ab..0feb235b5a426 100644
9447 +--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
9448 ++++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
9449 +@@ -246,6 +246,7 @@ struct wcn36xx {
9450 + struct cfg80211_scan_request *scan_req;
9451 + bool sw_scan;
9452 + u8 sw_scan_opchannel;
9453 ++ u8 sw_scan_channel;
9454 + struct ieee80211_vif *sw_scan_vif;
9455 + struct mutex scan_lock;
9456 + bool scan_aborted;
9457 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
9458 +index b2605aefc2909..8b200379f7c20 100644
9459 +--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
9460 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
9461 +@@ -1,6 +1,6 @@
9462 + /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
9463 + /*
9464 +- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
9465 ++ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
9466 + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
9467 + * Copyright (C) 2016-2017 Intel Deutschland GmbH
9468 + */
9469 +@@ -874,7 +874,7 @@ struct iwl_scan_probe_params_v3 {
9470 + u8 reserved;
9471 + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
9472 + __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
9473 +- u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE];
9474 ++ u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN];
9475 + } __packed; /* SCAN_PROBE_PARAMS_API_S_VER_3 */
9476 +
9477 + /**
9478 +@@ -894,7 +894,7 @@ struct iwl_scan_probe_params_v4 {
9479 + __le16 reserved;
9480 + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
9481 + __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
9482 +- u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE];
9483 ++ u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN];
9484 + } __packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */
9485 +
9486 + #define SCAN_MAX_NUM_CHANS_V3 67
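
The bssid_array swap above changes only the dimension order, not the total size, so the firmware-facing layout stays SCAN_BSSID_MAX_SIZE MAC addresses back to back; what changes is the driver-side indexing. Assuming SCAN_BSSID_MAX_SIZE == 16 purely for illustration:

    u8 bad[ETH_ALEN][SCAN_BSSID_MAX_SIZE];  /* bad[i] is a 16-byte row   */
    u8 good[SCAN_BSSID_MAX_SIZE][ETH_ALEN]; /* good[i] is one 6-byte MAC */

    /* Both occupy 96 bytes, but only the second makes this copy land
     * at byte offset i * ETH_ALEN, as the firmware expects:
     */
    memcpy(good[i], bssid, ETH_ALEN);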
9487 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
9488 +index df7c55e06f54e..a13fe01e487b9 100644
9489 +--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
9490 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
9491 +@@ -2321,7 +2321,7 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
9492 + return;
9493 +
9494 + if (dump_data->monitor_only)
9495 +- dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
9496 ++ dump_mask &= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR);
9497 +
9498 + fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
9499 + file_len = le32_to_cpu(dump_file->file_len);
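
The dbg.c one-liner fixes an enum-versus-mask mixup: IWL_FW_ERROR_DUMP_FW_MONITOR is a dump-type index, not a bitmask, so anding with the raw value kept an arbitrary set of low bits instead of selecting the monitor dump. Sketch, with the index value assumed for illustration:

    /* Suppose IWL_FW_ERROR_DUMP_FW_MONITOR == 9 (binary 1001). */
    dump_mask &= 9;       /* old: keeps dump types 0 and 3, by accident */
    dump_mask &= BIT(9);  /* new: keeps exactly the FW-monitor bit      */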
9500 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
9501 +index fd5e089616515..7f0c821898082 100644
9502 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
9503 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
9504 +@@ -1005,8 +1005,10 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
9505 + return -ENOMEM;
9506 +
9507 + #ifdef CONFIG_IWLWIFI_DEBUGFS
9508 +- if (mvm->beacon_inject_active)
9509 ++ if (mvm->beacon_inject_active) {
9510 ++ dev_kfree_skb(beacon);
9511 + return -EBUSY;
9512 ++ }
9513 + #endif
9514 +
9515 + ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
9516 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
9517 +index 70ebecb73c244..79f44435972e4 100644
9518 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
9519 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
9520 +@@ -2987,16 +2987,20 @@ static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
9521 + void *_data)
9522 + {
9523 + struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data;
9524 ++ const struct cfg80211_bss_ies *ies;
9525 + const struct element *elem;
9526 +
9527 +- elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, bss->ies->data,
9528 +- bss->ies->len);
9529 ++ rcu_read_lock();
9530 ++ ies = rcu_dereference(bss->ies);
9531 ++ elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
9532 ++ ies->len);
9533 +
9534 + if (!elem || elem->datalen < 10 ||
9535 + !(elem->data[10] &
9536 + WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) {
9537 + data->tolerated = false;
9538 + }
9539 ++ rcu_read_unlock();
9540 + }
9541 +
9542 + static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
9543 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
9544 +index 20e8d343a9501..b637cf9d85fd7 100644
9545 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
9546 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
9547 +@@ -792,10 +792,26 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
9548 +
9549 + mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
9550 +
9551 +- mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
9552 +- mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
9553 +- mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
9554 +- mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
9555 ++ if (iwl_mvm_has_new_tx_api(mvm)) {
9556 ++ /*
9557 ++ * If we have the new TX/queue allocation API initialize them
9558 ++ * all to invalid numbers. We'll rewrite the ones that we need
9559 ++ * later, but that doesn't happen for all of them all of the
9560 ++ * time (e.g. P2P Device is optional), and if a dynamic queue
9561 ++ * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then
9562 ++ * iwl_mvm_is_static_queue() erroneously returns true, and we
9563 ++ * might have things getting stuck.
9564 ++ */
9565 ++ mvm->aux_queue = IWL_MVM_INVALID_QUEUE;
9566 ++ mvm->snif_queue = IWL_MVM_INVALID_QUEUE;
9567 ++ mvm->probe_queue = IWL_MVM_INVALID_QUEUE;
9568 ++ mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE;
9569 ++ } else {
9570 ++ mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
9571 ++ mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
9572 ++ mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
9573 ++ mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
9574 ++ }
9575 +
9576 + mvm->sf_state = SF_UNINIT;
9577 + if (iwl_mvm_has_unified_ucode(mvm))
9578 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
9579 +index 0368b7101222c..2d600a8b20ed7 100644
9580 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
9581 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
9582 +@@ -1648,7 +1648,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
9583 + struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
9584 + u32 n_aps_flag =
9585 + iwl_mvm_scan_ch_n_aps_flag(vif_type,
9586 +- cfg->v2.channel_num);
9587 ++ channels[i]->hw_value);
9588 +
9589 + cfg->flags = cpu_to_le32(flags | n_aps_flag);
9590 + cfg->v2.channel_num = channels[i]->hw_value;
9591 +@@ -2368,14 +2368,17 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
9592 + if (ret)
9593 + return ret;
9594 +
9595 +- iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
9596 +- &bitmap_ssid);
9597 + if (!params->scan_6ghz) {
9598 ++ iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
9599 ++ &bitmap_ssid);
9600 + iwl_mvm_scan_umac_fill_ch_p_v6(mvm, params, vif,
9601 +- &scan_p->channel_params, bitmap_ssid);
9602 ++ &scan_p->channel_params, bitmap_ssid);
9603 +
9604 + return 0;
9605 ++ } else {
9606 ++ pb->preq = params->preq;
9607 + }
9608 ++
9609 + cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
9610 + cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
9611 + cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
9612 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
9613 +index 9c45a64c50094..252b81b1dc8cf 100644
9614 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
9615 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
9616 +@@ -316,8 +316,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
9617 + }
9618 +
9619 + static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
9620 +- int queue, u8 tid, u8 flags)
9621 ++ u16 *queueptr, u8 tid, u8 flags)
9622 + {
9623 ++ int queue = *queueptr;
9624 + struct iwl_scd_txq_cfg_cmd cmd = {
9625 + .scd_queue = queue,
9626 + .action = SCD_CFG_DISABLE_QUEUE,
9627 +@@ -326,6 +327,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
9628 +
9629 + if (iwl_mvm_has_new_tx_api(mvm)) {
9630 + iwl_trans_txq_free(mvm->trans, queue);
9631 ++ *queueptr = IWL_MVM_INVALID_QUEUE;
9632 +
9633 + return 0;
9634 + }
9635 +@@ -487,6 +489,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
9636 + u8 sta_id, tid;
9637 + unsigned long disable_agg_tids = 0;
9638 + bool same_sta;
9639 ++ u16 queue_tmp = queue;
9640 + int ret;
9641 +
9642 + lockdep_assert_held(&mvm->mutex);
9643 +@@ -509,7 +512,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
9644 + iwl_mvm_invalidate_sta_queue(mvm, queue,
9645 + disable_agg_tids, false);
9646 +
9647 +- ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
9648 ++ ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
9649 + if (ret) {
9650 + IWL_ERR(mvm,
9651 + "Failed to free inactive queue %d (ret=%d)\n",
9652 +@@ -1184,6 +1187,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
9653 + unsigned int wdg_timeout =
9654 + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
9655 + int queue = -1;
9656 ++ u16 queue_tmp;
9657 + unsigned long disable_agg_tids = 0;
9658 + enum iwl_mvm_agg_state queue_state;
9659 + bool shared_queue = false, inc_ssn;
9660 +@@ -1332,7 +1336,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
9661 + return 0;
9662 +
9663 + out_err:
9664 +- iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
9665 ++ queue_tmp = queue;
9666 ++ iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
9667 +
9668 + return ret;
9669 + }
9670 +@@ -1779,7 +1784,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
9671 + if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
9672 + continue;
9673 +
9674 +- iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
9675 ++ iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
9676 + 0);
9677 + mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
9678 + }
9679 +@@ -1987,7 +1992,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
9680 + ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
9681 + if (ret) {
9682 + if (!iwl_mvm_has_new_tx_api(mvm))
9683 +- iwl_mvm_disable_txq(mvm, NULL, *queue,
9684 ++ iwl_mvm_disable_txq(mvm, NULL, queue,
9685 + IWL_MAX_TID_COUNT, 0);
9686 + return ret;
9687 + }
9688 +@@ -2060,7 +2065,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
9689 + if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
9690 + return -EINVAL;
9691 +
9692 +- iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
9693 ++ iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
9694 + ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
9695 + if (ret)
9696 + IWL_WARN(mvm, "Failed sending remove station\n");
9697 +@@ -2077,7 +2082,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
9698 + if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
9699 + return -EINVAL;
9700 +
9701 +- iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
9702 ++ iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
9703 + ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
9704 + if (ret)
9705 + IWL_WARN(mvm, "Failed sending remove station\n");
9706 +@@ -2173,7 +2178,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
9707 + struct ieee80211_vif *vif)
9708 + {
9709 + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
9710 +- int queue;
9711 ++ u16 *queueptr, queue;
9712 +
9713 + lockdep_assert_held(&mvm->mutex);
9714 +
9715 +@@ -2182,10 +2187,10 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
9716 + switch (vif->type) {
9717 + case NL80211_IFTYPE_AP:
9718 + case NL80211_IFTYPE_ADHOC:
9719 +- queue = mvm->probe_queue;
9720 ++ queueptr = &mvm->probe_queue;
9721 + break;
9722 + case NL80211_IFTYPE_P2P_DEVICE:
9723 +- queue = mvm->p2p_dev_queue;
9724 ++ queueptr = &mvm->p2p_dev_queue;
9725 + break;
9726 + default:
9727 + WARN(1, "Can't free bcast queue on vif type %d\n",
9728 +@@ -2193,7 +2198,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
9729 + return;
9730 + }
9731 +
9732 +- iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
9733 ++ queue = *queueptr;
9734 ++ iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
9735 + if (iwl_mvm_has_new_tx_api(mvm))
9736 + return;
9737 +
9738 +@@ -2428,7 +2434,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
9739 +
9740 + iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
9741 +
9742 +- iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
9743 ++ iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);
9744 +
9745 + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
9746 + if (ret)
9747 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
9748 +index d3307a11fcac4..24b658a3098aa 100644
9749 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
9750 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
9751 +@@ -246,6 +246,18 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
9752 + }
9753 + }
9754 +
9755 ++static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm)
9756 ++{
9757 ++ /*
9758 ++ * If the IWL_MVM_STATUS_NEED_FLUSH_P2P bit is already set, then the
9759 ++ * roc_done_wk is already scheduled or running, so don't schedule it
9760 ++ * again to avoid a race where the roc_done_wk clears this bit after
9761 ++ * it is set here, affecting the next run of the roc_done_wk.
9762 ++ */
9763 ++ if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status))
9764 ++ iwl_mvm_roc_finished(mvm);
9765 ++}
9766 ++
9767 + /*
9768 + * Handles a FW notification for an event that is known to the driver.
9769 + *
9770 +@@ -297,8 +309,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
9771 + switch (te_data->vif->type) {
9772 + case NL80211_IFTYPE_P2P_DEVICE:
9773 + ieee80211_remain_on_channel_expired(mvm->hw);
9774 +- set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
9775 +- iwl_mvm_roc_finished(mvm);
9776 ++ iwl_mvm_p2p_roc_finished(mvm);
9777 + break;
9778 + case NL80211_IFTYPE_STATION:
9779 + /*
9780 +@@ -674,8 +685,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
9781 + /* Session protection is still ongoing. Cancel it */
9782 + iwl_mvm_cancel_session_protection(mvm, mvmvif, id);
9783 + if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
9784 +- set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
9785 +- iwl_mvm_roc_finished(mvm);
9786 ++ iwl_mvm_p2p_roc_finished(mvm);
9787 + }
9788 + }
9789 + return false;
9790 +@@ -842,8 +852,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
9791 + /* End TE, notify mac80211 */
9792 + mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
9793 + ieee80211_remain_on_channel_expired(mvm->hw);
9794 +- set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
9795 +- iwl_mvm_roc_finished(mvm);
9796 ++ iwl_mvm_p2p_roc_finished(mvm);
9797 + } else if (le32_to_cpu(notif->start)) {
9798 + if (WARN_ON(mvmvif->time_event_data.id !=
9799 + le32_to_cpu(notif->conf_id)))
9800 +@@ -1004,14 +1013,13 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
9801 + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
9802 + iwl_mvm_cancel_session_protection(mvm, mvmvif,
9803 + mvmvif->time_event_data.id);
9804 +- set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
9805 ++ iwl_mvm_p2p_roc_finished(mvm);
9806 + } else {
9807 + iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
9808 + &mvmvif->time_event_data);
9809 ++ iwl_mvm_roc_finished(mvm);
9810 + }
9811 +
9812 +- iwl_mvm_roc_finished(mvm);
9813 +-
9814 + return;
9815 + }
9816 +
9817 +@@ -1025,12 +1033,11 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
9818 +
9819 + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
9820 + iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
9821 +- set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
9822 ++ iwl_mvm_p2p_roc_finished(mvm);
9823 + } else {
9824 + iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
9825 ++ iwl_mvm_roc_finished(mvm);
9826 + }
9827 +-
9828 +- iwl_mvm_roc_finished(mvm);
9829 + }
9830 +
9831 + void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
9832 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
9833 +index 4f6f4b2720f01..ff7ca3c57f34d 100644
9834 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
9835 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
9836 +@@ -487,6 +487,9 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
9837 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
9838 + int i;
9839 +
9840 ++ if (!trans_pcie->rx_pool)
9841 ++ return;
9842 ++
9843 + for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
9844 + if (!trans_pcie->rx_pool[i].page)
9845 + continue;
9846 +@@ -1062,7 +1065,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
9847 + INIT_LIST_HEAD(&rba->rbd_empty);
9848 + spin_unlock_bh(&rba->lock);
9849 +
9850 +- /* free all first - we might be reconfigured for a different size */
9851 ++ /* free all first - we overwrite everything here */
9852 + iwl_pcie_free_rbs_pool(trans);
9853 +
9854 + for (i = 0; i < RX_QUEUE_SIZE; i++)
9855 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
9856 +index bee6b45742268..65cc25cbb9ec0 100644
9857 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
9858 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
9859 +@@ -1866,6 +1866,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
9860 + {
9861 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
9862 +
9863 ++ /* free all first - we might be reconfigured for a different size */
9864 ++ iwl_pcie_free_rbs_pool(trans);
9865 ++
9866 + trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
9867 + trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
9868 + trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
9869 +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
9870 +index 01735776345a9..7ddce3c3f0c48 100644
9871 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
9872 ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
9873 +@@ -1378,6 +1378,8 @@ struct rtl8xxxu_priv {
9874 + u8 no_pape:1;
9875 + u8 int_buf[USB_INTR_CONTENT_LENGTH];
9876 + u8 rssi_level;
9877 ++ DECLARE_BITMAP(tx_aggr_started, IEEE80211_NUM_TIDS);
9878 ++ DECLARE_BITMAP(tid_tx_operational, IEEE80211_NUM_TIDS);
9879 + /*
9880 + * Only one virtual interface permitted because only STA mode
9881 + * is supported and no iface_combinations are provided.
9882 +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
9883 +index ac1061caacd65..3285a91efb91e 100644
9884 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
9885 ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
9886 +@@ -4805,6 +4805,8 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
9887 + struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
9888 + struct rtl8xxxu_priv *priv = hw->priv;
9889 + struct device *dev = &priv->udev->dev;
9890 ++ u8 *qc = ieee80211_get_qos_ctl(hdr);
9891 ++ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
9892 + u32 rate;
9893 + u16 rate_flags = tx_info->control.rates[0].flags;
9894 + u16 seq_number;
9895 +@@ -4828,7 +4830,7 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
9896 +
9897 + tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
9898 +
9899 +- if (ampdu_enable)
9900 ++ if (ampdu_enable && test_bit(tid, priv->tid_tx_operational))
9901 + tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
9902 + else
9903 + tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
9904 +@@ -4876,6 +4878,8 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
9905 + struct rtl8xxxu_priv *priv = hw->priv;
9906 + struct device *dev = &priv->udev->dev;
9907 + struct rtl8xxxu_txdesc40 *tx_desc40;
9908 ++ u8 *qc = ieee80211_get_qos_ctl(hdr);
9909 ++ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
9910 + u32 rate;
9911 + u16 rate_flags = tx_info->control.rates[0].flags;
9912 + u16 seq_number;
9913 +@@ -4902,7 +4906,7 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
9914 +
9915 + tx_desc40->txdw9 = cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
9916 +
9917 +- if (ampdu_enable)
9918 ++ if (ampdu_enable && test_bit(tid, priv->tid_tx_operational))
9919 + tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
9920 + else
9921 + tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
9922 +@@ -5015,12 +5019,19 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
9923 + if (ieee80211_is_data_qos(hdr->frame_control) && sta) {
9924 + if (sta->ht_cap.ht_supported) {
9925 + u32 ampdu, val32;
9926 ++ u8 *qc = ieee80211_get_qos_ctl(hdr);
9927 ++ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
9928 +
9929 + ampdu = (u32)sta->ht_cap.ampdu_density;
9930 + val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT;
9931 + tx_desc->txdw2 |= cpu_to_le32(val32);
9932 +
9933 + ampdu_enable = true;
9934 ++
9935 ++ if (!test_bit(tid, priv->tx_aggr_started) &&
9936 ++ !(skb->protocol == cpu_to_be16(ETH_P_PAE)))
9937 ++ if (!ieee80211_start_tx_ba_session(sta, tid, 0))
9938 ++ set_bit(tid, priv->tx_aggr_started);
9939 + }
9940 + }
9941 +
9942 +@@ -6096,6 +6107,7 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
9943 + struct device *dev = &priv->udev->dev;
9944 + u8 ampdu_factor, ampdu_density;
9945 + struct ieee80211_sta *sta = params->sta;
9946 ++ u16 tid = params->tid;
9947 + enum ieee80211_ampdu_mlme_action action = params->action;
9948 +
9949 + switch (action) {
9950 +@@ -6108,17 +6120,20 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
9951 + dev_dbg(dev,
9952 + "Changed HT: ampdu_factor %02x, ampdu_density %02x\n",
9953 + ampdu_factor, ampdu_density);
9954 +- break;
9955 ++ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
9956 ++ case IEEE80211_AMPDU_TX_STOP_CONT:
9957 + case IEEE80211_AMPDU_TX_STOP_FLUSH:
9958 +- dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
9959 +- rtl8xxxu_set_ampdu_factor(priv, 0);
9960 +- rtl8xxxu_set_ampdu_min_space(priv, 0);
9961 +- break;
9962 + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
9963 +- dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
9964 +- __func__);
9965 ++ dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP\n", __func__);
9966 + rtl8xxxu_set_ampdu_factor(priv, 0);
9967 + rtl8xxxu_set_ampdu_min_space(priv, 0);
9968 ++ clear_bit(tid, priv->tx_aggr_started);
9969 ++ clear_bit(tid, priv->tid_tx_operational);
9970 ++ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
9971 ++ break;
9972 ++ case IEEE80211_AMPDU_TX_OPERATIONAL:
9973 ++ dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_OPERATIONAL\n", __func__);
9974 ++ set_bit(tid, priv->tid_tx_operational);
9975 + break;
9976 + case IEEE80211_AMPDU_RX_START:
9977 + dev_dbg(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
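
Taken together, the rtl8xxxu changes wire up a minimal A-MPDU TX handshake: one bitmap records which TIDs have had a block-ack session requested, the other which ones mac80211 has declared operational, and only operational TIDs get TXDESC*_AGG_ENABLE. The life cycle, sketched from the hunks above:

    /* TX path: request a BA session once per TID; EAPOL (ETH_P_PAE)
     * frames are skipped so key handshakes are never aggregated.
     */
    if (!test_bit(tid, priv->tx_aggr_started) &&
        skb->protocol != cpu_to_be16(ETH_P_PAE) &&
        !ieee80211_start_tx_ba_session(sta, tid, 0))
            set_bit(tid, priv->tx_aggr_started);

    /* ampdu_action(): TX_START answers IEEE80211_AMPDU_TX_START_IMMEDIATE,
     * TX_OPERATIONAL sets tid_tx_operational, and every TX_STOP_* variant
     * clears both bits and acks via ieee80211_stop_tx_ba_cb_irqsafe().
     */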
9978 +diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
9979 +index c0e4b111c8b4e..73d6807a8cdfb 100644
9980 +--- a/drivers/net/wireless/realtek/rtw88/Makefile
9981 ++++ b/drivers/net/wireless/realtek/rtw88/Makefile
9982 +@@ -15,9 +15,9 @@ rtw88_core-y += main.o \
9983 + ps.o \
9984 + sec.o \
9985 + bf.o \
9986 +- wow.o \
9987 + regd.o
9988 +
9989 ++rtw88_core-$(CONFIG_PM) += wow.o
9990 +
9991 + obj-$(CONFIG_RTW88_8822B) += rtw88_8822b.o
9992 + rtw88_8822b-objs := rtw8822b.o rtw8822b_table.o
9993 +diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
9994 +index 3bfa5ecc00537..e6399519584bd 100644
9995 +--- a/drivers/net/wireless/realtek/rtw88/fw.c
9996 ++++ b/drivers/net/wireless/realtek/rtw88/fw.c
9997 +@@ -819,7 +819,7 @@ static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
9998 + continue;
9999 + if ((!ssid && !rsvd_pkt->ssid) ||
10000 + rtw_ssid_equal(rsvd_pkt->ssid, ssid))
10001 +- size = rsvd_pkt->skb->len;
10002 ++ size = rsvd_pkt->probe_req_size;
10003 + }
10004 +
10005 + return size;
10006 +@@ -1047,6 +1047,8 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
10007 + ssid->ssid_len, 0);
10008 + else
10009 + skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
10010 ++ if (skb_new)
10011 ++ rsvd_pkt->probe_req_size = (u16)skb_new->len;
10012 + break;
10013 + case RSVD_NLO_INFO:
10014 + skb_new = rtw_nlo_info_get(hw);
10015 +@@ -1643,6 +1645,7 @@ int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
10016 + static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
10017 + u8 location)
10018 + {
10019 ++ struct rtw_chip_info *chip = rtwdev->chip;
10020 + u8 h2c_pkt[H2C_PKT_SIZE] = {0};
10021 + u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;
10022 +
10023 +@@ -1653,6 +1656,7 @@ static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
10024 + UPDATE_PKT_SET_LOCATION(h2c_pkt, location);
10025 +
10026 + /* include txdesc size */
10027 ++ size += chip->tx_pkt_desc_sz;
10028 + UPDATE_PKT_SET_SIZE(h2c_pkt, size);
10029 +
10030 + rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
10031 +@@ -1662,7 +1666,7 @@ void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
10032 + struct cfg80211_ssid *ssid)
10033 + {
10034 + u8 loc;
10035 +- u32 size;
10036 ++ u16 size;
10037 +
10038 + loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
10039 + if (!loc) {
10040 +diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
10041 +index a8a7162fbe64c..a3a28ac6f1ded 100644
10042 +--- a/drivers/net/wireless/realtek/rtw88/fw.h
10043 ++++ b/drivers/net/wireless/realtek/rtw88/fw.h
10044 +@@ -147,6 +147,7 @@ struct rtw_rsvd_page {
10045 + u8 page;
10046 + bool add_txdesc;
10047 + struct cfg80211_ssid *ssid;
10048 ++ u16 probe_req_size;
10049 + };
10050 +
10051 + enum rtw_keep_alive_pkt_type {
10052 +diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
10053 +index fc9544f4e5e45..bdccfa70dddc7 100644
10054 +--- a/drivers/net/wireless/realtek/rtw88/wow.c
10055 ++++ b/drivers/net/wireless/realtek/rtw88/wow.c
10056 +@@ -283,15 +283,26 @@ static void rtw_wow_rx_dma_start(struct rtw_dev *rtwdev)
10057 +
10058 + static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
10059 + {
10060 +- /* wait 100ms for wow firmware to finish work */
10061 +- msleep(100);
10062 ++ int ret;
10063 ++ u8 check;
10064 ++ u32 check_dis;
10065 +
10066 + if (wow_enable) {
10067 +- if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
10068 ++ ret = read_poll_timeout(rtw_read8, check, !check, 1000,
10069 ++ 100000, true, rtwdev,
10070 ++ REG_WOWLAN_WAKE_REASON);
10071 ++ if (ret)
10072 + goto wow_fail;
10073 + } else {
10074 +- if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) ||
10075 +- rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE))
10076 ++ ret = read_poll_timeout(rtw_read32_mask, check_dis,
10077 ++ !check_dis, 1000, 100000, true, rtwdev,
10078 ++ REG_FE1IMR, BIT_FS_RXDONE);
10079 ++ if (ret)
10080 ++ goto wow_fail;
10081 ++ ret = read_poll_timeout(rtw_read32_mask, check_dis,
10082 ++ !check_dis, 1000, 100000, false, rtwdev,
10083 ++ REG_RXPKT_NUM, BIT_RW_RELEASE);
10084 ++ if (ret)
10085 + goto wow_fail;
10086 + }
10087 +
10088 +diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
10089 +index 1e0615b8565e7..72de88ff0d30d 100644
10090 +--- a/drivers/nvdimm/pmem.c
10091 ++++ b/drivers/nvdimm/pmem.c
10092 +@@ -450,11 +450,11 @@ static int pmem_attach_disk(struct device *dev,
10093 + pmem->pfn_flags |= PFN_MAP;
10094 + bb_range = pmem->pgmap.range;
10095 + } else {
10096 ++ addr = devm_memremap(dev, pmem->phys_addr,
10097 ++ pmem->size, ARCH_MEMREMAP_PMEM);
10098 + if (devm_add_action_or_reset(dev, pmem_release_queue,
10099 + &pmem->pgmap))
10100 + return -ENOMEM;
10101 +- addr = devm_memremap(dev, pmem->phys_addr,
10102 +- pmem->size, ARCH_MEMREMAP_PMEM);
10103 + bb_range.start = res->start;
10104 + bb_range.end = res->end;
10105 + }
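
The pmem reorder matters because devm release callbacks run in reverse registration order. Registering pmem_release_queue after devm_memremap() guarantees the queue is quiesced while the mapping is still live:

    addr = devm_memremap(dev, pmem->phys_addr, pmem->size,
                         ARCH_MEMREMAP_PMEM);       /* released last  */
    if (devm_add_action_or_reset(dev, pmem_release_queue,
                                 &pmem->pgmap))     /* released first */
            return -ENOMEM;

With the old order, teardown unmapped the range first and only then stopped the queue, leaving a window where in-flight I/O touched unmapped memory.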
10106 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
10107 +index dfd9dec0c1f60..2f0cbaba12ac4 100644
10108 +--- a/drivers/nvme/host/core.c
10109 ++++ b/drivers/nvme/host/core.c
10110 +@@ -1029,7 +1029,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
10111 + return BLK_STS_IOERR;
10112 + }
10113 +
10114 +- cmd->common.command_id = req->tag;
10115 ++ nvme_req(req)->genctr++;
10116 ++ cmd->common.command_id = nvme_cid(req);
10117 + trace_nvme_setup_cmd(req, cmd);
10118 + return ret;
10119 + }
10120 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
10121 +index 5cd1fa3b8464d..26511794629bc 100644
10122 +--- a/drivers/nvme/host/nvme.h
10123 ++++ b/drivers/nvme/host/nvme.h
10124 +@@ -158,6 +158,7 @@ enum nvme_quirks {
10125 + struct nvme_request {
10126 + struct nvme_command *cmd;
10127 + union nvme_result result;
10128 ++ u8 genctr;
10129 + u8 retries;
10130 + u8 flags;
10131 + u16 status;
10132 +@@ -497,6 +498,49 @@ struct nvme_ctrl_ops {
10133 + int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
10134 + };
10135 +
10136 ++/*
10137 ++ * nvme command_id is constructed as such:
10138 ++ * | xxxx | xxxxxxxxxxxx |
10139 ++ * gen request tag
10140 ++ */
10141 ++#define nvme_genctr_mask(gen) (gen & 0xf)
10142 ++#define nvme_cid_install_genctr(gen) (nvme_genctr_mask(gen) << 12)
10143 ++#define nvme_genctr_from_cid(cid) ((cid & 0xf000) >> 12)
10144 ++#define nvme_tag_from_cid(cid) (cid & 0xfff)
10145 ++
10146 ++static inline u16 nvme_cid(struct request *rq)
10147 ++{
10148 ++ return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
10149 ++}
10150 ++
10151 ++static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
10152 ++ u16 command_id)
10153 ++{
10154 ++ u8 genctr = nvme_genctr_from_cid(command_id);
10155 ++ u16 tag = nvme_tag_from_cid(command_id);
10156 ++ struct request *rq;
10157 ++
10158 ++ rq = blk_mq_tag_to_rq(tags, tag);
10159 ++ if (unlikely(!rq)) {
10160 ++ pr_err("could not locate request for tag %#x\n",
10161 ++ tag);
10162 ++ return NULL;
10163 ++ }
10164 ++ if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
10165 ++ dev_err(nvme_req(rq)->ctrl->device,
10166 ++ "request %#x genctr mismatch (got %#x expected %#x)\n",
10167 ++ tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
10168 ++ return NULL;
10169 ++ }
10170 ++ return rq;
10171 ++}
10172 ++
10173 ++static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
10174 ++ u16 command_id)
10175 ++{
10176 ++ return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
10177 ++}
10178 ++
10179 + #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
10180 + void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
10181 + const char *dev_name);
10182 +@@ -594,7 +638,8 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
10183 +
10184 + static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
10185 + {
10186 +- return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
10187 ++ return !qid &&
10188 ++ nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
10189 + }
10190 +
10191 + void nvme_complete_rq(struct request *req);
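
The helpers added to nvme.h above pack a 4-bit generation counter into bits 15:12 of the 16-bit command_id, so a completion that carries a reused tag but a stale generation can be rejected. A self-contained sketch of just that bit layout (userspace stand-ins; the genctr/tag values are hypothetical):

/* Userspace sketch of the command_id layout added above:
 * cid[15:12] = generation counter, cid[11:0] = blk-mq request tag. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENCTR_MASK(g)      ((g) & 0xf)
#define CID_INSTALL_GEN(g)  (GENCTR_MASK(g) << 12)
#define GEN_FROM_CID(c)     (((c) & 0xf000) >> 12)
#define TAG_FROM_CID(c)     ((c) & 0xfff)

int main(void)
{
	uint8_t genctr = 0;	/* per-request counter, bumped on reuse */
	uint16_t tag = 0x2a;	/* blk-mq tag, reused across requests */

	/* First submission of this tag. */
	genctr++;
	uint16_t cid = CID_INSTALL_GEN(genctr) | tag;
	assert(TAG_FROM_CID(cid) == tag);
	assert(GEN_FROM_CID(cid) == GENCTR_MASK(genctr));

	/* The tag gets reused for a new request: the counter advances... */
	uint16_t stale_cid = cid;
	genctr++;
	cid = CID_INSTALL_GEN(genctr) | tag;

	/* ...so a late completion carrying the old cid no longer matches. */
	printf("stale completion detected: %s\n",
	       GEN_FROM_CID(stale_cid) != GENCTR_MASK(genctr) ? "yes" : "no");
	return 0;
}
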
10192 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
10193 +index 51852085239ef..c246fdacba2e5 100644
10194 +--- a/drivers/nvme/host/pci.c
10195 ++++ b/drivers/nvme/host/pci.c
10196 +@@ -1014,7 +1014,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
10197 + return;
10198 + }
10199 +
10200 +- req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
10201 ++ req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
10202 + if (unlikely(!req)) {
10203 + dev_warn(nvmeq->dev->ctrl.device,
10204 + "invalid id %d completed on queue %d\n",
10205 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
10206 +index 3bd9cbc80246f..a68704e39084e 100644
10207 +--- a/drivers/nvme/host/rdma.c
10208 ++++ b/drivers/nvme/host/rdma.c
10209 +@@ -1730,10 +1730,10 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
10210 + struct request *rq;
10211 + struct nvme_rdma_request *req;
10212 +
10213 +- rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
10214 ++ rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
10215 + if (!rq) {
10216 + dev_err(queue->ctrl->ctrl.device,
10217 +- "tag 0x%x on QP %#x not found\n",
10218 ++ "got bad command_id %#x on QP %#x\n",
10219 + cqe->command_id, queue->qp->qp_num);
10220 + nvme_rdma_error_recovery(queue->ctrl);
10221 + return;
10222 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
10223 +index 18bd68b82d78f..48b70e5235a39 100644
10224 +--- a/drivers/nvme/host/tcp.c
10225 ++++ b/drivers/nvme/host/tcp.c
10226 +@@ -487,11 +487,11 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
10227 + {
10228 + struct request *rq;
10229 +
10230 +- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
10231 ++ rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
10232 + if (!rq) {
10233 + dev_err(queue->ctrl->ctrl.device,
10234 +- "queue %d tag 0x%x not found\n",
10235 +- nvme_tcp_queue_id(queue), cqe->command_id);
10236 ++ "got bad cqe.command_id %#x on queue %d\n",
10237 ++ cqe->command_id, nvme_tcp_queue_id(queue));
10238 + nvme_tcp_error_recovery(&queue->ctrl->ctrl);
10239 + return -EINVAL;
10240 + }
10241 +@@ -508,11 +508,11 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
10242 + {
10243 + struct request *rq;
10244 +
10245 +- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
10246 ++ rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
10247 + if (!rq) {
10248 + dev_err(queue->ctrl->ctrl.device,
10249 +- "queue %d tag %#x not found\n",
10250 +- nvme_tcp_queue_id(queue), pdu->command_id);
10251 ++ "got bad c2hdata.command_id %#x on queue %d\n",
10252 ++ pdu->command_id, nvme_tcp_queue_id(queue));
10253 + return -ENOENT;
10254 + }
10255 +
10256 +@@ -606,7 +606,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
10257 + data->hdr.plen =
10258 + cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
10259 + data->ttag = pdu->ttag;
10260 +- data->command_id = rq->tag;
10261 ++ data->command_id = nvme_cid(rq);
10262 + data->data_offset = cpu_to_le32(req->data_sent);
10263 + data->data_length = cpu_to_le32(req->pdu_len);
10264 + return 0;
10265 +@@ -619,11 +619,11 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
10266 + struct request *rq;
10267 + int ret;
10268 +
10269 +- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
10270 ++ rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
10271 + if (!rq) {
10272 + dev_err(queue->ctrl->ctrl.device,
10273 +- "queue %d tag %#x not found\n",
10274 +- nvme_tcp_queue_id(queue), pdu->command_id);
10275 ++ "got bad r2t.command_id %#x on queue %d\n",
10276 ++ pdu->command_id, nvme_tcp_queue_id(queue));
10277 + return -ENOENT;
10278 + }
10279 + req = blk_mq_rq_to_pdu(rq);
10280 +@@ -702,17 +702,9 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
10281 + unsigned int *offset, size_t *len)
10282 + {
10283 + struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
10284 +- struct nvme_tcp_request *req;
10285 +- struct request *rq;
10286 +-
10287 +- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
10288 +- if (!rq) {
10289 +- dev_err(queue->ctrl->ctrl.device,
10290 +- "queue %d tag %#x not found\n",
10291 +- nvme_tcp_queue_id(queue), pdu->command_id);
10292 +- return -ENOENT;
10293 +- }
10294 +- req = blk_mq_rq_to_pdu(rq);
10295 ++ struct request *rq =
10296 ++ nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
10297 ++ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
10298 +
10299 + while (true) {
10300 + int recv_len, ret;
10301 +@@ -804,8 +796,8 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
10302 + }
10303 +
10304 + if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
10305 +- struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
10306 +- pdu->command_id);
10307 ++ struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
10308 ++ pdu->command_id);
10309 +
10310 + nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
10311 + queue->nr_cqe++;
10312 +diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
10313 +index 3a17a7e26bbfc..0285ccc7541f6 100644
10314 +--- a/drivers/nvme/target/loop.c
10315 ++++ b/drivers/nvme/target/loop.c
10316 +@@ -107,10 +107,10 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
10317 + } else {
10318 + struct request *rq;
10319 +
10320 +- rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
10321 ++ rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
10322 + if (!rq) {
10323 + dev_err(queue->ctrl->ctrl.device,
10324 +- "tag 0x%x on queue %d not found\n",
10325 ++ "got bad command_id %#x on queue %d\n",
10326 + cqe->command_id, nvme_loop_queue_idx(queue));
10327 + return;
10328 + }
10329 +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
10330 +index b3bc30a04ed7c..3d87fadaa160d 100644
10331 +--- a/drivers/nvmem/core.c
10332 ++++ b/drivers/nvmem/core.c
10333 +@@ -824,8 +824,11 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
10334 +
10335 + if (nvmem->nkeepout) {
10336 + rval = nvmem_validate_keepouts(nvmem);
10337 +- if (rval)
10338 +- goto err_put_device;
10339 ++ if (rval) {
10340 ++ ida_free(&nvmem_ida, nvmem->id);
10341 ++ kfree(nvmem);
10342 ++ return ERR_PTR(rval);
10343 ++ }
10344 + }
10345 +
10346 + dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
10347 +diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
10348 +index 81fbad5e939df..b0ca4c6264665 100644
10349 +--- a/drivers/nvmem/qfprom.c
10350 ++++ b/drivers/nvmem/qfprom.c
10351 +@@ -139,6 +139,9 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
10352 + {
10353 + int ret;
10354 +
10355 ++ writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
10356 ++ writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET);
10357 ++
10358 + /*
10359 + * This may be a shared rail and may be able to run at a lower rate
10360 + * when we're not blowing fuses. At the moment, the regulator framework
10361 +@@ -159,9 +162,6 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
10362 + "Failed to set clock rate for disable (ignoring)\n");
10363 +
10364 + clk_disable_unprepare(priv->secclk);
10365 +-
10366 +- writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
10367 +- writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET);
10368 + }
10369 +
10370 + /**
10371 +diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
10372 +index a32e60b024b8d..6675b5e56960c 100644
10373 +--- a/drivers/of/kobj.c
10374 ++++ b/drivers/of/kobj.c
10375 +@@ -119,7 +119,7 @@ int __of_attach_node_sysfs(struct device_node *np)
10376 + struct property *pp;
10377 + int rc;
10378 +
10379 +- if (!of_kset)
10380 ++ if (!IS_ENABLED(CONFIG_SYSFS) || !of_kset)
10381 + return 0;
10382 +
10383 + np->kobj.kset = of_kset;
10384 +diff --git a/drivers/opp/of.c b/drivers/opp/of.c
10385 +index 67f2e0710e79c..2a97c6535c4c6 100644
10386 +--- a/drivers/opp/of.c
10387 ++++ b/drivers/opp/of.c
10388 +@@ -95,15 +95,7 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
10389 + static struct device_node *of_parse_required_opp(struct device_node *np,
10390 + int index)
10391 + {
10392 +- struct device_node *required_np;
10393 +-
10394 +- required_np = of_parse_phandle(np, "required-opps", index);
10395 +- if (unlikely(!required_np)) {
10396 +- pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n",
10397 +- __func__, np, index);
10398 +- }
10399 +-
10400 +- return required_np;
10401 ++ return of_parse_phandle(np, "required-opps", index);
10402 + }
10403 +
10404 + /* The caller must call dev_pm_opp_put_opp_table() after the table is used */
10405 +@@ -1328,7 +1320,7 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
10406 +
10407 + required_np = of_parse_required_opp(np, index);
10408 + if (!required_np)
10409 +- return -EINVAL;
10410 ++ return -ENODEV;
10411 +
10412 + opp_table = _find_table_of_opp_np(required_np);
10413 + if (IS_ERR(opp_table)) {
10414 +diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
10415 +index 2c11bd3fe1fd6..17061f1df0f44 100644
10416 +--- a/drivers/parport/ieee1284_ops.c
10417 ++++ b/drivers/parport/ieee1284_ops.c
10418 +@@ -518,7 +518,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
10419 + goto out;
10420 +
10421 + /* Yield the port for a while. */
10422 +- if (count && dev->port->irq != PARPORT_IRQ_NONE) {
10423 ++ if (dev->port->irq != PARPORT_IRQ_NONE) {
10424 + parport_release (dev);
10425 + schedule_timeout_interruptible(msecs_to_jiffies(40));
10426 + parport_claim_or_block (dev);
10427 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
10428 +index c95ebe808f92b..fdbf051586970 100644
10429 +--- a/drivers/pci/controller/pci-aardvark.c
10430 ++++ b/drivers/pci/controller/pci-aardvark.c
10431 +@@ -58,6 +58,7 @@
10432 + #define PIO_COMPLETION_STATUS_CRS 2
10433 + #define PIO_COMPLETION_STATUS_CA 4
10434 + #define PIO_NON_POSTED_REQ BIT(10)
10435 ++#define PIO_ERR_STATUS BIT(11)
10436 + #define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
10437 + #define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
10438 + #define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
10439 +@@ -118,6 +119,46 @@
10440 + #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
10441 + #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
10442 +
10443 ++/* PCIe window configuration */
10444 ++#define OB_WIN_BASE_ADDR 0x4c00
10445 ++#define OB_WIN_BLOCK_SIZE 0x20
10446 ++#define OB_WIN_COUNT 8
10447 ++#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
10448 ++ OB_WIN_BLOCK_SIZE * (win) + \
10449 ++ (offset))
10450 ++#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
10451 ++#define OB_WIN_ENABLE BIT(0)
10452 ++#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
10453 ++#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
10454 ++#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
10455 ++#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
10456 ++#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
10457 ++#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
10458 ++#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
10459 ++#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
10460 ++#define OB_WIN_FUNC_NUM_SHIFT 24
10461 ++#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
10462 ++#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
10463 ++#define OB_WIN_BUS_NUM_BITS_SHIFT 20
10464 ++#define OB_WIN_MSG_CODE_ENABLE BIT(22)
10465 ++#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
10466 ++#define OB_WIN_MSG_CODE_SHIFT 14
10467 ++#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
10468 ++#define OB_WIN_ATTR_ENABLE BIT(11)
10469 ++#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
10470 ++#define OB_WIN_ATTR_TC_SHIFT 8
10471 ++#define OB_WIN_ATTR_RELAXED BIT(7)
10472 ++#define OB_WIN_ATTR_NOSNOOP BIT(6)
10473 ++#define OB_WIN_ATTR_POISON BIT(5)
10474 ++#define OB_WIN_ATTR_IDO BIT(4)
10475 ++#define OB_WIN_TYPE_MASK GENMASK(3, 0)
10476 ++#define OB_WIN_TYPE_SHIFT 0
10477 ++#define OB_WIN_TYPE_MEM 0x0
10478 ++#define OB_WIN_TYPE_IO 0x4
10479 ++#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
10480 ++#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
10481 ++#define OB_WIN_TYPE_MSG 0xc
10482 ++
10483 + /* LMI registers base address and register offsets */
10484 + #define LMI_BASE_ADDR 0x6000
10485 + #define CFG_REG (LMI_BASE_ADDR + 0x0)
10486 +@@ -166,7 +207,7 @@
10487 + #define PCIE_CONFIG_WR_TYPE0 0xa
10488 + #define PCIE_CONFIG_WR_TYPE1 0xb
10489 +
10490 +-#define PIO_RETRY_CNT 500
10491 ++#define PIO_RETRY_CNT 750000 /* 1.5 s */
10492 + #define PIO_RETRY_DELAY 2 /* 2 us*/
10493 +
10494 + #define LINK_WAIT_MAX_RETRIES 10
10495 +@@ -180,8 +221,16 @@
10496 + struct advk_pcie {
10497 + struct platform_device *pdev;
10498 + void __iomem *base;
10499 ++ struct {
10500 ++ phys_addr_t match;
10501 ++ phys_addr_t remap;
10502 ++ phys_addr_t mask;
10503 ++ u32 actions;
10504 ++ } wins[OB_WIN_COUNT];
10505 ++ u8 wins_count;
10506 + struct irq_domain *irq_domain;
10507 + struct irq_chip irq_chip;
10508 ++ raw_spinlock_t irq_lock;
10509 + struct irq_domain *msi_domain;
10510 + struct irq_domain *msi_inner_domain;
10511 + struct irq_chip msi_bottom_irq_chip;
10512 +@@ -366,9 +415,39 @@ err:
10513 + dev_err(dev, "link never came up\n");
10514 + }
10515 +
10516 ++/*
10517 ++ * Set the PCIe address window registers, which can be used for memory
10518 ++ * mapping.
10519 ++ */
10520 ++static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
10521 ++ phys_addr_t match, phys_addr_t remap,
10522 ++ phys_addr_t mask, u32 actions)
10523 ++{
10524 ++ advk_writel(pcie, OB_WIN_ENABLE |
10525 ++ lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
10526 ++ advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
10527 ++ advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
10528 ++ advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
10529 ++ advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
10530 ++ advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
10531 ++ advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
10532 ++}
10533 ++
10534 ++static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
10535 ++{
10536 ++ advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
10537 ++ advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
10538 ++ advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
10539 ++ advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
10540 ++ advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
10541 ++ advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
10542 ++ advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
10543 ++}
10544 ++
10545 + static void advk_pcie_setup_hw(struct advk_pcie *pcie)
10546 + {
10547 + u32 reg;
10548 ++ int i;
10549 +
10550 + /* Enable TX */
10551 + reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
10552 +@@ -447,15 +526,51 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
10553 + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
10554 + advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
10555 +
10556 ++ /*
10557 ++ * Enable AXI address window location generation:
10558 ++ * When it is enabled, the default outbound window
10559 ++ * configurations (Default User Field: 0xD0074CFC)
10560 ++ * are used for transparent address translation of
10561 ++ * the outbound transactions. Thus, PCIe address
10562 ++ * windows are not required for transparent memory
10563 ++ * access when the default outbound window configuration
10564 ++ * is set for memory access.
10565 ++ */
10566 + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
10567 + reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
10568 + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
10569 +
10570 +- /* Bypass the address window mapping for PIO */
10571 ++ /*
10572 ++ * Set memory access in the Default User Field so that
10573 ++ * no PCIe address window needs to be configured for
10574 ++ * transparent memory access.
10575 ++ */
10576 ++ advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
10577 ++
10578 ++ /*
10579 ++ * Bypass the address window mapping for PIO:
10580 ++ * Since a PIO access already carries all required
10581 ++ * info over the AXI interface in the PIO registers,
10582 ++ * the address window is not required.
10583 ++ */
10584 + reg = advk_readl(pcie, PIO_CTRL);
10585 + reg |= PIO_CTRL_ADDR_WIN_DISABLE;
10586 + advk_writel(pcie, reg, PIO_CTRL);
10587 +
10588 ++ /*
10589 ++ * Configure PCIe address windows for non-memory or
10590 ++ * non-transparent access as by default PCIe uses
10591 ++ * transparent memory access.
10592 ++ */
10593 ++ for (i = 0; i < pcie->wins_count; i++)
10594 ++ advk_pcie_set_ob_win(pcie, i,
10595 ++ pcie->wins[i].match, pcie->wins[i].remap,
10596 ++ pcie->wins[i].mask, pcie->wins[i].actions);
10597 ++
10598 ++ /* Disable remaining PCIe outbound windows */
10599 ++ for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
10600 ++ advk_pcie_disable_ob_win(pcie, i);
10601 ++
10602 + advk_pcie_train_link(pcie);
10603 +
10604 + /*
10605 +@@ -472,7 +587,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
10606 + advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
10607 + }
10608 +
10609 +-static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
10610 ++static int advk_pcie_check_pio_status(struct advk_pcie *pcie, u32 *val)
10611 + {
10612 + struct device *dev = &pcie->pdev->dev;
10613 + u32 reg;
10614 +@@ -483,14 +598,49 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
10615 + status = (reg & PIO_COMPLETION_STATUS_MASK) >>
10616 + PIO_COMPLETION_STATUS_SHIFT;
10617 +
10618 +- if (!status)
10619 +- return;
10620 +-
10621 ++ /*
10622 ++ * According to the HW spec, the PIO status check sequence is as below:
10623 ++ * 1) even if COMPLETION_STATUS(bit9:7) indicates success, the
10624 ++ * Error Status(bit11) still needs to be checked; only when that bit
10625 ++ * indicates no error happened is the operation successful.
10626 ++ * 2) a COMPLETION_STATUS(bit9:7) value of Unsupported Request(1) only
10627 ++ * means a PIO write error; a PIO read is successful, with
10628 ++ * a read value of 0xFFFFFFFF.
10629 ++ * 3) a COMPLETION_STATUS(bit9:7) value of Completion Retry Status(CRS)
10630 ++ * only means a PIO write error; a PIO read is successful,
10631 ++ * with a read value of 0xFFFF0001.
10632 ++ * 4) a COMPLETION_STATUS(bit9:7) value of Completer Abort(CA) means an
10633 ++ * error for both PIO read and PIO write operations.
10634 ++ * 5) other errors are indicated as 'unknown'.
10635 ++ */
10636 + switch (status) {
10637 ++ case PIO_COMPLETION_STATUS_OK:
10638 ++ if (reg & PIO_ERR_STATUS) {
10639 ++ strcomp_status = "COMP_ERR";
10640 ++ break;
10641 ++ }
10642 ++ /* Get the read result */
10643 ++ if (val)
10644 ++ *val = advk_readl(pcie, PIO_RD_DATA);
10645 ++ /* No error */
10646 ++ strcomp_status = NULL;
10647 ++ break;
10648 + case PIO_COMPLETION_STATUS_UR:
10649 + strcomp_status = "UR";
10650 + break;
10651 + case PIO_COMPLETION_STATUS_CRS:
10652 ++ /* PCIe r4.0, sec 2.3.2, says:
10653 ++ * If CRS Software Visibility is not enabled, the Root Complex
10654 ++ * must re-issue the Configuration Request as a new Request.
10655 ++ * A Root Complex implementation may choose to limit the number
10656 ++ * of Configuration Request/CRS Completion Status loops before
10657 ++ * determining that something is wrong with the target of the
10658 ++ * Request and taking appropriate action, e.g., complete the
10659 ++ * Request to the host as a failed transaction.
10660 ++ *
10661 ++ * To simplify implementation do not re-issue the Configuration
10662 ++ * Request and complete the Request as a failed transaction.
10663 ++ */
10664 + strcomp_status = "CRS";
10665 + break;
10666 + case PIO_COMPLETION_STATUS_CA:
10667 +@@ -501,6 +651,9 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
10668 + break;
10669 + }
10670 +
10671 ++ if (!strcomp_status)
10672 ++ return 0;
10673 ++
10674 + if (reg & PIO_NON_POSTED_REQ)
10675 + str_posted = "Non-posted";
10676 + else
10677 +@@ -508,6 +661,8 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
10678 +
10679 + dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
10680 + str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
10681 ++
10682 ++ return -EFAULT;
10683 + }
10684 +
10685 + static int advk_pcie_wait_pio(struct advk_pcie *pcie)
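
With the rework above, advk_pcie_check_pio_status() reduces the five-case sequence in the comment to "status OK with no error bit means success (plus the read value); everything else is completed as a failed transaction". A compact userspace sketch of that decision table (the constants are illustrative stand-ins for the register fields, not the driver itself):

#include <stdio.h>

#define STATUS_OK	0
#define STATUS_UR	1
#define STATUS_CRS	2
#define STATUS_CA	4
#define ERR_BIT		(1u << 11)	/* stand-in for PIO_ERR_STATUS */

/* Returns 0 on success (filling *val for reads), -1 otherwise; the
 * caller then fails the transaction and, for reads, substitutes ~0. */
static int check_pio_status(unsigned int reg, unsigned int status,
			    unsigned int *val)
{
	switch (status) {
	case STATUS_OK:
		if (reg & ERR_BIT)
			return -1;	/* error bit overrides "OK" */
		if (val)
			*val = 0x12345678;	/* stand-in for PIO_RD_DATA */
		return 0;
	case STATUS_UR:		/* UR, CRS and CA are all completed as */
	case STATUS_CRS:	/* failed transactions; CRS is deliberately */
	case STATUS_CA:		/* not re-issued (PCIe r4.0, sec 2.3.2) */
	default:		/* anything else is 'unknown' */
		return -1;
	}
}

int main(void)
{
	unsigned int v = 0;

	printf("ok:  %d\n", check_pio_status(0, STATUS_OK, &v));
	printf("err: %d\n", check_pio_status(ERR_BIT, STATUS_OK, &v));
	printf("crs: %d\n", check_pio_status(0, STATUS_CRS, NULL));
	return 0;
}
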
10686 +@@ -745,10 +900,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
10687 + return PCIBIOS_SET_FAILED;
10688 + }
10689 +
10690 +- advk_pcie_check_pio_status(pcie);
10691 ++ /* Check PIO status and get the read result */
10692 ++ ret = advk_pcie_check_pio_status(pcie, val);
10693 ++ if (ret < 0) {
10694 ++ *val = 0xffffffff;
10695 ++ return PCIBIOS_SET_FAILED;
10696 ++ }
10697 +
10698 +- /* Get the read result */
10699 +- *val = advk_readl(pcie, PIO_RD_DATA);
10700 + if (size == 1)
10701 + *val = (*val >> (8 * (where & 3))) & 0xff;
10702 + else if (size == 2)
10703 +@@ -812,7 +970,9 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
10704 + if (ret < 0)
10705 + return PCIBIOS_SET_FAILED;
10706 +
10707 +- advk_pcie_check_pio_status(pcie);
10708 ++ ret = advk_pcie_check_pio_status(pcie, NULL);
10709 ++ if (ret < 0)
10710 ++ return PCIBIOS_SET_FAILED;
10711 +
10712 + return PCIBIOS_SUCCESSFUL;
10713 + }
10714 +@@ -886,22 +1046,28 @@ static void advk_pcie_irq_mask(struct irq_data *d)
10715 + {
10716 + struct advk_pcie *pcie = d->domain->host_data;
10717 + irq_hw_number_t hwirq = irqd_to_hwirq(d);
10718 ++ unsigned long flags;
10719 + u32 mask;
10720 +
10721 ++ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
10722 + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
10723 + mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
10724 + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
10725 ++ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
10726 + }
10727 +
10728 + static void advk_pcie_irq_unmask(struct irq_data *d)
10729 + {
10730 + struct advk_pcie *pcie = d->domain->host_data;
10731 + irq_hw_number_t hwirq = irqd_to_hwirq(d);
10732 ++ unsigned long flags;
10733 + u32 mask;
10734 +
10735 ++ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
10736 + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
10737 + mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
10738 + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
10739 ++ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
10740 + }
10741 +
10742 + static int advk_pcie_irq_map(struct irq_domain *h,
10743 +@@ -985,6 +1151,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
10744 + struct irq_chip *irq_chip;
10745 + int ret = 0;
10746 +
10747 ++ raw_spin_lock_init(&pcie->irq_lock);
10748 ++
10749 + pcie_intc_node = of_get_next_child(node, NULL);
10750 + if (!pcie_intc_node) {
10751 + dev_err(dev, "No PCIe Intc node found\n");
10752 +@@ -1162,6 +1330,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
10753 + struct device *dev = &pdev->dev;
10754 + struct advk_pcie *pcie;
10755 + struct pci_host_bridge *bridge;
10756 ++ struct resource_entry *entry;
10757 + int ret, irq;
10758 +
10759 + bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
10760 +@@ -1172,6 +1341,80 @@ static int advk_pcie_probe(struct platform_device *pdev)
10761 + pcie->pdev = pdev;
10762 + platform_set_drvdata(pdev, pcie);
10763 +
10764 ++ resource_list_for_each_entry(entry, &bridge->windows) {
10765 ++ resource_size_t start = entry->res->start;
10766 ++ resource_size_t size = resource_size(entry->res);
10767 ++ unsigned long type = resource_type(entry->res);
10768 ++ u64 win_size;
10769 ++
10770 ++ /*
10771 ++ * Aardvark hardware also allows configuring the PCIe windows
10772 ++ * for config type 0 and type 1 mapping, but the driver uses
10773 ++ * only PIO for issuing configuration transfers, which does
10774 ++ * not use the PCIe window configuration.
10775 ++ */
10776 ++ if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
10777 ++ type != IORESOURCE_IO)
10778 ++ continue;
10779 ++
10780 ++ /*
10781 ++ * Skip transparent memory resources. The default outbound access
10782 ++ * configuration is set to transparent memory access, so such
10783 ++ * resources do not need a window configuration.
10784 ++ */
10785 ++ if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
10786 ++ entry->offset == 0)
10787 ++ continue;
10788 ++
10789 ++ /*
10790 ++ * The n-th PCIe window is configured by the tuple (match, remap, mask),
10791 ++ * and an access to address A uses this window if A equals the match
10792 ++ * value under the given mask.
10793 ++ * So every PCIe window size must be a power of two and every start
10794 ++ * address must be aligned to the window size. The minimal size is
10795 ++ * 64 KiB because the lower 16 bits of the mask must be zero. The
10796 ++ * remapped address may only have bits set that fall within the mask.
10797 ++ */
10798 ++ while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
10799 ++ /* Calculate the largest aligned window size */
10800 ++ win_size = (1ULL << (fls64(size)-1)) |
10801 ++ (start ? (1ULL << __ffs64(start)) : 0);
10802 ++ win_size = 1ULL << __ffs64(win_size);
10803 ++ if (win_size < 0x10000)
10804 ++ break;
10805 ++
10806 ++ dev_dbg(dev,
10807 ++ "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
10808 ++ pcie->wins_count, (unsigned long long)start,
10809 ++ (unsigned long long)start + win_size, type);
10810 ++
10811 ++ if (type == IORESOURCE_IO) {
10812 ++ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
10813 ++ pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
10814 ++ } else {
10815 ++ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
10816 ++ pcie->wins[pcie->wins_count].match = start;
10817 ++ }
10818 ++ pcie->wins[pcie->wins_count].remap = start - entry->offset;
10819 ++ pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
10820 ++
10821 ++ if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
10822 ++ break;
10823 ++
10824 ++ start += win_size;
10825 ++ size -= win_size;
10826 ++ pcie->wins_count++;
10827 ++ }
10828 ++
10829 ++ if (size > 0) {
10830 ++ dev_err(&pcie->pdev->dev,
10831 ++ "Invalid PCIe region [0x%llx-0x%llx]\n",
10832 ++ (unsigned long long)entry->res->start,
10833 ++ (unsigned long long)entry->res->end + 1);
10834 ++ return -EINVAL;
10835 ++ }
10836 ++ }
10837 ++
10838 + pcie->base = devm_platform_ioremap_resource(pdev, 0);
10839 + if (IS_ERR(pcie->base))
10840 + return PTR_ERR(pcie->base);
10841 +@@ -1252,6 +1495,7 @@ static int advk_pcie_remove(struct platform_device *pdev)
10842 + {
10843 + struct advk_pcie *pcie = platform_get_drvdata(pdev);
10844 + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
10845 ++ int i;
10846 +
10847 + pci_lock_rescan_remove();
10848 + pci_stop_root_bus(bridge->bus);
10849 +@@ -1261,6 +1505,10 @@ static int advk_pcie_remove(struct platform_device *pdev)
10850 + advk_pcie_remove_msi_irq_domain(pcie);
10851 + advk_pcie_remove_irq_domain(pcie);
10852 +
10853 ++ /* Disable outbound address windows mapping */
10854 ++ for (i = 0; i < OB_WIN_COUNT; i++)
10855 ++ advk_pcie_disable_ob_win(pcie, i);
10856 ++
10857 + return 0;
10858 + }
10859 +
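
The probe loop added above splits each bridge window into at most OB_WIN_COUNT chunks, each the largest power of two that both fits the remaining size and keeps the start address aligned, with a 64 KiB floor. A standalone sketch of just that arithmetic, assuming GCC builtins in place of the kernel's fls64()/__ffs64() and a hypothetical example region:

#include <stdint.h>
#include <stdio.h>

/* Kernel-ish helpers: fls64() = index of highest set bit (1-based),
 * __ffs64() = index of lowest set bit (0-based). */
static int fls64_(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }
static int ffs64_(uint64_t x) { return __builtin_ctzll(x); }

int main(void)
{
	/* Hypothetical window: 0xe8000000 + 0x03000000 (48 MiB, not a
	 * power of two), as a bridge resource might present it. */
	uint64_t start = 0xe8000000, size = 0x03000000;
	int win = 0;

	while (size > 0 && win < 8) {	/* 8 mirrors OB_WIN_COUNT */
		/* Largest power of two that fits the size and alignment. */
		uint64_t win_size = (1ULL << (fls64_(size) - 1)) |
				    (start ? (1ULL << ffs64_(start)) : 0);
		win_size = 1ULL << ffs64_(win_size);
		if (win_size < 0x10000)	/* 64 KiB minimum */
			break;

		printf("win %d: match=0x%llx size=0x%llx mask=0x%llx\n",
		       win, (unsigned long long)start,
		       (unsigned long long)win_size,
		       (unsigned long long)~(win_size - 1));
		start += win_size;
		size -= win_size;
		win++;
	}
	return 0;
}

For the example region this yields a 32 MiB window at 0xe8000000 followed by a 16 MiB window at 0xea000000, matching how the driver would program two (match, remap, mask) tuples.
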
10860 +diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
10861 +index 8689311c5ef66..1c3d5b87ef20e 100644
10862 +--- a/drivers/pci/controller/pcie-xilinx-nwl.c
10863 ++++ b/drivers/pci/controller/pcie-xilinx-nwl.c
10864 +@@ -6,6 +6,7 @@
10865 + * (C) Copyright 2014 - 2015, Xilinx, Inc.
10866 + */
10867 +
10868 ++#include <linux/clk.h>
10869 + #include <linux/delay.h>
10870 + #include <linux/interrupt.h>
10871 + #include <linux/irq.h>
10872 +@@ -169,6 +170,7 @@ struct nwl_pcie {
10873 + u8 last_busno;
10874 + struct nwl_msi msi;
10875 + struct irq_domain *legacy_irq_domain;
10876 ++ struct clk *clk;
10877 + raw_spinlock_t leg_mask_lock;
10878 + };
10879 +
10880 +@@ -823,6 +825,16 @@ static int nwl_pcie_probe(struct platform_device *pdev)
10881 + return err;
10882 + }
10883 +
10884 ++ pcie->clk = devm_clk_get(dev, NULL);
10885 ++ if (IS_ERR(pcie->clk))
10886 ++ return PTR_ERR(pcie->clk);
10887 ++
10888 ++ err = clk_prepare_enable(pcie->clk);
10889 ++ if (err) {
10890 ++ dev_err(dev, "can't enable PCIe ref clock\n");
10891 ++ return err;
10892 ++ }
10893 ++
10894 + err = nwl_pcie_bridge_init(pcie);
10895 + if (err) {
10896 + dev_err(dev, "HW Initialization failed\n");
10897 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
10898 +index 3f353572588df..a5e6759c407b9 100644
10899 +--- a/drivers/pci/pci.c
10900 ++++ b/drivers/pci/pci.c
10901 +@@ -1906,11 +1906,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
10902 + * so that things like MSI message writing will behave as expected
10903 + * (e.g. if the device really is in D0 at enable time).
10904 + */
10905 +- if (dev->pm_cap) {
10906 +- u16 pmcsr;
10907 +- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
10908 +- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
10909 +- }
10910 ++ pci_update_current_state(dev, dev->current_state);
10911 +
10912 + if (atomic_inc_return(&dev->enable_cnt) > 1)
10913 + return 0; /* already enabled */
10914 +diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
10915 +index e1fed6649c41f..3ee63968deaa5 100644
10916 +--- a/drivers/pci/pcie/portdrv_core.c
10917 ++++ b/drivers/pci/pcie/portdrv_core.c
10918 +@@ -257,8 +257,13 @@ static int get_port_device_capability(struct pci_dev *dev)
10919 + services |= PCIE_PORT_SERVICE_DPC;
10920 +
10921 + if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
10922 +- pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
10923 +- services |= PCIE_PORT_SERVICE_BWNOTIF;
10924 ++ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
10925 ++ u32 linkcap;
10926 ++
10927 ++ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
10928 ++ if (linkcap & PCI_EXP_LNKCAP_LBNC)
10929 ++ services |= PCIE_PORT_SERVICE_BWNOTIF;
10930 ++ }
10931 +
10932 + return services;
10933 + }
10934 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
10935 +index 7b1c81b899cdf..1905ee0297a4c 100644
10936 +--- a/drivers/pci/quirks.c
10937 ++++ b/drivers/pci/quirks.c
10938 +@@ -3241,6 +3241,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
10939 + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
10940 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
10941 + PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
10942 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256);
10943 +
10944 + /*
10945 + * Intel 5000 and 5100 Memory controllers have an erratum with read completion
10946 +diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
10947 +index 8b003c890b87b..c9f03418e71e0 100644
10948 +--- a/drivers/pci/syscall.c
10949 ++++ b/drivers/pci/syscall.c
10950 +@@ -22,8 +22,10 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
10951 + long err;
10952 + int cfg_ret;
10953 +
10954 ++ err = -EPERM;
10955 ++ dev = NULL;
10956 + if (!capable(CAP_SYS_ADMIN))
10957 +- return -EPERM;
10958 ++ goto error;
10959 +
10960 + err = -ENODEV;
10961 + dev = pci_get_domain_bus_and_slot(0, bus, dfn);
10962 +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
10963 +index 5a68e242f6b34..5cb018f988003 100644
10964 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
10965 ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
10966 +@@ -167,10 +167,14 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
10967 + PIN_GRP_GPIO("jtag", 20, 5, BIT(0), "jtag"),
10968 + PIN_GRP_GPIO("sdio0", 8, 3, BIT(1), "sdio"),
10969 + PIN_GRP_GPIO("emmc_nb", 27, 9, BIT(2), "emmc"),
10970 +- PIN_GRP_GPIO("pwm0", 11, 1, BIT(3), "pwm"),
10971 +- PIN_GRP_GPIO("pwm1", 12, 1, BIT(4), "pwm"),
10972 +- PIN_GRP_GPIO("pwm2", 13, 1, BIT(5), "pwm"),
10973 +- PIN_GRP_GPIO("pwm3", 14, 1, BIT(6), "pwm"),
10974 ++ PIN_GRP_GPIO_3("pwm0", 11, 1, BIT(3) | BIT(20), 0, BIT(20), BIT(3),
10975 ++ "pwm", "led"),
10976 ++ PIN_GRP_GPIO_3("pwm1", 12, 1, BIT(4) | BIT(21), 0, BIT(21), BIT(4),
10977 ++ "pwm", "led"),
10978 ++ PIN_GRP_GPIO_3("pwm2", 13, 1, BIT(5) | BIT(22), 0, BIT(22), BIT(5),
10979 ++ "pwm", "led"),
10980 ++ PIN_GRP_GPIO_3("pwm3", 14, 1, BIT(6) | BIT(23), 0, BIT(23), BIT(6),
10981 ++ "pwm", "led"),
10982 + PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
10983 + PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
10984 + PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
10985 +@@ -184,10 +188,6 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
10986 + PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
10987 + BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
10988 + 18, 2, "gpio", "uart"),
10989 +- PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
10990 +- PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
10991 +- PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
10992 +- PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
10993 + };
10994 +
10995 + static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
10996 +diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
10997 +index 983ba9865f772..263498be8e319 100644
10998 +--- a/drivers/pinctrl/pinctrl-ingenic.c
10999 ++++ b/drivers/pinctrl/pinctrl-ingenic.c
11000 +@@ -710,7 +710,7 @@ static const struct ingenic_chip_info jz4755_chip_info = {
11001 + };
11002 +
11003 + static const u32 jz4760_pull_ups[6] = {
11004 +- 0xffffffff, 0xfffcf3ff, 0xffffffff, 0xffffcfff, 0xfffffb7c, 0xfffff00f,
11005 ++ 0xffffffff, 0xfffcf3ff, 0xffffffff, 0xffffcfff, 0xfffffb7c, 0x0000000f,
11006 + };
11007 +
11008 + static const u32 jz4760_pull_downs[6] = {
11009 +@@ -936,11 +936,11 @@ static const struct ingenic_chip_info jz4760_chip_info = {
11010 + };
11011 +
11012 + static const u32 jz4770_pull_ups[6] = {
11013 +- 0x3fffffff, 0xfff0030c, 0xffffffff, 0xffff4fff, 0xfffffb7c, 0xffa7f00f,
11014 ++ 0x3fffffff, 0xfff0f3fc, 0xffffffff, 0xffff4fff, 0xfffffb7c, 0x0024f00f,
11015 + };
11016 +
11017 + static const u32 jz4770_pull_downs[6] = {
11018 +- 0x00000000, 0x000f0c03, 0x00000000, 0x0000b000, 0x00000483, 0x00580ff0,
11019 ++ 0x00000000, 0x000f0c03, 0x00000000, 0x0000b000, 0x00000483, 0x005b0ff0,
11020 + };
11021 +
11022 + static int jz4770_uart0_data_pins[] = { 0xa0, 0xa3, };
11023 +@@ -3441,17 +3441,17 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
11024 + {
11025 + if (jzpc->info->version >= ID_X2000) {
11026 + switch (bias) {
11027 +- case PIN_CONFIG_BIAS_PULL_UP:
11028 ++ case GPIO_PULL_UP:
11029 + ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false);
11030 + ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, true);
11031 + break;
11032 +
11033 +- case PIN_CONFIG_BIAS_PULL_DOWN:
11034 ++ case GPIO_PULL_DOWN:
11035 + ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, false);
11036 + ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, true);
11037 + break;
11038 +
11039 +- case PIN_CONFIG_BIAS_DISABLE:
11040 ++ case GPIO_PULL_DIS:
11041 + default:
11042 + ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, false);
11043 + ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false);
11044 +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
11045 +index e3aa64798f7d3..4fcae8458359c 100644
11046 +--- a/drivers/pinctrl/pinctrl-single.c
11047 ++++ b/drivers/pinctrl/pinctrl-single.c
11048 +@@ -1224,6 +1224,7 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
11049 +
11050 + if (PCS_HAS_PINCONF) {
11051 + dev_err(pcs->dev, "pinconf not supported\n");
11052 ++ res = -ENOTSUPP;
11053 + goto free_pingroups;
11054 + }
11055 +
11056 +diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
11057 +index 008c83107a3ca..5fa2488fae87a 100644
11058 +--- a/drivers/pinctrl/pinctrl-stmfx.c
11059 ++++ b/drivers/pinctrl/pinctrl-stmfx.c
11060 +@@ -566,7 +566,7 @@ static irqreturn_t stmfx_pinctrl_irq_thread_fn(int irq, void *dev_id)
11061 + u8 pending[NR_GPIO_REGS];
11062 + u8 src[NR_GPIO_REGS] = {0, 0, 0};
11063 + unsigned long n, status;
11064 +- int ret;
11065 ++ int i, ret;
11066 +
11067 + ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_IRQ_GPI_PENDING,
11068 + &pending, NR_GPIO_REGS);
11069 +@@ -576,7 +576,9 @@ static irqreturn_t stmfx_pinctrl_irq_thread_fn(int irq, void *dev_id)
11070 + regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
11071 + src, NR_GPIO_REGS);
11072 +
11073 +- status = *(unsigned long *)pending;
11074 ++ BUILD_BUG_ON(NR_GPIO_REGS > sizeof(status));
11075 ++ for (i = 0, status = 0; i < NR_GPIO_REGS; i++)
11076 ++ status |= (unsigned long)pending[i] << (i * 8);
11077 + for_each_set_bit(n, &status, gc->ngpio) {
11078 + handle_nested_irq(irq_find_mapping(gc->irq.domain, n));
11079 + stmfx_pinctrl_irq_toggle_trigger(pctl, n);
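
The stmfx fix above replaces a type-punned cast of the pending[] byte array with an explicit little-endian fold, which is safe regardless of host endianness and of how the array size compares to sizeof(unsigned long). A tiny standalone version of that fold (illustrative values only):

#include <stdio.h>

#define NR_GPIO_REGS 3

int main(void)
{
	/* Hypothetical pending-interrupt bytes as read from the expander:
	 * GPIO 1 pending in bank 0, GPIO 9 in bank 1, GPIO 23 in bank 2. */
	unsigned char pending[NR_GPIO_REGS] = { 0x02, 0x02, 0x80 };
	unsigned long status = 0;
	int i;

	/* Endian-independent: byte i supplies bits [8*i+7 : 8*i]. */
	for (i = 0; i < NR_GPIO_REGS; i++)
		status |= (unsigned long)pending[i] << (i * 8);

	printf("status = 0x%06lx\n", status);	/* 0x800202 */
	return 0;
}
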
11080 +diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
11081 +index 376876bd66058..2975b4369f32f 100644
11082 +--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
11083 ++++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
11084 +@@ -918,7 +918,7 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
11085 + pin_bank->grange.pin_base = drvdata->pin_base
11086 + + pin_bank->pin_base;
11087 + pin_bank->grange.base = pin_bank->grange.pin_base;
11088 +- pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
11089 ++ pin_bank->grange.npins = pin_bank->nr_pins;
11090 + pin_bank->grange.gc = &pin_bank->gpio_chip;
11091 + pinctrl_add_gpio_range(drvdata->pctl_dev, &pin_bank->grange);
11092 + }
11093 +diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
11094 +index aa7f7aa772971..a7404d69b2d32 100644
11095 +--- a/drivers/platform/chrome/cros_ec_proto.c
11096 ++++ b/drivers/platform/chrome/cros_ec_proto.c
11097 +@@ -279,6 +279,15 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
11098 + msg->insize = sizeof(struct ec_response_get_protocol_info);
11099 +
11100 + ret = send_command(ec_dev, msg);
11101 ++ /*
11102 ++ * Send the command once again if a timeout occurred.
11103 ++ * The Fingerprint MCU (FPMCU) is restarted during system boot, which
11104 ++ * introduces a small window in which the FPMCU won't respond to any
11105 ++ * messages sent by the kernel. There is no need to wait before the next
11106 ++ * attempt because we already waited at least EC_MSG_DEADLINE_MS.
11107 ++ */
11108 ++ if (ret == -ETIMEDOUT)
11109 ++ ret = send_command(ec_dev, msg);
11110 +
11111 + if (ret < 0) {
11112 + dev_dbg(ec_dev->dev,
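
The cros_ec change above simply retries the protocol query once when the first attempt times out, relying on the transport having already waited out the EC's message deadline. A trimmed sketch of that pattern (with a hypothetical send_command() stand-in):

#include <errno.h>
#include <stdio.h>

/* Hypothetical transport: fails with -ETIMEDOUT on the first call only,
 * standing in for an FPMCU that is still rebooting. */
static int send_command(int *attempts)
{
	return (*attempts)++ == 0 ? -ETIMEDOUT : 0;
}

int main(void)
{
	int attempts = 0;
	int ret = send_command(&attempts);

	/* No extra delay needed: the failed call already waited the
	 * full message deadline. */
	if (ret == -ETIMEDOUT)
		ret = send_command(&attempts);

	printf("ret=%d after %d attempt(s)\n", ret, attempts);
	return 0;
}
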
11113 +diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
11114 +index 33f8237727335..8e761991455af 100644
11115 +--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
11116 ++++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
11117 +@@ -69,6 +69,7 @@ static int run_smbios_call(struct wmi_device *wdev)
11118 + if (obj->type == ACPI_TYPE_INTEGER)
11119 + dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n",
11120 + obj->integer.value);
11121 ++ kfree(output.pointer);
11122 + return -EIO;
11123 + }
11124 + memcpy(&priv->buf->std, obj->buffer.pointer, obj->buffer.length);
11125 +diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
11126 +index 6f0cc679c8e5c..8a4d52a9028d5 100644
11127 +--- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
11128 ++++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
11129 +@@ -379,6 +379,8 @@ static int isst_if_cpu_online(unsigned int cpu)
11130 + u64 data;
11131 + int ret;
11132 +
11133 ++ isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);
11134 ++
11135 + ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
11136 + if (ret) {
11137 + /* This is not a fatal error on MSR mailbox only I/F */
11138 +@@ -397,7 +399,6 @@ static int isst_if_cpu_online(unsigned int cpu)
11139 + return ret;
11140 + }
11141 + isst_cpu_info[cpu].punit_cpu_id = data;
11142 +- isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);
11143 +
11144 + isst_restore_msr_local(cpu);
11145 +
11146 +diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
11147 +index 215e77d3b6d93..622bdae6182c0 100644
11148 +--- a/drivers/power/supply/max17042_battery.c
11149 ++++ b/drivers/power/supply/max17042_battery.c
11150 +@@ -869,8 +869,12 @@ static irqreturn_t max17042_thread_handler(int id, void *dev)
11151 + {
11152 + struct max17042_chip *chip = dev;
11153 + u32 val;
11154 ++ int ret;
11155 ++
11156 ++ ret = regmap_read(chip->regmap, MAX17042_STATUS, &val);
11157 ++ if (ret)
11158 ++ return IRQ_HANDLED;
11159 +
11160 +- regmap_read(chip->regmap, MAX17042_STATUS, &val);
11161 + if ((val & STATUS_INTR_SOCMIN_BIT) ||
11162 + (val & STATUS_INTR_SOCMAX_BIT)) {
11163 + dev_info(&chip->client->dev, "SOC threshold INTR\n");
11164 +diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
11165 +index bc89c62ccb9b5..75e4c2d777b9c 100644
11166 +--- a/drivers/rtc/rtc-tps65910.c
11167 ++++ b/drivers/rtc/rtc-tps65910.c
11168 +@@ -467,6 +467,6 @@ static struct platform_driver tps65910_rtc_driver = {
11169 + };
11170 +
11171 + module_platform_driver(tps65910_rtc_driver);
11172 +-MODULE_ALIAS("platform:rtc-tps65910");
11173 ++MODULE_ALIAS("platform:tps65910-rtc");
11174 + MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@××××××.com>");
11175 + MODULE_LICENSE("GPL");
11176 +diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
11177 +index 3052fab00597c..3567912440dc3 100644
11178 +--- a/drivers/s390/cio/qdio_main.c
11179 ++++ b/drivers/s390/cio/qdio_main.c
11180 +@@ -890,6 +890,33 @@ static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
11181 + }
11182 + }
11183 +
11184 ++static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
11185 ++{
11186 ++ struct ccw_device *cdev = irq->cdev;
11187 ++ int rc;
11188 ++
11189 ++ spin_lock_irq(get_ccwdev_lock(cdev));
11190 ++ qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
11191 ++ if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
11192 ++ rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
11193 ++ else
11194 ++ /* default behaviour is halt */
11195 ++ rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
11196 ++ spin_unlock_irq(get_ccwdev_lock(cdev));
11197 ++ if (rc) {
11198 ++ DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
11199 ++ DBF_ERROR("rc:%4d", rc);
11200 ++ return rc;
11201 ++ }
11202 ++
11203 ++ wait_event_interruptible_timeout(cdev->private->wait_q,
11204 ++ irq->state == QDIO_IRQ_STATE_INACTIVE ||
11205 ++ irq->state == QDIO_IRQ_STATE_ERR,
11206 ++ 10 * HZ);
11207 ++
11208 ++ return 0;
11209 ++}
11210 ++
11211 + /**
11212 + * qdio_shutdown - shut down a qdio subchannel
11213 + * @cdev: associated ccw device
11214 +@@ -927,27 +954,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
11215 + qdio_shutdown_queues(irq_ptr);
11216 + qdio_shutdown_debug_entries(irq_ptr);
11217 +
11218 +- /* cleanup subchannel */
11219 +- spin_lock_irq(get_ccwdev_lock(cdev));
11220 +- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
11221 +- if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
11222 +- rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
11223 +- else
11224 +- /* default behaviour is halt */
11225 +- rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
11226 +- spin_unlock_irq(get_ccwdev_lock(cdev));
11227 +- if (rc) {
11228 +- DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
11229 +- DBF_ERROR("rc:%4d", rc);
11230 +- goto no_cleanup;
11231 +- }
11232 +-
11233 +- wait_event_interruptible_timeout(cdev->private->wait_q,
11234 +- irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
11235 +- irq_ptr->state == QDIO_IRQ_STATE_ERR,
11236 +- 10 * HZ);
11237 +-
11238 +-no_cleanup:
11239 ++ rc = qdio_cancel_ccw(irq_ptr, how);
11240 + qdio_shutdown_thinint(irq_ptr);
11241 + qdio_shutdown_irq(irq_ptr);
11242 +
11243 +@@ -1083,6 +1090,7 @@ int qdio_establish(struct ccw_device *cdev,
11244 + {
11245 + struct qdio_irq *irq_ptr = cdev->private->qdio_data;
11246 + struct subchannel_id schid;
11247 ++ long timeout;
11248 + int rc;
11249 +
11250 + ccw_device_get_schid(cdev, &schid);
11251 +@@ -1111,11 +1119,8 @@ int qdio_establish(struct ccw_device *cdev,
11252 + qdio_setup_irq(irq_ptr, init_data);
11253 +
11254 + rc = qdio_establish_thinint(irq_ptr);
11255 +- if (rc) {
11256 +- qdio_shutdown_irq(irq_ptr);
11257 +- mutex_unlock(&irq_ptr->setup_mutex);
11258 +- return rc;
11259 +- }
11260 ++ if (rc)
11261 ++ goto err_thinint;
11262 +
11263 + /* establish q */
11264 + irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
11265 +@@ -1131,15 +1136,16 @@ int qdio_establish(struct ccw_device *cdev,
11266 + if (rc) {
11267 + DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
11268 + DBF_ERROR("rc:%4x", rc);
11269 +- qdio_shutdown_thinint(irq_ptr);
11270 +- qdio_shutdown_irq(irq_ptr);
11271 +- mutex_unlock(&irq_ptr->setup_mutex);
11272 +- return rc;
11273 ++ goto err_ccw_start;
11274 + }
11275 +
11276 +- wait_event_interruptible_timeout(cdev->private->wait_q,
11277 +- irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
11278 +- irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
11279 ++ timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
11280 ++ irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
11281 ++ irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
11282 ++ if (timeout <= 0) {
11283 ++ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
11284 ++ goto err_ccw_timeout;
11285 ++ }
11286 +
11287 + if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
11288 + mutex_unlock(&irq_ptr->setup_mutex);
11289 +@@ -1156,6 +1162,16 @@ int qdio_establish(struct ccw_device *cdev,
11290 + qdio_print_subchannel_info(irq_ptr);
11291 + qdio_setup_debug_entries(irq_ptr);
11292 + return 0;
11293 ++
11294 ++err_ccw_timeout:
11295 ++ qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
11296 ++err_ccw_start:
11297 ++ qdio_shutdown_thinint(irq_ptr);
11298 ++err_thinint:
11299 ++ qdio_shutdown_irq(irq_ptr);
11300 ++ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
11301 ++ mutex_unlock(&irq_ptr->setup_mutex);
11302 ++ return rc;
11303 + }
11304 + EXPORT_SYMBOL_GPL(qdio_establish);
11305 +
11306 +diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
11307 +index adddcd5899416..0df93d2cd3c36 100644
11308 +--- a/drivers/scsi/BusLogic.c
11309 ++++ b/drivers/scsi/BusLogic.c
11310 +@@ -1711,7 +1711,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
11311 + if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
11312 + blogic_info(" DMA Channel: None, ", adapter);
11313 + if (adapter->bios_addr > 0)
11314 +- blogic_info("BIOS Address: 0x%lX, ", adapter,
11315 ++ blogic_info("BIOS Address: 0x%X, ", adapter,
11316 + adapter->bios_addr);
11317 + else
11318 + blogic_info("BIOS Address: None, ", adapter);
11319 +@@ -3451,7 +3451,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
11320 + if (buf[0] != '\n' || len > 1)
11321 + printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf);
11322 + } else
11323 +- printk("%s", buf);
11324 ++ pr_cont("%s", buf);
11325 + } else {
11326 + if (begin) {
11327 + if (adapter != NULL && adapter->adapter_initd)
11328 +@@ -3459,7 +3459,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
11329 + else
11330 + printk("%s%s", blogic_msglevelmap[msglevel], buf);
11331 + } else
11332 +- printk("%s", buf);
11333 ++ pr_cont("%s", buf);
11334 + }
11335 + begin = (buf[len - 1] == '\n');
11336 + }
11337 +diff --git a/drivers/scsi/pcmcia/fdomain_cs.c b/drivers/scsi/pcmcia/fdomain_cs.c
11338 +index e42acf314d068..33df6a9ba9b5f 100644
11339 +--- a/drivers/scsi/pcmcia/fdomain_cs.c
11340 ++++ b/drivers/scsi/pcmcia/fdomain_cs.c
11341 +@@ -45,8 +45,10 @@ static int fdomain_probe(struct pcmcia_device *link)
11342 + goto fail_disable;
11343 +
11344 + if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE,
11345 +- "fdomain_cs"))
11346 ++ "fdomain_cs")) {
11347 ++ ret = -EBUSY;
11348 + goto fail_disable;
11349 ++ }
11350 +
11351 + sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev);
11352 + if (!sh) {
11353 +diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
11354 +index 85f41abcb56c1..42d0d941dba5c 100644
11355 +--- a/drivers/scsi/qedf/qedf_main.c
11356 ++++ b/drivers/scsi/qedf/qedf_main.c
11357 +@@ -3004,7 +3004,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
11358 + {
11359 + u32 *list;
11360 + int i;
11361 +- int status = 0, rc;
11362 ++ int status;
11363 + u32 *pbl;
11364 + dma_addr_t page;
11365 + int num_pages;
11366 +@@ -3016,7 +3016,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
11367 + */
11368 + if (!qedf->num_queues) {
11369 + QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
11370 +- return 1;
11371 ++ return -ENOMEM;
11372 + }
11373 +
11374 + /*
11375 +@@ -3024,7 +3024,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
11376 + * addresses of our queues
11377 + */
11378 + if (!qedf->p_cpuq) {
11379 +- status = 1;
11380 ++ status = -EINVAL;
11381 + QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
11382 + goto mem_alloc_failure;
11383 + }
11384 +@@ -3040,8 +3040,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
11385 + "qedf->global_queues=%p.\n", qedf->global_queues);
11386 +
11387 + /* Allocate DMA coherent buffers for BDQ */
11388 +- rc = qedf_alloc_bdq(qedf);
11389 +- if (rc) {
11390 ++ status = qedf_alloc_bdq(qedf);
11391 ++ if (status) {
11392 + QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
11393 + goto mem_alloc_failure;
11394 + }
11395 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
11396 +index 0b0acb8270719..e6dc0b495a829 100644
11397 +--- a/drivers/scsi/qedi/qedi_main.c
11398 ++++ b/drivers/scsi/qedi/qedi_main.c
11399 +@@ -1621,7 +1621,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
11400 + {
11401 + u32 *list;
11402 + int i;
11403 +- int status = 0, rc;
11404 ++ int status;
11405 + u32 *pbl;
11406 + dma_addr_t page;
11407 + int num_pages;
11408 +@@ -1632,14 +1632,14 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
11409 + */
11410 + if (!qedi->num_queues) {
11411 + QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
11412 +- return 1;
11413 ++ return -ENOMEM;
11414 + }
11415 +
11416 + /* Make sure we allocated the PBL that will contain the physical
11417 + * addresses of our queues
11418 + */
11419 + if (!qedi->p_cpuq) {
11420 +- status = 1;
11421 ++ status = -EINVAL;
11422 + goto mem_alloc_failure;
11423 + }
11424 +
11425 +@@ -1654,13 +1654,13 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
11426 + "qedi->global_queues=%p.\n", qedi->global_queues);
11427 +
11428 + /* Allocate DMA coherent buffers for BDQ */
11429 +- rc = qedi_alloc_bdq(qedi);
11430 +- if (rc)
11431 ++ status = qedi_alloc_bdq(qedi);
11432 ++ if (status)
11433 + goto mem_alloc_failure;
11434 +
11435 + /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
11436 +- rc = qedi_alloc_nvm_iscsi_cfg(qedi);
11437 +- if (rc)
11438 ++ status = qedi_alloc_nvm_iscsi_cfg(qedi);
11439 ++ if (status)
11440 + goto mem_alloc_failure;
11441 +
11442 + /* Allocate a CQ and an associated PBL for each MSI-X
11443 +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
11444 +index 3e5c70a1d969c..a7259733e4709 100644
11445 +--- a/drivers/scsi/qla2xxx/qla_nvme.c
11446 ++++ b/drivers/scsi/qla2xxx/qla_nvme.c
11447 +@@ -91,8 +91,9 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
11448 + struct qla_hw_data *ha;
11449 + struct qla_qpair *qpair;
11450 +
11451 +- if (!qidx)
11452 +- qidx++;
11453 ++ /* Map admin queue and 1st IO queue to index 0 */
11454 ++ if (qidx)
11455 ++ qidx--;
11456 +
11457 + vha = (struct scsi_qla_host *)lport->private;
11458 + ha = vha->hw;
11459 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
11460 +index cedd558f65ebf..37ab71b6a8a78 100644
11461 +--- a/drivers/scsi/qla2xxx/qla_os.c
11462 ++++ b/drivers/scsi/qla2xxx/qla_os.c
11463 +@@ -14,6 +14,7 @@
11464 + #include <linux/slab.h>
11465 + #include <linux/blk-mq-pci.h>
11466 + #include <linux/refcount.h>
11467 ++#include <linux/crash_dump.h>
11468 +
11469 + #include <scsi/scsi_tcq.h>
11470 + #include <scsi/scsicam.h>
11471 +@@ -2818,6 +2819,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
11472 + return ret;
11473 + }
11474 +
11475 ++ if (is_kdump_kernel()) {
11476 ++ ql2xmqsupport = 0;
11477 ++ ql2xallocfwdump = 0;
11478 ++ }
11479 ++
11480 + /* This may fail but that's ok */
11481 + pci_enable_pcie_error_reporting(pdev);
11482 +
11483 +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
11484 +index dcc0b9618a649..8819a407c02b6 100644
11485 +--- a/drivers/scsi/smartpqi/smartpqi_init.c
11486 ++++ b/drivers/scsi/smartpqi/smartpqi_init.c
11487 +@@ -1322,6 +1322,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
11488 + "requested %u bytes, received %u bytes\n",
11489 + raid_map_size,
11490 + get_unaligned_le32(&raid_map->structure_size));
11491 ++ rc = -EINVAL;
11492 + goto error;
11493 + }
11494 + }
11495 +diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
11496 +index cf46d6f86e0ed..427a2ff7e9da1 100644
11497 +--- a/drivers/scsi/ufs/ufs-exynos.c
11498 ++++ b/drivers/scsi/ufs/ufs-exynos.c
11499 +@@ -260,7 +260,7 @@ static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
11500 + struct ufs_hba *hba = ufs->hba;
11501 + struct list_head *head = &hba->clk_list_head;
11502 + struct ufs_clk_info *clki;
11503 +- u32 pclk_rate;
11504 ++ unsigned long pclk_rate;
11505 + u32 f_min, f_max;
11506 + u8 div = 0;
11507 + int ret = 0;
11508 +@@ -299,7 +299,7 @@ static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
11509 + }
11510 +
11511 + if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
11512 +- dev_err(hba->dev, "not available pclk range %d\n", pclk_rate);
11513 ++ dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
11514 + ret = -EINVAL;
11515 + goto out;
11516 + }
11517 +diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h
11518 +index 67505fe32ebf9..dadf4fd10dd80 100644
11519 +--- a/drivers/scsi/ufs/ufs-exynos.h
11520 ++++ b/drivers/scsi/ufs/ufs-exynos.h
11521 +@@ -184,7 +184,7 @@ struct exynos_ufs {
11522 + u32 pclk_div;
11523 + u32 pclk_avail_min;
11524 + u32 pclk_avail_max;
11525 +- u32 mclk_rate;
11526 ++ unsigned long mclk_rate;
11527 + int avail_ln_rx;
11528 + int avail_ln_tx;
11529 + int rx_sel_idx;
11530 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
11531 +index 708b3b62fc4d1..15ac5fa148058 100644
11532 +--- a/drivers/scsi/ufs/ufshcd.c
11533 ++++ b/drivers/scsi/ufs/ufshcd.c
11534 +@@ -2766,15 +2766,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
11535 + WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
11536 + (hba->clk_gating.state != CLKS_ON));
11537 +
11538 +- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
11539 +- if (hba->pm_op_in_progress)
11540 +- set_host_byte(cmd, DID_BAD_TARGET);
11541 +- else
11542 +- err = SCSI_MLQUEUE_HOST_BUSY;
11543 +- ufshcd_release(hba);
11544 +- goto out;
11545 +- }
11546 +-
11547 + lrbp = &hba->lrb[tag];
11548 + WARN_ON(lrbp->cmd);
11549 + lrbp->cmd = cmd;
11550 +@@ -2949,11 +2940,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
11551 + enum dev_cmd_type cmd_type, int timeout)
11552 + {
11553 + struct request_queue *q = hba->cmd_queue;
11554 ++ DECLARE_COMPLETION_ONSTACK(wait);
11555 + struct request *req;
11556 + struct ufshcd_lrb *lrbp;
11557 + int err;
11558 + int tag;
11559 +- struct completion wait;
11560 +
11561 + down_read(&hba->clk_scaling_lock);
11562 +
11563 +@@ -2973,12 +2964,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
11564 + req->timeout = msecs_to_jiffies(2 * timeout);
11565 + blk_mq_start_request(req);
11566 +
11567 +- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
11568 +- err = -EBUSY;
11569 +- goto out;
11570 +- }
11571 +-
11572 +- init_completion(&wait);
11573 + lrbp = &hba->lrb[tag];
11574 + WARN_ON(lrbp->cmd);
11575 + err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
11576 +@@ -3419,9 +3404,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
11577 +
11578 + if (is_kmalloc) {
11579 + /* Make sure we don't copy more data than available */
11580 +- if (param_offset + param_size > buff_len)
11581 +- param_size = buff_len - param_offset;
11582 +- memcpy(param_read_buf, &desc_buf[param_offset], param_size);
11583 ++ if (param_offset >= buff_len)
11584 ++ ret = -EINVAL;
11585 ++ else
11586 ++ memcpy(param_read_buf, &desc_buf[param_offset],
11587 ++ min_t(u32, param_size, buff_len - param_offset));
11588 + }
11589 + out:
11590 + if (is_kmalloc)
11591 +@@ -3983,14 +3970,13 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
11592 + */
11593 + static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
11594 + {
11595 +- struct completion uic_async_done;
11596 ++ DECLARE_COMPLETION_ONSTACK(uic_async_done);
11597 + unsigned long flags;
11598 + u8 status;
11599 + int ret;
11600 + bool reenable_intr = false;
11601 +
11602 + mutex_lock(&hba->uic_cmd_mutex);
11603 +- init_completion(&uic_async_done);
11604 + ufshcd_add_delay_before_dme_cmd(hba);
11605 +
11606 + spin_lock_irqsave(hba->host->host_lock, flags);
11607 +@@ -5020,15 +5006,34 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
11608 + static void ufshcd_slave_destroy(struct scsi_device *sdev)
11609 + {
11610 + struct ufs_hba *hba;
11611 ++ unsigned long flags;
11612 +
11613 + hba = shost_priv(sdev->host);
11614 + /* Drop the reference as it won't be needed anymore */
11615 + if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
11616 +- unsigned long flags;
11617 +-
11618 + spin_lock_irqsave(hba->host->host_lock, flags);
11619 + hba->sdev_ufs_device = NULL;
11620 + spin_unlock_irqrestore(hba->host->host_lock, flags);
11621 ++ } else if (hba->sdev_ufs_device) {
11622 ++ struct device *supplier = NULL;
11623 ++
11624 ++ /* Ensure UFS Device WLUN exists and does not disappear */
11625 ++ spin_lock_irqsave(hba->host->host_lock, flags);
11626 ++ if (hba->sdev_ufs_device) {
11627 ++ supplier = &hba->sdev_ufs_device->sdev_gendev;
11628 ++ get_device(supplier);
11629 ++ }
11630 ++ spin_unlock_irqrestore(hba->host->host_lock, flags);
11631 ++
11632 ++ if (supplier) {
11633 ++ /*
11634 ++ * If a LUN fails to probe (e.g. absent BOOT WLUN), the
11635 ++ * device will not have been registered but can still
11636 ++ * have a device link holding a reference to the device.
11637 ++ */
11638 ++ device_link_remove(&sdev->sdev_gendev, supplier);
11639 ++ put_device(supplier);
11640 ++ }
11641 + }
11642 + }
11643 +
11644 +@@ -6663,11 +6668,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
11645 + enum query_opcode desc_op)
11646 + {
11647 + struct request_queue *q = hba->cmd_queue;
11648 ++ DECLARE_COMPLETION_ONSTACK(wait);
11649 + struct request *req;
11650 + struct ufshcd_lrb *lrbp;
11651 + int err = 0;
11652 + int tag;
11653 +- struct completion wait;
11654 + u8 upiu_flags;
11655 +
11656 + down_read(&hba->clk_scaling_lock);
11657 +@@ -6685,7 +6690,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
11658 + goto out;
11659 + }
11660 +
11661 +- init_completion(&wait);
11662 + lrbp = &hba->lrb[tag];
11663 + WARN_ON(lrbp->cmd);
11664 + lrbp->cmd = NULL;
11665 +@@ -6984,8 +6988,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
11666 + struct Scsi_Host *host;
11667 + struct ufs_hba *hba;
11668 + unsigned long flags;
11669 +- unsigned int tag;
11670 +- int err = 0;
11671 ++ int tag;
11672 ++ int err = FAILED;
11673 + struct ufshcd_lrb *lrbp;
11674 + u32 reg;
11675 +
11676 +@@ -7002,12 +7006,12 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
11677 +
11678 + ufshcd_hold(hba, false);
11679 + reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
11680 +- /* If command is already aborted/completed, return SUCCESS */
11681 ++ /* If command is already aborted/completed, return FAILED. */
11682 + if (!(test_bit(tag, &hba->outstanding_reqs))) {
11683 + dev_err(hba->dev,
11684 + "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
11685 + __func__, tag, hba->outstanding_reqs, reg);
11686 +- goto out;
11687 ++ goto release;
11688 + }
11689 +
11690 + /* Print Transfer Request of aborted task */
11691 +@@ -7036,7 +7040,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
11692 + dev_err(hba->dev,
11693 + "%s: cmd was completed, but without a notifying intr, tag = %d",
11694 + __func__, tag);
11695 +- goto cleanup;
11696 ++ __ufshcd_transfer_req_compl(hba, 1UL << tag);
11697 ++ goto release;
11698 + }
11699 +
11700 + /*
11701 +@@ -7049,36 +7054,33 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
11702 + */
11703 + if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
11704 + ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
11705 +- __ufshcd_transfer_req_compl(hba, (1UL << tag));
11706 +- set_bit(tag, &hba->outstanding_reqs);
11707 ++
11708 + spin_lock_irqsave(host->host_lock, flags);
11709 + hba->force_reset = true;
11710 + ufshcd_schedule_eh_work(hba);
11711 + spin_unlock_irqrestore(host->host_lock, flags);
11712 +- goto out;
11713 ++ goto release;
11714 + }
11715 +
11716 + /* Skip task abort in case previous aborts failed and report failure */
11717 +- if (lrbp->req_abort_skip)
11718 +- err = -EIO;
11719 +- else
11720 +- err = ufshcd_try_to_abort_task(hba, tag);
11721 ++ if (lrbp->req_abort_skip) {
11722 ++ dev_err(hba->dev, "%s: skipping abort\n", __func__);
11723 ++ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
11724 ++ goto release;
11725 ++ }
11726 +
11727 +- if (!err) {
11728 +-cleanup:
11729 +- __ufshcd_transfer_req_compl(hba, (1UL << tag));
11730 +-out:
11731 +- err = SUCCESS;
11732 +- } else {
11733 ++ err = ufshcd_try_to_abort_task(hba, tag);
11734 ++ if (err) {
11735 + dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
11736 + ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
11737 + err = FAILED;
11738 ++ goto release;
11739 + }
11740 +
11741 +- /*
11742 +- * This ufshcd_release() corresponds to the original scsi cmd that got
11743 +- * aborted here (as we won't get any IRQ for it).
11744 +- */
11745 ++ err = SUCCESS;
11746 ++
11747 ++release:
11748 ++ /* Matches the ufshcd_hold() call at the start of this function. */
11749 + ufshcd_release(hba);
11750 + return err;
11751 + }
11752 +diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
11753 +index c557ffd0992c7..55e46fa6cf424 100644
11754 +--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
11755 ++++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
11756 +@@ -51,7 +51,7 @@ static int aspeed_lpc_ctrl_mmap(struct file *file, struct vm_area_struct *vma)
11757 + unsigned long vsize = vma->vm_end - vma->vm_start;
11758 + pgprot_t prot = vma->vm_page_prot;
11759 +
11760 +- if (vma->vm_pgoff + vsize > lpc_ctrl->mem_base + lpc_ctrl->mem_size)
11761 ++ if (vma->vm_pgoff + vma_pages(vma) > lpc_ctrl->mem_size >> PAGE_SHIFT)
11762 + return -EINVAL;
11763 +
11764 + /* ast2400/2500 AHB accesses are not cache coherent */
11765 +diff --git a/drivers/soc/aspeed/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
11766 +index b60fbeaffcbd0..20b5fb2a207cc 100644
11767 +--- a/drivers/soc/aspeed/aspeed-p2a-ctrl.c
11768 ++++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
11769 +@@ -110,7 +110,7 @@ static int aspeed_p2a_mmap(struct file *file, struct vm_area_struct *vma)
11770 + vsize = vma->vm_end - vma->vm_start;
11771 + prot = vma->vm_page_prot;
11772 +
11773 +- if (vma->vm_pgoff + vsize > ctrl->mem_base + ctrl->mem_size)
11774 ++ if (vma->vm_pgoff + vma_pages(vma) > ctrl->mem_size >> PAGE_SHIFT)
11775 + return -EINVAL;
11776 +
11777 + /* ast2400/2500 AHB accesses are not cache coherent */
11778 +diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
11779 +index 5f3e2bf0c40bc..9e2b81bd38db1 100644
11780 +--- a/drivers/soc/mediatek/mtk-mmsys.h
11781 ++++ b/drivers/soc/mediatek/mtk-mmsys.h
11782 +@@ -262,6 +262,10 @@ static const struct mtk_mmsys_routes mmsys_default_routing_table[] = {
11783 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3,
11784 + DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_MASK,
11785 + DSI3_SEL_IN_RDMA2
11786 ++ }, {
11787 ++ DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
11788 ++ DISP_REG_CONFIG_DISP_UFOE_MOUT_EN, UFOE_MOUT_EN_DSI0,
11789 ++ UFOE_MOUT_EN_DSI0
11790 + }
11791 + };
11792 +
11793 +diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
11794 +index 934fcc4d2b057..7b6b94332510a 100644
11795 +--- a/drivers/soc/qcom/qcom_aoss.c
11796 ++++ b/drivers/soc/qcom/qcom_aoss.c
11797 +@@ -476,12 +476,12 @@ static int qmp_cooling_device_add(struct qmp *qmp,
11798 + static int qmp_cooling_devices_register(struct qmp *qmp)
11799 + {
11800 + struct device_node *np, *child;
11801 +- int count = QMP_NUM_COOLING_RESOURCES;
11802 ++ int count = 0;
11803 + int ret;
11804 +
11805 + np = qmp->dev->of_node;
11806 +
11807 +- qmp->cooling_devs = devm_kcalloc(qmp->dev, count,
11808 ++ qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES,
11809 + sizeof(*qmp->cooling_devs),
11810 + GFP_KERNEL);
11811 +
11812 +@@ -497,12 +497,16 @@ static int qmp_cooling_devices_register(struct qmp *qmp)
11813 + goto unroll;
11814 + }
11815 +
11816 ++ if (!count)
11817 ++ devm_kfree(qmp->dev, qmp->cooling_devs);
11818 ++
11819 + return 0;
11820 +
11821 + unroll:
11822 + while (--count >= 0)
11823 + thermal_cooling_device_unregister
11824 + (qmp->cooling_devs[count].cdev);
11825 ++ devm_kfree(qmp->dev, qmp->cooling_devs);
11826 +
11827 + return ret;
11828 + }
11829 +diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
11830 +index c11e3d8cd308f..f156de765c68c 100644
11831 +--- a/drivers/soundwire/intel.c
11832 ++++ b/drivers/soundwire/intel.c
11833 +@@ -537,12 +537,14 @@ static int intel_link_power_down(struct sdw_intel *sdw)
11834 +
11835 + mutex_lock(sdw->link_res->shim_lock);
11836 +
11837 +- intel_shim_master_ip_to_glue(sdw);
11838 +-
11839 + if (!(*shim_mask & BIT(link_id)))
11840 + dev_err(sdw->cdns.dev,
11841 + "%s: Unbalanced power-up/down calls\n", __func__);
11842 +
11843 ++ sdw->cdns.link_up = false;
11844 ++
11845 ++ intel_shim_master_ip_to_glue(sdw);
11846 ++
11847 + *shim_mask &= ~BIT(link_id);
11848 +
11849 + if (!*shim_mask) {
11850 +@@ -559,18 +561,19 @@ static int intel_link_power_down(struct sdw_intel *sdw)
11851 + link_control &= spa_mask;
11852 +
11853 + ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
11854 ++ if (ret < 0) {
11855 ++ dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
11856 ++
11857 ++ /*
11858 ++ * we leave the sdw->cdns.link_up flag as false since we've disabled
11859 ++ * the link at this point and cannot handle interrupts any longer.
11860 ++ */
11861 ++ }
11862 + }
11863 +
11864 + mutex_unlock(sdw->link_res->shim_lock);
11865 +
11866 +- if (ret < 0) {
11867 +- dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
11868 +-
11869 +- return ret;
11870 +- }
11871 +-
11872 +- sdw->cdns.link_up = false;
11873 +- return 0;
11874 ++ return ret;
11875 + }
11876 +
11877 + static void intel_shim_sync_arm(struct sdw_intel *sdw)
11878 +diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
11879 +index 87f8829c39952..829770b8ec74c 100644
11880 +--- a/drivers/spi/spi-fsi.c
11881 ++++ b/drivers/spi/spi-fsi.c
11882 +@@ -25,16 +25,11 @@
11883 +
11884 + #define SPI_FSI_BASE 0x70000
11885 + #define SPI_FSI_INIT_TIMEOUT_MS 1000
11886 +-#define SPI_FSI_MAX_XFR_SIZE 2048
11887 +-#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED 8
11888 ++#define SPI_FSI_MAX_RX_SIZE 8
11889 ++#define SPI_FSI_MAX_TX_SIZE 40
11890 +
11891 + #define SPI_FSI_ERROR 0x0
11892 + #define SPI_FSI_COUNTER_CFG 0x1
11893 +-#define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32)
11894 +-#define SPI_FSI_COUNTER_CFG_N2_RX BIT_ULL(8)
11895 +-#define SPI_FSI_COUNTER_CFG_N2_TX BIT_ULL(9)
11896 +-#define SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
11897 +-#define SPI_FSI_COUNTER_CFG_N2_RELOAD BIT_ULL(11)
11898 + #define SPI_FSI_CFG1 0x2
11899 + #define SPI_FSI_CLOCK_CFG 0x3
11900 + #define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
11901 +@@ -76,8 +71,6 @@ struct fsi_spi {
11902 + struct device *dev; /* SPI controller device */
11903 + struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
11904 + u32 base;
11905 +- size_t max_xfr_size;
11906 +- bool restricted;
11907 + };
11908 +
11909 + struct fsi_spi_sequence {
11910 +@@ -241,7 +234,7 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
11911 + return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
11912 + }
11913 +
11914 +-static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
11915 ++static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
11916 + {
11917 + /*
11918 + * Add the next byte of instruction to the 8-byte sequence register.
11919 +@@ -251,8 +244,6 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
11920 + */
11921 + seq->data |= (u64)val << seq->bit;
11922 + seq->bit -= 8;
11923 +-
11924 +- return ((64 - seq->bit) / 8) - 2;
11925 + }
11926 +
11927 + static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
11928 +@@ -261,71 +252,11 @@ static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
11929 + seq->data = 0ULL;
11930 + }
11931 +
11932 +-static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
11933 +- struct fsi_spi_sequence *seq,
11934 +- struct spi_transfer *transfer)
11935 +-{
11936 +- int loops;
11937 +- int idx;
11938 +- int rc;
11939 +- u8 val = 0;
11940 +- u8 len = min(transfer->len, 8U);
11941 +- u8 rem = transfer->len % len;
11942 +-
11943 +- loops = transfer->len / len;
11944 +-
11945 +- if (transfer->tx_buf) {
11946 +- val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
11947 +- idx = fsi_spi_sequence_add(seq, val);
11948 +-
11949 +- if (rem)
11950 +- rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
11951 +- } else if (transfer->rx_buf) {
11952 +- val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
11953 +- idx = fsi_spi_sequence_add(seq, val);
11954 +-
11955 +- if (rem)
11956 +- rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
11957 +- } else {
11958 +- return -EINVAL;
11959 +- }
11960 +-
11961 +- if (ctx->restricted && loops > 1) {
11962 +- dev_warn(ctx->dev,
11963 +- "Transfer too large; no branches permitted.\n");
11964 +- return -EINVAL;
11965 +- }
11966 +-
11967 +- if (loops > 1) {
11968 +- u64 cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
11969 +-
11970 +- fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
11971 +-
11972 +- if (transfer->rx_buf)
11973 +- cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
11974 +- SPI_FSI_COUNTER_CFG_N2_TX |
11975 +- SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
11976 +- SPI_FSI_COUNTER_CFG_N2_RELOAD;
11977 +-
11978 +- rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
11979 +- if (rc)
11980 +- return rc;
11981 +- } else {
11982 +- fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
11983 +- }
11984 +-
11985 +- if (rem)
11986 +- fsi_spi_sequence_add(seq, rem);
11987 +-
11988 +- return 0;
11989 +-}
11990 +-
11991 + static int fsi_spi_transfer_data(struct fsi_spi *ctx,
11992 + struct spi_transfer *transfer)
11993 + {
11994 + int rc = 0;
11995 + u64 status = 0ULL;
11996 +- u64 cfg = 0ULL;
11997 +
11998 + if (transfer->tx_buf) {
11999 + int nb;
12000 +@@ -363,16 +294,6 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
12001 + u64 in = 0ULL;
12002 + u8 *rx = transfer->rx_buf;
12003 +
12004 +- rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
12005 +- if (rc)
12006 +- return rc;
12007 +-
12008 +- if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
12009 +- rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
12010 +- if (rc)
12011 +- return rc;
12012 +- }
12013 +-
12014 + while (transfer->len > recv) {
12015 + do {
12016 + rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
12017 +@@ -439,6 +360,10 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
12018 + }
12019 + } while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
12020 +
12021 ++ rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
12022 ++ if (rc)
12023 ++ return rc;
12024 ++
12025 + rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
12026 + if (rc)
12027 + return rc;
12028 +@@ -459,6 +384,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
12029 + {
12030 + int rc;
12031 + u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
12032 ++ unsigned int len;
12033 + struct spi_transfer *transfer;
12034 + struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
12035 +
12036 +@@ -471,8 +397,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
12037 + struct spi_transfer *next = NULL;
12038 +
12039 + /* Sequencer must do shift out (tx) first. */
12040 +- if (!transfer->tx_buf ||
12041 +- transfer->len > (ctx->max_xfr_size + 8)) {
12042 ++ if (!transfer->tx_buf || transfer->len > SPI_FSI_MAX_TX_SIZE) {
12043 + rc = -EINVAL;
12044 + goto error;
12045 + }
12046 +@@ -486,9 +411,13 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
12047 + fsi_spi_sequence_init(&seq);
12048 + fsi_spi_sequence_add(&seq, seq_slave);
12049 +
12050 +- rc = fsi_spi_sequence_transfer(ctx, &seq, transfer);
12051 +- if (rc)
12052 +- goto error;
12053 ++ len = transfer->len;
12054 ++ while (len > 8) {
12055 ++ fsi_spi_sequence_add(&seq,
12056 ++ SPI_FSI_SEQUENCE_SHIFT_OUT(8));
12057 ++ len -= 8;
12058 ++ }
12059 ++ fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
12060 +
12061 + if (!list_is_last(&transfer->transfer_list,
12062 + &mesg->transfers)) {
12063 +@@ -496,7 +425,9 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
12064 +
12065 + /* Sequencer can only do shift in (rx) after tx. */
12066 + if (next->rx_buf) {
12067 +- if (next->len > ctx->max_xfr_size) {
12068 ++ u8 shift;
12069 ++
12070 ++ if (next->len > SPI_FSI_MAX_RX_SIZE) {
12071 + rc = -EINVAL;
12072 + goto error;
12073 + }
12074 +@@ -504,10 +435,8 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
12075 + dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
12076 + next->len);
12077 +
12078 +- rc = fsi_spi_sequence_transfer(ctx, &seq,
12079 +- next);
12080 +- if (rc)
12081 +- goto error;
12082 ++ shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
12083 ++ fsi_spi_sequence_add(&seq, shift);
12084 + } else {
12085 + next = NULL;
12086 + }
12087 +@@ -541,9 +470,7 @@ error:
12088 +
12089 + static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
12090 + {
12091 +- struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
12092 +-
12093 +- return ctx->max_xfr_size;
12094 ++ return SPI_FSI_MAX_RX_SIZE;
12095 + }
12096 +
12097 + static int fsi_spi_probe(struct device *dev)
12098 +@@ -582,14 +509,6 @@ static int fsi_spi_probe(struct device *dev)
12099 + ctx->fsi = fsi;
12100 + ctx->base = base + SPI_FSI_BASE;
12101 +
12102 +- if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
12103 +- ctx->restricted = true;
12104 +- ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
12105 +- } else {
12106 +- ctx->restricted = false;
12107 +- ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
12108 +- }
12109 +-
12110 + rc = devm_spi_register_controller(dev, ctlr);
12111 + if (rc)
12112 + spi_controller_put(ctlr);
12113 +diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
12114 +index cb6feb34dd401..f980af0373452 100644
12115 +--- a/drivers/staging/board/board.c
12116 ++++ b/drivers/staging/board/board.c
12117 +@@ -136,6 +136,7 @@ int __init board_staging_register_clock(const struct board_staging_clk *bsc)
12118 + static int board_staging_add_dev_domain(struct platform_device *pdev,
12119 + const char *domain)
12120 + {
12121 ++ struct device *dev = &pdev->dev;
12122 + struct of_phandle_args pd_args;
12123 + struct device_node *np;
12124 +
12125 +@@ -148,7 +149,11 @@ static int board_staging_add_dev_domain(struct platform_device *pdev,
12126 + pd_args.np = np;
12127 + pd_args.args_count = 0;
12128 +
12129 +- return of_genpd_add_device(&pd_args, &pdev->dev);
12130 ++ /* Initialization similar to device_pm_init_common() */
12131 ++ spin_lock_init(&dev->power.lock);
12132 ++ dev->power.early_init = true;
12133 ++
12134 ++ return of_genpd_add_device(&pd_args, dev);
12135 + }
12136 + #else
12137 + static inline int board_staging_add_dev_domain(struct platform_device *pdev,
12138 +diff --git a/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml b/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
12139 +index 8e355cddd437d..6c348578e4a24 100644
12140 +--- a/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
12141 ++++ b/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
12142 +@@ -41,6 +41,8 @@ properties:
12143 + regulators:
12144 + type: object
12145 +
12146 ++ additionalProperties: false
12147 ++
12148 + properties:
12149 + '#address-cells':
12150 + const: 1
12151 +@@ -49,11 +51,13 @@ properties:
12152 + const: 0
12153 +
12154 + patternProperties:
12155 +- '^ldo[0-9]+@[0-9a-f]$':
12156 ++ '^(ldo|LDO)[0-9]+$':
12157 + type: object
12158 +
12159 + $ref: "/schemas/regulator/regulator.yaml#"
12160 +
12161 ++ unevaluatedProperties: false
12162 ++
12163 + required:
12164 + - compatible
12165 + - reg
12166 +diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
12167 +index cbc0032c16045..98d759e7cc957 100644
12168 +--- a/drivers/staging/ks7010/ks7010_sdio.c
12169 ++++ b/drivers/staging/ks7010/ks7010_sdio.c
12170 +@@ -939,9 +939,9 @@ static void ks7010_private_init(struct ks_wlan_private *priv,
12171 + memset(&priv->wstats, 0, sizeof(priv->wstats));
12172 +
12173 + /* sleep mode */
12174 ++ atomic_set(&priv->sleepstatus.status, 0);
12175 + atomic_set(&priv->sleepstatus.doze_request, 0);
12176 + atomic_set(&priv->sleepstatus.wakeup_request, 0);
12177 +- atomic_set(&priv->sleepstatus.wakeup_request, 0);
12178 +
12179 + trx_device_init(priv);
12180 + hostif_init(priv);
12181 +diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
12182 +index 948769ca6539d..1e324f1f656e5 100644
12183 +--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
12184 ++++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
12185 +@@ -1763,7 +1763,8 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
12186 + if (err < 0)
12187 + goto register_entities_fail;
12188 + /* init atomisp wdts */
12189 +- if (init_atomisp_wdts(isp) != 0)
12190 ++ err = init_atomisp_wdts(isp);
12191 ++ if (err != 0)
12192 + goto wdt_work_queue_fail;
12193 +
12194 + /* save the iunit context only once after all the values are init'ed. */
12195 +@@ -1815,6 +1816,7 @@ request_irq_fail:
12196 + hmm_cleanup();
12197 + hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
12198 + hmm_pool_fail:
12199 ++ pm_runtime_get_noresume(&pdev->dev);
12200 + destroy_workqueue(isp->wdt_work_queue);
12201 + wdt_work_queue_fail:
12202 + atomisp_acc_cleanup(isp);
12203 +diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
12204 +index 96622a7f8279e..2afd5996d75f8 100644
12205 +--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
12206 ++++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
12207 +@@ -376,12 +376,17 @@ static void cfg_ref(struct hantro_ctx *ctx,
12208 + vb2_dst = hantro_get_dst_buf(ctx);
12209 +
12210 + ref = hantro_get_ref(ctx, hdr->last_frame_ts);
12211 +- if (!ref)
12212 ++ if (!ref) {
12213 ++ vpu_debug(0, "failed to find last frame ts=%llu\n",
12214 ++ hdr->last_frame_ts);
12215 + ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
12216 ++ }
12217 + vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(0));
12218 +
12219 + ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
12220 +- WARN_ON(!ref && hdr->golden_frame_ts);
12221 ++ if (!ref && hdr->golden_frame_ts)
12222 ++ vpu_debug(0, "failed to find golden frame ts=%llu\n",
12223 ++ hdr->golden_frame_ts);
12224 + if (!ref)
12225 + ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
12226 + if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN)
12227 +@@ -389,7 +394,9 @@ static void cfg_ref(struct hantro_ctx *ctx,
12228 + vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(4));
12229 +
12230 + ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
12231 +- WARN_ON(!ref && hdr->alt_frame_ts);
12232 ++ if (!ref && hdr->alt_frame_ts)
12233 ++ vpu_debug(0, "failed to find alt frame ts=%llu\n",
12234 ++ hdr->alt_frame_ts);
12235 + if (!ref)
12236 + ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
12237 + if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT)
12238 +diff --git a/drivers/staging/media/hantro/rockchip_vpu2_hw_vp8_dec.c b/drivers/staging/media/hantro/rockchip_vpu2_hw_vp8_dec.c
12239 +index 951b55f58a612..704607511b57f 100644
12240 +--- a/drivers/staging/media/hantro/rockchip_vpu2_hw_vp8_dec.c
12241 ++++ b/drivers/staging/media/hantro/rockchip_vpu2_hw_vp8_dec.c
12242 +@@ -453,12 +453,17 @@ static void cfg_ref(struct hantro_ctx *ctx,
12243 + vb2_dst = hantro_get_dst_buf(ctx);
12244 +
12245 + ref = hantro_get_ref(ctx, hdr->last_frame_ts);
12246 +- if (!ref)
12247 ++ if (!ref) {
12248 ++ vpu_debug(0, "failed to find last frame ts=%llu\n",
12249 ++ hdr->last_frame_ts);
12250 + ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
12251 ++ }
12252 + vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF0);
12253 +
12254 + ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
12255 +- WARN_ON(!ref && hdr->golden_frame_ts);
12256 ++ if (!ref && hdr->golden_frame_ts)
12257 ++ vpu_debug(0, "failed to find golden frame ts=%llu\n",
12258 ++ hdr->golden_frame_ts);
12259 + if (!ref)
12260 + ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
12261 + if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN)
12262 +@@ -466,7 +471,9 @@ static void cfg_ref(struct hantro_ctx *ctx,
12263 + vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(2));
12264 +
12265 + ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
12266 +- WARN_ON(!ref && hdr->alt_frame_ts);
12267 ++ if (!ref && hdr->alt_frame_ts)
12268 ++ vpu_debug(0, "failed to find alt frame ts=%llu\n",
12269 ++ hdr->alt_frame_ts);
12270 + if (!ref)
12271 + ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
12272 + if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT)
12273 +diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
12274 +index 894c4de31790e..2882964b85136 100644
12275 +--- a/drivers/staging/media/imx/imx7-media-csi.c
12276 ++++ b/drivers/staging/media/imx/imx7-media-csi.c
12277 +@@ -361,6 +361,7 @@ static void imx7_csi_dma_unsetup_vb2_buf(struct imx7_csi *csi,
12278 +
12279 + vb->timestamp = ktime_get_ns();
12280 + vb2_buffer_done(vb, return_status);
12281 ++ csi->active_vb2_buf[i] = NULL;
12282 + }
12283 + }
12284 + }
12285 +@@ -386,9 +387,10 @@ static int imx7_csi_dma_setup(struct imx7_csi *csi)
12286 + return 0;
12287 + }
12288 +
12289 +-static void imx7_csi_dma_cleanup(struct imx7_csi *csi)
12290 ++static void imx7_csi_dma_cleanup(struct imx7_csi *csi,
12291 ++ enum vb2_buffer_state return_status)
12292 + {
12293 +- imx7_csi_dma_unsetup_vb2_buf(csi, VB2_BUF_STATE_ERROR);
12294 ++ imx7_csi_dma_unsetup_vb2_buf(csi, return_status);
12295 + imx_media_free_dma_buf(csi->dev, &csi->underrun_buf);
12296 + }
12297 +
12298 +@@ -537,9 +539,10 @@ static int imx7_csi_init(struct imx7_csi *csi)
12299 + return 0;
12300 + }
12301 +
12302 +-static void imx7_csi_deinit(struct imx7_csi *csi)
12303 ++static void imx7_csi_deinit(struct imx7_csi *csi,
12304 ++ enum vb2_buffer_state return_status)
12305 + {
12306 +- imx7_csi_dma_cleanup(csi);
12307 ++ imx7_csi_dma_cleanup(csi, return_status);
12308 + imx7_csi_init_default(csi);
12309 + imx7_csi_dmareq_rff_disable(csi);
12310 + clk_disable_unprepare(csi->mclk);
12311 +@@ -702,7 +705,7 @@ static int imx7_csi_s_stream(struct v4l2_subdev *sd, int enable)
12312 +
12313 + ret = v4l2_subdev_call(csi->src_sd, video, s_stream, 1);
12314 + if (ret < 0) {
12315 +- imx7_csi_deinit(csi);
12316 ++ imx7_csi_deinit(csi, VB2_BUF_STATE_QUEUED);
12317 + goto out_unlock;
12318 + }
12319 +
12320 +@@ -712,7 +715,7 @@ static int imx7_csi_s_stream(struct v4l2_subdev *sd, int enable)
12321 +
12322 + v4l2_subdev_call(csi->src_sd, video, s_stream, 0);
12323 +
12324 +- imx7_csi_deinit(csi);
12325 ++ imx7_csi_deinit(csi, VB2_BUF_STATE_ERROR);
12326 + }
12327 +
12328 + csi->is_streaming = !!enable;
12329 +diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
12330 +index bb7941aee0c47..fcf31f6d4b36f 100644
12331 +--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
12332 ++++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
12333 +@@ -463,7 +463,7 @@ static void PHY_StoreTxPowerByRateNew(
12334 + if (RfPath > ODM_RF_PATH_D)
12335 + return;
12336 +
12337 +- if (TxNum > ODM_RF_PATH_D)
12338 ++ if (TxNum > RF_MAX_TX_NUM)
12339 + return;
12340 +
12341 + for (i = 0; i < rateNum; ++i) {
12342 +diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
12343 +index 1deb74112ad43..11d9d9155eef2 100644
12344 +--- a/drivers/staging/rts5208/rtsx_scsi.c
12345 ++++ b/drivers/staging/rts5208/rtsx_scsi.c
12346 +@@ -2802,10 +2802,10 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
12347 + }
12348 +
12349 + if (dev_info_id == 0x15) {
12350 +- buf_len = 0x3A;
12351 ++ buf_len = 0x3C;
12352 + data_len = 0x3A;
12353 + } else {
12354 +- buf_len = 0x6A;
12355 ++ buf_len = 0x6C;
12356 + data_len = 0x6A;
12357 + }
12358 +
12359 +@@ -2855,11 +2855,7 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
12360 + }
12361 +
12362 + rtsx_stor_set_xfer_buf(buf, buf_len, srb);
12363 +-
12364 +- if (dev_info_id == 0x15)
12365 +- scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
12366 +- else
12367 +- scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);
12368 ++ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
12369 +
12370 + kfree(buf);
12371 + return STATUS_SUCCESS;
12372 +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
12373 +index 10d6b228cc941..eec59030c3a73 100644
12374 +--- a/drivers/thunderbolt/switch.c
12375 ++++ b/drivers/thunderbolt/switch.c
12376 +@@ -2443,7 +2443,7 @@ static void tb_switch_default_link_ports(struct tb_switch *sw)
12377 + {
12378 + int i;
12379 +
12380 +- for (i = 1; i <= sw->config.max_port_number; i += 2) {
12381 ++ for (i = 1; i <= sw->config.max_port_number; i++) {
12382 + struct tb_port *port = &sw->ports[i];
12383 + struct tb_port *subordinate;
12384 +
12385 +diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
12386 +index bfc15279d5bc9..f0bc8e7800512 100644
12387 +--- a/drivers/tty/hvc/hvsi.c
12388 ++++ b/drivers/tty/hvc/hvsi.c
12389 +@@ -1038,7 +1038,7 @@ static const struct tty_operations hvsi_ops = {
12390 +
12391 + static int __init hvsi_init(void)
12392 + {
12393 +- int i;
12394 ++ int i, ret;
12395 +
12396 + hvsi_driver = alloc_tty_driver(hvsi_count);
12397 + if (!hvsi_driver)
12398 +@@ -1069,12 +1069,25 @@ static int __init hvsi_init(void)
12399 + }
12400 + hvsi_wait = wait_for_state; /* irqs active now */
12401 +
12402 +- if (tty_register_driver(hvsi_driver))
12403 +- panic("Couldn't register hvsi console driver\n");
12404 ++ ret = tty_register_driver(hvsi_driver);
12405 ++ if (ret) {
12406 ++ pr_err("Couldn't register hvsi console driver\n");
12407 ++ goto err_free_irq;
12408 ++ }
12409 +
12410 + printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);
12411 +
12412 + return 0;
12413 ++err_free_irq:
12414 ++ hvsi_wait = poll_for_state;
12415 ++ for (i = 0; i < hvsi_count; i++) {
12416 ++ struct hvsi_struct *hp = &hvsi_ports[i];
12417 ++
12418 ++ free_irq(hp->virq, hp);
12419 ++ }
12420 ++ tty_driver_kref_put(hvsi_driver);
12421 ++
12422 ++ return ret;
12423 + }
12424 + device_initcall(hvsi_init);
12425 +
12426 +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
12427 +index 79418d4beb48f..b6c731a267d26 100644
12428 +--- a/drivers/tty/serial/8250/8250_omap.c
12429 ++++ b/drivers/tty/serial/8250/8250_omap.c
12430 +@@ -617,7 +617,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
12431 + struct uart_port *port = dev_id;
12432 + struct omap8250_priv *priv = port->private_data;
12433 + struct uart_8250_port *up = up_to_u8250p(port);
12434 +- unsigned int iir;
12435 ++ unsigned int iir, lsr;
12436 + int ret;
12437 +
12438 + #ifdef CONFIG_SERIAL_8250_DMA
12439 +@@ -628,6 +628,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
12440 + #endif
12441 +
12442 + serial8250_rpm_get(up);
12443 ++ lsr = serial_port_in(port, UART_LSR);
12444 + iir = serial_port_in(port, UART_IIR);
12445 + ret = serial8250_handle_irq(port, iir);
12446 +
12447 +@@ -642,6 +643,24 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
12448 + serial_port_in(port, UART_RX);
12449 + }
12450 +
12451 ++ /* Stop processing interrupts on input overrun */
12452 ++ if ((lsr & UART_LSR_OE) && up->overrun_backoff_time_ms > 0) {
12453 ++ unsigned long delay;
12454 ++
12455 ++ up->ier = port->serial_in(port, UART_IER);
12456 ++ if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
12457 ++ port->ops->stop_rx(port);
12458 ++ } else {
12459 ++ /* Keep restarting the timer until
12460 ++ * the input overrun subsides.
12461 ++ */
12462 ++ cancel_delayed_work(&up->overrun_backoff);
12463 ++ }
12464 ++
12465 ++ delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
12466 ++ schedule_delayed_work(&up->overrun_backoff, delay);
12467 ++ }
12468 ++
12469 + serial8250_rpm_put(up);
12470 +
12471 + return IRQ_RETVAL(ret);
12472 +@@ -1353,6 +1372,10 @@ static int omap8250_probe(struct platform_device *pdev)
12473 + }
12474 + }
12475 +
12476 ++ if (of_property_read_u32(np, "overrun-throttle-ms",
12477 ++ &up.overrun_backoff_time_ms) != 0)
12478 ++ up.overrun_backoff_time_ms = 0;
12479 ++
12480 + priv->wakeirq = irq_of_parse_and_map(np, 1);
12481 +
12482 + pdata = of_device_get_match_data(&pdev->dev);
12483 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
12484 +index a808c283883e0..726912b16a559 100644
12485 +--- a/drivers/tty/serial/8250/8250_pci.c
12486 ++++ b/drivers/tty/serial/8250/8250_pci.c
12487 +@@ -87,7 +87,7 @@ static void moan_device(const char *str, struct pci_dev *dev)
12488 +
12489 + static int
12490 + setup_port(struct serial_private *priv, struct uart_8250_port *port,
12491 +- int bar, int offset, int regshift)
12492 ++ u8 bar, unsigned int offset, int regshift)
12493 + {
12494 + struct pci_dev *dev = priv->dev;
12495 +
12496 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
12497 +index 1da29a219842b..66374704747ec 100644
12498 +--- a/drivers/tty/serial/8250/8250_port.c
12499 ++++ b/drivers/tty/serial/8250/8250_port.c
12500 +@@ -122,7 +122,8 @@ static const struct serial8250_config uart_config[] = {
12501 + .name = "16C950/954",
12502 + .fifo_size = 128,
12503 + .tx_loadsz = 128,
12504 +- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
12505 ++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01,
12506 ++ .rxtrig_bytes = {16, 32, 112, 120},
12507 + /* UART_CAP_EFR breaks billionon CF bluetooth card. */
12508 + .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
12509 + },
12510 +diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
12511 +index bf0e2a4cb0cef..c6f927a76c3be 100644
12512 +--- a/drivers/tty/serial/jsm/jsm_neo.c
12513 ++++ b/drivers/tty/serial/jsm/jsm_neo.c
12514 +@@ -815,7 +815,9 @@ static void neo_parse_isr(struct jsm_board *brd, u32 port)
12515 + /* Parse any modem signal changes */
12516 + jsm_dbg(INTR, &ch->ch_bd->pci_dev,
12517 + "MOD_STAT: sending to parse_modem_sigs\n");
12518 ++ spin_lock_irqsave(&ch->uart_port.lock, lock_flags);
12519 + neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
12520 ++ spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags);
12521 + }
12522 + }
12523 +
12524 +diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
12525 +index 8e42a7682c63d..d74cbbbf33c62 100644
12526 +--- a/drivers/tty/serial/jsm/jsm_tty.c
12527 ++++ b/drivers/tty/serial/jsm/jsm_tty.c
12528 +@@ -187,6 +187,7 @@ static void jsm_tty_break(struct uart_port *port, int break_state)
12529 +
12530 + static int jsm_tty_open(struct uart_port *port)
12531 + {
12532 ++ unsigned long lock_flags;
12533 + struct jsm_board *brd;
12534 + struct jsm_channel *channel =
12535 + container_of(port, struct jsm_channel, uart_port);
12536 +@@ -240,6 +241,7 @@ static int jsm_tty_open(struct uart_port *port)
12537 + channel->ch_cached_lsr = 0;
12538 + channel->ch_stops_sent = 0;
12539 +
12540 ++ spin_lock_irqsave(&port->lock, lock_flags);
12541 + termios = &port->state->port.tty->termios;
12542 + channel->ch_c_cflag = termios->c_cflag;
12543 + channel->ch_c_iflag = termios->c_iflag;
12544 +@@ -259,6 +261,7 @@ static int jsm_tty_open(struct uart_port *port)
12545 + jsm_carrier(channel);
12546 +
12547 + channel->ch_open_count++;
12548 ++ spin_unlock_irqrestore(&port->lock, lock_flags);
12549 +
12550 + jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
12551 + return 0;
12552 +diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
12553 +index ef11860cd69e5..3df0788ddeb0f 100644
12554 +--- a/drivers/tty/serial/max310x.c
12555 ++++ b/drivers/tty/serial/max310x.c
12556 +@@ -1271,18 +1271,13 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
12557 + /* Always ask for fixed clock rate from a property. */
12558 + device_property_read_u32(dev, "clock-frequency", &uartclk);
12559 +
12560 +- s->clk = devm_clk_get_optional(dev, "osc");
12561 ++ xtal = device_property_match_string(dev, "clock-names", "osc") < 0;
12562 ++ if (xtal)
12563 ++ s->clk = devm_clk_get_optional(dev, "xtal");
12564 ++ else
12565 ++ s->clk = devm_clk_get_optional(dev, "osc");
12566 + if (IS_ERR(s->clk))
12567 + return PTR_ERR(s->clk);
12568 +- if (s->clk) {
12569 +- xtal = false;
12570 +- } else {
12571 +- s->clk = devm_clk_get_optional(dev, "xtal");
12572 +- if (IS_ERR(s->clk))
12573 +- return PTR_ERR(s->clk);
12574 +-
12575 +- xtal = true;
12576 +- }
12577 +
12578 + ret = clk_prepare_enable(s->clk);
12579 + if (ret)
12580 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
12581 +index 07eb56294371b..89ee43061d3ae 100644
12582 +--- a/drivers/tty/serial/sh-sci.c
12583 ++++ b/drivers/tty/serial/sh-sci.c
12584 +@@ -1758,6 +1758,10 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
12585 +
12586 + /* Handle BREAKs */
12587 + sci_handle_breaks(port);
12588 ++
12589 ++ /* drop invalid character received before break was detected */
12590 ++ serial_port_in(port, SCxRDR);
12591 ++
12592 + sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));
12593 +
12594 + return IRQ_HANDLED;
12595 +@@ -1837,7 +1841,8 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
12596 + ret = sci_er_interrupt(irq, ptr);
12597 +
12598 + /* Break Interrupt */
12599 +- if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
12600 ++ if (s->irqs[SCIx_ERI_IRQ] != s->irqs[SCIx_BRI_IRQ] &&
12601 ++ (ssr_status & SCxSR_BRK(port)) && err_enabled)
12602 + ret = sci_br_interrupt(irq, ptr);
12603 +
12604 + /* Overrun Interrupt */
12605 +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
12606 +index 4b0d69042ceb6..bf6efebeb4bd8 100644
12607 +--- a/drivers/tty/vt/keyboard.c
12608 ++++ b/drivers/tty/vt/keyboard.c
12609 +@@ -1171,7 +1171,7 @@ static inline unsigned char getleds(void)
12610 + *
12611 + * Check the status of a keyboard led flag and report it back
12612 + */
12613 +-int vt_get_leds(int console, int flag)
12614 ++int vt_get_leds(unsigned int console, int flag)
12615 + {
12616 + struct kbd_struct *kb = kbd_table + console;
12617 + int ret;
12618 +@@ -1193,7 +1193,7 @@ EXPORT_SYMBOL_GPL(vt_get_leds);
12619 + * Set the LEDs on a console. This is a wrapper for the VT layer
12620 + * so that we can keep kbd knowledge internal
12621 + */
12622 +-void vt_set_led_state(int console, int leds)
12623 ++void vt_set_led_state(unsigned int console, int leds)
12624 + {
12625 + struct kbd_struct *kb = kbd_table + console;
12626 + setledstate(kb, leds);
12627 +@@ -1212,7 +1212,7 @@ void vt_set_led_state(int console, int leds)
12628 + * don't hold the lock. We probably need to split out an LED lock
12629 + * but not during an -rc release!
12630 + */
12631 +-void vt_kbd_con_start(int console)
12632 ++void vt_kbd_con_start(unsigned int console)
12633 + {
12634 + struct kbd_struct *kb = kbd_table + console;
12635 + unsigned long flags;
12636 +@@ -1229,7 +1229,7 @@ void vt_kbd_con_start(int console)
12637 + * Handle console stop. This is a wrapper for the VT layer
12638 + * so that we can keep kbd knowledge internal
12639 + */
12640 +-void vt_kbd_con_stop(int console)
12641 ++void vt_kbd_con_stop(unsigned int console)
12642 + {
12643 + struct kbd_struct *kb = kbd_table + console;
12644 + unsigned long flags;
12645 +@@ -1825,7 +1825,7 @@ int vt_do_diacrit(unsigned int cmd, void __user *udp, int perm)
12646 + * Update the keyboard mode bits while holding the correct locks.
12647 + * Return 0 for success or an error code.
12648 + */
12649 +-int vt_do_kdskbmode(int console, unsigned int arg)
12650 ++int vt_do_kdskbmode(unsigned int console, unsigned int arg)
12651 + {
12652 + struct kbd_struct *kb = kbd_table + console;
12653 + int ret = 0;
12654 +@@ -1865,7 +1865,7 @@ int vt_do_kdskbmode(int console, unsigned int arg)
12655 + * Update the keyboard meta bits while holding the correct locks.
12656 + * Return 0 for success or an error code.
12657 + */
12658 +-int vt_do_kdskbmeta(int console, unsigned int arg)
12659 ++int vt_do_kdskbmeta(unsigned int console, unsigned int arg)
12660 + {
12661 + struct kbd_struct *kb = kbd_table + console;
12662 + int ret = 0;
12663 +@@ -2008,7 +2008,7 @@ out:
12664 + }
12665 +
12666 + int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
12667 +- int console)
12668 ++ unsigned int console)
12669 + {
12670 + struct kbd_struct *kb = kbd_table + console;
12671 + struct kbentry kbe;
12672 +@@ -2097,7 +2097,7 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
12673 + return ret;
12674 + }
12675 +
12676 +-int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm)
12677 ++int vt_do_kdskled(unsigned int console, int cmd, unsigned long arg, int perm)
12678 + {
12679 + struct kbd_struct *kb = kbd_table + console;
12680 + unsigned long flags;
12681 +@@ -2139,7 +2139,7 @@ int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm)
12682 + return -ENOIOCTLCMD;
12683 + }
12684 +
12685 +-int vt_do_kdgkbmode(int console)
12686 ++int vt_do_kdgkbmode(unsigned int console)
12687 + {
12688 + struct kbd_struct *kb = kbd_table + console;
12689 + /* This is a spot read so needs no locking */
12690 +@@ -2163,7 +2163,7 @@ int vt_do_kdgkbmode(int console)
12691 + *
12692 + * Report the meta flag status of this console
12693 + */
12694 +-int vt_do_kdgkbmeta(int console)
12695 ++int vt_do_kdgkbmeta(unsigned int console)
12696 + {
12697 + struct kbd_struct *kb = kbd_table + console;
12698 + /* Again a spot read so no locking */
12699 +@@ -2176,7 +2176,7 @@ int vt_do_kdgkbmeta(int console)
12700 + *
12701 + * Restore the unicode console state to its default
12702 + */
12703 +-void vt_reset_unicode(int console)
12704 ++void vt_reset_unicode(unsigned int console)
12705 + {
12706 + unsigned long flags;
12707 +
12708 +@@ -2204,7 +2204,7 @@ int vt_get_shift_state(void)
12709 + * Reset the keyboard bits for a console as part of a general console
12710 + * reset event
12711 + */
12712 +-void vt_reset_keyboard(int console)
12713 ++void vt_reset_keyboard(unsigned int console)
12714 + {
12715 + struct kbd_struct *kb = kbd_table + console;
12716 + unsigned long flags;
12717 +@@ -2234,7 +2234,7 @@ void vt_reset_keyboard(int console)
12718 + * caller must be sure that there are no synchronization needs
12719 + */
12720 +
12721 +-int vt_get_kbd_mode_bit(int console, int bit)
12722 ++int vt_get_kbd_mode_bit(unsigned int console, int bit)
12723 + {
12724 + struct kbd_struct *kb = kbd_table + console;
12725 + return vc_kbd_mode(kb, bit);
12726 +@@ -2249,7 +2249,7 @@ int vt_get_kbd_mode_bit(int console, int bit)
12727 + * caller must be sure that there are no synchronization needs
12728 + */
12729 +
12730 +-void vt_set_kbd_mode_bit(int console, int bit)
12731 ++void vt_set_kbd_mode_bit(unsigned int console, int bit)
12732 + {
12733 + struct kbd_struct *kb = kbd_table + console;
12734 + unsigned long flags;
12735 +@@ -2268,7 +2268,7 @@ void vt_set_kbd_mode_bit(int console, int bit)
12736 + * caller must be sure that there are no synchronization needs
12737 + */
12738 +
12739 +-void vt_clr_kbd_mode_bit(int console, int bit)
12740 ++void vt_clr_kbd_mode_bit(unsigned int console, int bit)
12741 + {
12742 + struct kbd_struct *kb = kbd_table + console;
12743 + unsigned long flags;
12744 +diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
12745 +index e86d13c04bdbe..bdc3885c0d493 100644
12746 +--- a/drivers/usb/chipidea/host.c
12747 ++++ b/drivers/usb/chipidea/host.c
12748 +@@ -240,15 +240,18 @@ static int ci_ehci_hub_control(
12749 + )
12750 + {
12751 + struct ehci_hcd *ehci = hcd_to_ehci(hcd);
12752 ++ unsigned int ports = HCS_N_PORTS(ehci->hcs_params);
12753 + u32 __iomem *status_reg;
12754 +- u32 temp;
12755 ++ u32 temp, port_index;
12756 + unsigned long flags;
12757 + int retval = 0;
12758 + bool done = false;
12759 + struct device *dev = hcd->self.controller;
12760 + struct ci_hdrc *ci = dev_get_drvdata(dev);
12761 +
12762 +- status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
12763 ++ port_index = wIndex & 0xff;
12764 ++ port_index -= (port_index > 0);
12765 ++ status_reg = &ehci->regs->port_status[port_index];
12766 +
12767 + spin_lock_irqsave(&ehci->lock, flags);
12768 +
12769 +@@ -260,6 +263,11 @@ static int ci_ehci_hub_control(
12770 + }
12771 +
12772 + if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
12773 ++ if (!wIndex || wIndex > ports) {
12774 ++ retval = -EPIPE;
12775 ++ goto done;
12776 ++ }
12777 ++
12778 + temp = ehci_readl(ehci, status_reg);
12779 + if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
12780 + retval = -EPIPE;
12781 +@@ -288,7 +296,7 @@ static int ci_ehci_hub_control(
12782 + ehci_writel(ehci, temp, status_reg);
12783 + }
12784 +
12785 +- set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
12786 ++ set_bit(port_index, &ehci->suspended_ports);
12787 + goto done;
12788 + }
12789 +
12790 +diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
12791 +index 756faa46d33a7..d328d20abfbc4 100644
12792 +--- a/drivers/usb/dwc3/dwc3-imx8mp.c
12793 ++++ b/drivers/usb/dwc3/dwc3-imx8mp.c
12794 +@@ -152,13 +152,6 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
12795 + }
12796 + dwc3_imx->irq = irq;
12797 +
12798 +- err = devm_request_threaded_irq(dev, irq, NULL, dwc3_imx8mp_interrupt,
12799 +- IRQF_ONESHOT, dev_name(dev), dwc3_imx);
12800 +- if (err) {
12801 +- dev_err(dev, "failed to request IRQ #%d --> %d\n", irq, err);
12802 +- goto disable_clks;
12803 +- }
12804 +-
12805 + pm_runtime_set_active(dev);
12806 + pm_runtime_enable(dev);
12807 + err = pm_runtime_get_sync(dev);
12808 +@@ -186,6 +179,13 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
12809 + }
12810 + of_node_put(dwc3_np);
12811 +
12812 ++ err = devm_request_threaded_irq(dev, irq, NULL, dwc3_imx8mp_interrupt,
12813 ++ IRQF_ONESHOT, dev_name(dev), dwc3_imx);
12814 ++ if (err) {
12815 ++ dev_err(dev, "failed to request IRQ #%d --> %d\n", irq, err);
12816 ++ goto depopulate;
12817 ++ }
12818 ++
12819 + device_set_wakeup_capable(dev, true);
12820 + pm_runtime_put(dev);
12821 +
12822 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
12823 +index 72a9797dbbae0..504c1cbc255d1 100644
12824 +--- a/drivers/usb/gadget/composite.c
12825 ++++ b/drivers/usb/gadget/composite.c
12826 +@@ -482,7 +482,7 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
12827 + {
12828 + unsigned val;
12829 +
12830 +- if (c->MaxPower)
12831 ++ if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
12832 + val = c->MaxPower;
12833 + else
12834 + val = CONFIG_USB_GADGET_VBUS_DRAW;
12835 +@@ -936,7 +936,11 @@ static int set_config(struct usb_composite_dev *cdev,
12836 + }
12837 +
12838 + /* when we return, be sure our power usage is valid */
12839 +- power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
12840 ++ if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
12841 ++ power = c->MaxPower;
12842 ++ else
12843 ++ power = CONFIG_USB_GADGET_VBUS_DRAW;
12844 ++
12845 + if (gadget->speed < USB_SPEED_SUPER)
12846 + power = min(power, 500U);
12847 + else
12848 +diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
12849 +index d1d044d9f8594..85a3f6d4b5af3 100644
12850 +--- a/drivers/usb/gadget/function/u_ether.c
12851 ++++ b/drivers/usb/gadget/function/u_ether.c
12852 +@@ -492,8 +492,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
12853 + }
12854 + spin_unlock_irqrestore(&dev->lock, flags);
12855 +
12856 +- if (skb && !in) {
12857 +- dev_kfree_skb_any(skb);
12858 ++ if (!in) {
12859 ++ if (skb)
12860 ++ dev_kfree_skb_any(skb);
12861 + return NETDEV_TX_OK;
12862 + }
12863 +
12864 +diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
12865 +index cffdc8d01b2a8..8fd27249ad257 100644
12866 +--- a/drivers/usb/host/ehci-mv.c
12867 ++++ b/drivers/usb/host/ehci-mv.c
12868 +@@ -42,26 +42,25 @@ struct ehci_hcd_mv {
12869 + int (*set_vbus)(unsigned int vbus);
12870 + };
12871 +
12872 +-static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
12873 ++static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
12874 + {
12875 +- clk_prepare_enable(ehci_mv->clk);
12876 +-}
12877 ++ int retval;
12878 +
12879 +-static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
12880 +-{
12881 +- clk_disable_unprepare(ehci_mv->clk);
12882 +-}
12883 ++ retval = clk_prepare_enable(ehci_mv->clk);
12884 ++ if (retval)
12885 ++ return retval;
12886 +
12887 +-static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
12888 +-{
12889 +- ehci_clock_enable(ehci_mv);
12890 +- return phy_init(ehci_mv->phy);
12891 ++ retval = phy_init(ehci_mv->phy);
12892 ++ if (retval)
12893 ++ clk_disable_unprepare(ehci_mv->clk);
12894 ++
12895 ++ return retval;
12896 + }
12897 +
12898 + static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
12899 + {
12900 + phy_exit(ehci_mv->phy);
12901 +- ehci_clock_disable(ehci_mv);
12902 ++ clk_disable_unprepare(ehci_mv->clk);
12903 + }
12904 +
12905 + static int mv_ehci_reset(struct usb_hcd *hcd)
12906 +diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
12907 +index 05fb8d97cf027..aeb235ce06c1c 100644
12908 +--- a/drivers/usb/host/fotg210-hcd.c
12909 ++++ b/drivers/usb/host/fotg210-hcd.c
12910 +@@ -2510,11 +2510,6 @@ retry_xacterr:
12911 + return count;
12912 + }
12913 +
12914 +-/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
12915 +-#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
12916 +-/* ... and packet size, for any kind of endpoint descriptor */
12917 +-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
12918 +-
12919 + /* reverse of qh_urb_transaction: free a list of TDs.
12920 + * used for cleanup after errors, before HC sees an URB's TDs.
12921 + */
12922 +@@ -2600,7 +2595,7 @@ static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
12923 + token |= (1 /* "in" */ << 8);
12924 + /* else it's already initted to "out" pid (0 << 8) */
12925 +
12926 +- maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
12927 ++ maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
12928 +
12929 + /*
12930 + * buffer gets wrapped in one or more qtds;
12931 +@@ -2714,9 +2709,11 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
12932 + gfp_t flags)
12933 + {
12934 + struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
12935 ++ struct usb_host_endpoint *ep;
12936 + u32 info1 = 0, info2 = 0;
12937 + int is_input, type;
12938 + int maxp = 0;
12939 ++ int mult;
12940 + struct usb_tt *tt = urb->dev->tt;
12941 + struct fotg210_qh_hw *hw;
12942 +
12943 +@@ -2731,14 +2728,15 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
12944 +
12945 + is_input = usb_pipein(urb->pipe);
12946 + type = usb_pipetype(urb->pipe);
12947 +- maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
12948 ++ ep = usb_pipe_endpoint(urb->dev, urb->pipe);
12949 ++ maxp = usb_endpoint_maxp(&ep->desc);
12950 ++ mult = usb_endpoint_maxp_mult(&ep->desc);
12951 +
12952 + /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
12953 + * acts like up to 3KB, but is built from smaller packets.
12954 + */
12955 +- if (max_packet(maxp) > 1024) {
12956 +- fotg210_dbg(fotg210, "bogus qh maxpacket %d\n",
12957 +- max_packet(maxp));
12958 ++ if (maxp > 1024) {
12959 ++ fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", maxp);
12960 + goto done;
12961 + }
12962 +
12963 +@@ -2752,8 +2750,7 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
12964 + */
12965 + if (type == PIPE_INTERRUPT) {
12966 + qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
12967 +- is_input, 0,
12968 +- hb_mult(maxp) * max_packet(maxp)));
12969 ++ is_input, 0, mult * maxp));
12970 + qh->start = NO_FRAME;
12971 +
12972 + if (urb->dev->speed == USB_SPEED_HIGH) {
12973 +@@ -2790,7 +2787,7 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
12974 + think_time = tt ? tt->think_time : 0;
12975 + qh->tt_usecs = NS_TO_US(think_time +
12976 + usb_calc_bus_time(urb->dev->speed,
12977 +- is_input, 0, max_packet(maxp)));
12978 ++ is_input, 0, maxp));
12979 + qh->period = urb->interval;
12980 + if (qh->period > fotg210->periodic_size) {
12981 + qh->period = fotg210->periodic_size;
12982 +@@ -2853,11 +2850,11 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
12983 + * to help them do so. So now people expect to use
12984 + * such nonconformant devices with Linux too; sigh.
12985 + */
12986 +- info1 |= max_packet(maxp) << 16;
12987 ++ info1 |= maxp << 16;
12988 + info2 |= (FOTG210_TUNE_MULT_HS << 30);
12989 + } else { /* PIPE_INTERRUPT */
12990 +- info1 |= max_packet(maxp) << 16;
12991 +- info2 |= hb_mult(maxp) << 30;
12992 ++ info1 |= maxp << 16;
12993 ++ info2 |= mult << 30;
12994 + }
12995 + break;
12996 + default:
12997 +@@ -3927,6 +3924,7 @@ static void iso_stream_init(struct fotg210_hcd *fotg210,
12998 + int is_input;
12999 + long bandwidth;
13000 + unsigned multi;
13001 ++ struct usb_host_endpoint *ep;
13002 +
13003 + /*
13004 + * this might be a "high bandwidth" highspeed endpoint,
13005 +@@ -3934,14 +3932,14 @@ static void iso_stream_init(struct fotg210_hcd *fotg210,
13006 + */
13007 + epnum = usb_pipeendpoint(pipe);
13008 + is_input = usb_pipein(pipe) ? USB_DIR_IN : 0;
13009 +- maxp = usb_maxpacket(dev, pipe, !is_input);
13010 ++ ep = usb_pipe_endpoint(dev, pipe);
13011 ++ maxp = usb_endpoint_maxp(&ep->desc);
13012 + if (is_input)
13013 + buf1 = (1 << 11);
13014 + else
13015 + buf1 = 0;
13016 +
13017 +- maxp = max_packet(maxp);
13018 +- multi = hb_mult(maxp);
13019 ++ multi = usb_endpoint_maxp_mult(&ep->desc);
13020 + buf1 |= maxp;
13021 + maxp *= multi;
13022 +
13023 +@@ -4462,13 +4460,12 @@ static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
13024 +
13025 + /* HC need not update length with this error */
13026 + if (!(t & FOTG210_ISOC_BABBLE)) {
13027 +- desc->actual_length =
13028 +- fotg210_itdlen(urb, desc, t);
13029 ++ desc->actual_length = FOTG210_ITD_LENGTH(t);
13030 + urb->actual_length += desc->actual_length;
13031 + }
13032 + } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
13033 + desc->status = 0;
13034 +- desc->actual_length = fotg210_itdlen(urb, desc, t);
13035 ++ desc->actual_length = FOTG210_ITD_LENGTH(t);
13036 + urb->actual_length += desc->actual_length;
13037 + } else {
13038 + /* URB was too late */
13039 +diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
13040 +index 0a91061a0551d..0781442b7a24a 100644
13041 +--- a/drivers/usb/host/fotg210.h
13042 ++++ b/drivers/usb/host/fotg210.h
13043 +@@ -683,11 +683,6 @@ static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
13044 + return fotg210_readl(fotg210, &fotg210->regs->frame_index);
13045 + }
13046 +
13047 +-#define fotg210_itdlen(urb, desc, t) ({ \
13048 +- usb_pipein((urb)->pipe) ? \
13049 +- (desc)->length - FOTG210_ITD_LENGTH(t) : \
13050 +- FOTG210_ITD_LENGTH(t); \
13051 +-})
13052 + /*-------------------------------------------------------------------------*/
13053 +
13054 + #endif /* __LINUX_FOTG210_H */
13055 +diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
13056 +index 0bb1a6295d64a..f8adf393875f3 100644
13057 +--- a/drivers/usb/host/xhci-mtk-sch.c
13058 ++++ b/drivers/usb/host/xhci-mtk-sch.c
13059 +@@ -80,7 +80,7 @@ decode_ep(struct usb_host_endpoint *ep, enum usb_device_speed speed)
13060 + interval /= 1000;
13061 + }
13062 +
13063 +- snprintf(buf, DBG_BUF_EN, "%s ep%d%s %s, mpkt:%d, interval:%d/%d%s\n",
13064 ++ snprintf(buf, DBG_BUF_EN, "%s ep%d%s %s, mpkt:%d, interval:%d/%d%s",
13065 + usb_speed_string(speed), usb_endpoint_num(epd),
13066 + usb_endpoint_dir_in(epd) ? "in" : "out",
13067 + usb_ep_type_string(usb_endpoint_type(epd)),
13068 +@@ -129,6 +129,10 @@ get_bw_info(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
13069 + int bw_index;
13070 +
13071 + virt_dev = xhci->devs[udev->slot_id];
13072 ++ if (!virt_dev->real_port) {
13073 ++ WARN_ONCE(1, "%s invalid real_port\n", dev_name(&udev->dev));
13074 ++ return NULL;
13075 ++ }
13076 +
13077 + if (udev->speed >= USB_SPEED_SUPER) {
13078 + if (usb_endpoint_dir_out(&ep->desc))
13079 +@@ -236,14 +240,20 @@ static void drop_tt(struct usb_device *udev)
13080 + }
13081 + }
13082 +
13083 +-static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
13084 +- struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx)
13085 ++static struct mu3h_sch_ep_info *
13086 ++create_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
13087 ++ struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx)
13088 + {
13089 + struct mu3h_sch_ep_info *sch_ep;
13090 ++ struct mu3h_sch_bw_info *bw_info;
13091 + struct mu3h_sch_tt *tt = NULL;
13092 + u32 len_bw_budget_table;
13093 + size_t mem_size;
13094 +
13095 ++ bw_info = get_bw_info(mtk, udev, ep);
13096 ++ if (!bw_info)
13097 ++ return ERR_PTR(-ENODEV);
13098 ++
13099 + if (is_fs_or_ls(udev->speed))
13100 + len_bw_budget_table = TT_MICROFRAMES_MAX;
13101 + else if ((udev->speed >= USB_SPEED_SUPER)
13102 +@@ -266,11 +276,13 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
13103 + }
13104 + }
13105 +
13106 ++ sch_ep->bw_info = bw_info;
13107 + sch_ep->sch_tt = tt;
13108 + sch_ep->ep = ep;
13109 + sch_ep->speed = udev->speed;
13110 + INIT_LIST_HEAD(&sch_ep->endpoint);
13111 + INIT_LIST_HEAD(&sch_ep->tt_endpoint);
13112 ++ INIT_HLIST_NODE(&sch_ep->hentry);
13113 +
13114 + return sch_ep;
13115 + }
13116 +@@ -587,9 +599,9 @@ static u32 get_esit_boundary(struct mu3h_sch_ep_info *sch_ep)
13117 + return boundary;
13118 + }
13119 +
13120 +-static int check_sch_bw(struct mu3h_sch_bw_info *sch_bw,
13121 +- struct mu3h_sch_ep_info *sch_ep)
13122 ++static int check_sch_bw(struct mu3h_sch_ep_info *sch_ep)
13123 + {
13124 ++ struct mu3h_sch_bw_info *sch_bw = sch_ep->bw_info;
13125 + const u32 esit_boundary = get_esit_boundary(sch_ep);
13126 + const u32 bw_boundary = get_bw_boundary(sch_ep->speed);
13127 + u32 offset;
13128 +@@ -635,23 +647,26 @@ static int check_sch_bw(struct mu3h_sch_bw_info *sch_bw,
13129 + return load_ep_bw(sch_bw, sch_ep, true);
13130 + }
13131 +
13132 +-static void destroy_sch_ep(struct usb_device *udev,
13133 +- struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
13134 ++static void destroy_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
13135 ++ struct mu3h_sch_ep_info *sch_ep)
13136 + {
13137 + /* only release bandwidth for eps that passed check_sch_bw() */
13138 + if (sch_ep->allocated)
13139 +- load_ep_bw(sch_bw, sch_ep, false);
13140 ++ load_ep_bw(sch_ep->bw_info, sch_ep, false);
13141 +
13142 + if (sch_ep->sch_tt)
13143 + drop_tt(udev);
13144 +
13145 + list_del(&sch_ep->endpoint);
13146 ++ hlist_del(&sch_ep->hentry);
13147 + kfree(sch_ep);
13148 + }
13149 +
13150 +-static bool need_bw_sch(struct usb_host_endpoint *ep,
13151 +- enum usb_device_speed speed, int has_tt)
13152 ++static bool need_bw_sch(struct usb_device *udev,
13153 ++ struct usb_host_endpoint *ep)
13154 + {
13155 ++ bool has_tt = udev->tt && udev->tt->hub->parent;
13156 ++
13157 + /* only for periodic endpoints */
13158 + if (usb_endpoint_xfer_control(&ep->desc)
13159 + || usb_endpoint_xfer_bulk(&ep->desc))
13160 +@@ -662,7 +677,7 @@ static bool need_bw_sch(struct usb_host_endpoint *ep,
13161 + * a TT are also ignored; the root hub will schedule them directly,
13162 + * but the @bpkts field of the endpoint context needs to be set to 1.
13163 + */
13164 +- if (is_fs_or_ls(speed) && !has_tt)
13165 ++ if (is_fs_or_ls(udev->speed) && !has_tt)
13166 + return false;
13167 +
13168 + /* skip endpoint with zero maxpkt */
13169 +@@ -677,7 +692,6 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
13170 + struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
13171 + struct mu3h_sch_bw_info *sch_array;
13172 + int num_usb_bus;
13173 +- int i;
13174 +
13175 + /* ss IN and OUT are separated */
13176 + num_usb_bus = xhci->usb3_rhub.num_ports * 2 + xhci->usb2_rhub.num_ports;
13177 +@@ -686,12 +700,10 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
13178 + if (sch_array == NULL)
13179 + return -ENOMEM;
13180 +
13181 +- for (i = 0; i < num_usb_bus; i++)
13182 +- INIT_LIST_HEAD(&sch_array[i].bw_ep_list);
13183 +-
13184 + mtk->sch_array = sch_array;
13185 +
13186 + INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
13187 ++ hash_init(mtk->sch_ep_hash);
13188 +
13189 + return 0;
13190 + }
13191 +@@ -715,9 +727,7 @@ static int add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
13192 + ep_index = xhci_get_endpoint_index(&ep->desc);
13193 + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
13194 +
13195 +- xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
13196 +-
13197 +- if (!need_bw_sch(ep, udev->speed, !!virt_dev->tt_info)) {
13198 ++ if (!need_bw_sch(udev, ep)) {
13199 + /*
13200 + * set @bpkts to 1 if it is an LS or FS periodic endpoint whose
13201 + * device is not connected through an external HS hub
13202 +@@ -729,13 +739,16 @@ static int add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
13203 + return 0;
13204 + }
13205 +
13206 +- sch_ep = create_sch_ep(udev, ep, ep_ctx);
13207 ++ xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
13208 ++
13209 ++ sch_ep = create_sch_ep(mtk, udev, ep, ep_ctx);
13210 + if (IS_ERR_OR_NULL(sch_ep))
13211 + return -ENOMEM;
13212 +
13213 + setup_sch_info(ep_ctx, sch_ep);
13214 +
13215 + list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
13216 ++ hash_add(mtk->sch_ep_hash, &sch_ep->hentry, (unsigned long)ep);
13217 +
13218 + return 0;
13219 + }
13220 +@@ -745,22 +758,18 @@ static void drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
13221 + {
13222 + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
13223 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
13224 +- struct xhci_virt_device *virt_dev;
13225 +- struct mu3h_sch_bw_info *sch_bw;
13226 +- struct mu3h_sch_ep_info *sch_ep, *tmp;
13227 +-
13228 +- virt_dev = xhci->devs[udev->slot_id];
13229 +-
13230 +- xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
13231 ++ struct mu3h_sch_ep_info *sch_ep;
13232 ++ struct hlist_node *hn;
13233 +
13234 +- if (!need_bw_sch(ep, udev->speed, !!virt_dev->tt_info))
13235 ++ if (!need_bw_sch(udev, ep))
13236 + return;
13237 +
13238 +- sch_bw = get_bw_info(mtk, udev, ep);
13239 ++ xhci_err(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
13240 +
13241 +- list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) {
13242 ++ hash_for_each_possible_safe(mtk->sch_ep_hash, sch_ep,
13243 ++ hn, hentry, (unsigned long)ep) {
13244 + if (sch_ep->ep == ep) {
13245 +- destroy_sch_ep(udev, sch_bw, sch_ep);
13246 ++ destroy_sch_ep(mtk, udev, sch_ep);
13247 + break;
13248 + }
13249 + }
13250 +@@ -771,30 +780,22 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
13251 + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
13252 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
13253 + struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
13254 +- struct mu3h_sch_bw_info *sch_bw;
13255 +- struct mu3h_sch_ep_info *sch_ep, *tmp;
13256 ++ struct mu3h_sch_ep_info *sch_ep;
13257 + int ret;
13258 +
13259 + xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
13260 +
13261 + list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) {
13262 +- sch_bw = get_bw_info(mtk, udev, sch_ep->ep);
13263 ++ struct xhci_ep_ctx *ep_ctx;
13264 ++ struct usb_host_endpoint *ep = sch_ep->ep;
13265 ++ unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
13266 +
13267 +- ret = check_sch_bw(sch_bw, sch_ep);
13268 ++ ret = check_sch_bw(sch_ep);
13269 + if (ret) {
13270 + xhci_err(xhci, "Not enough bandwidth! (%s)\n",
13271 + sch_error_string(-ret));
13272 + return -ENOSPC;
13273 + }
13274 +- }
13275 +-
13276 +- list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
13277 +- struct xhci_ep_ctx *ep_ctx;
13278 +- struct usb_host_endpoint *ep = sch_ep->ep;
13279 +- unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
13280 +-
13281 +- sch_bw = get_bw_info(mtk, udev, ep);
13282 +- list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
13283 +
13284 + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
13285 + ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(sch_ep->pkts)
13286 +@@ -808,22 +809,23 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
13287 + sch_ep->offset, sch_ep->repeat);
13288 + }
13289 +
13290 +- return xhci_check_bandwidth(hcd, udev);
13291 ++ ret = xhci_check_bandwidth(hcd, udev);
13292 ++ if (!ret)
13293 ++ INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
13294 ++
13295 ++ return ret;
13296 + }
13297 +
13298 + void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
13299 + {
13300 + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
13301 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
13302 +- struct mu3h_sch_bw_info *sch_bw;
13303 + struct mu3h_sch_ep_info *sch_ep, *tmp;
13304 +
13305 + xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
13306 +
13307 +- list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
13308 +- sch_bw = get_bw_info(mtk, udev, sch_ep->ep);
13309 +- destroy_sch_ep(udev, sch_bw, sch_ep);
13310 +- }
13311 ++ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint)
13312 ++ destroy_sch_ep(mtk, udev, sch_ep);
13313 +
13314 + xhci_reset_bandwidth(hcd, udev);
13315 + }
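
The xhci-mtk scheduler rework above stops walking per-bandwidth-domain lists to find an endpoint and instead hashes each mu3h_sch_ep_info by its usb_host_endpoint pointer, so drop_ep_quirk() can go straight to the matching entry. A kernel-context sketch of that <linux/hashtable.h> pattern; it is not a standalone program, and the struct and function names are illustrative rather than taken from the patch:

#include <linux/hashtable.h>
#include <linux/slab.h>

#define EP_HASH_BITS	5	/* 32 buckets, as with SCH_EP_HASH_BITS */

struct ep_entry {
	void *ep;			/* key: the endpoint pointer */
	struct hlist_node hentry;
};

static DEFINE_HASHTABLE(ep_hash, EP_HASH_BITS);

static void ep_add(struct ep_entry *e, void *ep)
{
	e->ep = ep;
	hash_add(ep_hash, &e->hentry, (unsigned long)ep);
}

static void ep_drop(void *ep)
{
	struct ep_entry *e;
	struct hlist_node *tmp;

	/* Only the bucket for this key is scanned; colliding entries
	 * are skipped by the ->ep comparison. */
	hash_for_each_possible_safe(ep_hash, e, tmp, hentry, (unsigned long)ep) {
		if (e->ep == ep) {
			hash_del(&e->hentry);
			kfree(e);
			break;
		}
	}
}
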
13316 +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
13317 +index 2548976bcf05c..cb27569186a0d 100644
13318 +--- a/drivers/usb/host/xhci-mtk.c
13319 ++++ b/drivers/usb/host/xhci-mtk.c
13320 +@@ -569,7 +569,7 @@ disable_ldos:
13321 + xhci_mtk_ldos_disable(mtk);
13322 +
13323 + disable_pm:
13324 +- pm_runtime_put_sync_autosuspend(dev);
13325 ++ pm_runtime_put_noidle(dev);
13326 + pm_runtime_disable(dev);
13327 + return ret;
13328 + }
13329 +diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
13330 +index ace432356c412..f87d199b08181 100644
13331 +--- a/drivers/usb/host/xhci-mtk.h
13332 ++++ b/drivers/usb/host/xhci-mtk.h
13333 +@@ -10,11 +10,15 @@
13334 + #define _XHCI_MTK_H_
13335 +
13336 + #include <linux/clk.h>
13337 ++#include <linux/hashtable.h>
13338 +
13339 + #include "xhci.h"
13340 +
13341 + #define BULK_CLKS_NUM 5
13342 +
13343 ++/* support at most 64 eps, use a 32-entry hash table */
13344 ++#define SCH_EP_HASH_BITS 5
13345 ++
13346 + /**
13347 + * To simplify the scheduler algorithm, set an upper limit for ESIT:
13348 + * if a synchronous ep's ESIT is larger than @XHCI_MTK_MAX_ESIT,
13349 +@@ -36,14 +40,12 @@ struct mu3h_sch_tt {
13350 + * struct mu3h_sch_bw_info: schedule information for bandwidth domain
13351 + *
13352 + * @bus_bw: array to keep track of bandwidth already used at each uframes
13353 +- * @bw_ep_list: eps in the bandwidth domain
13354 + *
13355 + * treat a HS root port as a bandwidth domain, but treat a SS root port as
13356 + * two bandwidth domains, one for IN eps and another for OUT eps.
13357 + */
13358 + struct mu3h_sch_bw_info {
13359 + u32 bus_bw[XHCI_MTK_MAX_ESIT];
13360 +- struct list_head bw_ep_list;
13361 + };
13362 +
13363 + /**
13364 +@@ -53,8 +55,10 @@ struct mu3h_sch_bw_info {
13365 + * @num_budget_microframes: number of continuous uframes
13366 + * (@repeat==1) scheduled within the interval
13367 + * @bw_cost_per_microframe: bandwidth cost per microframe
13368 ++ * @hentry: hash table entry
13369 + * @endpoint: linked into bandwidth domain which it belongs to
13370 + * @tt_endpoint: linked into mu3h_sch_tt's list which it belongs to
13371 ++ * @bw_info: bandwidth domain which this endpoint belongs
13372 + * @sch_tt: mu3h_sch_tt linked into
13373 + * @ep_type: endpoint type
13374 + * @maxpkt: max packet size of endpoint
13375 +@@ -82,7 +86,9 @@ struct mu3h_sch_ep_info {
13376 + u32 num_budget_microframes;
13377 + u32 bw_cost_per_microframe;
13378 + struct list_head endpoint;
13379 ++ struct hlist_node hentry;
13380 + struct list_head tt_endpoint;
13381 ++ struct mu3h_sch_bw_info *bw_info;
13382 + struct mu3h_sch_tt *sch_tt;
13383 + u32 ep_type;
13384 + u32 maxpkt;
13385 +@@ -135,6 +141,7 @@ struct xhci_hcd_mtk {
13386 + struct usb_hcd *hcd;
13387 + struct mu3h_sch_bw_info *sch_array;
13388 + struct list_head bw_ep_chk_list;
13389 ++ DECLARE_HASHTABLE(sch_ep_hash, SCH_EP_HASH_BITS);
13390 + struct mu3c_ippc_regs __iomem *ippc_regs;
13391 + int num_u2_ports;
13392 + int num_u3_ports;
13393 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
13394 +index 3618070eba786..18a203c9011eb 100644
13395 +--- a/drivers/usb/host/xhci.c
13396 ++++ b/drivers/usb/host/xhci.c
13397 +@@ -4705,19 +4705,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
13398 + {
13399 + unsigned long long timeout_ns;
13400 +
13401 +- if (xhci->quirks & XHCI_INTEL_HOST)
13402 +- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
13403 +- else
13404 +- timeout_ns = udev->u1_params.sel;
13405 +-
13406 + /* Prevent U1 if service interval is shorter than U1 exit latency */
13407 + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
13408 +- if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
13409 ++ if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
13410 + dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
13411 + return USB3_LPM_DISABLED;
13412 + }
13413 + }
13414 +
13415 ++ if (xhci->quirks & XHCI_INTEL_HOST)
13416 ++ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
13417 ++ else
13418 ++ timeout_ns = udev->u1_params.sel;
13419 ++
13420 + /* The U1 timeout is encoded in 1us intervals.
13421 + * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
13422 + */
13423 +@@ -4769,19 +4769,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
13424 + {
13425 + unsigned long long timeout_ns;
13426 +
13427 +- if (xhci->quirks & XHCI_INTEL_HOST)
13428 +- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
13429 +- else
13430 +- timeout_ns = udev->u2_params.sel;
13431 +-
13432 + /* Prevent U2 if service interval is shorter than U2 exit latency */
13433 + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
13434 +- if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
13435 ++ if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
13436 + dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
13437 + return USB3_LPM_DISABLED;
13438 + }
13439 + }
13440 +
13441 ++ if (xhci->quirks & XHCI_INTEL_HOST)
13442 ++ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
13443 ++ else
13444 ++ timeout_ns = udev->u2_params.sel;
13445 ++
13446 + /* The U2 timeout is encoded in 256us intervals */
13447 + timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
13448 + /* If the necessary timeout value is bigger than what we can set in the
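
The two xhci.c hunks reorder the U1/U2 timeout calculation so the periodic-endpoint check runs first and compares the service interval against the exit latency (u1_params.mel / u2_params.mel) instead of against the computed timeout: if the link cannot wake before the next service opportunity, the LPM state must stay disabled no matter what the timeout would be. A small sketch of that comparison, assuming the SuperSpeed interval encoding of 2^(bInterval-1) x 125 us used by xhci_service_interval_to_ns(); the numbers are illustrative:

#include <stdbool.h>
#include <stdio.h>

/* SuperSpeed periodic service interval in nanoseconds */
static unsigned long long service_interval_ns(unsigned int bInterval)
{
	return (1ULL << (bInterval - 1)) * 125ULL * 1000ULL;
}

static bool u_state_allowed(unsigned int bInterval, unsigned long long mel_ns)
{
	/* disabled when the interval is not longer than the exit latency */
	return service_interval_ns(bInterval) > mel_ns;
}

int main(void)
{
	/* bInterval 1 => 125 us interval; a 200 us exit latency blocks LPM */
	printf("allowed: %d\n", u_state_allowed(1, 200000ULL));
	return 0;
}
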
13449 +diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
13450 +index ff07e28906922..1f2ca22384b03 100644
13451 +--- a/drivers/usb/isp1760/isp1760-core.c
13452 ++++ b/drivers/usb/isp1760/isp1760-core.c
13453 +@@ -30,6 +30,7 @@ static int isp1760_init_core(struct isp1760_device *isp)
13454 + {
13455 + struct isp1760_hcd *hcd = &isp->hcd;
13456 + struct isp1760_udc *udc = &isp->udc;
13457 ++ u32 otg_ctrl;
13458 +
13459 + /* Low-level chip reset */
13460 + if (isp->rst_gpio) {
13461 +@@ -83,16 +84,17 @@ static int isp1760_init_core(struct isp1760_device *isp)
13462 + *
13463 + * TODO: Really support OTG. For now we configure port 1 in device mode
13464 + */
13465 +- if (((isp->devflags & ISP1760_FLAG_ISP1761) ||
13466 +- (isp->devflags & ISP1760_FLAG_ISP1763)) &&
13467 +- (isp->devflags & ISP1760_FLAG_PERIPHERAL_EN)) {
13468 +- isp1760_field_set(hcd->fields, HW_DM_PULLDOWN);
13469 +- isp1760_field_set(hcd->fields, HW_DP_PULLDOWN);
13470 +- isp1760_field_set(hcd->fields, HW_OTG_DISABLE);
13471 +- } else {
13472 +- isp1760_field_set(hcd->fields, HW_SW_SEL_HC_DC);
13473 +- isp1760_field_set(hcd->fields, HW_VBUS_DRV);
13474 +- isp1760_field_set(hcd->fields, HW_SEL_CP_EXT);
13475 ++ if (isp->devflags & ISP1760_FLAG_ISP1761) {
13476 ++ if (isp->devflags & ISP1760_FLAG_PERIPHERAL_EN) {
13477 ++ otg_ctrl = (ISP176x_HW_DM_PULLDOWN_CLEAR |
13478 ++ ISP176x_HW_DP_PULLDOWN_CLEAR |
13479 ++ ISP176x_HW_OTG_DISABLE);
13480 ++ } else {
13481 ++ otg_ctrl = (ISP176x_HW_SW_SEL_HC_DC_CLEAR |
13482 ++ ISP176x_HW_VBUS_DRV |
13483 ++ ISP176x_HW_SEL_CP_EXT);
13484 ++ }
13485 ++ isp1760_reg_write(hcd->regs, ISP176x_HC_OTG_CTRL, otg_ctrl);
13486 + }
13487 +
13488 + dev_info(isp->dev, "%s bus width: %u, oc: %s\n",
13489 +@@ -235,20 +237,20 @@ static const struct reg_field isp1760_hc_reg_fields[] = {
13490 + [HC_ISO_IRQ_MASK_AND] = REG_FIELD(ISP176x_HC_ISO_IRQ_MASK_AND, 0, 31),
13491 + [HC_INT_IRQ_MASK_AND] = REG_FIELD(ISP176x_HC_INT_IRQ_MASK_AND, 0, 31),
13492 + [HC_ATL_IRQ_MASK_AND] = REG_FIELD(ISP176x_HC_ATL_IRQ_MASK_AND, 0, 31),
13493 +- [HW_OTG_DISABLE] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 10, 10),
13494 +- [HW_SW_SEL_HC_DC] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 7, 7),
13495 +- [HW_VBUS_DRV] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 4, 4),
13496 +- [HW_SEL_CP_EXT] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 3, 3),
13497 +- [HW_DM_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 2, 2),
13498 +- [HW_DP_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 1, 1),
13499 +- [HW_DP_PULLUP] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 0, 0),
13500 +- [HW_OTG_DISABLE_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 10, 10),
13501 +- [HW_SW_SEL_HC_DC_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 7, 7),
13502 +- [HW_VBUS_DRV_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 4, 4),
13503 +- [HW_SEL_CP_EXT_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 3, 3),
13504 +- [HW_DM_PULLDOWN_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 2, 2),
13505 +- [HW_DP_PULLDOWN_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 1, 1),
13506 +- [HW_DP_PULLUP_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 0, 0),
13507 ++ [HW_OTG_DISABLE_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 26, 26),
13508 ++ [HW_SW_SEL_HC_DC_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 23, 23),
13509 ++ [HW_VBUS_DRV_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 20, 20),
13510 ++ [HW_SEL_CP_EXT_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 19, 19),
13511 ++ [HW_DM_PULLDOWN_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 18, 18),
13512 ++ [HW_DP_PULLDOWN_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 17, 17),
13513 ++ [HW_DP_PULLUP_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 16, 16),
13514 ++ [HW_OTG_DISABLE] = REG_FIELD(ISP176x_HC_OTG_CTRL, 10, 10),
13515 ++ [HW_SW_SEL_HC_DC] = REG_FIELD(ISP176x_HC_OTG_CTRL, 7, 7),
13516 ++ [HW_VBUS_DRV] = REG_FIELD(ISP176x_HC_OTG_CTRL, 4, 4),
13517 ++ [HW_SEL_CP_EXT] = REG_FIELD(ISP176x_HC_OTG_CTRL, 3, 3),
13518 ++ [HW_DM_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL, 2, 2),
13519 ++ [HW_DP_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL, 1, 1),
13520 ++ [HW_DP_PULLUP] = REG_FIELD(ISP176x_HC_OTG_CTRL, 0, 0),
13521 + };
13522 +
13523 + static const struct reg_field isp1763_hc_reg_fields[] = {
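
The isp1760-core.c changes retire the separate OTG_CTRL_SET/OTG_CTRL_CLEAR register pair in favour of a single 32-bit write to ISP176x_HC_OTG_CTRL, with the set bits in the low half and the matching clear bits shifted up by 16 (mirroring the _CLEAR = BIT(n + 16) defines added to isp1760-regs.h). A toy model of that write-one-to-set / write-one-to-clear convention, assuming those latch semantics and simulating the register in a plain variable:

#include <stdint.h>
#include <stdio.h>

#define HW_DP_PULLUP		(1u << 0)
#define HW_DM_PULLDOWN		(1u << 2)
#define HW_DP_PULLUP_CLEAR	(HW_DP_PULLUP << 16)

static uint32_t otg_ctrl;	/* stand-in for the MMIO register */

static void otg_write(uint32_t val)
{
	otg_ctrl |= val & 0xffff;		/* low half sets bits */
	otg_ctrl &= ~((val >> 16) & 0xffff);	/* high half clears them */
}

int main(void)
{
	otg_write(HW_DP_PULLUP | HW_DM_PULLDOWN);
	otg_write(HW_DP_PULLUP_CLEAR);	/* drop the pull-up, keep the pulldown */
	printf("state=%#x\n", otg_ctrl);	/* prints state=0x4 */
	return 0;
}
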
13524 +diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
13525 +index 27168b4a4ef22..e517376c32917 100644
13526 +--- a/drivers/usb/isp1760/isp1760-hcd.c
13527 ++++ b/drivers/usb/isp1760/isp1760-hcd.c
13528 +@@ -182,7 +182,7 @@ struct urb_listitem {
13529 + struct urb *urb;
13530 + };
13531 +
13532 +-static const u32 isp1763_hc_portsc1_fields[] = {
13533 ++static const u32 isp176x_hc_portsc1_fields[] = {
13534 + [PORT_OWNER] = BIT(13),
13535 + [PORT_POWER] = BIT(12),
13536 + [PORT_LSTATUS] = BIT(10),
13537 +@@ -205,27 +205,28 @@ static u32 isp1760_hcd_read(struct usb_hcd *hcd, u32 field)
13538 + }
13539 +
13540 + /*
13541 +- * We need, in isp1763, to write directly the values to the portsc1
13542 ++ * On the isp176x we need to write values directly to the portsc1
13543 + * register so that the other bits take effect.
13544 + */
13545 + static void isp1760_hcd_portsc1_set_clear(struct isp1760_hcd *priv, u32 field,
13546 + u32 val)
13547 + {
13548 +- u32 bit = isp1763_hc_portsc1_fields[field];
13549 +- u32 port_status = readl(priv->base + ISP1763_HC_PORTSC1);
13550 ++ u32 bit = isp176x_hc_portsc1_fields[field];
13551 ++ u16 portsc1_reg = priv->is_isp1763 ? ISP1763_HC_PORTSC1 :
13552 ++ ISP176x_HC_PORTSC1;
13553 ++ u32 port_status = readl(priv->base + portsc1_reg);
13554 +
13555 + if (val)
13556 +- writel(port_status | bit, priv->base + ISP1763_HC_PORTSC1);
13557 ++ writel(port_status | bit, priv->base + portsc1_reg);
13558 + else
13559 +- writel(port_status & ~bit, priv->base + ISP1763_HC_PORTSC1);
13560 ++ writel(port_status & ~bit, priv->base + portsc1_reg);
13561 + }
13562 +
13563 + static void isp1760_hcd_write(struct usb_hcd *hcd, u32 field, u32 val)
13564 + {
13565 + struct isp1760_hcd *priv = hcd_to_priv(hcd);
13566 +
13567 +- if (unlikely(priv->is_isp1763 &&
13568 +- (field >= PORT_OWNER && field <= PORT_CONNECT)))
13569 ++ if (unlikely((field >= PORT_OWNER && field <= PORT_CONNECT)))
13570 + return isp1760_hcd_portsc1_set_clear(priv, field, val);
13571 +
13572 + isp1760_field_write(priv->fields, field, val);
13573 +@@ -367,8 +368,7 @@ static void isp1760_mem_read(struct usb_hcd *hcd, u32 src_offset, void *dst,
13574 + {
13575 + struct isp1760_hcd *priv = hcd_to_priv(hcd);
13576 +
13577 +- isp1760_hcd_write(hcd, MEM_BANK_SEL, ISP_BANK_0);
13578 +- isp1760_hcd_write(hcd, MEM_START_ADDR, src_offset);
13579 ++ isp1760_reg_write(priv->regs, ISP176x_HC_MEMORY, src_offset);
13580 + ndelay(100);
13581 +
13582 + bank_reads8(priv->base, src_offset, ISP_BANK_0, dst, bytes);
13583 +@@ -496,8 +496,7 @@ static void isp1760_ptd_read(struct usb_hcd *hcd, u32 ptd_offset, u32 slot,
13584 + u16 src_offset = ptd_offset + slot * sizeof(*ptd);
13585 + struct isp1760_hcd *priv = hcd_to_priv(hcd);
13586 +
13587 +- isp1760_hcd_write(hcd, MEM_BANK_SEL, ISP_BANK_0);
13588 +- isp1760_hcd_write(hcd, MEM_START_ADDR, src_offset);
13589 ++ isp1760_reg_write(priv->regs, ISP176x_HC_MEMORY, src_offset);
13590 + ndelay(90);
13591 +
13592 + bank_reads8(priv->base, src_offset, ISP_BANK_0, (void *)ptd,
13593 +@@ -588,8 +587,8 @@ static void init_memory(struct isp1760_hcd *priv)
13594 +
13595 + payload_addr = PAYLOAD_OFFSET;
13596 +
13597 +- for (i = 0, curr = 0; i < ARRAY_SIZE(mem->blocks); i++) {
13598 +- for (j = 0; j < mem->blocks[i]; j++, curr++) {
13599 ++ for (i = 0, curr = 0; i < ARRAY_SIZE(mem->blocks); i++, curr += j) {
13600 ++ for (j = 0; j < mem->blocks[i]; j++) {
13601 + priv->memory_pool[curr + j].start = payload_addr;
13602 + priv->memory_pool[curr + j].size = mem->blocks_size[i];
13603 + priv->memory_pool[curr + j].free = 1;
13604 +@@ -1826,9 +1825,11 @@ static void packetize_urb(struct usb_hcd *hcd,
13605 + goto cleanup;
13606 +
13607 + if (len > mem->blocks_size[ISP176x_BLOCK_NUM - 1])
13608 +- len = mem->blocks_size[ISP176x_BLOCK_NUM - 1];
13609 ++ this_qtd_len = mem->blocks_size[ISP176x_BLOCK_NUM - 1];
13610 ++ else
13611 ++ this_qtd_len = len;
13612 +
13613 +- this_qtd_len = qtd_fill(qtd, buf, len);
13614 ++ this_qtd_len = qtd_fill(qtd, buf, this_qtd_len);
13615 + list_add_tail(&qtd->qtd_list, head);
13616 +
13617 + len -= this_qtd_len;
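
The packetize_urb() fix above stops clobbering the running 'len' when a transfer exceeds the largest payload block: only the per-qTD length is clamped, so the loop still sees the true number of outstanding bytes. A minimal sketch of the corrected chunking, where MAX_BLOCK is an illustrative stand-in for mem->blocks_size[ISP176x_BLOCK_NUM - 1]:

#include <stddef.h>
#include <stdio.h>

#define MAX_BLOCK 8192	/* illustrative largest payload block */

static void packetize(size_t len)
{
	while (len) {
		/* clamp only the per-descriptor length... */
		size_t this_len = len > MAX_BLOCK ? MAX_BLOCK : len;

		printf("qtd of %zu bytes\n", this_len);
		len -= this_len;	/* ...so 'len' keeps the true remainder */
	}
}

int main(void)
{
	packetize(20000);	/* 8192 + 8192 + 3616 */
	return 0;
}
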
13618 +diff --git a/drivers/usb/isp1760/isp1760-regs.h b/drivers/usb/isp1760/isp1760-regs.h
13619 +index 94ea60c20b2a4..3a6751197e970 100644
13620 +--- a/drivers/usb/isp1760/isp1760-regs.h
13621 ++++ b/drivers/usb/isp1760/isp1760-regs.h
13622 +@@ -61,6 +61,7 @@
13623 + #define ISP176x_HC_INT_IRQ_MASK_AND 0x328
13624 + #define ISP176x_HC_ATL_IRQ_MASK_AND 0x32c
13625 +
13626 ++#define ISP176x_HC_OTG_CTRL 0x374
13627 + #define ISP176x_HC_OTG_CTRL_SET 0x374
13628 + #define ISP176x_HC_OTG_CTRL_CLEAR 0x376
13629 +
13630 +@@ -179,6 +180,21 @@ enum isp176x_host_controller_fields {
13631 + #define ISP176x_DC_IESUSP BIT(3)
13632 + #define ISP176x_DC_IEBRST BIT(0)
13633 +
13634 ++#define ISP176x_HW_OTG_DISABLE_CLEAR BIT(26)
13635 ++#define ISP176x_HW_SW_SEL_HC_DC_CLEAR BIT(23)
13636 ++#define ISP176x_HW_VBUS_DRV_CLEAR BIT(20)
13637 ++#define ISP176x_HW_SEL_CP_EXT_CLEAR BIT(19)
13638 ++#define ISP176x_HW_DM_PULLDOWN_CLEAR BIT(18)
13639 ++#define ISP176x_HW_DP_PULLDOWN_CLEAR BIT(17)
13640 ++#define ISP176x_HW_DP_PULLUP_CLEAR BIT(16)
13641 ++#define ISP176x_HW_OTG_DISABLE BIT(10)
13642 ++#define ISP176x_HW_SW_SEL_HC_DC BIT(7)
13643 ++#define ISP176x_HW_VBUS_DRV BIT(4)
13644 ++#define ISP176x_HW_SEL_CP_EXT BIT(3)
13645 ++#define ISP176x_HW_DM_PULLDOWN BIT(2)
13646 ++#define ISP176x_HW_DP_PULLDOWN BIT(1)
13647 ++#define ISP176x_HW_DP_PULLUP BIT(0)
13648 ++
13649 + #define ISP176x_DC_ENDPTYP_ISOC 0x01
13650 + #define ISP176x_DC_ENDPTYP_BULK 0x02
13651 + #define ISP176x_DC_ENDPTYP_INTERRUPT 0x03
13652 +diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
13653 +index a78da59d6417b..5cafd23345cad 100644
13654 +--- a/drivers/usb/isp1760/isp1760-udc.c
13655 ++++ b/drivers/usb/isp1760/isp1760-udc.c
13656 +@@ -1363,7 +1363,7 @@ static irqreturn_t isp1760_udc_irq(int irq, void *dev)
13657 +
13658 + status = isp1760_udc_irq_get_status(udc);
13659 +
13660 +- if (status & DC_IEVBUS) {
13661 ++ if (status & ISP176x_DC_IEVBUS) {
13662 + dev_dbg(udc->isp->dev, "%s(VBUS)\n", __func__);
13663 + /* The VBUS interrupt is only triggered when VBUS appears. */
13664 + spin_lock(&udc->lock);
13665 +@@ -1371,7 +1371,7 @@ static irqreturn_t isp1760_udc_irq(int irq, void *dev)
13666 + spin_unlock(&udc->lock);
13667 + }
13668 +
13669 +- if (status & DC_IEBRST) {
13670 ++ if (status & ISP176x_DC_IEBRST) {
13671 + dev_dbg(udc->isp->dev, "%s(BRST)\n", __func__);
13672 +
13673 + isp1760_udc_reset(udc);
13674 +@@ -1391,18 +1391,18 @@ static irqreturn_t isp1760_udc_irq(int irq, void *dev)
13675 + }
13676 + }
13677 +
13678 +- if (status & DC_IEP0SETUP) {
13679 ++ if (status & ISP176x_DC_IEP0SETUP) {
13680 + dev_dbg(udc->isp->dev, "%s(EP0SETUP)\n", __func__);
13681 +
13682 + isp1760_ep0_setup(udc);
13683 + }
13684 +
13685 +- if (status & DC_IERESM) {
13686 ++ if (status & ISP176x_DC_IERESM) {
13687 + dev_dbg(udc->isp->dev, "%s(RESM)\n", __func__);
13688 + isp1760_udc_resume(udc);
13689 + }
13690 +
13691 +- if (status & DC_IESUSP) {
13692 ++ if (status & ISP176x_DC_IESUSP) {
13693 + dev_dbg(udc->isp->dev, "%s(SUSP)\n", __func__);
13694 +
13695 + spin_lock(&udc->lock);
13696 +@@ -1413,7 +1413,7 @@ static irqreturn_t isp1760_udc_irq(int irq, void *dev)
13697 + spin_unlock(&udc->lock);
13698 + }
13699 +
13700 +- if (status & DC_IEHS_STA) {
13701 ++ if (status & ISP176x_DC_IEHS_STA) {
13702 + dev_dbg(udc->isp->dev, "%s(HS_STA)\n", __func__);
13703 + udc->gadget.speed = USB_SPEED_HIGH;
13704 + }
13705 +diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
13706 +index 5892f3ce0cdc8..ce9fc46c92661 100644
13707 +--- a/drivers/usb/musb/musb_dsps.c
13708 ++++ b/drivers/usb/musb/musb_dsps.c
13709 +@@ -890,23 +890,22 @@ static int dsps_probe(struct platform_device *pdev)
13710 + if (!glue->usbss_base)
13711 + return -ENXIO;
13712 +
13713 +- if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
13714 +- ret = dsps_setup_optional_vbus_irq(pdev, glue);
13715 +- if (ret)
13716 +- goto err_iounmap;
13717 +- }
13718 +-
13719 + platform_set_drvdata(pdev, glue);
13720 + pm_runtime_enable(&pdev->dev);
13721 + ret = dsps_create_musb_pdev(glue, pdev);
13722 + if (ret)
13723 + goto err;
13724 +
13725 ++ if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
13726 ++ ret = dsps_setup_optional_vbus_irq(pdev, glue);
13727 ++ if (ret)
13728 ++ goto err;
13729 ++ }
13730 ++
13731 + return 0;
13732 +
13733 + err:
13734 + pm_runtime_disable(&pdev->dev);
13735 +-err_iounmap:
13736 + iounmap(glue->usbss_base);
13737 + return ret;
13738 + }
13739 +diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
13740 +index 4ba6bcdaa8e9d..b07b2925ff78b 100644
13741 +--- a/drivers/usb/usbip/vhci_hcd.c
13742 ++++ b/drivers/usb/usbip/vhci_hcd.c
13743 +@@ -455,8 +455,14 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
13744 + vhci_hcd->port_status[rhport] &= ~(1 << USB_PORT_FEAT_RESET);
13745 + vhci_hcd->re_timeout = 0;
13746 +
13747 ++ /*
13748 ++ * A few drivers do a USB reset during probe, when the
13749 ++ * device may still be in the VDEV_ST_USED state
13750 ++ */
13751 + if (vhci_hcd->vdev[rhport].ud.status ==
13752 +- VDEV_ST_NOTASSIGNED) {
13753 ++ VDEV_ST_NOTASSIGNED ||
13754 ++ vhci_hcd->vdev[rhport].ud.status ==
13755 ++ VDEV_ST_USED) {
13756 + usbip_dbg_vhci_rh(
13757 + " enable rhport %d (status %u)\n",
13758 + rhport,
13759 +@@ -957,8 +963,32 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
13760 + spin_lock(&vdev->priv_lock);
13761 +
13762 + list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
13763 ++ struct urb *urb;
13764 ++
13765 ++ /* give back urb of unsent unlink request */
13766 + pr_info("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
13767 ++
13768 ++ urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
13769 ++ if (!urb) {
13770 ++ list_del(&unlink->list);
13771 ++ kfree(unlink);
13772 ++ continue;
13773 ++ }
13774 ++
13775 ++ urb->status = -ENODEV;
13776 ++
13777 ++ usb_hcd_unlink_urb_from_ep(hcd, urb);
13778 ++
13779 + list_del(&unlink->list);
13780 ++
13781 ++ spin_unlock(&vdev->priv_lock);
13782 ++ spin_unlock_irqrestore(&vhci->lock, flags);
13783 ++
13784 ++ usb_hcd_giveback_urb(hcd, urb, urb->status);
13785 ++
13786 ++ spin_lock_irqsave(&vhci->lock, flags);
13787 ++ spin_lock(&vdev->priv_lock);
13788 ++
13789 + kfree(unlink);
13790 + }
13791 +
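
In the vhci_device_unlink_cleanup() change above, URBs belonging to never-sent unlink requests are now given back with -ENODEV, and both spinlocks are released around usb_hcd_giveback_urb() because the completion path may re-enter the HCD and take the same locks (which is also why the _safe list iterator is needed). A userspace model of that drop-and-retake pattern around a callback, with a pthread mutex standing in for the spinlocks; the names are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void giveback(int urb)
{
	/* may re-enter code that takes 'lock' */
	printf("complete urb %d\n", urb);
}

int main(void)
{
	pthread_mutex_lock(&lock);
	for (int urb = 0; urb < 3; urb++) {
		pthread_mutex_unlock(&lock);	/* drop around the callback */
		giveback(urb);
		pthread_mutex_lock(&lock);	/* retake before the next item */
	}
	pthread_mutex_unlock(&lock);
	return 0;
}
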
13792 +diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
13793 +index 67d0bf4efa160..e44bf736e2b22 100644
13794 +--- a/drivers/vfio/Kconfig
13795 ++++ b/drivers/vfio/Kconfig
13796 +@@ -29,7 +29,7 @@ menuconfig VFIO
13797 +
13798 + If you don't know what to do here, say N.
13799 +
13800 +-menuconfig VFIO_NOIOMMU
13801 ++config VFIO_NOIOMMU
13802 + bool "VFIO No-IOMMU support"
13803 + depends on VFIO
13804 + help
13805 +diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
13806 +index 3e006da477523..84c56f525889f 100644
13807 +--- a/drivers/video/fbdev/asiliantfb.c
13808 ++++ b/drivers/video/fbdev/asiliantfb.c
13809 +@@ -227,6 +227,9 @@ static int asiliantfb_check_var(struct fb_var_screeninfo *var,
13810 + {
13811 + unsigned long Ftarget, ratio, remainder;
13812 +
13813 ++ if (!var->pixclock)
13814 ++ return -EINVAL;
13815 ++
13816 + ratio = 1000000 / var->pixclock;
13817 + remainder = 1000000 % var->pixclock;
13818 + Ftarget = 1000000 * ratio + (1000000 * remainder) / var->pixclock;
13819 +diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
13820 +index 8fbde92ae8b9c..25801e8e3f74a 100644
13821 +--- a/drivers/video/fbdev/kyro/fbdev.c
13822 ++++ b/drivers/video/fbdev/kyro/fbdev.c
13823 +@@ -372,6 +372,11 @@ static int kyro_dev_overlay_viewport_set(u32 x, u32 y, u32 ulWidth, u32 ulHeight
13824 + /* probably haven't called CreateOverlay yet */
13825 + return -EINVAL;
13826 +
13827 ++ if (ulWidth == 0 || ulWidth == 0xffffffff ||
13828 ++ ulHeight == 0 || ulHeight == 0xffffffff ||
13829 ++ (x < 2 && ulWidth + 2 == 0))
13830 ++ return -EINVAL;
13831 ++
13832 + /* Stop Ramdac Output */
13833 + DisableRamdacOutput(deviceInfo.pSTGReg);
13834 +
13835 +@@ -394,6 +399,9 @@ static int kyrofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
13836 + {
13837 + struct kyrofb_info *par = info->par;
13838 +
13839 ++ if (!var->pixclock)
13840 ++ return -EINVAL;
13841 ++
13842 + if (var->bits_per_pixel != 16 && var->bits_per_pixel != 32) {
13843 + printk(KERN_WARNING "kyrofb: depth not supported: %u\n", var->bits_per_pixel);
13844 + return -EINVAL;
13845 +diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
13846 +index 55554b0433cb4..84d5e23ad7d38 100644
13847 +--- a/drivers/video/fbdev/riva/fbdev.c
13848 ++++ b/drivers/video/fbdev/riva/fbdev.c
13849 +@@ -1084,6 +1084,9 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
13850 + int mode_valid = 0;
13851 +
13852 + NVTRACE_ENTER();
13853 ++ if (!var->pixclock)
13854 ++ return -EINVAL;
13855 ++
13856 + switch (var->bits_per_pixel) {
13857 + case 1 ... 8:
13858 + var->red.offset = var->green.offset = var->blue.offset = 0;
13859 +diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
13860 +index b3f604669e2c3..643c6c2d0b728 100644
13861 +--- a/drivers/watchdog/iTCO_wdt.c
13862 ++++ b/drivers/watchdog/iTCO_wdt.c
13863 +@@ -362,7 +362,7 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
13864 + * Otherwise, the BIOS generally reboots when the SMI triggers.
13865 + */
13866 + if (p->smi_res &&
13867 +- (SMI_EN(p) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
13868 ++ (inl(SMI_EN(p)) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
13869 + tmrval /= 2;
13870 +
13871 + /* from the specs: */
13872 +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
13873 +index 9e7d9d0c763dd..b1492cb5c6be5 100644
13874 +--- a/fs/btrfs/block-group.c
13875 ++++ b/fs/btrfs/block-group.c
13876 +@@ -1561,7 +1561,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
13877 + div64_u64(zone_unusable * 100, bg->length));
13878 + trace_btrfs_reclaim_block_group(bg);
13879 + ret = btrfs_relocate_chunk(fs_info, bg->start);
13880 +- if (ret)
13881 ++ if (ret && ret != -EAGAIN)
13882 + btrfs_err(fs_info, "error relocating chunk %llu",
13883 + bg->start);
13884 +
13885 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
13886 +index e5e53e592d4f9..4aa4f4760b726 100644
13887 +--- a/fs/btrfs/ctree.h
13888 ++++ b/fs/btrfs/ctree.h
13889 +@@ -2781,10 +2781,11 @@ enum btrfs_flush_state {
13890 + FLUSH_DELAYED_REFS = 4,
13891 + FLUSH_DELALLOC = 5,
13892 + FLUSH_DELALLOC_WAIT = 6,
13893 +- ALLOC_CHUNK = 7,
13894 +- ALLOC_CHUNK_FORCE = 8,
13895 +- RUN_DELAYED_IPUTS = 9,
13896 +- COMMIT_TRANS = 10,
13897 ++ FLUSH_DELALLOC_FULL = 7,
13898 ++ ALLOC_CHUNK = 8,
13899 ++ ALLOC_CHUNK_FORCE = 9,
13900 ++ RUN_DELAYED_IPUTS = 10,
13901 ++ COMMIT_TRANS = 11,
13902 + };
13903 +
13904 + int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
13905 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
13906 +index a59ab7b9aea08..b2f713c759e87 100644
13907 +--- a/fs/btrfs/disk-io.c
13908 ++++ b/fs/btrfs/disk-io.c
13909 +@@ -3314,6 +3314,30 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
13910 + */
13911 + fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
13912 +
13913 ++ /*
13914 ++ * Flag our filesystem as having big metadata blocks if they are bigger
13915 ++ * than the page size.
13916 ++ */
13917 ++ if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
13918 ++ if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
13919 ++ btrfs_info(fs_info,
13920 ++ "flagging fs with big metadata feature");
13921 ++ features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
13922 ++ }
13923 ++
13924 ++ /* Set up fs_info before parsing mount options */
13925 ++ nodesize = btrfs_super_nodesize(disk_super);
13926 ++ sectorsize = btrfs_super_sectorsize(disk_super);
13927 ++ stripesize = sectorsize;
13928 ++ fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
13929 ++ fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
13930 ++
13931 ++ fs_info->nodesize = nodesize;
13932 ++ fs_info->sectorsize = sectorsize;
13933 ++ fs_info->sectorsize_bits = ilog2(sectorsize);
13934 ++ fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
13935 ++ fs_info->stripesize = stripesize;
13936 ++
13937 + ret = btrfs_parse_options(fs_info, options, sb->s_flags);
13938 + if (ret) {
13939 + err = ret;
13940 +@@ -3340,30 +3364,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
13941 + if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
13942 + btrfs_info(fs_info, "has skinny extents");
13943 +
13944 +- /*
13945 +- * flag our filesystem as having big metadata blocks if
13946 +- * they are bigger than the page size
13947 +- */
13948 +- if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
13949 +- if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
13950 +- btrfs_info(fs_info,
13951 +- "flagging fs with big metadata feature");
13952 +- features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
13953 +- }
13954 +-
13955 +- nodesize = btrfs_super_nodesize(disk_super);
13956 +- sectorsize = btrfs_super_sectorsize(disk_super);
13957 +- stripesize = sectorsize;
13958 +- fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
13959 +- fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
13960 +-
13961 +- /* Cache block sizes */
13962 +- fs_info->nodesize = nodesize;
13963 +- fs_info->sectorsize = sectorsize;
13964 +- fs_info->sectorsize_bits = ilog2(sectorsize);
13965 +- fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
13966 +- fs_info->stripesize = stripesize;
13967 +-
13968 + /*
13969 + * mixed block groups end up with duplicate but slightly offset
13970 + * extent buffers for the same range. It leads to corruptions
13971 +diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
13972 +index 2131ae5b9ed78..c92643e4c6787 100644
13973 +--- a/fs/btrfs/free-space-cache.c
13974 ++++ b/fs/btrfs/free-space-cache.c
13975 +@@ -2652,8 +2652,11 @@ int btrfs_remove_free_space(struct btrfs_block_group *block_group,
13976 + * btrfs_pin_extent_for_log_replay() when replaying the log.
13977 + * Advance the pointer not to overwrite the tree-log nodes.
13978 + */
13979 +- if (block_group->alloc_offset < offset + bytes)
13980 +- block_group->alloc_offset = offset + bytes;
13981 ++ if (block_group->start + block_group->alloc_offset <
13982 ++ offset + bytes) {
13983 ++ block_group->alloc_offset =
13984 ++ offset + bytes - block_group->start;
13985 ++ }
13986 + return 0;
13987 + }
13988 +
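
The free-space-cache hunk above fixes a mixed-coordinate comparison: block_group->alloc_offset is relative to the block group's start, while 'offset' is an absolute logical address, so the base has to be added before comparing and subtracted again when storing. A small sketch of the corrected arithmetic with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bg_start = 1ULL << 30;		/* block group base */
	uint64_t alloc_offset = 4096;		/* relative allocation pointer */
	uint64_t offset = bg_start + 8192;	/* absolute extent start */
	uint64_t bytes = 4096;

	if (bg_start + alloc_offset < offset + bytes)
		alloc_offset = offset + bytes - bg_start;	/* => 12288 */

	printf("alloc_offset=%llu\n", (unsigned long long)alloc_offset);
	return 0;
}
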
13989 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
13990 +index bd5689fa290e7..8132d503c83d7 100644
13991 +--- a/fs/btrfs/inode.c
13992 ++++ b/fs/btrfs/inode.c
13993 +@@ -1290,11 +1290,6 @@ static noinline void async_cow_submit(struct btrfs_work *work)
13994 + nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
13995 + PAGE_SHIFT;
13996 +
13997 +- /* atomic_sub_return implies a barrier */
13998 +- if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
13999 +- 5 * SZ_1M)
14000 +- cond_wake_up_nomb(&fs_info->async_submit_wait);
14001 +-
14002 + /*
14003 + * ->inode could be NULL if async_chunk_start has failed to compress,
14004 + * in which case we don't have anything to submit, yet we need to
14005 +@@ -1303,6 +1298,11 @@ static noinline void async_cow_submit(struct btrfs_work *work)
14006 + */
14007 + if (async_chunk->inode)
14008 + submit_compressed_extents(async_chunk);
14009 ++
14010 ++ /* atomic_sub_return implies a barrier */
14011 ++ if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
14012 ++ 5 * SZ_1M)
14013 ++ cond_wake_up_nomb(&fs_info->async_submit_wait);
14014 + }
14015 +
14016 + static noinline void async_cow_free(struct btrfs_work *work)
14017 +@@ -5088,15 +5088,13 @@ static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
14018 + int ret;
14019 +
14020 + /*
14021 +- * Still need to make sure the inode looks like it's been updated so
14022 +- * that any holes get logged if we fsync.
14023 ++ * If NO_HOLES is enabled, we don't need to do anything.
14024 ++ * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
14025 ++ * or btrfs_update_inode() will be called, which guarantees that the next
14026 ++ * fsync will know this inode was changed and needs to be logged.
14027 + */
14028 +- if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
14029 +- inode->last_trans = fs_info->generation;
14030 +- inode->last_sub_trans = root->log_transid;
14031 +- inode->last_log_commit = root->last_log_commit;
14032 ++ if (btrfs_fs_incompat(fs_info, NO_HOLES))
14033 + return 0;
14034 +- }
14035 +
14036 + /*
14037 + * 1 - for the one we're dropping
14038 +@@ -9809,10 +9807,6 @@ static int start_delalloc_inodes(struct btrfs_root *root,
14039 + &work->work);
14040 + } else {
14041 + ret = sync_inode(inode, wbc);
14042 +- if (!ret &&
14043 +- test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
14044 +- &BTRFS_I(inode)->runtime_flags))
14045 +- ret = sync_inode(inode, wbc);
14046 + btrfs_add_delayed_iput(inode);
14047 + if (ret || wbc->nr_to_write <= 0)
14048 + goto out;
14049 +diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
14050 +index 5c0f8481e25e0..182d9fb3f5e94 100644
14051 +--- a/fs/btrfs/ordered-data.c
14052 ++++ b/fs/btrfs/ordered-data.c
14053 +@@ -1052,6 +1052,7 @@ static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
14054 + u64 len)
14055 + {
14056 + struct inode *inode = ordered->inode;
14057 ++ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
14058 + u64 file_offset = ordered->file_offset + pos;
14059 + u64 disk_bytenr = ordered->disk_bytenr + pos;
14060 + u64 num_bytes = len;
14061 +@@ -1069,6 +1070,13 @@ static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
14062 + else
14063 + type = __ffs(flags_masked);
14064 +
14065 ++ /*
14066 ++ * The extent being split is already counted and will be added again
14067 ++ * in btrfs_add_ordered_extent_*(). Subtract num_bytes to avoid
14068 ++ * double counting.
14069 ++ */
14070 ++ percpu_counter_add_batch(&fs_info->ordered_bytes, -num_bytes,
14071 ++ fs_info->delalloc_batch);
14072 + if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
14073 + WARN_ON_ONCE(1);
14074 + ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
14075 +diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
14076 +index f79bf85f24399..46e8415fa2c55 100644
14077 +--- a/fs/btrfs/space-info.c
14078 ++++ b/fs/btrfs/space-info.c
14079 +@@ -493,6 +493,11 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
14080 + long time_left;
14081 + int loops;
14082 +
14083 ++ delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
14084 ++ ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
14085 ++ if (delalloc_bytes == 0 && ordered_bytes == 0)
14086 ++ return;
14087 ++
14088 + /* Calc the number of the pages we need flush for space reservation */
14089 + if (to_reclaim == U64_MAX) {
14090 + items = U64_MAX;
14091 +@@ -500,22 +505,21 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
14092 + /*
14093 + * to_reclaim is set to however much metadata we need to
14094 + * reclaim, but reclaiming that much data doesn't really track
14095 +- * exactly, so increase the amount to reclaim by 2x in order to
14096 +- * make sure we're flushing enough delalloc to hopefully reclaim
14097 +- * some metadata reservations.
14098 ++ * exactly. What we really want to do is reclaim a full inode's
14099 ++ * worth of reservations; however, that's not available to us
14100 ++ * here. We will take a fraction of the delalloc bytes for our
14101 ++ * flushing loops and hope for the best. Delalloc will expand
14102 ++ * the amount we write to cover an entire dirty extent, which
14103 ++ * will reclaim the metadata reservation for that range. If
14104 ++ * it's not enough, subsequent flush stages will be more
14105 ++ * aggressive.
14106 + */
14107 ++ to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
14108 + items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
14109 +- to_reclaim = items * EXTENT_SIZE_PER_ITEM;
14110 + }
14111 +
14112 + trans = (struct btrfs_trans_handle *)current->journal_info;
14113 +
14114 +- delalloc_bytes = percpu_counter_sum_positive(
14115 +- &fs_info->delalloc_bytes);
14116 +- ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
14117 +- if (delalloc_bytes == 0 && ordered_bytes == 0)
14118 +- return;
14119 +-
14120 + /*
14121 + * If we are doing more ordered than delalloc we need to just wait on
14122 + * ordered extents, otherwise we'll waste time trying to flush delalloc
14123 +@@ -528,9 +532,49 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
14124 + while ((delalloc_bytes || ordered_bytes) && loops < 3) {
14125 + u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
14126 + long nr_pages = min_t(u64, temp, LONG_MAX);
14127 ++ int async_pages;
14128 +
14129 + btrfs_start_delalloc_roots(fs_info, nr_pages, true);
14130 +
14131 ++ /*
14132 ++ * We need to make sure any outstanding async pages are now
14133 ++ * processed before we continue. This is because things like
14134 ++ * sync_inode() try to be smart and skip writing if the inode is
14135 ++ * marked clean. We don't use filemap_fdatawrite() for flushing
14136 ++ * because we want to control how many pages we write out at a
14137 ++ * time, thus this is the only safe way to make sure we've
14138 ++ * waited for outstanding compressed workers to have started
14139 ++ * their jobs and thus have ordered extents set up properly.
14140 ++ *
14141 ++ * This exists because we do not want to wait for each
14142 ++ * individual inode to finish its async work; we simply want to
14143 ++ * start the IO on everybody, and then come back here and wait
14144 ++ * for all of the async work to catch up. Once we're done with
14145 ++ * that we know we'll have ordered extents for everything and we
14146 ++ * can decide if we wait for that or not.
14147 ++ *
14148 ++ * If we choose to replace this in the future, make absolutely
14149 ++ * sure that the proper waiting is being done in the async case,
14150 ++ * as there have been bugs in that area before.
14151 ++ */
14152 ++ async_pages = atomic_read(&fs_info->async_delalloc_pages);
14153 ++ if (!async_pages)
14154 ++ goto skip_async;
14155 ++
14156 ++ /*
14157 ++ * We don't want to wait forever; if we wrote fewer pages in this
14158 ++ * loop than we have outstanding, only wait for that number of
14159 ++ * pages; otherwise we can wait for all async pages to finish
14160 ++ * before continuing.
14161 ++ */
14162 ++ if (async_pages > nr_pages)
14163 ++ async_pages -= nr_pages;
14164 ++ else
14165 ++ async_pages = 0;
14166 ++ wait_event(fs_info->async_submit_wait,
14167 ++ atomic_read(&fs_info->async_delalloc_pages) <=
14168 ++ async_pages);
14169 ++skip_async:
14170 + loops++;
14171 + if (wait_ordered && !trans) {
14172 + btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
14173 +@@ -595,8 +639,11 @@ static void flush_space(struct btrfs_fs_info *fs_info,
14174 + break;
14175 + case FLUSH_DELALLOC:
14176 + case FLUSH_DELALLOC_WAIT:
14177 ++ case FLUSH_DELALLOC_FULL:
14178 ++ if (state == FLUSH_DELALLOC_FULL)
14179 ++ num_bytes = U64_MAX;
14180 + shrink_delalloc(fs_info, space_info, num_bytes,
14181 +- state == FLUSH_DELALLOC_WAIT, for_preempt);
14182 ++ state != FLUSH_DELALLOC, for_preempt);
14183 + break;
14184 + case FLUSH_DELAYED_REFS_NR:
14185 + case FLUSH_DELAYED_REFS:
14186 +@@ -686,7 +733,7 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
14187 + {
14188 + u64 global_rsv_size = fs_info->global_block_rsv.reserved;
14189 + u64 ordered, delalloc;
14190 +- u64 thresh = div_factor_fine(space_info->total_bytes, 98);
14191 ++ u64 thresh = div_factor_fine(space_info->total_bytes, 90);
14192 + u64 used;
14193 +
14194 + /* If we're just plain full then async reclaim just slows us down. */
14195 +@@ -694,6 +741,20 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
14196 + global_rsv_size) >= thresh)
14197 + return false;
14198 +
14199 ++ used = space_info->bytes_may_use + space_info->bytes_pinned;
14200 ++
14201 ++ /* The total flushable belongs to the global rsv, don't flush. */
14202 ++ if (global_rsv_size >= used)
14203 ++ return false;
14204 ++
14205 ++ /*
14206 ++ * 128MiB is 1/4 of the maximum global rsv size. If we have less than
14207 ++ * that devoted to other reservations then there's no sense in flushing;
14208 ++ * we don't have a lot of things that need flushing.
14209 ++ */
14210 ++ if (used - global_rsv_size <= SZ_128M)
14211 ++ return false;
14212 ++
14213 + /*
14214 + * We have tickets queued, bail so we don't compete with the async
14215 + * flushers.
14216 +@@ -904,6 +965,14 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
14217 + commit_cycles--;
14218 + }
14219 +
14220 ++ /*
14221 ++ * We do not want to empty the system of delalloc unless we're
14222 ++ * under heavy pressure, so allow one trip through the flushing
14223 ++ * logic before we start doing a FLUSH_DELALLOC_FULL.
14224 ++ */
14225 ++ if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
14226 ++ flush_state++;
14227 ++
14228 + /*
14229 + * We don't want to force a chunk allocation until we've tried
14230 + * pretty hard to reclaim space. Think of the case where we
14231 +@@ -1067,7 +1136,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
14232 + * so if we now have space to allocate do the force chunk allocation.
14233 + */
14234 + static const enum btrfs_flush_state data_flush_states[] = {
14235 +- FLUSH_DELALLOC_WAIT,
14236 ++ FLUSH_DELALLOC_FULL,
14237 + RUN_DELAYED_IPUTS,
14238 + COMMIT_TRANS,
14239 + ALLOC_CHUNK_FORCE,
14240 +@@ -1156,6 +1225,7 @@ static const enum btrfs_flush_state evict_flush_states[] = {
14241 + FLUSH_DELAYED_REFS,
14242 + FLUSH_DELALLOC,
14243 + FLUSH_DELALLOC_WAIT,
14244 ++ FLUSH_DELALLOC_FULL,
14245 + ALLOC_CHUNK,
14246 + COMMIT_TRANS,
14247 + };
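
The shrink_delalloc() changes above make each flushing loop wait only for its own submission to drain: after kicking off writeback on nr_pages, it waits until the global async_delalloc_pages counter has dropped by at least that much, rather than requiring it to reach zero. A sketch of how that wait target is computed, with plain variables standing in for the atomics and illustrative numbers:

#include <stdio.h>

int main(void)
{
	long async_pages = 1000;	/* outstanding compressed pages */
	long nr_pages = 64;		/* what this loop just submitted */
	long target;

	/* wait for our own submission to drain, not for the world */
	target = async_pages > nr_pages ? async_pages - nr_pages : 0;
	printf("wait until async_delalloc_pages <= %ld\n", target);
	return 0;
}
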
14248 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
14249 +index e6430ac9bbe85..7037e5855d2a8 100644
14250 +--- a/fs/btrfs/tree-log.c
14251 ++++ b/fs/btrfs/tree-log.c
14252 +@@ -753,7 +753,9 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
14253 + */
14254 + ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
14255 + ins.offset);
14256 +- if (ret == 0) {
14257 ++ if (ret < 0) {
14258 ++ goto out;
14259 ++ } else if (ret == 0) {
14260 + btrfs_init_generic_ref(&ref,
14261 + BTRFS_ADD_DELAYED_REF,
14262 + ins.objectid, ins.offset, 0);
14263 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
14264 +index 354ffd8f81af9..10dd2d210b0f4 100644
14265 +--- a/fs/btrfs/volumes.c
14266 ++++ b/fs/btrfs/volumes.c
14267 +@@ -1130,6 +1130,9 @@ static void btrfs_close_one_device(struct btrfs_device *device)
14268 + fs_devices->rw_devices--;
14269 + }
14270 +
14271 ++ if (device->devid == BTRFS_DEV_REPLACE_DEVID)
14272 ++ clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
14273 ++
14274 + if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
14275 + fs_devices->missing_devices--;
14276 +
14277 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
14278 +index 39db97f149b9b..ba562efdf07b8 100644
14279 +--- a/fs/ceph/caps.c
14280 ++++ b/fs/ceph/caps.c
14281 +@@ -1746,6 +1746,9 @@ struct ceph_cap_flush *ceph_alloc_cap_flush(void)
14282 + struct ceph_cap_flush *cf;
14283 +
14284 + cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
14285 ++ if (!cf)
14286 ++ return NULL;
14287 ++
14288 + cf->is_capsnap = false;
14289 + return cf;
14290 + }
14291 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
14292 +index c5785fd3f52e8..606fd7d6cb713 100644
14293 +--- a/fs/cifs/sess.c
14294 ++++ b/fs/cifs/sess.c
14295 +@@ -877,7 +877,7 @@ sess_alloc_buffer(struct sess_data *sess_data, int wct)
14296 + return 0;
14297 +
14298 + out_free_smb_buf:
14299 +- kfree(smb_buf);
14300 ++ cifs_small_buf_release(smb_buf);
14301 + sess_data->iov[0].iov_base = NULL;
14302 + sess_data->iov[0].iov_len = 0;
14303 + sess_data->buf0_type = CIFS_NO_BUFFER;
14304 +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
14305 +index 455561826c7dc..b8b3f1160afa6 100644
14306 +--- a/fs/f2fs/compress.c
14307 ++++ b/fs/f2fs/compress.c
14308 +@@ -1340,12 +1340,6 @@ out_destroy_crypt:
14309 +
14310 + for (--i; i >= 0; i--)
14311 + fscrypt_finalize_bounce_page(&cc->cpages[i]);
14312 +- for (i = 0; i < cc->nr_cpages; i++) {
14313 +- if (!cc->cpages[i])
14314 +- continue;
14315 +- f2fs_compress_free_page(cc->cpages[i]);
14316 +- cc->cpages[i] = NULL;
14317 +- }
14318 + out_put_cic:
14319 + kmem_cache_free(cic_entry_slab, cic);
14320 + out_put_dnode:
14321 +@@ -1356,6 +1350,12 @@ out_unlock_op:
14322 + else
14323 + f2fs_unlock_op(sbi);
14324 + out_free:
14325 ++ for (i = 0; i < cc->nr_cpages; i++) {
14326 ++ if (!cc->cpages[i])
14327 ++ continue;
14328 ++ f2fs_compress_free_page(cc->cpages[i]);
14329 ++ cc->cpages[i] = NULL;
14330 ++ }
14331 + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
14332 + cc->cpages = NULL;
14333 + return -EAGAIN;
14334 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
14335 +index d2cf48c5a2e49..a86f004c0c07e 100644
14336 +--- a/fs/f2fs/data.c
14337 ++++ b/fs/f2fs/data.c
14338 +@@ -116,6 +116,7 @@ struct bio_post_read_ctx {
14339 + struct f2fs_sb_info *sbi;
14340 + struct work_struct work;
14341 + unsigned int enabled_steps;
14342 ++ block_t fs_blkaddr;
14343 + };
14344 +
14345 + static void f2fs_finish_read_bio(struct bio *bio)
14346 +@@ -228,7 +229,7 @@ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
14347 + struct bio_vec *bv;
14348 + struct bvec_iter_all iter_all;
14349 + bool all_compressed = true;
14350 +- block_t blkaddr = SECTOR_TO_BLOCK(ctx->bio->bi_iter.bi_sector);
14351 ++ block_t blkaddr = ctx->fs_blkaddr;
14352 +
14353 + bio_for_each_segment_all(bv, ctx->bio, iter_all) {
14354 + struct page *page = bv->bv_page;
14355 +@@ -1003,6 +1004,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
14356 + ctx->bio = bio;
14357 + ctx->sbi = sbi;
14358 + ctx->enabled_steps = post_read_steps;
14359 ++ ctx->fs_blkaddr = blkaddr;
14360 + bio->bi_private = ctx;
14361 + }
14362 +
14363 +@@ -1490,7 +1492,21 @@ next_dnode:
14364 + if (err) {
14365 + if (flag == F2FS_GET_BLOCK_BMAP)
14366 + map->m_pblk = 0;
14367 ++
14368 + if (err == -ENOENT) {
14369 ++ /*
14370 ++ * There is one exceptional case that read_node_page()
14371 ++ * may return -ENOENT due to filesystem has been
14372 ++ * shutdown or cp_error, so force to convert error
14373 ++ * number to EIO for such case.
14374 ++ */
14375 ++ if (map->m_may_create &&
14376 ++ (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
14377 ++ f2fs_cp_error(sbi))) {
14378 ++ err = -EIO;
14379 ++ goto unlock_out;
14380 ++ }
14381 ++
14382 + err = 0;
14383 + if (map->m_next_pgofs)
14384 + *map->m_next_pgofs =
14385 +@@ -2137,6 +2153,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
14386 + continue;
14387 + }
14388 + unlock_page(page);
14389 ++ if (for_write)
14390 ++ put_page(page);
14391 + cc->rpages[i] = NULL;
14392 + cc->nr_rpages--;
14393 + }
14394 +@@ -2498,6 +2516,8 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
14395 + return true;
14396 + if (f2fs_is_atomic_file(inode))
14397 + return true;
14398 ++ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
14399 ++ return true;
14400 +
14401 + /* swap file is migrating in aligned write mode */
14402 + if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
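
The f2fs/data.c change above caches the filesystem block address in the bio_post_read_ctx at submission time instead of re-deriving it from bio->bi_iter.bi_sector at completion, since by then the sector may no longer map back to the original block address. For reference, the SECTOR_TO_BLOCK conversion the old code relied on is just a shift, sketched here assuming 512-byte sectors and 4 KiB filesystem blocks:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT	9	/* 512-byte sectors */
#define BLOCK_SHIFT	12	/* 4 KiB blocks */

static uint64_t sector_to_block(uint64_t sector)
{
	return sector >> (BLOCK_SHIFT - SECTOR_SHIFT);
}

int main(void)
{
	/* sector 2048 => block 256 under these sizes */
	printf("block=%llu\n", (unsigned long long)sector_to_block(2048));
	return 0;
}
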
14403 +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
14404 +index 456651682daf4..c250bf46ef5ed 100644
14405 +--- a/fs/f2fs/dir.c
14406 ++++ b/fs/f2fs/dir.c
14407 +@@ -1000,6 +1000,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
14408 + struct f2fs_sb_info *sbi = F2FS_I_SB(d->inode);
14409 + struct blk_plug plug;
14410 + bool readdir_ra = sbi->readdir_ra == 1;
14411 ++ bool found_valid_dirent = false;
14412 + int err = 0;
14413 +
14414 + bit_pos = ((unsigned long)ctx->pos % d->max);
14415 +@@ -1014,13 +1015,15 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
14416 +
14417 + de = &d->dentry[bit_pos];
14418 + if (de->name_len == 0) {
14419 ++ if (found_valid_dirent || !bit_pos) {
14420 ++ printk_ratelimited(
14421 ++ "%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
14422 ++ KERN_WARNING, sbi->sb->s_id,
14423 ++ le32_to_cpu(de->ino));
14424 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
14425 ++ }
14426 + bit_pos++;
14427 + ctx->pos = start_pos + bit_pos;
14428 +- printk_ratelimited(
14429 +- "%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
14430 +- KERN_WARNING, sbi->sb->s_id,
14431 +- le32_to_cpu(de->ino));
14432 +- set_sbi_flag(sbi, SBI_NEED_FSCK);
14433 + continue;
14434 + }
14435 +
14436 +@@ -1063,6 +1066,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
14437 + f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));
14438 +
14439 + ctx->pos = start_pos + bit_pos;
14440 ++ found_valid_dirent = true;
14441 + }
14442 + out:
14443 + if (readdir_ra)
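
The dir.c change above only flags a zero-length dentry as corruption when it
sits in slot 0 or follows a valid entry, so runs of leading holes no longer
spam the log. A stand-alone sketch of that heuristic (the data layout is
invented for illustration, not the on-disk f2fs format):

#include <stdbool.h>
#include <stdio.h>

struct dentry { unsigned name_len; const char *name; };

static void scan(const struct dentry *d, unsigned n)
{
	bool found_valid = false;

	for (unsigned i = 0; i < n; i++) {
		if (d[i].name_len == 0) {
			/* warn for slot 0 or a hole after a valid entry */
			if (found_valid || !i)
				printf("slot %u: invalid namelen(0)\n", i);
			continue;
		}
		printf("slot %u: %s\n", i, d[i].name);
		found_valid = true;
	}
}

int main(void)
{
	struct dentry block[] = {
		{ 0, NULL },	/* flagged: first slot */
		{ 0, NULL },	/* quiet: nothing valid seen yet */
		{ 3, "foo" },
		{ 0, NULL },	/* flagged: follows a valid entry */
	};

	scan(block, 4);
	return 0;
}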
14444 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
14445 +index ee8eb33e2c25c..db95829904e5c 100644
14446 +--- a/fs/f2fs/f2fs.h
14447 ++++ b/fs/f2fs/f2fs.h
14448 +@@ -43,6 +43,7 @@ enum {
14449 + FAULT_KVMALLOC,
14450 + FAULT_PAGE_ALLOC,
14451 + FAULT_PAGE_GET,
14452 ++ FAULT_ALLOC_BIO, /* obsolete, since bio_alloc() will never fail */
14453 + FAULT_ALLOC_NID,
14454 + FAULT_ORPHAN,
14455 + FAULT_BLOCK,
14456 +@@ -4137,7 +4138,8 @@ static inline void set_compress_context(struct inode *inode)
14457 + 1 << COMPRESS_CHKSUM : 0;
14458 + F2FS_I(inode)->i_cluster_size =
14459 + 1 << F2FS_I(inode)->i_log_cluster_size;
14460 +- if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
14461 ++ if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
14462 ++ F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
14463 + F2FS_OPTION(sbi).compress_level)
14464 + F2FS_I(inode)->i_compress_flag |=
14465 + F2FS_OPTION(sbi).compress_level <<
14466 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
14467 +index 97d48c5bdebcb..74f934da825f2 100644
14468 +--- a/fs/f2fs/file.c
14469 ++++ b/fs/f2fs/file.c
14470 +@@ -1084,7 +1084,6 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
14471 + }
14472 +
14473 + if (pg_start < pg_end) {
14474 +- struct address_space *mapping = inode->i_mapping;
14475 + loff_t blk_start, blk_end;
14476 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
14477 +
14478 +@@ -1096,8 +1095,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
14479 + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
14480 + down_write(&F2FS_I(inode)->i_mmap_sem);
14481 +
14482 +- truncate_inode_pages_range(mapping, blk_start,
14483 +- blk_end - 1);
14484 ++ truncate_pagecache_range(inode, blk_start, blk_end - 1);
14485 +
14486 + f2fs_lock_op(sbi);
14487 + ret = f2fs_truncate_hole(inode, pg_start, pg_end);
14488 +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
14489 +index 0e42ee5f77707..70234a7040c88 100644
14490 +--- a/fs/f2fs/gc.c
14491 ++++ b/fs/f2fs/gc.c
14492 +@@ -1497,8 +1497,10 @@ next_step:
14493 + int err;
14494 +
14495 + if (S_ISREG(inode->i_mode)) {
14496 +- if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
14497 ++ if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
14498 ++ sbi->skipped_gc_rwsem++;
14499 + continue;
14500 ++ }
14501 + if (!down_write_trylock(
14502 + &fi->i_gc_rwsem[WRITE])) {
14503 + sbi->skipped_gc_rwsem++;
14504 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
14505 +index 0be9e2d7120e3..1b0fe6e64b7d3 100644
14506 +--- a/fs/f2fs/node.c
14507 ++++ b/fs/f2fs/node.c
14508 +@@ -1321,7 +1321,8 @@ static int read_node_page(struct page *page, int op_flags)
14509 + if (err)
14510 + return err;
14511 +
14512 +- if (unlikely(ni.blk_addr == NULL_ADDR) ||
14513 ++ /* NEW_ADDR can be seen after cp_error drops some dirty node pages */
14514 ++ if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR) ||
14515 + is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
14516 + ClearPageUptodate(page);
14517 + return -ENOENT;
14518 +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
14519 +index 15cc89eef28d6..f9b7fb785e1d7 100644
14520 +--- a/fs/f2fs/segment.c
14521 ++++ b/fs/f2fs/segment.c
14522 +@@ -3563,7 +3563,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
14523 + goto drop_bio;
14524 + }
14525 +
14526 +- if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || f2fs_cp_error(sbi)) {
14527 ++ if (f2fs_cp_error(sbi)) {
14528 + err = -EIO;
14529 + goto drop_bio;
14530 + }
14531 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
14532 +index ce703e6fdafc0..2b093a209ae40 100644
14533 +--- a/fs/f2fs/super.c
14534 ++++ b/fs/f2fs/super.c
14535 +@@ -2071,11 +2071,10 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
14536 + bool need_restart_ckpt = false, need_stop_ckpt = false;
14537 + bool need_restart_flush = false, need_stop_flush = false;
14538 + bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
14539 +- bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
14540 ++ bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
14541 + bool no_io_align = !F2FS_IO_ALIGNED(sbi);
14542 + bool no_atgc = !test_opt(sbi, ATGC);
14543 + bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
14544 +- bool checkpoint_changed;
14545 + #ifdef CONFIG_QUOTA
14546 + int i, j;
14547 + #endif
14548 +@@ -2120,8 +2119,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
14549 + err = parse_options(sb, data, true);
14550 + if (err)
14551 + goto restore_opts;
14552 +- checkpoint_changed =
14553 +- disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);
14554 +
14555 + /*
14556 + * Previous and new state of filesystem is RO,
14557 +@@ -2243,7 +2240,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
14558 + need_stop_flush = true;
14559 + }
14560 +
14561 +- if (checkpoint_changed) {
14562 ++ if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
14563 + if (test_opt(sbi, DISABLE_CHECKPOINT)) {
14564 + err = f2fs_disable_checkpoint(sbi);
14565 + if (err)
14566 +@@ -2527,6 +2524,33 @@ static int f2fs_enable_quotas(struct super_block *sb)
14567 + return 0;
14568 + }
14569 +
14570 ++static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
14571 ++{
14572 ++ struct quota_info *dqopt = sb_dqopt(sbi->sb);
14573 ++ struct address_space *mapping = dqopt->files[type]->i_mapping;
14574 ++ int ret = 0;
14575 ++
14576 ++ ret = dquot_writeback_dquots(sbi->sb, type);
14577 ++ if (ret)
14578 ++ goto out;
14579 ++
14580 ++ ret = filemap_fdatawrite(mapping);
14581 ++ if (ret)
14582 ++ goto out;
14583 ++
14584 ++ /* if we are using journalled quota */
14585 ++ if (is_journalled_quota(sbi))
14586 ++ goto out;
14587 ++
14588 ++ ret = filemap_fdatawait(mapping);
14589 ++
14590 ++ truncate_inode_pages(&dqopt->files[type]->i_data, 0);
14591 ++out:
14592 ++ if (ret)
14593 ++ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
14594 ++ return ret;
14595 ++}
14596 ++
14597 + int f2fs_quota_sync(struct super_block *sb, int type)
14598 + {
14599 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
14600 +@@ -2534,57 +2558,42 @@ int f2fs_quota_sync(struct super_block *sb, int type)
14601 + int cnt;
14602 + int ret;
14603 +
14604 +- /*
14605 +- * do_quotactl
14606 +- * f2fs_quota_sync
14607 +- * down_read(quota_sem)
14608 +- * dquot_writeback_dquots()
14609 +- * f2fs_dquot_commit
14610 +- * block_operation
14611 +- * down_read(quota_sem)
14612 +- */
14613 +- f2fs_lock_op(sbi);
14614 +-
14615 +- down_read(&sbi->quota_sem);
14616 +- ret = dquot_writeback_dquots(sb, type);
14617 +- if (ret)
14618 +- goto out;
14619 +-
14620 + /*
14621 + * Now when everything is written we can discard the pagecache so
14622 + * that userspace sees the changes.
14623 + */
14624 + for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
14625 +- struct address_space *mapping;
14626 +
14627 + if (type != -1 && cnt != type)
14628 + continue;
14629 +- if (!sb_has_quota_active(sb, cnt))
14630 +- continue;
14631 +
14632 +- mapping = dqopt->files[cnt]->i_mapping;
14633 ++ if (!sb_has_quota_active(sb, type))
14634 ++ return 0;
14635 +
14636 +- ret = filemap_fdatawrite(mapping);
14637 +- if (ret)
14638 +- goto out;
14639 ++ inode_lock(dqopt->files[cnt]);
14640 +
14641 +- /* if we are using journalled quota */
14642 +- if (is_journalled_quota(sbi))
14643 +- continue;
14644 ++ /*
14645 ++ * do_quotactl
14646 ++ * f2fs_quota_sync
14647 ++ * down_read(quota_sem)
14648 ++ * dquot_writeback_dquots()
14649 ++ * f2fs_dquot_commit
14650 ++ * block_operation
14651 ++ * down_read(quota_sem)
14652 ++ */
14653 ++ f2fs_lock_op(sbi);
14654 ++ down_read(&sbi->quota_sem);
14655 +
14656 +- ret = filemap_fdatawait(mapping);
14657 +- if (ret)
14658 +- set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
14659 ++ ret = f2fs_quota_sync_file(sbi, cnt);
14660 ++
14661 ++ up_read(&sbi->quota_sem);
14662 ++ f2fs_unlock_op(sbi);
14663 +
14664 +- inode_lock(dqopt->files[cnt]);
14665 +- truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
14666 + inode_unlock(dqopt->files[cnt]);
14667 ++
14668 ++ if (ret)
14669 ++ break;
14670 + }
14671 +-out:
14672 +- if (ret)
14673 +- set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
14674 +- up_read(&sbi->quota_sem);
14675 +- f2fs_unlock_op(sbi);
14676 + return ret;
14677 + }
14678 +
14679 +@@ -3217,11 +3226,13 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
14680 + return -EFSCORRUPTED;
14681 + }
14682 +
14683 +- if (le32_to_cpu(raw_super->cp_payload) >
14684 +- (blocks_per_seg - F2FS_CP_PACKS)) {
14685 +- f2fs_info(sbi, "Insane cp_payload (%u > %u)",
14686 ++ if (le32_to_cpu(raw_super->cp_payload) >=
14687 ++ (blocks_per_seg - F2FS_CP_PACKS -
14688 ++ NR_CURSEG_PERSIST_TYPE)) {
14689 ++ f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
14690 + le32_to_cpu(raw_super->cp_payload),
14691 +- blocks_per_seg - F2FS_CP_PACKS);
14692 ++ blocks_per_seg - F2FS_CP_PACKS -
14693 ++ NR_CURSEG_PERSIST_TYPE);
14694 + return -EFSCORRUPTED;
14695 + }
14696 +
14697 +@@ -3257,6 +3268,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
14698 + unsigned int cp_pack_start_sum, cp_payload;
14699 + block_t user_block_count, valid_user_blocks;
14700 + block_t avail_node_count, valid_node_count;
14701 ++ unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
14702 + int i, j;
14703 +
14704 + total = le32_to_cpu(raw_super->segment_count);
14705 +@@ -3387,6 +3399,17 @@ skip_cross:
14706 + return 1;
14707 + }
14708 +
14709 ++ nat_blocks = nat_segs << log_blocks_per_seg;
14710 ++ nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
14711 ++ nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
14712 ++ if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
14713 ++ (cp_payload + F2FS_CP_PACKS +
14714 ++ NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
14715 ++ f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u",
14716 ++ cp_payload, nat_bits_blocks);
14717 ++ return -EFSCORRUPTED;
14718 ++ }
14719 ++
14720 + if (unlikely(f2fs_cp_error(sbi))) {
14721 + f2fs_err(sbi, "A bug case: need to run fsck");
14722 + return 1;
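
The f2fs_quota_sync() rework above syncs one quota file at a time and takes
the quota inode's lock before the checkpoint op lock, matching the order the
writeback path uses and avoiding the deadlock described in the relocated
comment. A loose pthread analogue of that ordering (stand-in names, not the
kernel API):

#include <pthread.h>
#include <stdio.h>

#define MAXQUOTAS 3

static pthread_mutex_t inode_lock[MAXQUOTAS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER
};
static pthread_mutex_t op_lock = PTHREAD_MUTEX_INITIALIZER;

static int sync_one_quota_file(int type)
{
	printf("writeback quota file %d\n", type);
	return 0;	/* would flush dquots and the page cache here */
}

static int quota_sync(int type)
{
	int ret = 0;

	for (int cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;

		/* inode lock first, op lock second, per file */
		pthread_mutex_lock(&inode_lock[cnt]);
		pthread_mutex_lock(&op_lock);
		ret = sync_one_quota_file(cnt);
		pthread_mutex_unlock(&op_lock);
		pthread_mutex_unlock(&inode_lock[cnt]);
		if (ret)
			break;
	}
	return ret;
}

int main(void)
{
	return quota_sync(-1);	/* -1: all quota types */
}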
14723 +diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
14724 +index 751bc5b1cddf9..6104f627cc712 100644
14725 +--- a/fs/fscache/cookie.c
14726 ++++ b/fs/fscache/cookie.c
14727 +@@ -74,10 +74,8 @@ void fscache_free_cookie(struct fscache_cookie *cookie)
14728 + static int fscache_set_key(struct fscache_cookie *cookie,
14729 + const void *index_key, size_t index_key_len)
14730 + {
14731 +- unsigned long long h;
14732 + u32 *buf;
14733 + int bufs;
14734 +- int i;
14735 +
14736 + bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));
14737 +
14738 +@@ -91,17 +89,7 @@ static int fscache_set_key(struct fscache_cookie *cookie,
14739 + }
14740 +
14741 + memcpy(buf, index_key, index_key_len);
14742 +-
14743 +- /* Calculate a hash and combine this with the length in the first word
14744 +- * or first half word
14745 +- */
14746 +- h = (unsigned long)cookie->parent;
14747 +- h += index_key_len + cookie->type;
14748 +-
14749 +- for (i = 0; i < bufs; i++)
14750 +- h += buf[i];
14751 +-
14752 +- cookie->key_hash = h ^ (h >> 32);
14753 ++ cookie->key_hash = fscache_hash(0, buf, bufs);
14754 + return 0;
14755 + }
14756 +
14757 +diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
14758 +index c483863b740ad..aee639d980bad 100644
14759 +--- a/fs/fscache/internal.h
14760 ++++ b/fs/fscache/internal.h
14761 +@@ -97,6 +97,8 @@ extern struct workqueue_struct *fscache_object_wq;
14762 + extern struct workqueue_struct *fscache_op_wq;
14763 + DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
14764 +
14765 ++extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n);
14766 ++
14767 + static inline bool fscache_object_congested(void)
14768 + {
14769 + return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
14770 +diff --git a/fs/fscache/main.c b/fs/fscache/main.c
14771 +index c1e6cc9091aac..4207f98e405fd 100644
14772 +--- a/fs/fscache/main.c
14773 ++++ b/fs/fscache/main.c
14774 +@@ -93,6 +93,45 @@ static struct ctl_table fscache_sysctls_root[] = {
14775 + };
14776 + #endif
14777 +
14778 ++/*
14779 ++ * Mixing scores (in bits) for (7,20):
14780 ++ * Input delta: 1-bit 2-bit
14781 ++ * 1 round: 330.3 9201.6
14782 ++ * 2 rounds: 1246.4 25475.4
14783 ++ * 3 rounds: 1907.1 31295.1
14784 ++ * 4 rounds: 2042.3 31718.6
14785 ++ * Perfect: 2048 31744
14786 ++ * (32*64) (32*31/2 * 64)
14787 ++ */
14788 ++#define HASH_MIX(x, y, a) \
14789 ++ ( x ^= (a), \
14790 ++ y ^= x, x = rol32(x, 7),\
14791 ++ x += y, y = rol32(y,20),\
14792 ++ y *= 9 )
14793 ++
14794 ++static inline unsigned int fold_hash(unsigned long x, unsigned long y)
14795 ++{
14796 ++ /* Use arch-optimized multiply if one exists */
14797 ++ return __hash_32(y ^ __hash_32(x));
14798 ++}
14799 ++
14800 ++/*
14801 ++ * Generate a hash. This is derived from full_name_hash(), but we want to be
14802 ++ * sure it is arch independent and that it doesn't change as bits of the
14803 ++ * computed hash value might appear on disk. The caller also guarantees that
14804 ++ * the hashed data will be a series of aligned 32-bit words.
14805 ++ */
14806 ++unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n)
14807 ++{
14808 ++ unsigned int a, x = 0, y = salt;
14809 ++
14810 ++ for (; n; n--) {
14811 ++ a = *data++;
14812 ++ HASH_MIX(x, y, a);
14813 ++ }
14814 ++ return fold_hash(x, y);
14815 ++}
14816 ++
14817 + /*
14818 + * initialise the fs caching module
14819 + */
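
The new fscache_hash() above exists so the on-disk hash never varies by
architecture or word size. Its mixing rounds can be reproduced in user
space; only the scaffolding below (GOLDEN_RATIO_32 taken from the kernel's
<linux/hash.h>, the demo main()) is added for illustration:

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u

static inline uint32_t rol32(uint32_t x, unsigned n)
{
	return (x << n) | (x >> (32 - n));
}

static inline uint32_t hash32(uint32_t x)	/* kernel __hash_32() */
{
	return x * GOLDEN_RATIO_32;
}

#define HASH_MIX(x, y, a) \
	( x ^= (a), \
	  y ^= x, x = rol32(x, 7), \
	  x += y, y = rol32(y, 20), \
	  y *= 9 )

static uint32_t wordhash(uint32_t salt, const uint32_t *data, unsigned n)
{
	uint32_t x = 0, y = salt;

	while (n--)
		HASH_MIX(x, y, *data++);
	return hash32(y ^ hash32(x));		/* fold_hash() */
}

int main(void)
{
	uint32_t key[2] = { 0xdeadbeef, 0x12345678 };

	/* same input, same value, on any architecture */
	printf("%08x\n", wordhash(0, key, 2));
	return 0;
}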
14820 +diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
14821 +index 54d3fbeb3002f..384565d63eea8 100644
14822 +--- a/fs/gfs2/glops.c
14823 ++++ b/fs/gfs2/glops.c
14824 +@@ -610,16 +610,13 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl)
14825 + j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
14826 +
14827 + error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
14828 +- if (error)
14829 +- gfs2_consist(sdp);
14830 +- if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
14831 +- gfs2_consist(sdp);
14832 +-
14833 +- /* Initialize some head of the log stuff */
14834 +- if (!gfs2_withdrawn(sdp)) {
14835 +- sdp->sd_log_sequence = head.lh_sequence + 1;
14836 +- gfs2_log_pointers_init(sdp, head.lh_blkno);
14837 +- }
14838 ++ if (gfs2_assert_withdraw_delayed(sdp, !error))
14839 ++ return error;
14840 ++ if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
14841 ++ GFS2_LOG_HEAD_UNMOUNT))
14842 ++ return -EIO;
14843 ++ sdp->sd_log_sequence = head.lh_sequence + 1;
14844 ++ gfs2_log_pointers_init(sdp, head.lh_blkno);
14845 + }
14846 + return 0;
14847 + }
14848 +diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
14849 +index dac040162ecc1..50578f881e6de 100644
14850 +--- a/fs/gfs2/lock_dlm.c
14851 ++++ b/fs/gfs2/lock_dlm.c
14852 +@@ -299,6 +299,11 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
14853 + gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
14854 + gfs2_update_request_times(gl);
14855 +
14856 ++ /* don't want to call dlm if we've unmounted the lock protocol */
14857 ++ if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
14858 ++ gfs2_glock_free(gl);
14859 ++ return;
14860 ++ }
14861 + /* don't want to skip dlm_unlock writing the lvb when lock has one */
14862 +
14863 + if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
14864 +diff --git a/fs/io-wq.c b/fs/io-wq.c
14865 +index 2cc7f75ff24d7..cb5d84f6b7693 100644
14866 +--- a/fs/io-wq.c
14867 ++++ b/fs/io-wq.c
14868 +@@ -236,9 +236,9 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
14869 + * We need a worker. If we find a free one, we're good. If not, and we're
14870 + * below the max number of workers, create one.
14871 + */
14872 +-static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
14873 ++static void io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
14874 + {
14875 +- bool ret;
14876 ++ bool do_create = false, first = false;
14877 +
14878 + /*
14879 + * Most likely an attempt to queue unbounded work on an io_wq that
14880 +@@ -247,25 +247,18 @@ static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
14881 + if (unlikely(!acct->max_workers))
14882 + pr_warn_once("io-wq is not configured for unbound workers");
14883 +
14884 +- rcu_read_lock();
14885 +- ret = io_wqe_activate_free_worker(wqe);
14886 +- rcu_read_unlock();
14887 +-
14888 +- if (!ret) {
14889 +- bool do_create = false, first = false;
14890 +-
14891 +- raw_spin_lock_irq(&wqe->lock);
14892 +- if (acct->nr_workers < acct->max_workers) {
14893 +- atomic_inc(&acct->nr_running);
14894 +- atomic_inc(&wqe->wq->worker_refs);
14895 +- if (!acct->nr_workers)
14896 +- first = true;
14897 +- acct->nr_workers++;
14898 +- do_create = true;
14899 +- }
14900 +- raw_spin_unlock_irq(&wqe->lock);
14901 +- if (do_create)
14902 +- create_io_worker(wqe->wq, wqe, acct->index, first);
14903 ++ raw_spin_lock_irq(&wqe->lock);
14904 ++ if (acct->nr_workers < acct->max_workers) {
14905 ++ if (!acct->nr_workers)
14906 ++ first = true;
14907 ++ acct->nr_workers++;
14908 ++ do_create = true;
14909 ++ }
14910 ++ raw_spin_unlock_irq(&wqe->lock);
14911 ++ if (do_create) {
14912 ++ atomic_inc(&acct->nr_running);
14913 ++ atomic_inc(&wqe->wq->worker_refs);
14914 ++ create_io_worker(wqe->wq, wqe, acct->index, first);
14915 + }
14916 + }
14917 +
14918 +@@ -793,7 +786,8 @@ append:
14919 + static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
14920 + {
14921 + struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
14922 +- int work_flags;
14923 ++ unsigned work_flags = work->flags;
14924 ++ bool do_create;
14925 + unsigned long flags;
14926 +
14927 + /*
14928 +@@ -806,15 +800,19 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
14929 + return;
14930 + }
14931 +
14932 +- work_flags = work->flags;
14933 + raw_spin_lock_irqsave(&wqe->lock, flags);
14934 + io_wqe_insert_work(wqe, work);
14935 + wqe->flags &= ~IO_WQE_FLAG_STALLED;
14936 ++
14937 ++ rcu_read_lock();
14938 ++ do_create = !io_wqe_activate_free_worker(wqe);
14939 ++ rcu_read_unlock();
14940 ++
14941 + raw_spin_unlock_irqrestore(&wqe->lock, flags);
14942 +
14943 +- if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
14944 +- !atomic_read(&acct->nr_running))
14945 +- io_wqe_wake_worker(wqe, acct);
14946 ++ if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
14947 ++ !atomic_read(&acct->nr_running)))
14948 ++ io_wqe_create_worker(wqe, acct);
14949 + }
14950 +
14951 + void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
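
Two things move in the io-wq hunks above: the free-worker lookup now happens
in io_wqe_enqueue() under the same lock that inserts the work, and
io_wqe_create_worker() bumps nr_running and the wq refcount only after the
capacity check has committed to creating a worker. A condensed sketch of the
second pattern (stand-in types, not the kernel structures):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct acct {
	pthread_mutex_t lock;
	int nr_workers, max_workers;
	atomic_int nr_running, refs;
};

static void create_worker_if_needed(struct acct *a)
{
	bool do_create = false;

	pthread_mutex_lock(&a->lock);
	if (a->nr_workers < a->max_workers) {
		a->nr_workers++;
		do_create = true;
	}
	pthread_mutex_unlock(&a->lock);

	if (do_create) {
		/* commit the references only once creation is certain,
		 * so a failed check leaves nothing to unwind */
		atomic_fetch_add(&a->nr_running, 1);
		atomic_fetch_add(&a->refs, 1);
		puts("spawning worker");
	}
}

int main(void)
{
	struct acct a = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.max_workers = 1,
	};

	create_worker_if_needed(&a);	/* spawns */
	create_worker_if_needed(&a);	/* at capacity: no side effects */
	return 0;
}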
14952 +diff --git a/fs/io_uring.c b/fs/io_uring.c
14953 +index 14bebc62db2d4..c5d4638f6d7fd 100644
14954 +--- a/fs/io_uring.c
14955 ++++ b/fs/io_uring.c
14956 +@@ -3484,7 +3484,7 @@ static int io_renameat_prep(struct io_kiocb *req,
14957 +
14958 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
14959 + return -EINVAL;
14960 +- if (sqe->ioprio || sqe->buf_index)
14961 ++ if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
14962 + return -EINVAL;
14963 + if (unlikely(req->flags & REQ_F_FIXED_FILE))
14964 + return -EBADF;
14965 +@@ -3535,7 +3535,8 @@ static int io_unlinkat_prep(struct io_kiocb *req,
14966 +
14967 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
14968 + return -EINVAL;
14969 +- if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
14970 ++ if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
14971 ++ sqe->splice_fd_in)
14972 + return -EINVAL;
14973 + if (unlikely(req->flags & REQ_F_FIXED_FILE))
14974 + return -EBADF;
14975 +@@ -3581,8 +3582,8 @@ static int io_shutdown_prep(struct io_kiocb *req,
14976 + #if defined(CONFIG_NET)
14977 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
14978 + return -EINVAL;
14979 +- if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
14980 +- sqe->buf_index)
14981 ++ if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
14982 ++ sqe->buf_index || sqe->splice_fd_in))
14983 + return -EINVAL;
14984 +
14985 + req->shutdown.how = READ_ONCE(sqe->len);
14986 +@@ -3730,7 +3731,8 @@ static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
14987 +
14988 + if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
14989 + return -EINVAL;
14990 +- if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
14991 ++ if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
14992 ++ sqe->splice_fd_in))
14993 + return -EINVAL;
14994 +
14995 + req->sync.flags = READ_ONCE(sqe->fsync_flags);
14996 +@@ -3763,7 +3765,8 @@ static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
14997 + static int io_fallocate_prep(struct io_kiocb *req,
14998 + const struct io_uring_sqe *sqe)
14999 + {
15000 +- if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
15001 ++ if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
15002 ++ sqe->splice_fd_in)
15003 + return -EINVAL;
15004 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
15005 + return -EINVAL;
15006 +@@ -3794,7 +3797,7 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
15007 + const char __user *fname;
15008 + int ret;
15009 +
15010 +- if (unlikely(sqe->ioprio || sqe->buf_index))
15011 ++ if (unlikely(sqe->ioprio || sqe->buf_index || sqe->splice_fd_in))
15012 + return -EINVAL;
15013 + if (unlikely(req->flags & REQ_F_FIXED_FILE))
15014 + return -EBADF;
15015 +@@ -3918,7 +3921,8 @@ static int io_remove_buffers_prep(struct io_kiocb *req,
15016 + struct io_provide_buf *p = &req->pbuf;
15017 + u64 tmp;
15018 +
15019 +- if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
15020 ++ if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
15021 ++ sqe->splice_fd_in)
15022 + return -EINVAL;
15023 +
15024 + tmp = READ_ONCE(sqe->fd);
15025 +@@ -3989,7 +3993,7 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
15026 + struct io_provide_buf *p = &req->pbuf;
15027 + u64 tmp;
15028 +
15029 +- if (sqe->ioprio || sqe->rw_flags)
15030 ++ if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
15031 + return -EINVAL;
15032 +
15033 + tmp = READ_ONCE(sqe->fd);
15034 +@@ -4076,7 +4080,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
15035 + const struct io_uring_sqe *sqe)
15036 + {
15037 + #if defined(CONFIG_EPOLL)
15038 +- if (sqe->ioprio || sqe->buf_index)
15039 ++ if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
15040 + return -EINVAL;
15041 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
15042 + return -EINVAL;
15043 +@@ -4122,7 +4126,7 @@ static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
15044 + static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
15045 + {
15046 + #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
15047 +- if (sqe->ioprio || sqe->buf_index || sqe->off)
15048 ++ if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
15049 + return -EINVAL;
15050 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
15051 + return -EINVAL;
15052 +@@ -4157,7 +4161,7 @@ static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
15053 +
15054 + static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
15055 + {
15056 +- if (sqe->ioprio || sqe->buf_index || sqe->addr)
15057 ++ if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
15058 + return -EINVAL;
15059 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
15060 + return -EINVAL;
15061 +@@ -4195,7 +4199,7 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
15062 + {
15063 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
15064 + return -EINVAL;
15065 +- if (sqe->ioprio || sqe->buf_index)
15066 ++ if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
15067 + return -EINVAL;
15068 + if (req->flags & REQ_F_FIXED_FILE)
15069 + return -EBADF;
15070 +@@ -4231,7 +4235,7 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
15071 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
15072 + return -EINVAL;
15073 + if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
15074 +- sqe->rw_flags || sqe->buf_index)
15075 ++ sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
15076 + return -EINVAL;
15077 + if (req->flags & REQ_F_FIXED_FILE)
15078 + return -EBADF;
15079 +@@ -4292,7 +4296,8 @@ static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
15080 +
15081 + if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
15082 + return -EINVAL;
15083 +- if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
15084 ++ if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
15085 ++ sqe->splice_fd_in))
15086 + return -EINVAL;
15087 +
15088 + req->sync.off = READ_ONCE(sqe->off);
15089 +@@ -4719,7 +4724,7 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
15090 +
15091 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
15092 + return -EINVAL;
15093 +- if (sqe->ioprio || sqe->len || sqe->buf_index)
15094 ++ if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->splice_fd_in)
15095 + return -EINVAL;
15096 +
15097 + accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
15098 +@@ -4767,7 +4772,8 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
15099 +
15100 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
15101 + return -EINVAL;
15102 +- if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
15103 ++ if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
15104 ++ sqe->splice_fd_in)
15105 + return -EINVAL;
15106 +
15107 + conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
15108 +@@ -5375,7 +5381,7 @@ static int io_poll_update_prep(struct io_kiocb *req,
15109 +
15110 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
15111 + return -EINVAL;
15112 +- if (sqe->ioprio || sqe->buf_index)
15113 ++ if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
15114 + return -EINVAL;
15115 + flags = READ_ONCE(sqe->len);
15116 + if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
15117 +@@ -5610,7 +5616,7 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
15118 + return -EINVAL;
15119 + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
15120 + return -EINVAL;
15121 +- if (sqe->ioprio || sqe->buf_index || sqe->len)
15122 ++ if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
15123 + return -EINVAL;
15124 +
15125 + tr->addr = READ_ONCE(sqe->addr);
15126 +@@ -5669,7 +5675,8 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
15127 +
15128 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
15129 + return -EINVAL;
15130 +- if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
15131 ++ if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
15132 ++ sqe->splice_fd_in)
15133 + return -EINVAL;
15134 + if (off && is_timeout_link)
15135 + return -EINVAL;
15136 +@@ -5820,7 +5827,8 @@ static int io_async_cancel_prep(struct io_kiocb *req,
15137 + return -EINVAL;
15138 + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
15139 + return -EINVAL;
15140 +- if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
15141 ++ if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
15142 ++ sqe->splice_fd_in)
15143 + return -EINVAL;
15144 +
15145 + req->cancel.addr = READ_ONCE(sqe->addr);
15146 +@@ -5877,7 +5885,7 @@ static int io_rsrc_update_prep(struct io_kiocb *req,
15147 + {
15148 + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
15149 + return -EINVAL;
15150 +- if (sqe->ioprio || sqe->rw_flags)
15151 ++ if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
15152 + return -EINVAL;
15153 +
15154 + req->rsrc_update.offset = READ_ONCE(sqe->off);
15155 +@@ -6302,6 +6310,7 @@ static void io_wq_submit_work(struct io_wq_work *work)
15156 + if (timeout)
15157 + io_queue_linked_timeout(timeout);
15158 +
15159 ++ /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
15160 + if (work->flags & IO_WQ_WORK_CANCEL)
15161 + ret = -ECANCELED;
15162 +
15163 +@@ -7126,14 +7135,14 @@ static void **io_alloc_page_table(size_t size)
15164 + size_t init_size = size;
15165 + void **table;
15166 +
15167 +- table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL);
15168 ++ table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
15169 + if (!table)
15170 + return NULL;
15171 +
15172 + for (i = 0; i < nr_tables; i++) {
15173 + unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
15174 +
15175 +- table[i] = kzalloc(this_size, GFP_KERNEL);
15176 ++ table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
15177 + if (!table[i]) {
15178 + io_free_page_table(table, init_size);
15179 + return NULL;
15180 +@@ -9129,8 +9138,8 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
15181 + * Must be after io_uring_del_task_file() (removes nodes under
15182 + * uring_lock) to avoid race with io_uring_try_cancel_iowq().
15183 + */
15184 +- tctx->io_wq = NULL;
15185 + io_wq_put_and_exit(wq);
15186 ++ tctx->io_wq = NULL;
15187 + }
15188 + }
15189 +
15190 +diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
15191 +index 87ccb3438becd..b06138c6190be 100644
15192 +--- a/fs/iomap/buffered-io.c
15193 ++++ b/fs/iomap/buffered-io.c
15194 +@@ -1016,7 +1016,7 @@ iomap_finish_page_writeback(struct inode *inode, struct page *page,
15195 +
15196 + if (error) {
15197 + SetPageError(page);
15198 +- mapping_set_error(inode->i_mapping, -EIO);
15199 ++ mapping_set_error(inode->i_mapping, error);
15200 + }
15201 +
15202 + WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
15203 +diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
15204 +index 498cb70c2c0d0..273a81971ed57 100644
15205 +--- a/fs/lockd/svclock.c
15206 ++++ b/fs/lockd/svclock.c
15207 +@@ -395,28 +395,10 @@ nlmsvc_release_lockowner(struct nlm_lock *lock)
15208 + nlmsvc_put_lockowner(lock->fl.fl_owner);
15209 + }
15210 +
15211 +-static void nlmsvc_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
15212 +-{
15213 +- struct nlm_lockowner *nlm_lo = (struct nlm_lockowner *)fl->fl_owner;
15214 +- new->fl_owner = nlmsvc_get_lockowner(nlm_lo);
15215 +-}
15216 +-
15217 +-static void nlmsvc_locks_release_private(struct file_lock *fl)
15218 +-{
15219 +- nlmsvc_put_lockowner((struct nlm_lockowner *)fl->fl_owner);
15220 +-}
15221 +-
15222 +-static const struct file_lock_operations nlmsvc_lock_ops = {
15223 +- .fl_copy_lock = nlmsvc_locks_copy_lock,
15224 +- .fl_release_private = nlmsvc_locks_release_private,
15225 +-};
15226 +-
15227 + void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
15228 + pid_t pid)
15229 + {
15230 + fl->fl_owner = nlmsvc_find_lockowner(host, pid);
15231 +- if (fl->fl_owner != NULL)
15232 +- fl->fl_ops = &nlmsvc_lock_ops;
15233 + }
15234 +
15235 + /*
15236 +@@ -788,9 +770,21 @@ nlmsvc_notify_blocked(struct file_lock *fl)
15237 + printk(KERN_WARNING "lockd: notification for unknown block!\n");
15238 + }
15239 +
15240 ++static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
15241 ++{
15242 ++ return nlmsvc_get_lockowner(owner);
15243 ++}
15244 ++
15245 ++static void nlmsvc_put_owner(fl_owner_t owner)
15246 ++{
15247 ++ nlmsvc_put_lockowner(owner);
15248 ++}
15249 ++
15250 + const struct lock_manager_operations nlmsvc_lock_operations = {
15251 + .lm_notify = nlmsvc_notify_blocked,
15252 + .lm_grant = nlmsvc_grant_deferred,
15253 ++ .lm_get_owner = nlmsvc_get_owner,
15254 ++ .lm_put_owner = nlmsvc_put_owner,
15255 + };
15256 +
15257 + /*
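
The lockd change above moves owner reference counting from per-lock
file_lock_operations to the lock manager's lm_get_owner/lm_put_owner hooks,
so every copy of a lock shares one refcounted owner. A minimal sketch of
that ownership pattern (illustrative user-space code, not the kernel
interfaces):

#include <stdio.h>

struct owner { int refcount; int pid; };

static struct owner *get_owner(struct owner *o)	/* ~ lm_get_owner */
{
	o->refcount++;
	return o;
}

static void put_owner(struct owner *o)		/* ~ lm_put_owner */
{
	if (--o->refcount == 0)
		printf("owner for pid %d freed\n", o->pid);
}

struct lock { struct owner *owner; };

static void copy_lock(struct lock *dst, struct lock *src)
{
	dst->owner = get_owner(src->owner);
}

static void release_lock(struct lock *l)
{
	put_owner(l->owner);
}

int main(void)
{
	struct owner o = { .refcount = 1, .pid = 42 };
	struct lock a = { .owner = &o }, b;

	copy_lock(&b, &a);
	release_lock(&a);
	release_lock(&b);	/* last put drops the owner */
	return 0;
}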
15258 +diff --git a/fs/nfs/export.c b/fs/nfs/export.c
15259 +index 37a1a88df7717..d772c20bbfd15 100644
15260 +--- a/fs/nfs/export.c
15261 ++++ b/fs/nfs/export.c
15262 +@@ -180,5 +180,5 @@ const struct export_operations nfs_export_ops = {
15263 + .fetch_iversion = nfs_fetch_iversion,
15264 + .flags = EXPORT_OP_NOWCC|EXPORT_OP_NOSUBTREECHK|
15265 + EXPORT_OP_CLOSE_BEFORE_UNLINK|EXPORT_OP_REMOTE_FS|
15266 +- EXPORT_OP_NOATOMIC_ATTR,
15267 ++ EXPORT_OP_NOATOMIC_ATTR|EXPORT_OP_SYNC_LOCKS,
15268 + };
15269 +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
15270 +index ef14ea0b6ab8d..51049499e98ff 100644
15271 +--- a/fs/nfs/pnfs.c
15272 ++++ b/fs/nfs/pnfs.c
15273 +@@ -335,7 +335,7 @@ static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
15274 +
15275 + static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
15276 + {
15277 +- if (pnfs_seqid_is_newer(newseq, lo->plh_barrier))
15278 ++ if (pnfs_seqid_is_newer(newseq, lo->plh_barrier) || !lo->plh_barrier)
15279 + lo->plh_barrier = newseq;
15280 + }
15281 +
15282 +@@ -347,11 +347,15 @@ pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
15283 + iomode = IOMODE_ANY;
15284 + lo->plh_return_iomode = iomode;
15285 + set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
15286 +- if (seq != 0) {
15287 +- WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
15288 ++ /*
15289 ++ * We must set lo->plh_return_seq to avoid livelocks with
15290 ++ * pnfs_layout_need_return()
15291 ++ */
15292 ++ if (seq == 0)
15293 ++ seq = be32_to_cpu(lo->plh_stateid.seqid);
15294 ++ if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
15295 + lo->plh_return_seq = seq;
15296 +- pnfs_barrier_update(lo, seq);
15297 +- }
15298 ++ pnfs_barrier_update(lo, seq);
15299 + }
15300 +
15301 + static void
15302 +@@ -1000,7 +1004,7 @@ pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
15303 + {
15304 + u32 seqid = be32_to_cpu(stateid->seqid);
15305 +
15306 +- return !pnfs_seqid_is_newer(seqid, lo->plh_barrier) && lo->plh_barrier;
15307 ++ return lo->plh_barrier && pnfs_seqid_is_newer(lo->plh_barrier, seqid);
15308 + }
15309 +
15310 + /* lget is set to 1 if called from inside send_layoutget call chain */
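
The pnfs barrier fixes above lean on pnfs_seqid_is_newer(), which compares
layout stateid sequence numbers so that ordering survives u32 wraparound. A
self-contained sketch of that comparison (same idea, user-space names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seqid_is_newer(uint32_t s1, uint32_t s2)
{
	/* the signed difference handles wraparound: 1 is "newer"
	 * than 0xffffffff even though it is numerically smaller */
	return (int32_t)(s1 - s2) > 0;
}

int main(void)
{
	printf("%d\n", seqid_is_newer(5, 3));		/* 1 */
	printf("%d\n", seqid_is_newer(1, 0xffffffffu));	/* 1: wrapped */
	printf("%d\n", seqid_is_newer(3, 3));		/* 0 */
	return 0;
}

That is also why the hunks add explicit !lo->plh_barrier checks: zero serves
as a "no barrier set" sentinel that the wrap-safe comparison alone cannot
express.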
15311 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
15312 +index 2bedc7839ec56..3d805f5b1f5d2 100644
15313 +--- a/fs/nfsd/nfs4state.c
15314 ++++ b/fs/nfsd/nfs4state.c
15315 +@@ -6835,6 +6835,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
15316 + struct nfsd4_blocked_lock *nbl = NULL;
15317 + struct file_lock *file_lock = NULL;
15318 + struct file_lock *conflock = NULL;
15319 ++ struct super_block *sb;
15320 + __be32 status = 0;
15321 + int lkflg;
15322 + int err;
15323 +@@ -6856,6 +6857,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
15324 + dprintk("NFSD: nfsd4_lock: permission denied!\n");
15325 + return status;
15326 + }
15327 ++ sb = cstate->current_fh.fh_dentry->d_sb;
15328 +
15329 + if (lock->lk_is_new) {
15330 + if (nfsd4_has_session(cstate))
15331 +@@ -6904,7 +6906,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
15332 + fp = lock_stp->st_stid.sc_file;
15333 + switch (lock->lk_type) {
15334 + case NFS4_READW_LT:
15335 +- if (nfsd4_has_session(cstate))
15336 ++ if (nfsd4_has_session(cstate) &&
15337 ++ !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
15338 + fl_flags |= FL_SLEEP;
15339 + fallthrough;
15340 + case NFS4_READ_LT:
15341 +@@ -6916,7 +6919,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
15342 + fl_type = F_RDLCK;
15343 + break;
15344 + case NFS4_WRITEW_LT:
15345 +- if (nfsd4_has_session(cstate))
15346 ++ if (nfsd4_has_session(cstate) &&
15347 ++ !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
15348 + fl_flags |= FL_SLEEP;
15349 + fallthrough;
15350 + case NFS4_WRITE_LT:
15351 +@@ -7036,8 +7040,7 @@ out:
15352 + /*
15353 + * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
15354 + * so we do a temporary open here just to get an open file to pass to
15355 +- * vfs_test_lock. (Arguably perhaps test_lock should be done with an
15356 +- * inode operation.)
15357 ++ * vfs_test_lock.
15358 + */
15359 + static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
15360 + {
15361 +@@ -7052,7 +7055,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct
15362 + NFSD_MAY_READ));
15363 + if (err)
15364 + goto out;
15365 ++ lock->fl_file = nf->nf_file;
15366 + err = nfserrno(vfs_test_lock(nf->nf_file, lock));
15367 ++ lock->fl_file = NULL;
15368 + out:
15369 + fh_unlock(fhp);
15370 + nfsd_file_put(nf);
15371 +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
15372 +index 93efe7048a771..7c1850adec288 100644
15373 +--- a/fs/overlayfs/dir.c
15374 ++++ b/fs/overlayfs/dir.c
15375 +@@ -542,8 +542,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
15376 + goto out_cleanup;
15377 + }
15378 + err = ovl_instantiate(dentry, inode, newdentry, hardlink);
15379 +- if (err)
15380 +- goto out_cleanup;
15381 ++ if (err) {
15382 ++ ovl_cleanup(udir, newdentry);
15383 ++ dput(newdentry);
15384 ++ }
15385 + out_dput:
15386 + dput(upper);
15387 + out_unlock:
15388 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
15389 +index 5c2d806e6ae53..c830cc4ea60fb 100644
15390 +--- a/fs/userfaultfd.c
15391 ++++ b/fs/userfaultfd.c
15392 +@@ -33,11 +33,6 @@ int sysctl_unprivileged_userfaultfd __read_mostly;
15393 +
15394 + static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
15395 +
15396 +-enum userfaultfd_state {
15397 +- UFFD_STATE_WAIT_API,
15398 +- UFFD_STATE_RUNNING,
15399 +-};
15400 +-
15401 + /*
15402 + * Start with fault_pending_wqh and fault_wqh so they're more likely
15403 + * to be in the same cacheline.
15404 +@@ -69,8 +64,6 @@ struct userfaultfd_ctx {
15405 + unsigned int flags;
15406 + /* features requested from the userspace */
15407 + unsigned int features;
15408 +- /* state machine */
15409 +- enum userfaultfd_state state;
15410 + /* released */
15411 + bool released;
15412 + /* memory mappings are changing because of non-cooperative event */
15413 +@@ -104,6 +97,14 @@ struct userfaultfd_wake_range {
15414 + unsigned long len;
15415 + };
15416 +
15417 ++/* internal indication that UFFD_API ioctl was successfully executed */
15418 ++#define UFFD_FEATURE_INITIALIZED (1u << 31)
15419 ++
15420 ++static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
15421 ++{
15422 ++ return ctx->features & UFFD_FEATURE_INITIALIZED;
15423 ++}
15424 ++
15425 + static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
15426 + int wake_flags, void *key)
15427 + {
15428 +@@ -666,7 +667,6 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
15429 +
15430 + refcount_set(&ctx->refcount, 1);
15431 + ctx->flags = octx->flags;
15432 +- ctx->state = UFFD_STATE_RUNNING;
15433 + ctx->features = octx->features;
15434 + ctx->released = false;
15435 + ctx->mmap_changing = false;
15436 +@@ -943,38 +943,33 @@ static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
15437 +
15438 + poll_wait(file, &ctx->fd_wqh, wait);
15439 +
15440 +- switch (ctx->state) {
15441 +- case UFFD_STATE_WAIT_API:
15442 ++ if (!userfaultfd_is_initialized(ctx))
15443 + return EPOLLERR;
15444 +- case UFFD_STATE_RUNNING:
15445 +- /*
15446 +- * poll() never guarantees that read won't block.
15447 +- * userfaults can be waken before they're read().
15448 +- */
15449 +- if (unlikely(!(file->f_flags & O_NONBLOCK)))
15450 +- return EPOLLERR;
15451 +- /*
15452 +- * lockless access to see if there are pending faults
15453 +- * __pollwait last action is the add_wait_queue but
15454 +- * the spin_unlock would allow the waitqueue_active to
15455 +- * pass above the actual list_add inside
15456 +- * add_wait_queue critical section. So use a full
15457 +- * memory barrier to serialize the list_add write of
15458 +- * add_wait_queue() with the waitqueue_active read
15459 +- * below.
15460 +- */
15461 +- ret = 0;
15462 +- smp_mb();
15463 +- if (waitqueue_active(&ctx->fault_pending_wqh))
15464 +- ret = EPOLLIN;
15465 +- else if (waitqueue_active(&ctx->event_wqh))
15466 +- ret = EPOLLIN;
15467 +
15468 +- return ret;
15469 +- default:
15470 +- WARN_ON_ONCE(1);
15471 ++ /*
15472 ++ * poll() never guarantees that read won't block.
15473 ++ * userfaults can be woken before they're read().
15474 ++ */
15475 ++ if (unlikely(!(file->f_flags & O_NONBLOCK)))
15476 + return EPOLLERR;
15477 +- }
15478 ++ /*
15479 ++ * lockless access to see if there are pending faults
15480 ++ * __pollwait last action is the add_wait_queue but
15481 ++ * the spin_unlock would allow the waitqueue_active to
15482 ++ * pass above the actual list_add inside
15483 ++ * add_wait_queue critical section. So use a full
15484 ++ * memory barrier to serialize the list_add write of
15485 ++ * add_wait_queue() with the waitqueue_active read
15486 ++ * below.
15487 ++ */
15488 ++ ret = 0;
15489 ++ smp_mb();
15490 ++ if (waitqueue_active(&ctx->fault_pending_wqh))
15491 ++ ret = EPOLLIN;
15492 ++ else if (waitqueue_active(&ctx->event_wqh))
15493 ++ ret = EPOLLIN;
15494 ++
15495 ++ return ret;
15496 + }
15497 +
15498 + static const struct file_operations userfaultfd_fops;
15499 +@@ -1169,7 +1164,7 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf,
15500 + int no_wait = file->f_flags & O_NONBLOCK;
15501 + struct inode *inode = file_inode(file);
15502 +
15503 +- if (ctx->state == UFFD_STATE_WAIT_API)
15504 ++ if (!userfaultfd_is_initialized(ctx))
15505 + return -EINVAL;
15506 +
15507 + for (;;) {
15508 +@@ -1908,9 +1903,10 @@ out:
15509 + static inline unsigned int uffd_ctx_features(__u64 user_features)
15510 + {
15511 + /*
15512 +- * For the current set of features the bits just coincide
15513 ++ * For the current set of features the bits just coincide. Set
15514 ++ * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
15515 + */
15516 +- return (unsigned int)user_features;
15517 ++ return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
15518 + }
15519 +
15520 + /*
15521 +@@ -1923,12 +1919,10 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
15522 + {
15523 + struct uffdio_api uffdio_api;
15524 + void __user *buf = (void __user *)arg;
15525 ++ unsigned int ctx_features;
15526 + int ret;
15527 + __u64 features;
15528 +
15529 +- ret = -EINVAL;
15530 +- if (ctx->state != UFFD_STATE_WAIT_API)
15531 +- goto out;
15532 + ret = -EFAULT;
15533 + if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
15534 + goto out;
15535 +@@ -1952,9 +1946,13 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
15536 + ret = -EFAULT;
15537 + if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
15538 + goto out;
15539 +- ctx->state = UFFD_STATE_RUNNING;
15540 ++
15541 + /* only enable the requested features for this uffd context */
15542 +- ctx->features = uffd_ctx_features(features);
15543 ++ ctx_features = uffd_ctx_features(features);
15544 ++ ret = -EINVAL;
15545 ++ if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
15546 ++ goto err_out;
15547 ++
15548 + ret = 0;
15549 + out:
15550 + return ret;
15551 +@@ -1971,7 +1969,7 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
15552 + int ret = -EINVAL;
15553 + struct userfaultfd_ctx *ctx = file->private_data;
15554 +
15555 +- if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
15556 ++ if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
15557 + return -EINVAL;
15558 +
15559 + switch(cmd) {
15560 +@@ -2085,7 +2083,6 @@ SYSCALL_DEFINE1(userfaultfd, int, flags)
15561 + refcount_set(&ctx->refcount, 1);
15562 + ctx->flags = flags;
15563 + ctx->features = 0;
15564 +- ctx->state = UFFD_STATE_WAIT_API;
15565 + ctx->released = false;
15566 + ctx->mmap_changing = false;
15567 + ctx->mm = current->mm;
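
The userfaultfd rework above replaces the two-value state machine with a
single bit: UFFD_FEATURE_INITIALIZED is set by the first successful
UFFDIO_API call, and cmpxchg() guarantees exactly one caller wins a racing
pair. A user-space sketch of that one-shot initialization (names invented
for illustration):

#include <stdatomic.h>
#include <stdio.h>

#define FEATURE_INITIALIZED (1u << 31)

struct ctx {
	_Atomic unsigned int features;
};

static int ctx_api(struct ctx *ctx, unsigned int requested)
{
	unsigned int expected = 0;
	unsigned int val = requested | FEATURE_INITIALIZED;

	/* only the 0 -> initialized transition may succeed; a second
	 * (or concurrent) caller sees non-zero and fails */
	if (!atomic_compare_exchange_strong(&ctx->features, &expected, val))
		return -1;	/* ~ -EINVAL */
	return 0;
}

int main(void)
{
	struct ctx c = { 0 };

	printf("first:  %d\n", ctx_api(&c, 0x7));	/* 0: wins */
	printf("second: %d\n", ctx_api(&c, 0x3));	/* -1: already set */
	return 0;
}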
15568 +diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
15569 +index 47accec68cb0f..f603325c0c30d 100644
15570 +--- a/include/crypto/public_key.h
15571 ++++ b/include/crypto/public_key.h
15572 +@@ -38,9 +38,9 @@ extern void public_key_free(struct public_key *key);
15573 + struct public_key_signature {
15574 + struct asymmetric_key_id *auth_ids[2];
15575 + u8 *s; /* Signature */
15576 +- u32 s_size; /* Number of bytes in signature */
15577 + u8 *digest;
15578 +- u8 digest_size; /* Number of bytes in digest */
15579 ++ u32 s_size; /* Number of bytes in signature */
15580 ++ u32 digest_size; /* Number of bytes in digest */
15581 + const char *pkey_algo;
15582 + const char *hash_algo;
15583 + const char *encoding;
15584 +diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
15585 +index 6bf8b2b789919..f99d3417f3042 100644
15586 +--- a/include/drm/drm_auth.h
15587 ++++ b/include/drm/drm_auth.h
15588 +@@ -107,6 +107,7 @@ struct drm_master {
15589 + };
15590 +
15591 + struct drm_master *drm_master_get(struct drm_master *master);
15592 ++struct drm_master *drm_file_get_master(struct drm_file *file_priv);
15593 + void drm_master_put(struct drm_master **master);
15594 + bool drm_is_current_master(struct drm_file *fpriv);
15595 +
15596 +diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
15597 +index b81b3bfb08c8d..726cfe0ff5f5c 100644
15598 +--- a/include/drm/drm_file.h
15599 ++++ b/include/drm/drm_file.h
15600 +@@ -226,15 +226,27 @@ struct drm_file {
15601 + /**
15602 + * @master:
15603 + *
15604 +- * Master this node is currently associated with. Only relevant if
15605 +- * drm_is_primary_client() returns true. Note that this only
15606 +- * matches &drm_device.master if the master is the currently active one.
15607 ++ * Master this node is currently associated with. Protected by struct
15608 ++ * &drm_device.master_mutex, and serialized by @master_lookup_lock.
15609 ++ *
15610 ++ * Only relevant if drm_is_primary_client() returns true. Note that
15611 ++ * this only matches &drm_device.master if the master is the currently
15612 ++ * active one.
15613 ++ *
15614 ++ * When dereferencing this pointer, either hold struct
15615 ++ * &drm_device.master_mutex for the duration of the pointer's use, or
15616 ++ * use drm_file_get_master() if struct &drm_device.master_mutex is not
15617 ++ * currently held and there is no other need to hold it. This prevents
15618 ++ * @master from being freed during use.
15619 + *
15620 + * See also @authentication and @is_master and the :ref:`section on
15621 + * primary nodes and authentication <drm_primary_node>`.
15622 + */
15623 + struct drm_master *master;
15624 +
15625 ++ /** @master_lookup_lock: Serializes @master. */
15626 ++ spinlock_t master_lookup_lock;
15627 ++
15628 + /** @pid: Process that opened this file. */
15629 + struct pid *pid;
15630 +
15631 +diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
15632 +index 232daaec56e44..4711b96dae0c7 100644
15633 +--- a/include/linux/ethtool.h
15634 ++++ b/include/linux/ethtool.h
15635 +@@ -17,8 +17,6 @@
15636 + #include <linux/compat.h>
15637 + #include <uapi/linux/ethtool.h>
15638 +
15639 +-#ifdef CONFIG_COMPAT
15640 +-
15641 + struct compat_ethtool_rx_flow_spec {
15642 + u32 flow_type;
15643 + union ethtool_flow_union h_u;
15644 +@@ -38,8 +36,6 @@ struct compat_ethtool_rxnfc {
15645 + u32 rule_locs[];
15646 + };
15647 +
15648 +-#endif /* CONFIG_COMPAT */
15649 +-
15650 + #include <linux/rculist.h>
15651 +
15652 + /**
15653 +diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
15654 +index fe848901fcc3a..3260fe7148462 100644
15655 +--- a/include/linux/exportfs.h
15656 ++++ b/include/linux/exportfs.h
15657 +@@ -221,6 +221,8 @@ struct export_operations {
15658 + #define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply
15659 + atomic attribute updates
15660 + */
15661 ++#define EXPORT_OP_SYNC_LOCKS (0x20) /* Filesystem can't do
15662 ++ asynchronous blocking locks */
15663 + unsigned long flags;
15664 + };
15665 +
15666 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
15667 +index f7ca1a3870ea5..1faebe1cd0ed5 100644
15668 +--- a/include/linux/hugetlb.h
15669 ++++ b/include/linux/hugetlb.h
15670 +@@ -858,6 +858,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
15671 +
15672 + void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
15673 +
15674 ++static inline void hugetlb_count_init(struct mm_struct *mm)
15675 ++{
15676 ++ atomic_long_set(&mm->hugetlb_usage, 0);
15677 ++}
15678 ++
15679 + static inline void hugetlb_count_add(long l, struct mm_struct *mm)
15680 + {
15681 + atomic_long_add(l, &mm->hugetlb_usage);
15682 +@@ -1042,6 +1047,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
15683 + return &mm->page_table_lock;
15684 + }
15685 +
15686 ++static inline void hugetlb_count_init(struct mm_struct *mm)
15687 ++{
15688 ++}
15689 ++
15690 + static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
15691 + {
15692 + }
15693 +diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
15694 +index 0b8d1fdda3a11..c137396129db6 100644
15695 +--- a/include/linux/hugetlb_cgroup.h
15696 ++++ b/include/linux/hugetlb_cgroup.h
15697 +@@ -121,6 +121,13 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
15698 + css_put(&h_cg->css);
15699 + }
15700 +
15701 ++static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
15702 ++ struct resv_map *resv_map)
15703 ++{
15704 ++ if (resv_map->css)
15705 ++ css_get(resv_map->css);
15706 ++}
15707 ++
15708 + extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
15709 + struct hugetlb_cgroup **ptr);
15710 + extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
15711 +@@ -199,6 +206,11 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
15712 + {
15713 + }
15714 +
15715 ++static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
15716 ++ struct resv_map *resv_map)
15717 ++{
15718 ++}
15719 ++
15720 + static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
15721 + struct hugetlb_cgroup **ptr)
15722 + {
15723 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
15724 +index d0fa0b31994d0..05a65eb155f76 100644
15725 +--- a/include/linux/intel-iommu.h
15726 ++++ b/include/linux/intel-iommu.h
15727 +@@ -124,9 +124,9 @@
15728 + #define DMAR_MTRR_PHYSMASK8_REG 0x208
15729 + #define DMAR_MTRR_PHYSBASE9_REG 0x210
15730 + #define DMAR_MTRR_PHYSMASK9_REG 0x218
15731 +-#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */
15732 +-#define DMAR_VCMD_REG 0xe10 /* Virtual command register */
15733 +-#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */
15734 ++#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */
15735 ++#define DMAR_VCMD_REG 0xe00 /* Virtual command register */
15736 ++#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */
15737 +
15738 + #define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg)
15739 + #define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg)
15740 +diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
15741 +index a7fd2c3ccb777..d01b504ce06fe 100644
15742 +--- a/include/linux/memory_hotplug.h
15743 ++++ b/include/linux/memory_hotplug.h
15744 +@@ -339,8 +339,8 @@ extern void sparse_remove_section(struct mem_section *ms,
15745 + unsigned long map_offset, struct vmem_altmap *altmap);
15746 + extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
15747 + unsigned long pnum);
15748 +-extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
15749 +- unsigned long nr_pages);
15750 ++extern struct zone *zone_for_pfn_range(int online_type, int nid,
15751 ++ unsigned long start_pfn, unsigned long nr_pages);
15752 + extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
15753 + struct mhp_params *params);
15754 + void arch_remove_linear_mapping(u64 start, u64 size);
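
The prototype fix above matters because a bare `unsigned` silently truncates
page frame numbers once physical addresses pass 16 TiB with 4 KiB pages. A
tiny demonstration of the truncation (assumes a 64-bit build where long is
8 bytes):

#include <stdio.h>

static unsigned long via_uint(unsigned pfn)	   { return pfn; }
static unsigned long via_ulong(unsigned long pfn) { return pfn; }

int main(void)
{
	unsigned long pfn = 0x100000000UL;	/* the 16 TiB mark */

	printf("truncated: %#lx\n", via_uint(pfn));	/* 0 */
	printf("correct:   %#lx\n", via_ulong(pfn));	/* 0x100000000 */
	return 0;
}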
15755 +diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
15756 +index d9680b798b211..955c82b4737c5 100644
15757 +--- a/include/linux/rcupdate.h
15758 ++++ b/include/linux/rcupdate.h
15759 +@@ -167,7 +167,7 @@ void synchronize_rcu_tasks(void);
15760 + # define synchronize_rcu_tasks synchronize_rcu
15761 + # endif
15762 +
15763 +-# ifdef CONFIG_TASKS_RCU_TRACE
15764 ++# ifdef CONFIG_TASKS_TRACE_RCU
15765 + # define rcu_tasks_trace_qs(t) \
15766 + do { \
15767 + if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
15768 +diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
15769 +index d1672de9ca89e..87b325aec5085 100644
15770 +--- a/include/linux/rtmutex.h
15771 ++++ b/include/linux/rtmutex.h
15772 +@@ -52,17 +52,22 @@ do { \
15773 + } while (0)
15774 +
15775 + #ifdef CONFIG_DEBUG_LOCK_ALLOC
15776 +-#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
15777 +- , .dep_map = { .name = #mutexname }
15778 ++#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
15779 ++ .dep_map = { \
15780 ++ .name = #mutexname, \
15781 ++ .wait_type_inner = LD_WAIT_SLEEP, \
15782 ++ }
15783 + #else
15784 + #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
15785 + #endif
15786 +
15787 +-#define __RT_MUTEX_INITIALIZER(mutexname) \
15788 +- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
15789 +- , .waiters = RB_ROOT_CACHED \
15790 +- , .owner = NULL \
15791 +- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
15792 ++#define __RT_MUTEX_INITIALIZER(mutexname) \
15793 ++{ \
15794 ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock), \
15795 ++ .waiters = RB_ROOT_CACHED, \
15796 ++ .owner = NULL, \
15797 ++ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
15798 ++}
15799 +
15800 + #define DEFINE_RT_MUTEX(mutexname) \
15801 + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
15802 +diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
15803 +index c8c39f22d3b16..59cd97da895b7 100644
15804 +--- a/include/linux/sunrpc/xprt.h
15805 ++++ b/include/linux/sunrpc/xprt.h
15806 +@@ -432,6 +432,7 @@ void xprt_release_write(struct rpc_xprt *, struct rpc_task *);
15807 + #define XPRT_CONGESTED (9)
15808 + #define XPRT_CWND_WAIT (10)
15809 + #define XPRT_WRITE_SPACE (11)
15810 ++#define XPRT_SND_IS_COOKIE (12)
15811 +
15812 + static inline void xprt_set_connected(struct rpc_xprt *xprt)
15813 + {
15814 +diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
15815 +index 0da94a6dee15e..b5ab452fca5b5 100644
15816 +--- a/include/linux/vt_kern.h
15817 ++++ b/include/linux/vt_kern.h
15818 +@@ -148,26 +148,26 @@ void hide_boot_cursor(bool hide);
15819 +
15820 + /* keyboard provided interfaces */
15821 + int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm);
15822 +-int vt_do_kdskbmode(int console, unsigned int arg);
15823 +-int vt_do_kdskbmeta(int console, unsigned int arg);
15824 ++int vt_do_kdskbmode(unsigned int console, unsigned int arg);
15825 ++int vt_do_kdskbmeta(unsigned int console, unsigned int arg);
15826 + int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc,
15827 + int perm);
15828 + int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
15829 +- int console);
15830 ++ unsigned int console);
15831 + int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm);
15832 +-int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm);
15833 +-int vt_do_kdgkbmode(int console);
15834 +-int vt_do_kdgkbmeta(int console);
15835 +-void vt_reset_unicode(int console);
15836 ++int vt_do_kdskled(unsigned int console, int cmd, unsigned long arg, int perm);
15837 ++int vt_do_kdgkbmode(unsigned int console);
15838 ++int vt_do_kdgkbmeta(unsigned int console);
15839 ++void vt_reset_unicode(unsigned int console);
15840 + int vt_get_shift_state(void);
15841 +-void vt_reset_keyboard(int console);
15842 +-int vt_get_leds(int console, int flag);
15843 +-int vt_get_kbd_mode_bit(int console, int bit);
15844 +-void vt_set_kbd_mode_bit(int console, int bit);
15845 +-void vt_clr_kbd_mode_bit(int console, int bit);
15846 +-void vt_set_led_state(int console, int leds);
15847 +-void vt_kbd_con_start(int console);
15848 +-void vt_kbd_con_stop(int console);
15849 ++void vt_reset_keyboard(unsigned int console);
15850 ++int vt_get_leds(unsigned int console, int flag);
15851 ++int vt_get_kbd_mode_bit(unsigned int console, int bit);
15852 ++void vt_set_kbd_mode_bit(unsigned int console, int bit);
15853 ++void vt_clr_kbd_mode_bit(unsigned int console, int bit);
15854 ++void vt_set_led_state(unsigned int console, int leds);
15855 ++void vt_kbd_con_start(unsigned int console);
15856 ++void vt_kbd_con_stop(unsigned int console);
15857 +
15858 + void vc_scrolldelta_helper(struct vc_data *c, int lines,
15859 + unsigned int rolled_over, void *_base, unsigned int size);
15860 +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
15861 +index db4312e44d470..c17e5557a0078 100644
15862 +--- a/include/net/bluetooth/hci_core.h
15863 ++++ b/include/net/bluetooth/hci_core.h
15864 +@@ -1412,6 +1412,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
15865 + !hci_dev_test_flag(dev, HCI_AUTO_OFF))
15866 + #define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
15867 + hci_dev_test_flag(dev, HCI_SC_ENABLED))
15868 ++#define rpa_valid(dev) (bacmp(&dev->rpa, BDADDR_ANY) && \
15869 ++ !hci_dev_test_flag(dev, HCI_RPA_EXPIRED))
15870 ++#define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \
15871 ++ !adv->rpa_expired)
15872 +
15873 + #define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
15874 + ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
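
The two macros added above fold the "address set and not yet expired" test into one place. The hci_request.c hunks later in this patch use them exactly like this (sketch assembled from those hunks):

	if (adv_instance) {
		if (adv_rpa_valid(adv_instance))  /* random_addr set, not expired */
			return 0;                 /* keep the current RPA */
	} else {
		if (rpa_valid(hdev))
			return 0;
	}
	/* otherwise fall through and generate a fresh RPA */
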
15875 +diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
15876 +index 1b9d75aedb225..3961461d9c8bc 100644
15877 +--- a/include/net/flow_offload.h
15878 ++++ b/include/net/flow_offload.h
15879 +@@ -451,6 +451,7 @@ struct flow_block_offload {
15880 + struct list_head *driver_block_list;
15881 + struct netlink_ext_ack *extack;
15882 + struct Qdisc *sch;
15883 ++ struct list_head *cb_list_head;
15884 + };
15885 +
15886 + enum tc_setup_type;
15887 +diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
15888 +index b671b1f2ce0fd..3b93509af246c 100644
15889 +--- a/include/trace/events/btrfs.h
15890 ++++ b/include/trace/events/btrfs.h
15891 +@@ -94,6 +94,7 @@ struct btrfs_space_info;
15892 + EM( FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS") \
15893 + EM( FLUSH_DELALLOC, "FLUSH_DELALLOC") \
15894 + EM( FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT") \
15895 ++ EM( FLUSH_DELALLOC_FULL, "FLUSH_DELALLOC_FULL") \
15896 + EM( FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR") \
15897 + EM( FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS") \
15898 + EM( ALLOC_CHUNK, "ALLOC_CHUNK") \
15899 +diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
15900 +index be07b5470f4bb..f51bc8f368134 100644
15901 +--- a/include/uapi/linux/serial_reg.h
15902 ++++ b/include/uapi/linux/serial_reg.h
15903 +@@ -62,6 +62,7 @@
15904 + * ST16C654: 8 16 56 60 8 16 32 56 PORT_16654
15905 + * TI16C750: 1 16 32 56 xx xx xx xx PORT_16750
15906 + * TI16C752: 8 16 56 60 8 16 32 56
15907 ++ * OX16C950: 16 32 112 120 16 32 64 112 PORT_16C950
15908 + * Tegra: 1 4 8 14 16 8 4 1 PORT_TEGRA
15909 + */
15910 + #define UART_FCR_R_TRIG_00 0x00
15911 +diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
15912 +index dadae6255d055..f2faa13534e57 100644
15913 +--- a/kernel/dma/debug.c
15914 ++++ b/kernel/dma/debug.c
15915 +@@ -792,7 +792,7 @@ static int dump_show(struct seq_file *seq, void *v)
15916 + }
15917 + DEFINE_SHOW_ATTRIBUTE(dump);
15918 +
15919 +-static void dma_debug_fs_init(void)
15920 ++static int __init dma_debug_fs_init(void)
15921 + {
15922 + struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
15923 +
15924 +@@ -805,7 +805,10 @@ static void dma_debug_fs_init(void)
15925 + debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
15926 + debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
15927 + debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
15928 ++
15929 ++ return 0;
15930 + }
15931 ++core_initcall_sync(dma_debug_fs_init);
15932 +
15933 + static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
15934 + {
15935 +@@ -890,8 +893,6 @@ static int dma_debug_init(void)
15936 + spin_lock_init(&dma_entry_hash[i].lock);
15937 + }
15938 +
15939 +- dma_debug_fs_init();
15940 +-
15941 + nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
15942 + for (i = 0; i < nr_pages; ++i)
15943 + dma_debug_create_entries(GFP_KERNEL);
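
The dma-debug hunk above moves debugfs file creation out of dma_debug_init(), which runs from early memory setup, into its own initcall. Since debugfs itself is registered from a core_initcall, the _sync level is enough to order the two (this ordering note is inferred from initcall semantics, not spelled out in the patch):

	/* Initcall levels run in order: all core_initcall() callbacks
	 * complete before any core_initcall_sync() callback starts, so
	 * debugfs_init() has finished by the time dma_debug_fs_init()
	 * creates its files. */
	core_initcall_sync(dma_debug_fs_init);
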
15944 +diff --git a/kernel/fork.c b/kernel/fork.c
15945 +index 44f4c2d83763f..cbba21e3a58df 100644
15946 +--- a/kernel/fork.c
15947 ++++ b/kernel/fork.c
15948 +@@ -1050,6 +1050,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
15949 + mm->pmd_huge_pte = NULL;
15950 + #endif
15951 + mm_init_uprobes_state(mm);
15952 ++ hugetlb_count_init(mm);
15953 +
15954 + if (current->mm) {
15955 + mm->flags = current->mm->flags & MMF_INIT_MASK;
15956 +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
15957 +index ad0db322ed3b4..1a7e3f838077b 100644
15958 +--- a/kernel/locking/rtmutex.c
15959 ++++ b/kernel/locking/rtmutex.c
15960 +@@ -1556,7 +1556,7 @@ void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
15961 + struct lock_class_key *key)
15962 + {
15963 + debug_check_no_locks_freed((void *)lock, sizeof(*lock));
15964 +- lockdep_init_map(&lock->dep_map, name, key, 0);
15965 ++ lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
15966 +
15967 + __rt_mutex_basic_init(lock);
15968 + }
15969 +diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
15970 +index ca43239a255ad..cb5a25a8a0cc7 100644
15971 +--- a/kernel/pid_namespace.c
15972 ++++ b/kernel/pid_namespace.c
15973 +@@ -51,7 +51,8 @@ static struct kmem_cache *create_pid_cachep(unsigned int level)
15974 + mutex_lock(&pid_caches_mutex);
15975 + /* Name collision forces to do allocation under mutex. */
15976 + if (!*pkc)
15977 +- *pkc = kmem_cache_create(name, len, 0, SLAB_HWCACHE_ALIGN, 0);
15978 ++ *pkc = kmem_cache_create(name, len, 0,
15979 ++ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, 0);
15980 + mutex_unlock(&pid_caches_mutex);
15981 + /* current can fail, but someone else can succeed. */
15982 + return READ_ONCE(*pkc);
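
SLAB_ACCOUNT in the hunk above makes every object from the per-level pid caches count against the allocating task's memory cgroup, bringing nested pid namespaces in line with kmemcg accounting of other per-task allocations (the motivation is inferred; the patch itself only adds the flag). The flag is purely a cache-creation-time property:

	/* Objects from this cache are now charged to the memcg of the
	 * task that allocates them: */
	*pkc = kmem_cache_create(name, len, 0,
				 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, 0);
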
15983 +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
15984 +index 142a58d124d95..6dad7da8f383e 100644
15985 +--- a/kernel/printk/printk.c
15986 ++++ b/kernel/printk/printk.c
15987 +@@ -2545,6 +2545,7 @@ void console_unlock(void)
15988 + bool do_cond_resched, retry;
15989 + struct printk_info info;
15990 + struct printk_record r;
15991 ++ u64 __maybe_unused next_seq;
15992 +
15993 + if (console_suspended) {
15994 + up_console_sem();
15995 +@@ -2654,8 +2655,10 @@ skip:
15996 + cond_resched();
15997 + }
15998 +
15999 +- console_locked = 0;
16000 ++ /* Get consistent value of the next-to-be-used sequence number. */
16001 ++ next_seq = console_seq;
16002 +
16003 ++ console_locked = 0;
16004 + up_console_sem();
16005 +
16006 + /*
16007 +@@ -2664,7 +2667,7 @@ skip:
16008 + * there's a new owner and the console_unlock() from them will do the
16009 + * flush, no worries.
16010 + */
16011 +- retry = prb_read_valid(prb, console_seq, NULL);
16012 ++ retry = prb_read_valid(prb, next_seq, NULL);
16013 + printk_safe_exit_irqrestore(flags);
16014 +
16015 + if (retry && console_trylock())
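
The printk hunks above close a small race in console_unlock(): once the console semaphore is released, another CPU can take it and advance console_seq, so the retry check must use a value captured while this CPU still owned the lock. Roughly:

	/* Ordering after the fix (sketch):
	 *   next_seq = console_seq;   captured under the semaphore
	 *   up_console_sem();         another CPU may now move console_seq
	 *   retry = prb_read_valid(prb, next_seq, NULL);
	 * The retry decision is tied to what this CPU actually printed,
	 * not to whatever a new owner has consumed in the meantime. */
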
16016 +diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
16017 +index de1dc3bb7f701..6ce104242b23d 100644
16018 +--- a/kernel/rcu/tree_plugin.h
16019 ++++ b/kernel/rcu/tree_plugin.h
16020 +@@ -2982,17 +2982,17 @@ static void noinstr rcu_dynticks_task_exit(void)
16021 + /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
16022 + static void rcu_dynticks_task_trace_enter(void)
16023 + {
16024 +-#ifdef CONFIG_TASKS_RCU_TRACE
16025 ++#ifdef CONFIG_TASKS_TRACE_RCU
16026 + if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
16027 + current->trc_reader_special.b.need_mb = true;
16028 +-#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
16029 ++#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
16030 + }
16031 +
16032 + /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
16033 + static void rcu_dynticks_task_trace_exit(void)
16034 + {
16035 +-#ifdef CONFIG_TASKS_RCU_TRACE
16036 ++#ifdef CONFIG_TASKS_TRACE_RCU
16037 + if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
16038 + current->trc_reader_special.b.need_mb = false;
16039 +-#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
16040 ++#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
16041 + }
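
The guard fixed above had never matched a real Kconfig symbol: the option is CONFIG_TASKS_TRACE_RCU, so both function bodies were compiled out and need_mb was never toggled on idle/user transitions. The failure mode of such a typo is silent:

	#ifdef CONFIG_TASKS_RCU_TRACE	/* no such symbol: always false */
		current->trc_reader_special.b.need_mb = true;	/* dead code */
	#endif
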
16042 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
16043 +index a2403432f3abb..399c37c95392e 100644
16044 +--- a/kernel/sched/core.c
16045 ++++ b/kernel/sched/core.c
16046 +@@ -8536,7 +8536,6 @@ static void balance_push(struct rq *rq)
16047 + struct task_struct *push_task = rq->curr;
16048 +
16049 + lockdep_assert_rq_held(rq);
16050 +- SCHED_WARN_ON(rq->cpu != smp_processor_id());
16051 +
16052 + /*
16053 + * Ensure the thing is persistent until balance_push_set(.on = false);
16054 +@@ -8544,9 +8543,10 @@ static void balance_push(struct rq *rq)
16055 + rq->balance_callback = &balance_push_callback;
16056 +
16057 + /*
16058 +- * Only active while going offline.
16059 ++ * Only active while going offline and when invoked on the outgoing
16060 ++ * CPU.
16061 + */
16062 +- if (!cpu_dying(rq->cpu))
16063 ++ if (!cpu_dying(rq->cpu) || rq != this_rq())
16064 + return;
16065 +
16066 + /*
16067 +diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
16068 +index b61eefe5ccf53..7b3c754821e55 100644
16069 +--- a/kernel/trace/trace_osnoise.c
16070 ++++ b/kernel/trace/trace_osnoise.c
16071 +@@ -1548,7 +1548,7 @@ static int start_kthread(unsigned int cpu)
16072 + static int start_per_cpu_kthreads(struct trace_array *tr)
16073 + {
16074 + struct cpumask *current_mask = &save_cpumask;
16075 +- int retval;
16076 ++ int retval = 0;
16077 + int cpu;
16078 +
16079 + get_online_cpus();
16080 +@@ -1568,13 +1568,13 @@ static int start_per_cpu_kthreads(struct trace_array *tr)
16081 + retval = start_kthread(cpu);
16082 + if (retval) {
16083 + stop_per_cpu_kthreads();
16084 +- return retval;
16085 ++ break;
16086 + }
16087 + }
16088 +
16089 + put_online_cpus();
16090 +
16091 +- return 0;
16092 ++ return retval;
16093 + }
16094 +
16095 + #ifdef CONFIG_HOTPLUG_CPU
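
In start_per_cpu_kthreads() above, the early return on kthread failure skipped put_online_cpus(), leaving the CPU-hotplug read lock taken at the top of the function held. The fix converts the return into a break so both paths fall through to the unlock (sketch):

	get_online_cpus();
	for_each_cpu(cpu, current_mask) {
		retval = start_kthread(cpu);
		if (retval) {
			stop_per_cpu_kthreads();
			break;		/* was "return retval;", which
					 * leaked the hotplug lock */
		}
	}
	put_online_cpus();		/* now reached on error as well */
	return retval;
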
16096 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
16097 +index f148eacda55a9..542c2d03dab65 100644
16098 +--- a/kernel/workqueue.c
16099 ++++ b/kernel/workqueue.c
16100 +@@ -5902,6 +5902,13 @@ static void __init wq_numa_init(void)
16101 + return;
16102 + }
16103 +
16104 ++ for_each_possible_cpu(cpu) {
16105 ++ if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
16106 ++ pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
16107 ++ return;
16108 ++ }
16109 ++ }
16110 ++
16111 + wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
16112 + BUG_ON(!wq_update_unbound_numa_attrs_buf);
16113 +
16114 +@@ -5919,11 +5926,6 @@ static void __init wq_numa_init(void)
16115 +
16116 + for_each_possible_cpu(cpu) {
16117 + node = cpu_to_node(cpu);
16118 +- if (WARN_ON(node == NUMA_NO_NODE)) {
16119 +- pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
16120 +- /* happens iff arch is bonkers, let's just proceed */
16121 +- return;
16122 +- }
16123 + cpumask_set_cpu(cpu, tbl[node]);
16124 + }
16125 +
16126 +diff --git a/lib/test_bpf.c b/lib/test_bpf.c
16127 +index d500320778c76..f6d5d30d01bf2 100644
16128 +--- a/lib/test_bpf.c
16129 ++++ b/lib/test_bpf.c
16130 +@@ -4286,8 +4286,8 @@ static struct bpf_test tests[] = {
16131 + .u.insns_int = {
16132 + BPF_LD_IMM64(R0, 0),
16133 + BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
16134 +- BPF_STX_MEM(BPF_W, R10, R1, -40),
16135 +- BPF_LDX_MEM(BPF_W, R0, R10, -40),
16136 ++ BPF_STX_MEM(BPF_DW, R10, R1, -40),
16137 ++ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
16138 + BPF_EXIT_INSN(),
16139 + },
16140 + INTERNAL,
16141 +@@ -6659,7 +6659,14 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
16142 + u64 duration;
16143 + u32 ret;
16144 +
16145 +- if (test->test[i].data_size == 0 &&
16146 ++ /*
16147 ++ * NOTE: Several sub-tests may be present, in which case
16148 ++ * a zero {data_size, result} tuple indicates the end of
16149 ++ * the sub-test array. The first test is always run,
16150 ++ * even if both data_size and result happen to be zero.
16151 ++ */
16152 ++ if (i > 0 &&
16153 ++ test->test[i].data_size == 0 &&
16154 + test->test[i].result == 0)
16155 + break;
16156 +
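
Two independent fixes above: the stack-spill test meant to round-trip a full 64-bit pattern, and BPF_W only moves 32 bits; and the sub-test loop terminated on a zero {data_size, result} sentinel, which a legitimate first test can also produce. On the size fix:

	BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
	BPF_STX_MEM(BPF_DW, R10, R1, -40),	/* BPF_DW: 64-bit store;
						 * BPF_W stored only the
						 * low 32 bits */
	BPF_LDX_MEM(BPF_DW, R0, R10, -40),
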
16157 +diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
16158 +index f93b1e145ada7..16b1d3a3a4975 100644
16159 +--- a/lib/test_stackinit.c
16160 ++++ b/lib/test_stackinit.c
16161 +@@ -67,10 +67,10 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
16162 + #define INIT_STRUCT_none /**/
16163 + #define INIT_STRUCT_zero = { }
16164 + #define INIT_STRUCT_static_partial = { .two = 0, }
16165 +-#define INIT_STRUCT_static_all = { .one = arg->one, \
16166 +- .two = arg->two, \
16167 +- .three = arg->three, \
16168 +- .four = arg->four, \
16169 ++#define INIT_STRUCT_static_all = { .one = 0, \
16170 ++ .two = 0, \
16171 ++ .three = 0, \
16172 ++ .four = 0, \
16173 + }
16174 + #define INIT_STRUCT_dynamic_partial = { .two = arg->two, }
16175 + #define INIT_STRUCT_dynamic_all = { .one = arg->one, \
16176 +@@ -84,8 +84,7 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
16177 + var.one = 0; \
16178 + var.two = 0; \
16179 + var.three = 0; \
16180 +- memset(&var.four, 0, \
16181 +- sizeof(var.four))
16182 ++ var.four = 0
16183 +
16184 + /*
16185 + * @name: unique string name for the test
16186 +@@ -210,18 +209,13 @@ struct test_small_hole {
16187 + unsigned long four;
16188 + };
16189 +
16190 +-/* Try to trigger unhandled padding in a structure. */
16191 +-struct test_aligned {
16192 +- u32 internal1;
16193 +- u64 internal2;
16194 +-} __aligned(64);
16195 +-
16196 ++/* Trigger unhandled padding in a structure. */
16197 + struct test_big_hole {
16198 + u8 one;
16199 + u8 two;
16200 + u8 three;
16201 + /* 61 byte padding hole here. */
16202 +- struct test_aligned four;
16203 ++ u8 four __aligned(64);
16204 + } __aligned(64);
16205 +
16206 + struct test_trailing_hole {
16207 +diff --git a/mm/hmm.c b/mm/hmm.c
16208 +index fad6be2bf0727..842e265992380 100644
16209 +--- a/mm/hmm.c
16210 ++++ b/mm/hmm.c
16211 +@@ -295,10 +295,13 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
16212 + goto fault;
16213 +
16214 + /*
16215 ++ * Bypass devmap ptes such as DAX pages when all requested
16216 ++ * pfn flags (pfn_req_flags) are fulfilled.
16217 + * Since each architecture defines a struct page for the zero page, just
16218 + * fall through and treat it like a normal page.
16219 + */
16220 +- if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
16221 ++ if (pte_special(pte) && !pte_devmap(pte) &&
16222 ++ !is_zero_pfn(pte_pfn(pte))) {
16223 + if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
16224 + pte_unmap(ptep);
16225 + return -EFAULT;
16226 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
16227 +index 8ea35ba6699f2..6c583ef079e3d 100644
16228 +--- a/mm/hugetlb.c
16229 ++++ b/mm/hugetlb.c
16230 +@@ -4033,8 +4033,10 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
16231 + * after this open call completes. It is therefore safe to take a
16232 + * new reference here without additional locking.
16233 + */
16234 +- if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
16235 ++ if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
16236 ++ resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
16237 + kref_get(&resv->refs);
16238 ++ }
16239 + }
16240 +
16241 + static void hugetlb_vm_op_close(struct vm_area_struct *vma)
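
The hugetlb hunk above pairs the kref taken on VMA open with a reference on the reservation map's cgroup uncharge info. The apparent problem (inferred from the helper's name, not stated in the patch) is that a split or duplicated VMA shared one uncharge info across two kref holders, so the second release dropped cgroup state that was already gone:

	/* open() and close() must stay balanced per reference: */
	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
		kref_get(&resv->refs);
	}
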
16242 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
16243 +index 470400cc75136..83811c976c0cb 100644
16244 +--- a/mm/memory-failure.c
16245 ++++ b/mm/memory-failure.c
16246 +@@ -68,7 +68,7 @@ atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
16247 +
16248 + static bool __page_handle_poison(struct page *page)
16249 + {
16250 +- bool ret;
16251 ++ int ret;
16252 +
16253 + zone_pcp_disable(page_zone(page));
16254 + ret = dissolve_free_huge_page(page);
16255 +@@ -76,7 +76,7 @@ static bool __page_handle_poison(struct page *page)
16256 + ret = take_page_off_buddy(page);
16257 + zone_pcp_enable(page_zone(page));
16258 +
16259 +- return ret;
16260 ++ return ret > 0;
16261 + }
16262 +
16263 + static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
16264 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
16265 +index 86c3af79e874e..97698a761221e 100644
16266 +--- a/mm/memory_hotplug.c
16267 ++++ b/mm/memory_hotplug.c
16268 +@@ -708,8 +708,8 @@ static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn
16269 + return movable_node_enabled ? movable_zone : kernel_zone;
16270 + }
16271 +
16272 +-struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
16273 +- unsigned long nr_pages)
16274 ++struct zone *zone_for_pfn_range(int online_type, int nid,
16275 ++ unsigned long start_pfn, unsigned long nr_pages)
16276 + {
16277 + if (online_type == MMOP_ONLINE_KERNEL)
16278 + return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
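
zone_for_pfn_range() above took start_pfn as a bare unsigned int, truncating page frame numbers that need more than 32 bits. A quick illustration (the value is illustrative):

	unsigned long start_pfn = 0x400000000UL; /* pfn of the 64 TiB
						  * boundary with 4 KiB
						  * pages (2^34 * 2^12) */
	unsigned int truncated = start_pfn;	 /* silently becomes 0 */
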
16279 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
16280 +index e32360e902744..54f6eaff18c52 100644
16281 +--- a/mm/mempolicy.c
16282 ++++ b/mm/mempolicy.c
16283 +@@ -1965,17 +1965,26 @@ unsigned int mempolicy_slab_node(void)
16284 + */
16285 + static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
16286 + {
16287 +- unsigned nnodes = nodes_weight(pol->nodes);
16288 +- unsigned target;
16289 ++ nodemask_t nodemask = pol->nodes;
16290 ++ unsigned int target, nnodes;
16291 + int i;
16292 + int nid;
16293 ++ /*
16294 ++ * The barrier will stabilize the nodemask in a register or on
16295 ++ * the stack so that it will stop changing under the code.
16296 ++ *
16297 ++ * Between first_node() and next_node(), pol->nodes could be changed
16298 ++ * by other threads. So we put pol->nodes on the local stack.
16299 ++ */
16300 ++ barrier();
16301 +
16302 ++ nnodes = nodes_weight(nodemask);
16303 + if (!nnodes)
16304 + return numa_node_id();
16305 + target = (unsigned int)n % nnodes;
16306 +- nid = first_node(pol->nodes);
16307 ++ nid = first_node(nodemask);
16308 + for (i = 0; i < target; i++)
16309 +- nid = next_node(nid, pol->nodes);
16310 ++ nid = next_node(nid, nodemask);
16311 + return nid;
16312 + }
16313 +
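
The comment added in the hunk explains the snapshot; the concrete race it prevents looks like this (interleaving sketch):

	/*   reader                           concurrent rebind
	 *   nnodes = nodes_weight(pol->nodes);
	 *   nid = first_node(pol->nodes);
	 *                                    pol->nodes = new_mask;
	 *   nid = next_node(nid, pol->nodes);  <- walks a different mask,
	 *                                         may loop past the weight
	 * Copying pol->nodes to a local nodemask_t (with barrier() so the
	 * compiler cannot re-read the original) pins one consistent view. */
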
16314 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
16315 +index eeb3a9cb36bb4..7a28f7db7d286 100644
16316 +--- a/mm/page_alloc.c
16317 ++++ b/mm/page_alloc.c
16318 +@@ -3445,8 +3445,10 @@ void free_unref_page_list(struct list_head *list)
16319 + /* Prepare pages for freeing */
16320 + list_for_each_entry_safe(page, next, list, lru) {
16321 + pfn = page_to_pfn(page);
16322 +- if (!free_unref_page_prepare(page, pfn, 0))
16323 ++ if (!free_unref_page_prepare(page, pfn, 0)) {
16324 + list_del(&page->lru);
16325 ++ continue;
16326 ++ }
16327 +
16328 + /*
16329 + * Free isolated pages directly to the allocator, see
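
The missing continue above meant a page rejected by free_unref_page_prepare() was unlinked from the list but still fell through to the freeing code below. With the fix the loop shape is:

	list_for_each_entry_safe(page, next, list, lru) {
		pfn = page_to_pfn(page);
		if (!free_unref_page_prepare(page, pfn, 0)) {
			list_del(&page->lru);
			continue;	/* do not free a rejected page */
		}
		/* ... isolated-page handling and per-cpu free ... */
	}
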
16330 +diff --git a/mm/vmscan.c b/mm/vmscan.c
16331 +index eeae2f6bc5320..f1782b816c983 100644
16332 +--- a/mm/vmscan.c
16333 ++++ b/mm/vmscan.c
16334 +@@ -2592,7 +2592,7 @@ out:
16335 + cgroup_size = max(cgroup_size, protection);
16336 +
16337 + scan = lruvec_size - lruvec_size * protection /
16338 +- cgroup_size;
16339 ++ (cgroup_size + 1);
16340 +
16341 + /*
16342 + * Minimally target SWAP_CLUSTER_MAX pages to keep
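
Biasing the divisor above by one makes the proportional-scan expression total. cgroup_size tracks the cgroup's usage (clamped up to protection), and a cgroup reclaimed down to zero whose effective protection is also zero evidently left the divisor at zero:

	/* With cgroup_size == 0 and protection == 0:
	 *   scan = lruvec_size - lruvec_size * 0 / 0;    divide by zero
	 * With the +1:
	 *   scan = lruvec_size - lruvec_size * 0 / 1;    == lruvec_size
	 * For nonzero sizes the off-by-one is negligible. */
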
16343 +diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
16344 +index f4fea28e05da6..3ec1a51a6944e 100644
16345 +--- a/net/9p/trans_xen.c
16346 ++++ b/net/9p/trans_xen.c
16347 +@@ -138,7 +138,7 @@ static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
16348 +
16349 + static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
16350 + {
16351 +- struct xen_9pfs_front_priv *priv = NULL;
16352 ++ struct xen_9pfs_front_priv *priv;
16353 + RING_IDX cons, prod, masked_cons, masked_prod;
16354 + unsigned long flags;
16355 + u32 size = p9_req->tc.size;
16356 +@@ -151,7 +151,7 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
16357 + break;
16358 + }
16359 + read_unlock(&xen_9pfs_lock);
16360 +- if (!priv || priv->client != client)
16361 ++ if (list_entry_is_head(priv, &xen_9pfs_devs, list))
16362 + return -EINVAL;
16363 +
16364 + num = p9_req->tc.tag % priv->num_rings;
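
The 9p fix above corrects a broken loop-exit test: after list_for_each_entry() walks the whole list without a break, the cursor is not NULL; it aliases the container of the list head, so "!priv" could never fire. The canonical end-of-list test is the one the patch adopts (sketch):

	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (priv->client == client)
			break;			/* found a match */
	}
	read_unlock(&xen_9pfs_lock);
	if (list_entry_is_head(priv, &xen_9pfs_devs, list))
		return -EINVAL;			/* full walk, no match */
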
16365 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
16366 +index 1c30182025645..0d0b958b7fe7e 100644
16367 +--- a/net/bluetooth/hci_event.c
16368 ++++ b/net/bluetooth/hci_event.c
16369 +@@ -40,6 +40,8 @@
16370 + #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
16371 + "\x00\x00\x00\x00\x00\x00\x00\x00"
16372 +
16373 ++#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
16374 ++
16375 + /* Handle HCI Event packets */
16376 +
16377 + static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
16378 +@@ -1171,6 +1173,12 @@ static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
16379 +
16380 + bacpy(&hdev->random_addr, sent);
16381 +
16382 ++ if (!bacmp(&hdev->rpa, sent)) {
16383 ++ hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
16384 ++ queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
16385 ++ secs_to_jiffies(hdev->rpa_timeout));
16386 ++ }
16387 ++
16388 + hci_dev_unlock(hdev);
16389 + }
16390 +
16391 +@@ -1201,24 +1209,30 @@ static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
16392 + {
16393 + __u8 status = *((__u8 *) skb->data);
16394 + struct hci_cp_le_set_adv_set_rand_addr *cp;
16395 +- struct adv_info *adv_instance;
16396 ++ struct adv_info *adv;
16397 +
16398 + if (status)
16399 + return;
16400 +
16401 + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
16402 +- if (!cp)
16403 ++ /* Update only in case the adv instance since handle 0x00 shall be using
16404 ++ * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
16405 ++ * non-extended adverting.
16406 ++ */
16407 ++ if (!cp || !cp->handle)
16408 + return;
16409 +
16410 + hci_dev_lock(hdev);
16411 +
16412 +- if (!cp->handle) {
16413 +- /* Store in hdev for instance 0 (Set adv and Directed advs) */
16414 +- bacpy(&hdev->random_addr, &cp->bdaddr);
16415 +- } else {
16416 +- adv_instance = hci_find_adv_instance(hdev, cp->handle);
16417 +- if (adv_instance)
16418 +- bacpy(&adv_instance->random_addr, &cp->bdaddr);
16419 ++ adv = hci_find_adv_instance(hdev, cp->handle);
16420 ++ if (adv) {
16421 ++ bacpy(&adv->random_addr, &cp->bdaddr);
16422 ++ if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
16423 ++ adv->rpa_expired = false;
16424 ++ queue_delayed_work(hdev->workqueue,
16425 ++ &adv->rpa_expired_cb,
16426 ++ secs_to_jiffies(hdev->rpa_timeout));
16427 ++ }
16428 + }
16429 +
16430 + hci_dev_unlock(hdev);
16431 +@@ -3268,11 +3282,9 @@ unlock:
16432 + hci_dev_unlock(hdev);
16433 + }
16434 +
16435 +-static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev,
16436 +- u16 opcode, u8 ncmd)
16437 ++static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
16438 + {
16439 +- if (opcode != HCI_OP_NOP)
16440 +- cancel_delayed_work(&hdev->cmd_timer);
16441 ++ cancel_delayed_work(&hdev->cmd_timer);
16442 +
16443 + if (!test_bit(HCI_RESET, &hdev->flags)) {
16444 + if (ncmd) {
16445 +@@ -3647,7 +3659,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
16446 + break;
16447 + }
16448 +
16449 +- handle_cmd_cnt_and_timer(hdev, *opcode, ev->ncmd);
16450 ++ handle_cmd_cnt_and_timer(hdev, ev->ncmd);
16451 +
16452 + hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
16453 + req_complete_skb);
16454 +@@ -3748,7 +3760,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
16455 + break;
16456 + }
16457 +
16458 +- handle_cmd_cnt_and_timer(hdev, *opcode, ev->ncmd);
16459 ++ handle_cmd_cnt_and_timer(hdev, ev->ncmd);
16460 +
16461 + /* Indicate request completion if the command failed. Also, if
16462 + * we're not waiting for a special event and we get a success
16463 +@@ -4382,6 +4394,21 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
16464 +
16465 + switch (ev->status) {
16466 + case 0x00:
16467 ++ /* The synchronous connection complete event should only be
16468 ++ * sent once per new connection. Receiving a successful
16469 ++ * complete event when the connection status is already
16470 ++ * BT_CONNECTED means that the device is misbehaving and sent
16471 ++ * multiple complete event packets for the same new connection.
16472 ++ *
16473 ++ * Registering the device more than once can corrupt kernel
16474 ++ * memory, hence upon detecting this invalid event, we report
16475 ++ * an error and ignore the packet.
16476 ++ */
16477 ++ if (conn->state == BT_CONNECTED) {
16478 ++ bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
16479 ++ goto unlock;
16480 ++ }
16481 ++
16482 + conn->handle = __le16_to_cpu(ev->handle);
16483 + conn->state = BT_CONNECTED;
16484 + conn->type = ev->link_type;
16485 +@@ -5104,9 +5131,64 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
16486 + }
16487 + #endif
16488 +
16489 ++static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
16490 ++ u8 bdaddr_type, bdaddr_t *local_rpa)
16491 ++{
16492 ++ if (conn->out) {
16493 ++ conn->dst_type = bdaddr_type;
16494 ++ conn->resp_addr_type = bdaddr_type;
16495 ++ bacpy(&conn->resp_addr, bdaddr);
16496 ++
16497 ++ /* If the controller has set a Local RPA then it must be
16498 ++ * used instead of hdev->rpa.
16499 ++ */
16500 ++ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
16501 ++ conn->init_addr_type = ADDR_LE_DEV_RANDOM;
16502 ++ bacpy(&conn->init_addr, local_rpa);
16503 ++ } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
16504 ++ conn->init_addr_type = ADDR_LE_DEV_RANDOM;
16505 ++ bacpy(&conn->init_addr, &conn->hdev->rpa);
16506 ++ } else {
16507 ++ hci_copy_identity_address(conn->hdev, &conn->init_addr,
16508 ++ &conn->init_addr_type);
16509 ++ }
16510 ++ } else {
16511 ++ conn->resp_addr_type = conn->hdev->adv_addr_type;
16512 ++ /* If the controller has set a Local RPA then it must be
16513 ++ * used instead of hdev->rpa.
16514 ++ */
16515 ++ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
16516 ++ conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
16517 ++ bacpy(&conn->resp_addr, local_rpa);
16518 ++ } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
16519 ++ /* In case of ext adv, resp_addr will be updated in
16520 ++ * Adv Terminated event.
16521 ++ */
16522 ++ if (!ext_adv_capable(conn->hdev))
16523 ++ bacpy(&conn->resp_addr,
16524 ++ &conn->hdev->random_addr);
16525 ++ } else {
16526 ++ bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
16527 ++ }
16528 ++
16529 ++ conn->init_addr_type = bdaddr_type;
16530 ++ bacpy(&conn->init_addr, bdaddr);
16531 ++
16532 ++ /* For incoming connections, set the default minimum
16533 ++ * and maximum connection interval. They will be used
16534 ++ * to check if the parameters are in range and if not
16535 ++ * trigger the connection update procedure.
16536 ++ */
16537 ++ conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
16538 ++ conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
16539 ++ }
16540 ++}
16541 ++
16542 + static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
16543 +- bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
16544 +- u16 interval, u16 latency, u16 supervision_timeout)
16545 ++ bdaddr_t *bdaddr, u8 bdaddr_type,
16546 ++ bdaddr_t *local_rpa, u8 role, u16 handle,
16547 ++ u16 interval, u16 latency,
16548 ++ u16 supervision_timeout)
16549 + {
16550 + struct hci_conn_params *params;
16551 + struct hci_conn *conn;
16552 +@@ -5154,32 +5236,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
16553 + cancel_delayed_work(&conn->le_conn_timeout);
16554 + }
16555 +
16556 +- if (!conn->out) {
16557 +- /* Set the responder (our side) address type based on
16558 +- * the advertising address type.
16559 +- */
16560 +- conn->resp_addr_type = hdev->adv_addr_type;
16561 +- if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
16562 +- /* In case of ext adv, resp_addr will be updated in
16563 +- * Adv Terminated event.
16564 +- */
16565 +- if (!ext_adv_capable(hdev))
16566 +- bacpy(&conn->resp_addr, &hdev->random_addr);
16567 +- } else {
16568 +- bacpy(&conn->resp_addr, &hdev->bdaddr);
16569 +- }
16570 +-
16571 +- conn->init_addr_type = bdaddr_type;
16572 +- bacpy(&conn->init_addr, bdaddr);
16573 +-
16574 +- /* For incoming connections, set the default minimum
16575 +- * and maximum connection interval. They will be used
16576 +- * to check if the parameters are in range and if not
16577 +- * trigger the connection update procedure.
16578 +- */
16579 +- conn->le_conn_min_interval = hdev->le_conn_min_interval;
16580 +- conn->le_conn_max_interval = hdev->le_conn_max_interval;
16581 +- }
16582 ++ le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
16583 +
16584 + /* Lookup the identity address from the stored connection
16585 + * address and address type.
16586 +@@ -5290,7 +5347,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
16587 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
16588 +
16589 + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
16590 +- ev->role, le16_to_cpu(ev->handle),
16591 ++ NULL, ev->role, le16_to_cpu(ev->handle),
16592 + le16_to_cpu(ev->interval),
16593 + le16_to_cpu(ev->latency),
16594 + le16_to_cpu(ev->supervision_timeout));
16595 +@@ -5304,7 +5361,7 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
16596 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
16597 +
16598 + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
16599 +- ev->role, le16_to_cpu(ev->handle),
16600 ++ &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
16601 + le16_to_cpu(ev->interval),
16602 + le16_to_cpu(ev->latency),
16603 + le16_to_cpu(ev->supervision_timeout));
16604 +@@ -5340,7 +5397,8 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
16605 + if (conn) {
16606 + struct adv_info *adv_instance;
16607 +
16608 +- if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
16609 ++ if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
16610 ++ bacmp(&conn->resp_addr, BDADDR_ANY))
16611 + return;
16612 +
16613 + if (!ev->handle) {
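
Across the hci_event.c hunks, re-arming of the RPA expiry timers moves from request setup (the hci_request.c hunks below delete it there) to the command-complete handlers, i.e. to the point where the controller has confirmed the address. The pattern, taken from the hunks above:

	/* On HCI_OP_LE_SET_RANDOM_ADDR completion: */
	if (!bacmp(&hdev->rpa, sent)) {		/* the new address is our RPA */
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}
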
16614 +diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
16615 +index 1d14adc023e96..f15626607b2d6 100644
16616 +--- a/net/bluetooth/hci_request.c
16617 ++++ b/net/bluetooth/hci_request.c
16618 +@@ -2072,8 +2072,6 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
16619 + * current RPA has expired then generate a new one.
16620 + */
16621 + if (use_rpa) {
16622 +- int to;
16623 +-
16624 + /* If Controller supports LL Privacy use own address type is
16625 + * 0x03
16626 + */
16627 +@@ -2084,14 +2082,10 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
16628 + *own_addr_type = ADDR_LE_DEV_RANDOM;
16629 +
16630 + if (adv_instance) {
16631 +- if (!adv_instance->rpa_expired &&
16632 +- !bacmp(&adv_instance->random_addr, &hdev->rpa))
16633 ++ if (adv_rpa_valid(adv_instance))
16634 + return 0;
16635 +-
16636 +- adv_instance->rpa_expired = false;
16637 + } else {
16638 +- if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
16639 +- !bacmp(&hdev->random_addr, &hdev->rpa))
16640 ++ if (rpa_valid(hdev))
16641 + return 0;
16642 + }
16643 +
16644 +@@ -2103,14 +2097,6 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
16645 +
16646 + bacpy(rand_addr, &hdev->rpa);
16647 +
16648 +- to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
16649 +- if (adv_instance)
16650 +- queue_delayed_work(hdev->workqueue,
16651 +- &adv_instance->rpa_expired_cb, to);
16652 +- else
16653 +- queue_delayed_work(hdev->workqueue,
16654 +- &hdev->rpa_expired, to);
16655 +-
16656 + return 0;
16657 + }
16658 +
16659 +@@ -2153,6 +2139,30 @@ void __hci_req_clear_ext_adv_sets(struct hci_request *req)
16660 + hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
16661 + }
16662 +
16663 ++static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
16664 ++{
16665 ++ struct hci_dev *hdev = req->hdev;
16666 ++
16667 ++ /* If we're advertising or initiating an LE connection we can't
16668 ++ * go ahead and change the random address at this time. This is
16669 ++ * because the eventual initiator address used for the
16670 ++ * subsequently created connection will be undefined (some
16671 ++ * controllers use the new address and others the one we had
16672 ++ * when the operation started).
16673 ++ *
16674 ++ * In this kind of scenario skip the update and let the random
16675 ++ * address be updated at the next cycle.
16676 ++ */
16677 ++ if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
16678 ++ hci_lookup_le_connect(hdev)) {
16679 ++ bt_dev_dbg(hdev, "Deferring random address update");
16680 ++ hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
16681 ++ return;
16682 ++ }
16683 ++
16684 ++ hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
16685 ++}
16686 ++
16687 + int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
16688 + {
16689 + struct hci_cp_le_set_ext_adv_params cp;
16690 +@@ -2255,6 +2265,13 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
16691 + } else {
16692 + if (!bacmp(&random_addr, &hdev->random_addr))
16693 + return 0;
16694 ++ /* Instance 0x00 doesn't have an adv_info, instead it
16695 ++ * uses hdev->random_addr to track its address so
16696 ++ * whenever it needs to be updated this also sets the
16697 ++ * random address, since hdev->random_addr is shared with
16698 ++ * the scan state machine.
16699 ++ */
16700 ++ set_random_addr(req, &random_addr);
16701 + }
16702 +
16703 + memset(&cp, 0, sizeof(cp));
16704 +@@ -2512,30 +2529,6 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
16705 + false);
16706 + }
16707 +
16708 +-static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
16709 +-{
16710 +- struct hci_dev *hdev = req->hdev;
16711 +-
16712 +- /* If we're advertising or initiating an LE connection we can't
16713 +- * go ahead and change the random address at this time. This is
16714 +- * because the eventual initiator address used for the
16715 +- * subsequently created connection will be undefined (some
16716 +- * controllers use the new address and others the one we had
16717 +- * when the operation started).
16718 +- *
16719 +- * In this kind of scenario skip the update and let the random
16720 +- * address be updated at the next cycle.
16721 +- */
16722 +- if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
16723 +- hci_lookup_le_connect(hdev)) {
16724 +- bt_dev_dbg(hdev, "Deferring random address update");
16725 +- hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
16726 +- return;
16727 +- }
16728 +-
16729 +- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
16730 +-}
16731 +-
16732 + int hci_update_random_address(struct hci_request *req, bool require_privacy,
16733 + bool use_rpa, u8 *own_addr_type)
16734 + {
16735 +@@ -2547,8 +2540,6 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
16736 + * the current RPA in use, then generate a new one.
16737 + */
16738 + if (use_rpa) {
16739 +- int to;
16740 +-
16741 + /* If Controller supports LL Privacy use own address type is
16742 + * 0x03
16743 + */
16744 +@@ -2558,8 +2549,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
16745 + else
16746 + *own_addr_type = ADDR_LE_DEV_RANDOM;
16747 +
16748 +- if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
16749 +- !bacmp(&hdev->random_addr, &hdev->rpa))
16750 ++ if (rpa_valid(hdev))
16751 + return 0;
16752 +
16753 + err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
16754 +@@ -2570,9 +2560,6 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
16755 +
16756 + set_random_addr(req, &hdev->rpa);
16757 +
16758 +- to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
16759 +- queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
16760 +-
16761 + return 0;
16762 + }
16763 +
16764 +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
16765 +index b5ab842c7c4a8..110cfd6aa2b77 100644
16766 +--- a/net/bluetooth/sco.c
16767 ++++ b/net/bluetooth/sco.c
16768 +@@ -48,6 +48,8 @@ struct sco_conn {
16769 + spinlock_t lock;
16770 + struct sock *sk;
16771 +
16772 ++ struct delayed_work timeout_work;
16773 ++
16774 + unsigned int mtu;
16775 + };
16776 +
16777 +@@ -74,9 +76,20 @@ struct sco_pinfo {
16778 + #define SCO_CONN_TIMEOUT (HZ * 40)
16779 + #define SCO_DISCONN_TIMEOUT (HZ * 2)
16780 +
16781 +-static void sco_sock_timeout(struct timer_list *t)
16782 ++static void sco_sock_timeout(struct work_struct *work)
16783 + {
16784 +- struct sock *sk = from_timer(sk, t, sk_timer);
16785 ++ struct sco_conn *conn = container_of(work, struct sco_conn,
16786 ++ timeout_work.work);
16787 ++ struct sock *sk;
16788 ++
16789 ++ sco_conn_lock(conn);
16790 ++ sk = conn->sk;
16791 ++ if (sk)
16792 ++ sock_hold(sk);
16793 ++ sco_conn_unlock(conn);
16794 ++
16795 ++ if (!sk)
16796 ++ return;
16797 +
16798 + BT_DBG("sock %p state %d", sk, sk->sk_state);
16799 +
16800 +@@ -90,14 +103,21 @@ static void sco_sock_timeout(struct timer_list *t)
16801 +
16802 + static void sco_sock_set_timer(struct sock *sk, long timeout)
16803 + {
16804 ++ if (!sco_pi(sk)->conn)
16805 ++ return;
16806 ++
16807 + BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
16808 +- sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
16809 ++ cancel_delayed_work(&sco_pi(sk)->conn->timeout_work);
16810 ++ schedule_delayed_work(&sco_pi(sk)->conn->timeout_work, timeout);
16811 + }
16812 +
16813 + static void sco_sock_clear_timer(struct sock *sk)
16814 + {
16815 ++ if (!sco_pi(sk)->conn)
16816 ++ return;
16817 ++
16818 + BT_DBG("sock %p state %d", sk, sk->sk_state);
16819 +- sk_stop_timer(sk, &sk->sk_timer);
16820 ++ cancel_delayed_work(&sco_pi(sk)->conn->timeout_work);
16821 + }
16822 +
16823 + /* ---- SCO connections ---- */
16824 +@@ -177,6 +197,9 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
16825 + sco_chan_del(sk, err);
16826 + bh_unlock_sock(sk);
16827 + sock_put(sk);
16828 ++
16829 ++ /* Ensure no more work items will run before freeing conn. */
16830 ++ cancel_delayed_work_sync(&conn->timeout_work);
16831 + }
16832 +
16833 + hcon->sco_data = NULL;
16834 +@@ -191,6 +214,8 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
16835 + sco_pi(sk)->conn = conn;
16836 + conn->sk = sk;
16837 +
16838 ++ INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout);
16839 ++
16840 + if (parent)
16841 + bt_accept_enqueue(parent, sk, true);
16842 + }
16843 +@@ -210,44 +235,32 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
16844 + return err;
16845 + }
16846 +
16847 +-static int sco_connect(struct sock *sk)
16848 ++static int sco_connect(struct hci_dev *hdev, struct sock *sk)
16849 + {
16850 + struct sco_conn *conn;
16851 + struct hci_conn *hcon;
16852 +- struct hci_dev *hdev;
16853 + int err, type;
16854 +
16855 + BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
16856 +
16857 +- hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR);
16858 +- if (!hdev)
16859 +- return -EHOSTUNREACH;
16860 +-
16861 +- hci_dev_lock(hdev);
16862 +-
16863 + if (lmp_esco_capable(hdev) && !disable_esco)
16864 + type = ESCO_LINK;
16865 + else
16866 + type = SCO_LINK;
16867 +
16868 + if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
16869 +- (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
16870 +- err = -EOPNOTSUPP;
16871 +- goto done;
16872 +- }
16873 ++ (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev)))
16874 ++ return -EOPNOTSUPP;
16875 +
16876 + hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
16877 + sco_pi(sk)->setting);
16878 +- if (IS_ERR(hcon)) {
16879 +- err = PTR_ERR(hcon);
16880 +- goto done;
16881 +- }
16882 ++ if (IS_ERR(hcon))
16883 ++ return PTR_ERR(hcon);
16884 +
16885 + conn = sco_conn_add(hcon);
16886 + if (!conn) {
16887 + hci_conn_drop(hcon);
16888 +- err = -ENOMEM;
16889 +- goto done;
16890 ++ return -ENOMEM;
16891 + }
16892 +
16893 + /* Update source addr of the socket */
16894 +@@ -255,7 +268,7 @@ static int sco_connect(struct sock *sk)
16895 +
16896 + err = sco_chan_add(conn, sk, NULL);
16897 + if (err)
16898 +- goto done;
16899 ++ return err;
16900 +
16901 + if (hcon->state == BT_CONNECTED) {
16902 + sco_sock_clear_timer(sk);
16903 +@@ -265,9 +278,6 @@ static int sco_connect(struct sock *sk)
16904 + sco_sock_set_timer(sk, sk->sk_sndtimeo);
16905 + }
16906 +
16907 +-done:
16908 +- hci_dev_unlock(hdev);
16909 +- hci_dev_put(hdev);
16910 + return err;
16911 + }
16912 +
16913 +@@ -496,8 +506,6 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
16914 +
16915 + sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
16916 +
16917 +- timer_setup(&sk->sk_timer, sco_sock_timeout, 0);
16918 +-
16919 + bt_sock_link(&sco_sk_list, sk);
16920 + return sk;
16921 + }
16922 +@@ -562,6 +570,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
16923 + {
16924 + struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
16925 + struct sock *sk = sock->sk;
16926 ++ struct hci_dev *hdev;
16927 + int err;
16928 +
16929 + BT_DBG("sk %p", sk);
16930 +@@ -576,12 +585,19 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
16931 + if (sk->sk_type != SOCK_SEQPACKET)
16932 + return -EINVAL;
16933 +
16934 ++ hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR);
16935 ++ if (!hdev)
16936 ++ return -EHOSTUNREACH;
16937 ++ hci_dev_lock(hdev);
16938 ++
16939 + lock_sock(sk);
16940 +
16941 + /* Set destination address and psm */
16942 + bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
16943 +
16944 +- err = sco_connect(sk);
16945 ++ err = sco_connect(hdev, sk);
16946 ++ hci_dev_unlock(hdev);
16947 ++ hci_dev_put(hdev);
16948 + if (err)
16949 + goto done;
16950 +
16951 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
16952 +index 4b2415d34873a..bac0184cf3de7 100644
16953 +--- a/net/core/flow_dissector.c
16954 ++++ b/net/core/flow_dissector.c
16955 +@@ -1056,8 +1056,10 @@ proto_again:
16956 + FLOW_DISSECTOR_KEY_IPV4_ADDRS,
16957 + target_container);
16958 +
16959 +- memcpy(&key_addrs->v4addrs, &iph->saddr,
16960 +- sizeof(key_addrs->v4addrs));
16961 ++ memcpy(&key_addrs->v4addrs.src, &iph->saddr,
16962 ++ sizeof(key_addrs->v4addrs.src));
16963 ++ memcpy(&key_addrs->v4addrs.dst, &iph->daddr,
16964 ++ sizeof(key_addrs->v4addrs.dst));
16965 + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
16966 + }
16967 +
16968 +@@ -1101,8 +1103,10 @@ proto_again:
16969 + FLOW_DISSECTOR_KEY_IPV6_ADDRS,
16970 + target_container);
16971 +
16972 +- memcpy(&key_addrs->v6addrs, &iph->saddr,
16973 +- sizeof(key_addrs->v6addrs));
16974 ++ memcpy(&key_addrs->v6addrs.src, &iph->saddr,
16975 ++ sizeof(key_addrs->v6addrs.src));
16976 ++ memcpy(&key_addrs->v6addrs.dst, &iph->daddr,
16977 ++ sizeof(key_addrs->v6addrs.dst));
16978 + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
16979 + }
16980 +
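
The flow dissector previously filled both addresses with one memcpy() starting at &iph->saddr and spanning into iph->daddr, which relies on field adjacency in two unrelated structs and trips fortified memcpy() bounds checking. Copying per field stays within each object (the ip_output.c hunk later in this patch removes the same pattern with plain assignments):

	memcpy(&key_addrs->v4addrs.src, &iph->saddr,
	       sizeof(key_addrs->v4addrs.src));
	memcpy(&key_addrs->v4addrs.dst, &iph->daddr,
	       sizeof(key_addrs->v4addrs.dst));
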
16981 +diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
16982 +index 715b67f6c62f3..e3f0d59068117 100644
16983 +--- a/net/core/flow_offload.c
16984 ++++ b/net/core/flow_offload.c
16985 +@@ -321,6 +321,7 @@ EXPORT_SYMBOL(flow_block_cb_setup_simple);
16986 + static DEFINE_MUTEX(flow_indr_block_lock);
16987 + static LIST_HEAD(flow_block_indr_list);
16988 + static LIST_HEAD(flow_block_indr_dev_list);
16989 ++static LIST_HEAD(flow_indir_dev_list);
16990 +
16991 + struct flow_indr_dev {
16992 + struct list_head list;
16993 +@@ -346,6 +347,33 @@ static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
16994 + return indr_dev;
16995 + }
16996 +
16997 ++struct flow_indir_dev_info {
16998 ++ void *data;
16999 ++ struct net_device *dev;
17000 ++ struct Qdisc *sch;
17001 ++ enum tc_setup_type type;
17002 ++ void (*cleanup)(struct flow_block_cb *block_cb);
17003 ++ struct list_head list;
17004 ++ enum flow_block_command command;
17005 ++ enum flow_block_binder_type binder_type;
17006 ++ struct list_head *cb_list;
17007 ++};
17008 ++
17009 ++static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
17010 ++{
17011 ++ struct flow_block_offload bo;
17012 ++ struct flow_indir_dev_info *cur;
17013 ++
17014 ++ list_for_each_entry(cur, &flow_indir_dev_list, list) {
17015 ++ memset(&bo, 0, sizeof(bo));
17016 ++ bo.command = cur->command;
17017 ++ bo.binder_type = cur->binder_type;
17018 ++ INIT_LIST_HEAD(&bo.cb_list);
17019 ++ cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
17020 ++ list_splice(&bo.cb_list, cur->cb_list);
17021 ++ }
17022 ++}
17023 ++
17024 + int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
17025 + {
17026 + struct flow_indr_dev *indr_dev;
17027 +@@ -367,6 +395,7 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
17028 + }
17029 +
17030 + list_add(&indr_dev->list, &flow_block_indr_dev_list);
17031 ++ existing_qdiscs_register(cb, cb_priv);
17032 + mutex_unlock(&flow_indr_block_lock);
17033 +
17034 + return 0;
17035 +@@ -463,7 +492,59 @@ out:
17036 + }
17037 + EXPORT_SYMBOL(flow_indr_block_cb_alloc);
17038 +
17039 +-int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
17040 ++static struct flow_indir_dev_info *find_indir_dev(void *data)
17041 ++{
17042 ++ struct flow_indir_dev_info *cur;
17043 ++
17044 ++ list_for_each_entry(cur, &flow_indir_dev_list, list) {
17045 ++ if (cur->data == data)
17046 ++ return cur;
17047 ++ }
17048 ++ return NULL;
17049 ++}
17050 ++
17051 ++static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
17052 ++ enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
17053 ++ struct flow_block_offload *bo)
17054 ++{
17055 ++ struct flow_indir_dev_info *info;
17056 ++
17057 ++ info = find_indir_dev(data);
17058 ++ if (info)
17059 ++ return -EEXIST;
17060 ++
17061 ++ info = kzalloc(sizeof(*info), GFP_KERNEL);
17062 ++ if (!info)
17063 ++ return -ENOMEM;
17064 ++
17065 ++ info->data = data;
17066 ++ info->dev = dev;
17067 ++ info->sch = sch;
17068 ++ info->type = type;
17069 ++ info->cleanup = cleanup;
17070 ++ info->command = bo->command;
17071 ++ info->binder_type = bo->binder_type;
17072 ++ info->cb_list = bo->cb_list_head;
17073 ++
17074 ++ list_add(&info->list, &flow_indir_dev_list);
17075 ++ return 0;
17076 ++}
17077 ++
17078 ++static int indir_dev_remove(void *data)
17079 ++{
17080 ++ struct flow_indir_dev_info *info;
17081 ++
17082 ++ info = find_indir_dev(data);
17083 ++ if (!info)
17084 ++ return -ENOENT;
17085 ++
17086 ++ list_del(&info->list);
17087 ++
17088 ++ kfree(info);
17089 ++ return 0;
17090 ++}
17091 ++
17092 ++int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
17093 + enum tc_setup_type type, void *data,
17094 + struct flow_block_offload *bo,
17095 + void (*cleanup)(struct flow_block_cb *block_cb))
17096 +@@ -471,6 +552,12 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
17097 + struct flow_indr_dev *this;
17098 +
17099 + mutex_lock(&flow_indr_block_lock);
17100 ++
17101 ++ if (bo->command == FLOW_BLOCK_BIND)
17102 ++ indir_dev_add(data, dev, sch, type, cleanup, bo);
17103 ++ else if (bo->command == FLOW_BLOCK_UNBIND)
17104 ++ indir_dev_remove(data);
17105 ++
17106 + list_for_each_entry(this, &flow_block_indr_dev_list, list)
17107 + this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
17108 +
17109 +diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
17110 +index baa5d10043cb0..6134b180f59f8 100644
17111 +--- a/net/ethtool/ioctl.c
17112 ++++ b/net/ethtool/ioctl.c
17113 +@@ -7,6 +7,7 @@
17114 + * the information ethtool needs.
17115 + */
17116 +
17117 ++#include <linux/compat.h>
17118 + #include <linux/module.h>
17119 + #include <linux/types.h>
17120 + #include <linux/capability.h>
17121 +@@ -807,6 +808,120 @@ out:
17122 + return ret;
17123 + }
17124 +
17125 ++static noinline_for_stack int
17126 ++ethtool_rxnfc_copy_from_compat(struct ethtool_rxnfc *rxnfc,
17127 ++ const struct compat_ethtool_rxnfc __user *useraddr,
17128 ++ size_t size)
17129 ++{
17130 ++ struct compat_ethtool_rxnfc crxnfc = {};
17131 ++
17132 ++ /* We expect there to be holes between fs.m_ext and
17133 ++ * fs.ring_cookie and at the end of fs, but nowhere else.
17134 ++ * On non-x86, no conversion should be needed.
17135 ++ */
17136 ++ BUILD_BUG_ON(!IS_ENABLED(CONFIG_X86_64) &&
17137 ++ sizeof(struct compat_ethtool_rxnfc) !=
17138 ++ sizeof(struct ethtool_rxnfc));
17139 ++ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) +
17140 ++ sizeof(useraddr->fs.m_ext) !=
17141 ++ offsetof(struct ethtool_rxnfc, fs.m_ext) +
17142 ++ sizeof(rxnfc->fs.m_ext));
17143 ++ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.location) -
17144 ++ offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
17145 ++ offsetof(struct ethtool_rxnfc, fs.location) -
17146 ++ offsetof(struct ethtool_rxnfc, fs.ring_cookie));
17147 ++
17148 ++ if (copy_from_user(&crxnfc, useraddr, min(size, sizeof(crxnfc))))
17149 ++ return -EFAULT;
17150 ++
17151 ++ *rxnfc = (struct ethtool_rxnfc) {
17152 ++ .cmd = crxnfc.cmd,
17153 ++ .flow_type = crxnfc.flow_type,
17154 ++ .data = crxnfc.data,
17155 ++ .fs = {
17156 ++ .flow_type = crxnfc.fs.flow_type,
17157 ++ .h_u = crxnfc.fs.h_u,
17158 ++ .h_ext = crxnfc.fs.h_ext,
17159 ++ .m_u = crxnfc.fs.m_u,
17160 ++ .m_ext = crxnfc.fs.m_ext,
17161 ++ .ring_cookie = crxnfc.fs.ring_cookie,
17162 ++ .location = crxnfc.fs.location,
17163 ++ },
17164 ++ .rule_cnt = crxnfc.rule_cnt,
17165 ++ };
17166 ++
17167 ++ return 0;
17168 ++}
17169 ++
17170 ++static int ethtool_rxnfc_copy_from_user(struct ethtool_rxnfc *rxnfc,
17171 ++ const void __user *useraddr,
17172 ++ size_t size)
17173 ++{
17174 ++ if (compat_need_64bit_alignment_fixup())
17175 ++ return ethtool_rxnfc_copy_from_compat(rxnfc, useraddr, size);
17176 ++
17177 ++ if (copy_from_user(rxnfc, useraddr, size))
17178 ++ return -EFAULT;
17179 ++
17180 ++ return 0;
17181 ++}
17182 ++
17183 ++static int ethtool_rxnfc_copy_to_compat(void __user *useraddr,
17184 ++ const struct ethtool_rxnfc *rxnfc,
17185 ++ size_t size, const u32 *rule_buf)
17186 ++{
17187 ++ struct compat_ethtool_rxnfc crxnfc;
17188 ++
17189 ++ memset(&crxnfc, 0, sizeof(crxnfc));
17190 ++ crxnfc = (struct compat_ethtool_rxnfc) {
17191 ++ .cmd = rxnfc->cmd,
17192 ++ .flow_type = rxnfc->flow_type,
17193 ++ .data = rxnfc->data,
17194 ++ .fs = {
17195 ++ .flow_type = rxnfc->fs.flow_type,
17196 ++ .h_u = rxnfc->fs.h_u,
17197 ++ .h_ext = rxnfc->fs.h_ext,
17198 ++ .m_u = rxnfc->fs.m_u,
17199 ++ .m_ext = rxnfc->fs.m_ext,
17200 ++ .ring_cookie = rxnfc->fs.ring_cookie,
17201 ++ .location = rxnfc->fs.location,
17202 ++ },
17203 ++ .rule_cnt = rxnfc->rule_cnt,
17204 ++ };
17205 ++
17206 ++ if (copy_to_user(useraddr, &crxnfc, min(size, sizeof(crxnfc))))
17207 ++ return -EFAULT;
17208 ++
17209 ++ return 0;
17210 ++}
17211 ++
17212 ++static int ethtool_rxnfc_copy_to_user(void __user *useraddr,
17213 ++ const struct ethtool_rxnfc *rxnfc,
17214 ++ size_t size, const u32 *rule_buf)
17215 ++{
17216 ++ int ret;
17217 ++
17218 ++ if (compat_need_64bit_alignment_fixup()) {
17219 ++ ret = ethtool_rxnfc_copy_to_compat(useraddr, rxnfc, size,
17220 ++ rule_buf);
17221 ++ useraddr += offsetof(struct compat_ethtool_rxnfc, rule_locs);
17222 ++ } else {
17223 ++ ret = copy_to_user(useraddr, rxnfc, size);
17224 ++ useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
17225 ++ }
17226 ++
17227 ++ if (ret)
17228 ++ return -EFAULT;
17229 ++
17230 ++ if (rule_buf) {
17231 ++ if (copy_to_user(useraddr, rule_buf,
17232 ++ rxnfc->rule_cnt * sizeof(u32)))
17233 ++ return -EFAULT;
17234 ++ }
17235 ++
17236 ++ return 0;
17237 ++}
17238 ++
17239 + static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
17240 + u32 cmd, void __user *useraddr)
17241 + {
17242 +@@ -825,7 +940,7 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
17243 + info_size = (offsetof(struct ethtool_rxnfc, data) +
17244 + sizeof(info.data));
17245 +
17246 +- if (copy_from_user(&info, useraddr, info_size))
17247 ++ if (ethtool_rxnfc_copy_from_user(&info, useraddr, info_size))
17248 + return -EFAULT;
17249 +
17250 + rc = dev->ethtool_ops->set_rxnfc(dev, &info);
17251 +@@ -833,7 +948,7 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
17252 + return rc;
17253 +
17254 + if (cmd == ETHTOOL_SRXCLSRLINS &&
17255 +- copy_to_user(useraddr, &info, info_size))
17256 ++ ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL))
17257 + return -EFAULT;
17258 +
17259 + return 0;
17260 +@@ -859,7 +974,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
17261 + info_size = (offsetof(struct ethtool_rxnfc, data) +
17262 + sizeof(info.data));
17263 +
17264 +- if (copy_from_user(&info, useraddr, info_size))
17265 ++ if (ethtool_rxnfc_copy_from_user(&info, useraddr, info_size))
17266 + return -EFAULT;
17267 +
17268 + /* If FLOW_RSS was requested then user-space must be using the
17269 +@@ -867,7 +982,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
17270 + */
17271 + if (cmd == ETHTOOL_GRXFH && info.flow_type & FLOW_RSS) {
17272 + info_size = sizeof(info);
17273 +- if (copy_from_user(&info, useraddr, info_size))
17274 ++ if (ethtool_rxnfc_copy_from_user(&info, useraddr, info_size))
17275 + return -EFAULT;
17276 + /* Since malicious users may modify the original data,
17277 + * we need to check whether FLOW_RSS is still requested.
17278 +@@ -893,18 +1008,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
17279 + if (ret < 0)
17280 + goto err_out;
17281 +
17282 +- ret = -EFAULT;
17283 +- if (copy_to_user(useraddr, &info, info_size))
17284 +- goto err_out;
17285 +-
17286 +- if (rule_buf) {
17287 +- useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
17288 +- if (copy_to_user(useraddr, rule_buf,
17289 +- info.rule_cnt * sizeof(u32)))
17290 +- goto err_out;
17291 +- }
17292 +- ret = 0;
17293 +-
17294 ++ ret = ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, rule_buf);
17295 + err_out:
17296 + kfree(rule_buf);
17297 +
17298 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
17299 +index 8d8a8da3ae7e0..a202dcec0dc27 100644
17300 +--- a/net/ipv4/ip_output.c
17301 ++++ b/net/ipv4/ip_output.c
17302 +@@ -446,8 +446,9 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
17303 + {
17304 + BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
17305 + offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
17306 +- memcpy(&iph->saddr, &fl4->saddr,
17307 +- sizeof(fl4->saddr) + sizeof(fl4->daddr));
17308 ++
17309 ++ iph->saddr = fl4->saddr;
17310 ++ iph->daddr = fl4->daddr;
17311 + }
17312 +
17313 + /* Note: skb->sk can be different from sk, in case of tunnels */
17314 +diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
17315 +index 25fa4c01a17f6..f1e90fc1cd187 100644
17316 +--- a/net/ipv4/tcp_fastopen.c
17317 ++++ b/net/ipv4/tcp_fastopen.c
17318 +@@ -379,8 +379,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
17319 + return NULL;
17320 + }
17321 +
17322 +- if (syn_data &&
17323 +- tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
17324 ++ if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
17325 + goto fastopen;
17326 +
17327 + if (foc->len == 0) {
17328 +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
17329 +index 1e5e9fc455230..cd96cd337aa89 100644
17330 +--- a/net/mac80211/iface.c
17331 ++++ b/net/mac80211/iface.c
17332 +@@ -2001,9 +2001,16 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
17333 +
17334 + netdev_set_default_ethtool_ops(ndev, &ieee80211_ethtool_ops);
17335 +
17336 +- /* MTU range: 256 - 2304 */
17337 ++ /* MTU range is normally 256 - 2304, where the upper limit is
17338 ++ * the maximum MSDU size. Monitor interfaces send and receive
17339 ++ * MPDU and A-MSDU frames which may be much larger so we do
17340 ++ * not impose an upper limit in that case.
17341 ++ */
17342 + ndev->min_mtu = 256;
17343 +- ndev->max_mtu = local->hw.max_mtu;
17344 ++ if (type == NL80211_IFTYPE_MONITOR)
17345 ++ ndev->max_mtu = 0;
17346 ++ else
17347 ++ ndev->max_mtu = local->hw.max_mtu;
17348 +
17349 + ret = cfg80211_register_netdevice(ndev);
17350 + if (ret) {
17351 +diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
17352 +index f92006cec94c4..cbd9f59098b74 100644
17353 +--- a/net/netfilter/nf_flow_table_offload.c
17354 ++++ b/net/netfilter/nf_flow_table_offload.c
17355 +@@ -1097,6 +1097,7 @@ static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
17356 + bo->command = cmd;
17357 + bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
17358 + bo->extack = extack;
17359 ++ bo->cb_list_head = &flowtable->flow_block.cb_list;
17360 + INIT_LIST_HEAD(&bo->cb_list);
17361 + }
17362 +
17363 +diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
17364 +index b58d73a965232..9656c16462222 100644
17365 +--- a/net/netfilter/nf_tables_offload.c
17366 ++++ b/net/netfilter/nf_tables_offload.c
17367 +@@ -353,6 +353,7 @@ static void nft_flow_block_offload_init(struct flow_block_offload *bo,
17368 + bo->command = cmd;
17369 + bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
17370 + bo->extack = extack;
17371 ++ bo->cb_list_head = &basechain->flow_block.cb_list;
17372 + INIT_LIST_HEAD(&bo->cb_list);
17373 + }
17374 +
17375 +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
17376 +index 639c337c885b1..272bcdb1392df 100644
17377 +--- a/net/netfilter/nft_compat.c
17378 ++++ b/net/netfilter/nft_compat.c
17379 +@@ -683,14 +683,12 @@ static int nfnl_compat_get_rcu(struct sk_buff *skb,
17380 + goto out_put;
17381 + }
17382 +
17383 +- ret = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
17384 +- MSG_DONTWAIT);
17385 +- if (ret > 0)
17386 +- ret = 0;
17387 ++ ret = nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
17388 + out_put:
17389 + rcu_read_lock();
17390 + module_put(THIS_MODULE);
17391 +- return ret == -EAGAIN ? -ENOBUFS : ret;
17392 ++
17393 ++ return ret;
17394 + }
17395 +
17396 + static const struct nla_policy nfnl_compat_policy_get[NFTA_COMPAT_MAX+1] = {
17397 +diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
17398 +index 000bb3da4f77f..894e6b8f1a868 100644
17399 +--- a/net/netlabel/netlabel_cipso_v4.c
17400 ++++ b/net/netlabel/netlabel_cipso_v4.c
17401 +@@ -144,8 +144,8 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
17402 + return -ENOMEM;
17403 + doi_def->map.std = kzalloc(sizeof(*doi_def->map.std), GFP_KERNEL);
17404 + if (doi_def->map.std == NULL) {
17405 +- ret_val = -ENOMEM;
17406 +- goto add_std_failure;
17407 ++ kfree(doi_def);
17408 ++ return -ENOMEM;
17409 + }
17410 + doi_def->type = CIPSO_V4_MAP_TRANS;
17411 +
17412 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
17413 +index 380f95aacdec9..24b7cf447bc55 100644
17414 +--- a/net/netlink/af_netlink.c
17415 ++++ b/net/netlink/af_netlink.c
17416 +@@ -2545,13 +2545,15 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
17417 + /* errors reported via destination sk->sk_err, but propagate
17418 + * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
17419 + err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
17420 ++ if (err == -ESRCH)
17421 ++ err = 0;
17422 + }
17423 +
17424 + if (report) {
17425 + int err2;
17426 +
17427 + err2 = nlmsg_unicast(sk, skb, portid);
17428 +- if (!err || err == -ESRCH)
17429 ++ if (!err)
17430 + err = err2;
17431 + }
17432 +
17433 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
17434 +index e3e79e9bd7067..9b276d14be4c4 100644
17435 +--- a/net/sched/cls_api.c
17436 ++++ b/net/sched/cls_api.c
17437 +@@ -634,6 +634,7 @@ static void tcf_block_offload_init(struct flow_block_offload *bo,
17438 + bo->block_shared = shared;
17439 + bo->extack = extack;
17440 + bo->sch = sch;
17441 ++ bo->cb_list_head = &flow_block->cb_list;
17442 + INIT_LIST_HEAD(&bo->cb_list);
17443 + }
17444 +
17445 +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
17446 +index 9c79374457a00..1ab2fc933a214 100644
17447 +--- a/net/sched/sch_taprio.c
17448 ++++ b/net/sched/sch_taprio.c
17449 +@@ -1513,7 +1513,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
17450 + taprio_set_picos_per_byte(dev, q);
17451 +
17452 + if (mqprio) {
17453 +- netdev_set_num_tc(dev, mqprio->num_tc);
17454 ++ err = netdev_set_num_tc(dev, mqprio->num_tc);
17455 ++ if (err)
17456 ++ goto free_sched;
17457 + for (i = 0; i < mqprio->num_tc; i++)
17458 + netdev_set_tc_queue(dev, i,
17459 + mqprio->count[i],
17460 +diff --git a/net/socket.c b/net/socket.c
17461 +index 8808b3617dac9..c5b6f5c5cad98 100644
17462 +--- a/net/socket.c
17463 ++++ b/net/socket.c
17464 +@@ -3154,128 +3154,6 @@ static int compat_dev_ifconf(struct net *net, struct compat_ifconf __user *uifc3
17465 + return 0;
17466 + }
17467 +
17468 +-static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
17469 +-{
17470 +- struct compat_ethtool_rxnfc __user *compat_rxnfc;
17471 +- bool convert_in = false, convert_out = false;
17472 +- size_t buf_size = 0;
17473 +- struct ethtool_rxnfc __user *rxnfc = NULL;
17474 +- struct ifreq ifr;
17475 +- u32 rule_cnt = 0, actual_rule_cnt;
17476 +- u32 ethcmd;
17477 +- u32 data;
17478 +- int ret;
17479 +-
17480 +- if (get_user(data, &ifr32->ifr_ifru.ifru_data))
17481 +- return -EFAULT;
17482 +-
17483 +- compat_rxnfc = compat_ptr(data);
17484 +-
17485 +- if (get_user(ethcmd, &compat_rxnfc->cmd))
17486 +- return -EFAULT;
17487 +-
17488 +- /* Most ethtool structures are defined without padding.
17489 +- * Unfortunately struct ethtool_rxnfc is an exception.
17490 +- */
17491 +- switch (ethcmd) {
17492 +- default:
17493 +- break;
17494 +- case ETHTOOL_GRXCLSRLALL:
17495 +- /* Buffer size is variable */
17496 +- if (get_user(rule_cnt, &compat_rxnfc->rule_cnt))
17497 +- return -EFAULT;
17498 +- if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32))
17499 +- return -ENOMEM;
17500 +- buf_size += rule_cnt * sizeof(u32);
17501 +- fallthrough;
17502 +- case ETHTOOL_GRXRINGS:
17503 +- case ETHTOOL_GRXCLSRLCNT:
17504 +- case ETHTOOL_GRXCLSRULE:
17505 +- case ETHTOOL_SRXCLSRLINS:
17506 +- convert_out = true;
17507 +- fallthrough;
17508 +- case ETHTOOL_SRXCLSRLDEL:
17509 +- buf_size += sizeof(struct ethtool_rxnfc);
17510 +- convert_in = true;
17511 +- rxnfc = compat_alloc_user_space(buf_size);
17512 +- break;
17513 +- }
17514 +-
17515 +- if (copy_from_user(&ifr.ifr_name, &ifr32->ifr_name, IFNAMSIZ))
17516 +- return -EFAULT;
17517 +-
17518 +- ifr.ifr_data = convert_in ? rxnfc : (void __user *)compat_rxnfc;
17519 +-
17520 +- if (convert_in) {
17521 +- /* We expect there to be holes between fs.m_ext and
17522 +- * fs.ring_cookie and at the end of fs, but nowhere else.
17523 +- */
17524 +- BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) +
17525 +- sizeof(compat_rxnfc->fs.m_ext) !=
17526 +- offsetof(struct ethtool_rxnfc, fs.m_ext) +
17527 +- sizeof(rxnfc->fs.m_ext));
17528 +- BUILD_BUG_ON(
17529 +- offsetof(struct compat_ethtool_rxnfc, fs.location) -
17530 +- offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
17531 +- offsetof(struct ethtool_rxnfc, fs.location) -
17532 +- offsetof(struct ethtool_rxnfc, fs.ring_cookie));
17533 +-
17534 +- if (copy_in_user(rxnfc, compat_rxnfc,
17535 +- (void __user *)(&rxnfc->fs.m_ext + 1) -
17536 +- (void __user *)rxnfc) ||
17537 +- copy_in_user(&rxnfc->fs.ring_cookie,
17538 +- &compat_rxnfc->fs.ring_cookie,
17539 +- (void __user *)(&rxnfc->fs.location + 1) -
17540 +- (void __user *)&rxnfc->fs.ring_cookie))
17541 +- return -EFAULT;
17542 +- if (ethcmd == ETHTOOL_GRXCLSRLALL) {
17543 +- if (put_user(rule_cnt, &rxnfc->rule_cnt))
17544 +- return -EFAULT;
17545 +- } else if (copy_in_user(&rxnfc->rule_cnt,
17546 +- &compat_rxnfc->rule_cnt,
17547 +- sizeof(rxnfc->rule_cnt)))
17548 +- return -EFAULT;
17549 +- }
17550 +-
17551 +- ret = dev_ioctl(net, SIOCETHTOOL, &ifr, NULL);
17552 +- if (ret)
17553 +- return ret;
17554 +-
17555 +- if (convert_out) {
17556 +- if (copy_in_user(compat_rxnfc, rxnfc,
17557 +- (const void __user *)(&rxnfc->fs.m_ext + 1) -
17558 +- (const void __user *)rxnfc) ||
17559 +- copy_in_user(&compat_rxnfc->fs.ring_cookie,
17560 +- &rxnfc->fs.ring_cookie,
17561 +- (const void __user *)(&rxnfc->fs.location + 1) -
17562 +- (const void __user *)&rxnfc->fs.ring_cookie) ||
17563 +- copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
17564 +- sizeof(rxnfc->rule_cnt)))
17565 +- return -EFAULT;
17566 +-
17567 +- if (ethcmd == ETHTOOL_GRXCLSRLALL) {
17568 +- /* As an optimisation, we only copy the actual
17569 +- * number of rules that the underlying
17570 +- * function returned. Since Mallory might
17571 +- * change the rule count in user memory, we
17572 +- * check that it is less than the rule count
17573 +- * originally given (as the user buffer size),
17574 +- * which has been range-checked.
17575 +- */
17576 +- if (get_user(actual_rule_cnt, &rxnfc->rule_cnt))
17577 +- return -EFAULT;
17578 +- if (actual_rule_cnt < rule_cnt)
17579 +- rule_cnt = actual_rule_cnt;
17580 +- if (copy_in_user(&compat_rxnfc->rule_locs[0],
17581 +- &rxnfc->rule_locs[0],
17582 +- rule_cnt * sizeof(u32)))
17583 +- return -EFAULT;
17584 +- }
17585 +- }
17586 +-
17587 +- return 0;
17588 +-}
17589 +-
17590 + static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32)
17591 + {
17592 + compat_uptr_t uptr32;
17593 +@@ -3432,8 +3310,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
17594 + return old_bridge_ioctl(argp);
17595 + case SIOCGIFCONF:
17596 + return compat_dev_ifconf(net, argp);
17597 +- case SIOCETHTOOL:
17598 +- return ethtool_ioctl(net, argp);
17599 + case SIOCWANDEV:
17600 + return compat_siocwandev(net, argp);
17601 + case SIOCGIFMAP:
17602 +@@ -3446,6 +3322,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
17603 + return sock->ops->gettstamp(sock, argp, cmd == SIOCGSTAMP_OLD,
17604 + !COMPAT_USE_64BIT_TIME);
17605 +
17606 ++ case SIOCETHTOOL:
17607 + case SIOCBONDSLAVEINFOQUERY:
17608 + case SIOCBONDINFOQUERY:
17609 + case SIOCSHWTSTAMP:
17610 +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
17611 +index a81be45f40d9f..3d685fe328fad 100644
17612 +--- a/net/sunrpc/auth_gss/svcauth_gss.c
17613 ++++ b/net/sunrpc/auth_gss/svcauth_gss.c
17614 +@@ -1980,7 +1980,7 @@ gss_svc_init_net(struct net *net)
17615 + goto out2;
17616 + return 0;
17617 + out2:
17618 +- destroy_use_gss_proxy_proc_entry(net);
17619 ++ rsi_cache_destroy_net(net);
17620 + out1:
17621 + rsc_cache_destroy_net(net);
17622 + return rv;
17623 +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
17624 +index fb6db09725c76..d55e980521da8 100644
17625 +--- a/net/sunrpc/xprt.c
17626 ++++ b/net/sunrpc/xprt.c
17627 +@@ -775,9 +775,9 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
17628 + /* Try to schedule an autoclose RPC call */
17629 + if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
17630 + queue_work(xprtiod_workqueue, &xprt->task_cleanup);
17631 +- else if (xprt->snd_task)
17632 ++ else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
17633 + rpc_wake_up_queued_task_set_status(&xprt->pending,
17634 +- xprt->snd_task, -ENOTCONN);
17635 ++ xprt->snd_task, -ENOTCONN);
17636 + spin_unlock(&xprt->transport_lock);
17637 + }
17638 + EXPORT_SYMBOL_GPL(xprt_force_disconnect);
17639 +@@ -866,12 +866,14 @@ bool xprt_lock_connect(struct rpc_xprt *xprt,
17640 + goto out;
17641 + if (xprt->snd_task != task)
17642 + goto out;
17643 ++ set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
17644 + xprt->snd_task = cookie;
17645 + ret = true;
17646 + out:
17647 + spin_unlock(&xprt->transport_lock);
17648 + return ret;
17649 + }
17650 ++EXPORT_SYMBOL_GPL(xprt_lock_connect);
17651 +
17652 + void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
17653 + {
17654 +@@ -881,12 +883,14 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
17655 + if (!test_bit(XPRT_LOCKED, &xprt->state))
17656 + goto out;
17657 + xprt->snd_task =NULL;
17658 ++ clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
17659 + xprt->ops->release_xprt(xprt, NULL);
17660 + xprt_schedule_autodisconnect(xprt);
17661 + out:
17662 + spin_unlock(&xprt->transport_lock);
17663 + wake_up_bit(&xprt->state, XPRT_LOCKED);
17664 + }
17665 ++EXPORT_SYMBOL_GPL(xprt_unlock_connect);
17666 +
17667 + /**
17668 + * xprt_connect - schedule a transport connect operation
17669 +diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
17670 +index 9c2ffc67c0fde..975aef16ad345 100644
17671 +--- a/net/sunrpc/xprtrdma/transport.c
17672 ++++ b/net/sunrpc/xprtrdma/transport.c
17673 +@@ -250,12 +250,9 @@ xprt_rdma_connect_worker(struct work_struct *work)
17674 + xprt->stat.connect_start;
17675 + xprt_set_connected(xprt);
17676 + rc = -EAGAIN;
17677 +- } else {
17678 +- /* Force a call to xprt_rdma_close to clean up */
17679 +- spin_lock(&xprt->transport_lock);
17680 +- set_bit(XPRT_CLOSE_WAIT, &xprt->state);
17681 +- spin_unlock(&xprt->transport_lock);
17682 +- }
17683 ++ } else
17684 ++ rpcrdma_xprt_disconnect(r_xprt);
17685 ++ xprt_unlock_connect(xprt, r_xprt);
17686 + xprt_wake_pending_tasks(xprt, rc);
17687 + }
17688 +
17689 +@@ -489,6 +486,8 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
17690 + struct rpcrdma_ep *ep = r_xprt->rx_ep;
17691 + unsigned long delay;
17692 +
17693 ++ WARN_ON_ONCE(!xprt_lock_connect(xprt, task, r_xprt));
17694 ++
17695 + delay = 0;
17696 + if (ep && ep->re_connect_status != 0) {
17697 + delay = xprt_reconnect_delay(xprt);
17698 +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
17699 +index 649c23518ec04..5a11e318a0d99 100644
17700 +--- a/net/sunrpc/xprtrdma/verbs.c
17701 ++++ b/net/sunrpc/xprtrdma/verbs.c
17702 +@@ -1416,11 +1416,6 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
17703 +
17704 + rc = ib_post_recv(ep->re_id->qp, wr,
17705 + (const struct ib_recv_wr **)&bad_wr);
17706 +- if (atomic_dec_return(&ep->re_receiving) > 0)
17707 +- complete(&ep->re_done);
17708 +-
17709 +-out:
17710 +- trace_xprtrdma_post_recvs(r_xprt, count, rc);
17711 + if (rc) {
17712 + for (wr = bad_wr; wr;) {
17713 + struct rpcrdma_rep *rep;
17714 +@@ -1431,6 +1426,11 @@ out:
17715 + --count;
17716 + }
17717 + }
17718 ++ if (atomic_dec_return(&ep->re_receiving) > 0)
17719 ++ complete(&ep->re_done);
17720 ++
17721 ++out:
17722 ++ trace_xprtrdma_post_recvs(r_xprt, count, rc);
17723 + ep->re_receive_count += count;
17724 + return;
17725 + }
17726 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
17727 +index e573dcecdd66f..02b071dbdd225 100644
17728 +--- a/net/sunrpc/xprtsock.c
17729 ++++ b/net/sunrpc/xprtsock.c
17730 +@@ -1656,7 +1656,7 @@ static int xs_get_srcport(struct sock_xprt *transport)
17731 + unsigned short get_srcport(struct rpc_xprt *xprt)
17732 + {
17733 + struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
17734 +- return sock->srcport;
17735 ++ return xs_sock_getport(sock->sock);
17736 + }
17737 + EXPORT_SYMBOL(get_srcport);
17738 +
17739 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
17740 +index 8754bd885169d..a155cfaf01f2e 100644
17741 +--- a/net/tipc/socket.c
17742 ++++ b/net/tipc/socket.c
17743 +@@ -1886,6 +1886,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
17744 + bool connected = !tipc_sk_type_connectionless(sk);
17745 + struct tipc_sock *tsk = tipc_sk(sk);
17746 + int rc, err, hlen, dlen, copy;
17747 ++ struct tipc_skb_cb *skb_cb;
17748 + struct sk_buff_head xmitq;
17749 + struct tipc_msg *hdr;
17750 + struct sk_buff *skb;
17751 +@@ -1909,6 +1910,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
17752 + if (unlikely(rc))
17753 + goto exit;
17754 + skb = skb_peek(&sk->sk_receive_queue);
17755 ++ skb_cb = TIPC_SKB_CB(skb);
17756 + hdr = buf_msg(skb);
17757 + dlen = msg_data_sz(hdr);
17758 + hlen = msg_hdr_sz(hdr);
17759 +@@ -1928,18 +1930,33 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
17760 +
17761 + /* Capture data if non-error msg, otherwise just set return value */
17762 + if (likely(!err)) {
17763 +- copy = min_t(int, dlen, buflen);
17764 +- if (unlikely(copy != dlen))
17765 +- m->msg_flags |= MSG_TRUNC;
17766 +- rc = skb_copy_datagram_msg(skb, hlen, m, copy);
17767 ++ int offset = skb_cb->bytes_read;
17768 ++
17769 ++ copy = min_t(int, dlen - offset, buflen);
17770 ++ rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
17771 ++ if (unlikely(rc))
17772 ++ goto exit;
17773 ++ if (unlikely(offset + copy < dlen)) {
17774 ++ if (flags & MSG_EOR) {
17775 ++ if (!(flags & MSG_PEEK))
17776 ++ skb_cb->bytes_read = offset + copy;
17777 ++ } else {
17778 ++ m->msg_flags |= MSG_TRUNC;
17779 ++ skb_cb->bytes_read = 0;
17780 ++ }
17781 ++ } else {
17782 ++ if (flags & MSG_EOR)
17783 ++ m->msg_flags |= MSG_EOR;
17784 ++ skb_cb->bytes_read = 0;
17785 ++ }
17786 + } else {
17787 + copy = 0;
17788 + rc = 0;
17789 +- if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
17790 ++ if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
17791 + rc = -ECONNRESET;
17792 ++ goto exit;
17793 ++ }
17794 + }
17795 +- if (unlikely(rc))
17796 +- goto exit;
17797 +
17798 + /* Mark message as group event if applicable */
17799 + if (unlikely(grp_evt)) {
17800 +@@ -1962,9 +1979,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
17801 + tipc_node_distr_xmit(sock_net(sk), &xmitq);
17802 + }
17803 +
17804 +- tsk_advance_rx_queue(sk);
17805 ++ if (!skb_cb->bytes_read)
17806 ++ tsk_advance_rx_queue(sk);
17807 +
17808 +- if (likely(!connected))
17809 ++ if (likely(!connected) || skb_cb->bytes_read)
17810 + goto exit;
17811 +
17812 + /* Send connection flow control advertisement when applicable */
17813 +diff --git a/samples/bpf/test_override_return.sh b/samples/bpf/test_override_return.sh
17814 +index e68b9ee6814b8..35db26f736b9d 100755
17815 +--- a/samples/bpf/test_override_return.sh
17816 ++++ b/samples/bpf/test_override_return.sh
17817 +@@ -1,5 +1,6 @@
17818 + #!/bin/bash
17819 +
17820 ++rm -r tmpmnt
17821 + rm -f testfile.img
17822 + dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1
17823 + DEVICE=$(losetup --show -f testfile.img)
17824 +diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c
17825 +index fdcd6580dd736..8be7ce18d3ba0 100644
17826 +--- a/samples/bpf/tracex7_user.c
17827 ++++ b/samples/bpf/tracex7_user.c
17828 +@@ -14,6 +14,11 @@ int main(int argc, char **argv)
17829 + int ret = 0;
17830 + FILE *f;
17831 +
17832 ++ if (!argv[1]) {
17833 ++ fprintf(stderr, "ERROR: Run with the btrfs device argument!\n");
17834 ++ return 0;
17835 ++ }
17836 ++
17837 + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
17838 + obj = bpf_object__open_file(filename, NULL);
17839 + if (libbpf_get_error(obj)) {
17840 +diff --git a/samples/pktgen/pktgen_sample03_burst_single_flow.sh b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
17841 +index ab87de4402772..8bf2fdffba16e 100755
17842 +--- a/samples/pktgen/pktgen_sample03_burst_single_flow.sh
17843 ++++ b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
17844 +@@ -85,7 +85,7 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
17845 + done
17846 +
17847 + # Run if user hits control-c
17848 +-function control_c() {
17849 ++function print_result() {
17850 + # Print results
17851 + for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
17852 + dev=${DEV}@${thread}
17853 +@@ -94,11 +94,13 @@ function control_c() {
17854 + done
17855 + }
17856 + # trap keyboard interrupt (Ctrl-C)
17857 +-trap control_c SIGINT
17858 ++trap true SIGINT
17859 +
17860 + if [ -z "$APPEND" ]; then
17861 + echo "Running... ctrl^C to stop" >&2
17862 + pg_ctrl "start"
17863 ++
17864 ++ print_result
17865 + else
17866 + echo "Append mode: config done. Do more or use 'pg_ctrl start' to run"
17867 + fi
17868 +diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c
17869 +index 6c0f229db36a1..b4aaeab377545 100644
17870 +--- a/samples/vfio-mdev/mbochs.c
17871 ++++ b/samples/vfio-mdev/mbochs.c
17872 +@@ -129,7 +129,7 @@ static dev_t mbochs_devt;
17873 + static struct class *mbochs_class;
17874 + static struct cdev mbochs_cdev;
17875 + static struct device mbochs_dev;
17876 +-static int mbochs_used_mbytes;
17877 ++static atomic_t mbochs_avail_mbytes;
17878 + static const struct vfio_device_ops mbochs_dev_ops;
17879 +
17880 + struct vfio_region_info_ext {
17881 +@@ -507,18 +507,22 @@ static int mbochs_reset(struct mdev_state *mdev_state)
17882 +
17883 + static int mbochs_probe(struct mdev_device *mdev)
17884 + {
17885 ++ int avail_mbytes = atomic_read(&mbochs_avail_mbytes);
17886 + const struct mbochs_type *type =
17887 + &mbochs_types[mdev_get_type_group_id(mdev)];
17888 + struct device *dev = mdev_dev(mdev);
17889 + struct mdev_state *mdev_state;
17890 + int ret = -ENOMEM;
17891 +
17892 +- if (type->mbytes + mbochs_used_mbytes > max_mbytes)
17893 +- return -ENOMEM;
17894 ++ do {
17895 ++ if (avail_mbytes < type->mbytes)
17896 ++ return -ENOSPC;
17897 ++ } while (!atomic_try_cmpxchg(&mbochs_avail_mbytes, &avail_mbytes,
17898 ++ avail_mbytes - type->mbytes));
17899 +
17900 + mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
17901 + if (mdev_state == NULL)
17902 +- return -ENOMEM;
17903 ++ goto err_avail;
17904 + vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mbochs_dev_ops);
17905 +
17906 + mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL);
17907 +@@ -549,17 +553,17 @@ static int mbochs_probe(struct mdev_device *mdev)
17908 + mbochs_create_config_space(mdev_state);
17909 + mbochs_reset(mdev_state);
17910 +
17911 +- mbochs_used_mbytes += type->mbytes;
17912 +-
17913 + ret = vfio_register_group_dev(&mdev_state->vdev);
17914 + if (ret)
17915 + goto err_mem;
17916 + dev_set_drvdata(&mdev->dev, mdev_state);
17917 + return 0;
17918 +-
17919 + err_mem:
17920 ++ kfree(mdev_state->pages);
17921 + kfree(mdev_state->vconfig);
17922 + kfree(mdev_state);
17923 ++err_avail:
17924 ++ atomic_add(type->mbytes, &mbochs_avail_mbytes);
17925 + return ret;
17926 + }
17927 +
17928 +@@ -567,8 +571,8 @@ static void mbochs_remove(struct mdev_device *mdev)
17929 + {
17930 + struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
17931 +
17932 +- mbochs_used_mbytes -= mdev_state->type->mbytes;
17933 + vfio_unregister_group_dev(&mdev_state->vdev);
17934 ++ atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes);
17935 + kfree(mdev_state->pages);
17936 + kfree(mdev_state->vconfig);
17937 + kfree(mdev_state);
17938 +@@ -1355,7 +1359,7 @@ static ssize_t available_instances_show(struct mdev_type *mtype,
17939 + {
17940 + const struct mbochs_type *type =
17941 + &mbochs_types[mtype_get_type_group_id(mtype)];
17942 +- int count = (max_mbytes - mbochs_used_mbytes) / type->mbytes;
17943 ++ int count = atomic_read(&mbochs_avail_mbytes) / type->mbytes;
17944 +
17945 + return sprintf(buf, "%d\n", count);
17946 + }
17947 +@@ -1437,6 +1441,8 @@ static int __init mbochs_dev_init(void)
17948 + {
17949 + int ret = 0;
17950 +
17951 ++ atomic_set(&mbochs_avail_mbytes, max_mbytes);
17952 ++
17953 + ret = alloc_chrdev_region(&mbochs_devt, 0, MINORMASK + 1, MBOCHS_NAME);
17954 + if (ret < 0) {
17955 + pr_err("Error: failed to register mbochs_dev, err: %d\n", ret);
17956 +diff --git a/scripts/gen_ksymdeps.sh b/scripts/gen_ksymdeps.sh
17957 +index 1324986e1362c..725e8c9c1b53f 100755
17958 +--- a/scripts/gen_ksymdeps.sh
17959 ++++ b/scripts/gen_ksymdeps.sh
17960 +@@ -4,7 +4,13 @@
17961 + set -e
17962 +
17963 + # List of exported symbols
17964 +-ksyms=$($NM $1 | sed -n 's/.*__ksym_marker_\(.*\)/\1/p' | tr A-Z a-z)
17965 ++#
17966 ++# If the object has no symbol, $NM warns 'no symbols'.
17967 ++# Suppress the stderr.
17968 ++# TODO:
17969 ++# Use -q instead of 2>/dev/null when we upgrade the minimum version of
17970 ++# binutils to 2.37, llvm to 13.0.0.
17971 ++ksyms=$($NM $1 2>/dev/null | sed -n 's/.*__ksym_marker_\(.*\)/\1/p' | tr A-Z a-z)
17972 +
17973 + if [ -z "$ksyms" ]; then
17974 + exit 0
17975 +diff --git a/scripts/subarch.include b/scripts/subarch.include
17976 +index 650682821126c..776849a3c500f 100644
17977 +--- a/scripts/subarch.include
17978 ++++ b/scripts/subarch.include
17979 +@@ -7,7 +7,7 @@
17980 + SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
17981 + -e s/sun4u/sparc64/ \
17982 + -e s/arm.*/arm/ -e s/sa110/arm/ \
17983 +- -e s/s390x/s390/ -e s/parisc64/parisc/ \
17984 ++ -e s/s390x/s390/ \
17985 + -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
17986 + -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
17987 + -e s/riscv.*/riscv/)
17988 +diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
17989 +index 1f391f6a3d470..d2186e2757be8 100644
17990 +--- a/security/smack/smack_access.c
17991 ++++ b/security/smack/smack_access.c
17992 +@@ -81,23 +81,22 @@ int log_policy = SMACK_AUDIT_DENIED;
17993 + int smk_access_entry(char *subject_label, char *object_label,
17994 + struct list_head *rule_list)
17995 + {
17996 +- int may = -ENOENT;
17997 + struct smack_rule *srp;
17998 +
17999 + list_for_each_entry_rcu(srp, rule_list, list) {
18000 + if (srp->smk_object->smk_known == object_label &&
18001 + srp->smk_subject->smk_known == subject_label) {
18002 +- may = srp->smk_access;
18003 +- break;
18004 ++ int may = srp->smk_access;
18005 ++ /*
18006 ++ * MAY_WRITE implies MAY_LOCK.
18007 ++ */
18008 ++ if ((may & MAY_WRITE) == MAY_WRITE)
18009 ++ may |= MAY_LOCK;
18010 ++ return may;
18011 + }
18012 + }
18013 +
18014 +- /*
18015 +- * MAY_WRITE implies MAY_LOCK.
18016 +- */
18017 +- if ((may & MAY_WRITE) == MAY_WRITE)
18018 +- may |= MAY_LOCK;
18019 +- return may;
18020 ++ return -ENOENT;
18021 + }
18022 +
18023 + /**
18024 +diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
18025 +index ec04e3386bc0e..8617793ed9557 100644
18026 +--- a/sound/soc/atmel/Kconfig
18027 ++++ b/sound/soc/atmel/Kconfig
18028 +@@ -11,7 +11,6 @@ if SND_ATMEL_SOC
18029 +
18030 + config SND_ATMEL_SOC_PDC
18031 + bool
18032 +- depends on HAS_DMA
18033 +
18034 + config SND_ATMEL_SOC_DMA
18035 + bool
18036 +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
18037 +index 91a6d712eb585..c403fb6725944 100644
18038 +--- a/sound/soc/intel/boards/bytcr_rt5640.c
18039 ++++ b/sound/soc/intel/boards/bytcr_rt5640.c
18040 +@@ -290,9 +290,6 @@ static const struct snd_soc_dapm_widget byt_rt5640_widgets[] = {
18041 + static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
18042 + {"Headphone", NULL, "Platform Clock"},
18043 + {"Headset Mic", NULL, "Platform Clock"},
18044 +- {"Internal Mic", NULL, "Platform Clock"},
18045 +- {"Speaker", NULL, "Platform Clock"},
18046 +-
18047 + {"Headset Mic", NULL, "MICBIAS1"},
18048 + {"IN2P", NULL, "Headset Mic"},
18049 + {"Headphone", NULL, "HPOL"},
18050 +@@ -300,19 +297,23 @@ static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
18051 + };
18052 +
18053 + static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic1_map[] = {
18054 ++ {"Internal Mic", NULL, "Platform Clock"},
18055 + {"DMIC1", NULL, "Internal Mic"},
18056 + };
18057 +
18058 + static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic2_map[] = {
18059 ++ {"Internal Mic", NULL, "Platform Clock"},
18060 + {"DMIC2", NULL, "Internal Mic"},
18061 + };
18062 +
18063 + static const struct snd_soc_dapm_route byt_rt5640_intmic_in1_map[] = {
18064 ++ {"Internal Mic", NULL, "Platform Clock"},
18065 + {"Internal Mic", NULL, "MICBIAS1"},
18066 + {"IN1P", NULL, "Internal Mic"},
18067 + };
18068 +
18069 + static const struct snd_soc_dapm_route byt_rt5640_intmic_in3_map[] = {
18070 ++ {"Internal Mic", NULL, "Platform Clock"},
18071 + {"Internal Mic", NULL, "MICBIAS1"},
18072 + {"IN3P", NULL, "Internal Mic"},
18073 + };
18074 +@@ -354,6 +355,7 @@ static const struct snd_soc_dapm_route byt_rt5640_ssp0_aif2_map[] = {
18075 + };
18076 +
18077 + static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = {
18078 ++ {"Speaker", NULL, "Platform Clock"},
18079 + {"Speaker", NULL, "SPOLP"},
18080 + {"Speaker", NULL, "SPOLN"},
18081 + {"Speaker", NULL, "SPORP"},
18082 +@@ -361,6 +363,7 @@ static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = {
18083 + };
18084 +
18085 + static const struct snd_soc_dapm_route byt_rt5640_mono_spk_map[] = {
18086 ++ {"Speaker", NULL, "Platform Clock"},
18087 + {"Speaker", NULL, "SPOLP"},
18088 + {"Speaker", NULL, "SPOLN"},
18089 + };
18090 +diff --git a/sound/soc/intel/boards/sof_pcm512x.c b/sound/soc/intel/boards/sof_pcm512x.c
18091 +index 2ec9c62366e2e..6815204e58d58 100644
18092 +--- a/sound/soc/intel/boards/sof_pcm512x.c
18093 ++++ b/sound/soc/intel/boards/sof_pcm512x.c
18094 +@@ -26,11 +26,16 @@
18095 +
18096 + #define SOF_PCM512X_SSP_CODEC(quirk) ((quirk) & GENMASK(3, 0))
18097 + #define SOF_PCM512X_SSP_CODEC_MASK (GENMASK(3, 0))
18098 ++#define SOF_PCM512X_ENABLE_SSP_CAPTURE BIT(4)
18099 ++#define SOF_PCM512X_ENABLE_DMIC BIT(5)
18100 +
18101 + #define IDISP_CODEC_MASK 0x4
18102 +
18103 + /* Default: SSP5 */
18104 +-static unsigned long sof_pcm512x_quirk = SOF_PCM512X_SSP_CODEC(5);
18105 ++static unsigned long sof_pcm512x_quirk =
18106 ++ SOF_PCM512X_SSP_CODEC(5) |
18107 ++ SOF_PCM512X_ENABLE_SSP_CAPTURE |
18108 ++ SOF_PCM512X_ENABLE_DMIC;
18109 +
18110 + static bool is_legacy_cpu;
18111 +
18112 +@@ -244,8 +249,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
18113 + links[id].dpcm_playback = 1;
18114 + /*
18115 + * capture only supported with specific versions of the Hifiberry DAC+
18116 +- * links[id].dpcm_capture = 1;
18117 + */
18118 ++ if (sof_pcm512x_quirk & SOF_PCM512X_ENABLE_SSP_CAPTURE)
18119 ++ links[id].dpcm_capture = 1;
18120 + links[id].no_pcm = 1;
18121 + links[id].cpus = &cpus[id];
18122 + links[id].num_cpus = 1;
18123 +@@ -380,6 +386,9 @@ static int sof_audio_probe(struct platform_device *pdev)
18124 +
18125 + ssp_codec = sof_pcm512x_quirk & SOF_PCM512X_SSP_CODEC_MASK;
18126 +
18127 ++ if (!(sof_pcm512x_quirk & SOF_PCM512X_ENABLE_DMIC))
18128 ++ dmic_be_num = 0;
18129 ++
18130 + /* compute number of dai links */
18131 + sof_audio_card_pcm512x.num_links = 1 + dmic_be_num + hdmi_num;
18132 +
18133 +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
18134 +index 476ef1897961d..79c6cf2c14bfb 100644
18135 +--- a/sound/soc/intel/skylake/skl-messages.c
18136 ++++ b/sound/soc/intel/skylake/skl-messages.c
18137 +@@ -802,9 +802,12 @@ static u16 skl_get_module_param_size(struct skl_dev *skl,
18138 +
18139 + case SKL_MODULE_TYPE_BASE_OUTFMT:
18140 + case SKL_MODULE_TYPE_MIC_SELECT:
18141 +- case SKL_MODULE_TYPE_KPB:
18142 + return sizeof(struct skl_base_outfmt_cfg);
18143 +
18144 ++ case SKL_MODULE_TYPE_MIXER:
18145 ++ case SKL_MODULE_TYPE_KPB:
18146 ++ return sizeof(struct skl_base_cfg);
18147 ++
18148 + default:
18149 + /*
18150 + * return only base cfg when no specific module type is
18151 +@@ -857,10 +860,14 @@ static int skl_set_module_format(struct skl_dev *skl,
18152 +
18153 + case SKL_MODULE_TYPE_BASE_OUTFMT:
18154 + case SKL_MODULE_TYPE_MIC_SELECT:
18155 +- case SKL_MODULE_TYPE_KPB:
18156 + skl_set_base_outfmt_format(skl, module_config, *param_data);
18157 + break;
18158 +
18159 ++ case SKL_MODULE_TYPE_MIXER:
18160 ++ case SKL_MODULE_TYPE_KPB:
18161 ++ skl_set_base_module_format(skl, module_config, *param_data);
18162 ++ break;
18163 ++
18164 + default:
18165 + skl_set_base_module_format(skl, module_config, *param_data);
18166 + break;
18167 +diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
18168 +index b1ca64d2f7ea6..031d5dc7e6601 100644
18169 +--- a/sound/soc/intel/skylake/skl-pcm.c
18170 ++++ b/sound/soc/intel/skylake/skl-pcm.c
18171 +@@ -1317,21 +1317,6 @@ static int skl_get_module_info(struct skl_dev *skl,
18172 + return -EIO;
18173 + }
18174 +
18175 +- list_for_each_entry(module, &skl->uuid_list, list) {
18176 +- if (guid_equal(uuid_mod, &module->uuid)) {
18177 +- mconfig->id.module_id = module->id;
18178 +- if (mconfig->module)
18179 +- mconfig->module->loadable = module->is_loadable;
18180 +- ret = 0;
18181 +- break;
18182 +- }
18183 +- }
18184 +-
18185 +- if (ret)
18186 +- return ret;
18187 +-
18188 +- uuid_mod = &module->uuid;
18189 +- ret = -EIO;
18190 + for (i = 0; i < skl->nr_modules; i++) {
18191 + skl_module = skl->modules[i];
18192 + uuid_tplg = &skl_module->uuid;
18193 +@@ -1341,10 +1326,18 @@ static int skl_get_module_info(struct skl_dev *skl,
18194 + break;
18195 + }
18196 + }
18197 ++
18198 + if (skl->nr_modules && ret)
18199 + return ret;
18200 +
18201 ++ ret = -EIO;
18202 + list_for_each_entry(module, &skl->uuid_list, list) {
18203 ++ if (guid_equal(uuid_mod, &module->uuid)) {
18204 ++ mconfig->id.module_id = module->id;
18205 ++ mconfig->module->loadable = module->is_loadable;
18206 ++ ret = 0;
18207 ++ }
18208 ++
18209 + for (i = 0; i < MAX_IN_QUEUE; i++) {
18210 + pin_id = &mconfig->m_in_pin[i].id;
18211 + if (guid_equal(&pin_id->mod_uuid, &module->uuid))
18212 +@@ -1358,7 +1351,7 @@ static int skl_get_module_info(struct skl_dev *skl,
18213 + }
18214 + }
18215 +
18216 +- return 0;
18217 ++ return ret;
18218 + }
18219 +
18220 + static int skl_populate_modules(struct skl_dev *skl)
18221 +diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
18222 +index c7dc3509bceb6..b65dfbc3545b9 100644
18223 +--- a/sound/soc/rockchip/rockchip_i2s.c
18224 ++++ b/sound/soc/rockchip/rockchip_i2s.c
18225 +@@ -186,7 +186,9 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
18226 + {
18227 + struct rk_i2s_dev *i2s = to_info(cpu_dai);
18228 + unsigned int mask = 0, val = 0;
18229 ++ int ret = 0;
18230 +
18231 ++ pm_runtime_get_sync(cpu_dai->dev);
18232 + mask = I2S_CKR_MSS_MASK;
18233 + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
18234 + case SND_SOC_DAIFMT_CBS_CFS:
18235 +@@ -199,7 +201,8 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
18236 + i2s->is_master_mode = false;
18237 + break;
18238 + default:
18239 +- return -EINVAL;
18240 ++ ret = -EINVAL;
18241 ++ goto err_pm_put;
18242 + }
18243 +
18244 + regmap_update_bits(i2s->regmap, I2S_CKR, mask, val);
18245 +@@ -213,7 +216,8 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
18246 + val = I2S_CKR_CKP_POS;
18247 + break;
18248 + default:
18249 +- return -EINVAL;
18250 ++ ret = -EINVAL;
18251 ++ goto err_pm_put;
18252 + }
18253 +
18254 + regmap_update_bits(i2s->regmap, I2S_CKR, mask, val);
18255 +@@ -229,14 +233,15 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
18256 + case SND_SOC_DAIFMT_I2S:
18257 + val = I2S_TXCR_IBM_NORMAL;
18258 + break;
18259 +- case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
18260 +- val = I2S_TXCR_TFS_PCM;
18261 +- break;
18262 +- case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
18263 ++ case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 bit mode */
18264 + val = I2S_TXCR_TFS_PCM | I2S_TXCR_PBM_MODE(1);
18265 + break;
18266 ++ case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
18267 ++ val = I2S_TXCR_TFS_PCM;
18268 ++ break;
18269 + default:
18270 +- return -EINVAL;
18271 ++ ret = -EINVAL;
18272 ++ goto err_pm_put;
18273 + }
18274 +
18275 + regmap_update_bits(i2s->regmap, I2S_TXCR, mask, val);
18276 +@@ -252,19 +257,23 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
18277 + case SND_SOC_DAIFMT_I2S:
18278 + val = I2S_RXCR_IBM_NORMAL;
18279 + break;
18280 +- case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
18281 +- val = I2S_RXCR_TFS_PCM;
18282 +- break;
18283 +- case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
18284 ++ case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 bit mode */
18285 + val = I2S_RXCR_TFS_PCM | I2S_RXCR_PBM_MODE(1);
18286 + break;
18287 ++ case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
18288 ++ val = I2S_RXCR_TFS_PCM;
18289 ++ break;
18290 + default:
18291 +- return -EINVAL;
18292 ++ ret = -EINVAL;
18293 ++ goto err_pm_put;
18294 + }
18295 +
18296 + regmap_update_bits(i2s->regmap, I2S_RXCR, mask, val);
18297 +
18298 +- return 0;
18299 ++err_pm_put:
18300 ++ pm_runtime_put(cpu_dai->dev);
18301 ++
18302 ++ return ret;
18303 + }
18304 +
18305 + static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
18306 +diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
18307 +index 0ebee1ed06a90..5f1e72edfee04 100644
18308 +--- a/sound/soc/sh/rcar/adg.c
18309 ++++ b/sound/soc/sh/rcar/adg.c
18310 +@@ -391,9 +391,9 @@ static struct clk *rsnd_adg_create_null_clk(struct rsnd_priv *priv,
18311 + struct clk *clk;
18312 +
18313 + clk = clk_register_fixed_rate(dev, name, parent, 0, 0);
18314 +- if (IS_ERR(clk)) {
18315 ++ if (IS_ERR_OR_NULL(clk)) {
18316 + dev_err(dev, "create null clk error\n");
18317 +- return NULL;
18318 ++ return ERR_CAST(clk);
18319 + }
18320 +
18321 + return clk;
18322 +@@ -430,9 +430,9 @@ static int rsnd_adg_get_clkin(struct rsnd_priv *priv)
18323 + for (i = 0; i < CLKMAX; i++) {
18324 + clk = devm_clk_get(dev, clk_name[i]);
18325 +
18326 +- if (IS_ERR(clk))
18327 ++ if (IS_ERR_OR_NULL(clk))
18328 + clk = rsnd_adg_null_clk_get(priv);
18329 +- if (IS_ERR(clk))
18330 ++ if (IS_ERR_OR_NULL(clk))
18331 + goto err;
18332 +
18333 + adg->clk[i] = clk;
18334 +@@ -582,7 +582,7 @@ static int rsnd_adg_get_clkout(struct rsnd_priv *priv)
18335 + if (!count) {
18336 + clk = clk_register_fixed_rate(dev, clkout_name[CLKOUT],
18337 + parent_clk_name, 0, req_rate[0]);
18338 +- if (IS_ERR(clk))
18339 ++ if (IS_ERR_OR_NULL(clk))
18340 + goto err;
18341 +
18342 + adg->clkout[CLKOUT] = clk;
18343 +@@ -596,7 +596,7 @@ static int rsnd_adg_get_clkout(struct rsnd_priv *priv)
18344 + clk = clk_register_fixed_rate(dev, clkout_name[i],
18345 + parent_clk_name, 0,
18346 + req_rate[0]);
18347 +- if (IS_ERR(clk))
18348 ++ if (IS_ERR_OR_NULL(clk))
18349 + goto err;
18350 +
18351 + adg->clkout[i] = clk;
18352 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
18353 +index d1c570ca21ea7..b944f56a469a6 100644
18354 +--- a/sound/soc/soc-pcm.c
18355 ++++ b/sound/soc/soc-pcm.c
18356 +@@ -2001,6 +2001,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
18357 + struct snd_soc_pcm_runtime *be;
18358 + struct snd_soc_dpcm *dpcm;
18359 + int ret = 0;
18360 ++ unsigned long flags;
18361 ++ enum snd_soc_dpcm_state state;
18362 +
18363 + for_each_dpcm_be(fe, stream, dpcm) {
18364 + struct snd_pcm_substream *be_substream;
18365 +@@ -2017,76 +2019,141 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
18366 +
18367 + switch (cmd) {
18368 + case SNDRV_PCM_TRIGGER_START:
18369 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18370 + if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
18371 + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
18372 +- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
18373 ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) {
18374 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18375 + continue;
18376 ++ }
18377 ++ state = be->dpcm[stream].state;
18378 ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
18379 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18380 +
18381 + ret = soc_pcm_trigger(be_substream, cmd);
18382 +- if (ret)
18383 ++ if (ret) {
18384 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18385 ++ be->dpcm[stream].state = state;
18386 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18387 + goto end;
18388 ++ }
18389 +
18390 +- be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
18391 + break;
18392 + case SNDRV_PCM_TRIGGER_RESUME:
18393 +- if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
18394 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18395 ++ if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) {
18396 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18397 + continue;
18398 ++ }
18399 ++
18400 ++ state = be->dpcm[stream].state;
18401 ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
18402 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18403 +
18404 + ret = soc_pcm_trigger(be_substream, cmd);
18405 +- if (ret)
18406 ++ if (ret) {
18407 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18408 ++ be->dpcm[stream].state = state;
18409 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18410 + goto end;
18411 ++ }
18412 +
18413 +- be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
18414 + break;
18415 + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
18416 +- if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
18417 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18418 ++ if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) {
18419 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18420 + continue;
18421 ++ }
18422 ++
18423 ++ state = be->dpcm[stream].state;
18424 ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
18425 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18426 +
18427 + ret = soc_pcm_trigger(be_substream, cmd);
18428 +- if (ret)
18429 ++ if (ret) {
18430 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18431 ++ be->dpcm[stream].state = state;
18432 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18433 + goto end;
18434 ++ }
18435 +
18436 +- be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
18437 + break;
18438 + case SNDRV_PCM_TRIGGER_STOP:
18439 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18440 + if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) &&
18441 +- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
18442 ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) {
18443 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18444 + continue;
18445 ++ }
18446 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18447 +
18448 + if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
18449 + continue;
18450 +
18451 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18452 ++ state = be->dpcm[stream].state;
18453 ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
18454 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18455 ++
18456 + ret = soc_pcm_trigger(be_substream, cmd);
18457 +- if (ret)
18458 ++ if (ret) {
18459 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18460 ++ be->dpcm[stream].state = state;
18461 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18462 + goto end;
18463 ++ }
18464 +
18465 +- be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
18466 + break;
18467 + case SNDRV_PCM_TRIGGER_SUSPEND:
18468 +- if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
18469 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18470 ++ if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) {
18471 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18472 + continue;
18473 ++ }
18474 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18475 +
18476 + if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
18477 + continue;
18478 +
18479 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18480 ++ state = be->dpcm[stream].state;
18481 ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
18482 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18483 ++
18484 + ret = soc_pcm_trigger(be_substream, cmd);
18485 +- if (ret)
18486 ++ if (ret) {
18487 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18488 ++ be->dpcm[stream].state = state;
18489 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18490 + goto end;
18491 ++ }
18492 +
18493 +- be->dpcm[stream].state = SND_SOC_DPCM_STATE_SUSPEND;
18494 + break;
18495 + case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
18496 +- if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
18497 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18498 ++ if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) {
18499 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18500 + continue;
18501 ++ }
18502 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18503 +
18504 + if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
18505 + continue;
18506 +
18507 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18508 ++ state = be->dpcm[stream].state;
18509 ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
18510 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18511 ++
18512 + ret = soc_pcm_trigger(be_substream, cmd);
18513 +- if (ret)
18514 ++ if (ret) {
18515 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
18516 ++ be->dpcm[stream].state = state;
18517 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
18518 + goto end;
18519 ++ }
18520 +
18521 +- be->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
18522 + break;
18523 + }
18524 + }
18525 +diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
18526 +index 017a5a5e56cd1..64ec6d4858348 100644
18527 +--- a/sound/soc/ti/davinci-mcasp.c
18528 ++++ b/sound/soc/ti/davinci-mcasp.c
18529 +@@ -83,6 +83,8 @@ struct davinci_mcasp {
18530 + struct snd_pcm_substream *substreams[2];
18531 + unsigned int dai_fmt;
18532 +
18533 ++ u32 iec958_status;
18534 ++
18535 + /* Audio can not be enabled due to missing parameter(s) */
18536 + bool missing_audio_param;
18537 +
18538 +@@ -757,6 +759,9 @@ static int davinci_mcasp_set_tdm_slot(struct snd_soc_dai *dai,
18539 + {
18540 + struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
18541 +
18542 ++ if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
18543 ++ return 0;
18544 ++
18545 + dev_dbg(mcasp->dev,
18546 + "%s() tx_mask 0x%08x rx_mask 0x%08x slots %d width %d\n",
18547 + __func__, tx_mask, rx_mask, slots, slot_width);
18548 +@@ -827,6 +832,20 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp,
18549 + mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, RXROT(rx_rotate),
18550 + RXROT(7));
18551 + mcasp_set_reg(mcasp, DAVINCI_MCASP_RXMASK_REG, mask);
18552 ++ } else {
18553 ++ /*
18554 ++ * according to the TRM it should be TXROT=0, this one works:
18555 ++ * 16 bit to 23-8 (TXROT=6, rotate 24 bits)
18556 ++ * 24 bit to 23-0 (TXROT=0, rotate 0 bits)
18557 ++ *
18558 ++ * TXROT = 0 only works with 24bit samples
18559 ++ */
18560 ++ tx_rotate = (sample_width / 4 + 2) & 0x7;
18561 ++
18562 ++ mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXROT(tx_rotate),
18563 ++ TXROT(7));
18564 ++ mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSSZ(15),
18565 ++ TXSSZ(0x0F));
18566 + }
18567 +
18568 + mcasp_set_reg(mcasp, DAVINCI_MCASP_TXMASK_REG, mask);
18569 +@@ -842,10 +861,16 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
18570 + u8 tx_ser = 0;
18571 + u8 rx_ser = 0;
18572 + u8 slots = mcasp->tdm_slots;
18573 +- u8 max_active_serializers = (channels + slots - 1) / slots;
18574 +- u8 max_rx_serializers, max_tx_serializers;
18575 ++ u8 max_active_serializers, max_rx_serializers, max_tx_serializers;
18576 + int active_serializers, numevt;
18577 + u32 reg;
18578 ++
18579 ++ /* In DIT mode we only allow maximum of one serializers for now */
18580 ++ if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
18581 ++ max_active_serializers = 1;
18582 ++ else
18583 ++ max_active_serializers = (channels + slots - 1) / slots;
18584 ++
18585 + /* Default configuration */
18586 + if (mcasp->version < MCASP_VERSION_3)
18587 + mcasp_set_bits(mcasp, DAVINCI_MCASP_PWREMUMGT_REG, MCASP_SOFT);
18588 +@@ -1031,16 +1056,18 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream,
18589 + static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp,
18590 + unsigned int rate)
18591 + {
18592 +- u32 cs_value = 0;
18593 +- u8 *cs_bytes = (u8*) &cs_value;
18594 ++ u8 *cs_bytes = (u8 *)&mcasp->iec958_status;
18595 +
18596 +- /* Set the TX format : 24 bit right rotation, 32 bit slot, Pad 0
18597 +- and LSB first */
18598 +- mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXROT(6) | TXSSZ(15));
18599 ++ if (!mcasp->dat_port)
18600 ++ mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSEL);
18601 ++ else
18602 ++ mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSEL);
18603 +
18604 + /* Set TX frame synch : DIT Mode, 1 bit width, internal, rising edge */
18605 + mcasp_set_reg(mcasp, DAVINCI_MCASP_TXFMCTL_REG, AFSXE | FSXMOD(0x180));
18606 +
18607 ++ mcasp_set_reg(mcasp, DAVINCI_MCASP_TXMASK_REG, 0xFFFF);
18608 ++
18609 + /* Set the TX tdm : for all the slots */
18610 + mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, 0xFFFFFFFF);
18611 +
18612 +@@ -1049,16 +1076,8 @@ static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp,
18613 +
18614 + mcasp_clr_bits(mcasp, DAVINCI_MCASP_XEVTCTL_REG, TXDATADMADIS);
18615 +
18616 +- /* Only 44100 and 48000 are valid, both have the same setting */
18617 +- mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXDIV(3));
18618 +-
18619 +- /* Enable the DIT */
18620 +- mcasp_set_bits(mcasp, DAVINCI_MCASP_TXDITCTL_REG, DITEN);
18621 +-
18622 + /* Set S/PDIF channel status bits */
18623 +- cs_bytes[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
18624 +- cs_bytes[1] = IEC958_AES1_CON_PCM_CODER;
18625 +-
18626 ++ cs_bytes[3] &= ~IEC958_AES3_CON_FS;
18627 + switch (rate) {
18628 + case 22050:
18629 + cs_bytes[3] |= IEC958_AES3_CON_FS_22050;
18630 +@@ -1088,12 +1107,15 @@ static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp,
18631 + cs_bytes[3] |= IEC958_AES3_CON_FS_192000;
18632 + break;
18633 + default:
18634 +- printk(KERN_WARNING "unsupported sampling rate: %d\n", rate);
18635 ++ dev_err(mcasp->dev, "unsupported sampling rate: %d\n", rate);
18636 + return -EINVAL;
18637 + }
18638 +
18639 +- mcasp_set_reg(mcasp, DAVINCI_MCASP_DITCSRA_REG, cs_value);
18640 +- mcasp_set_reg(mcasp, DAVINCI_MCASP_DITCSRB_REG, cs_value);
18641 ++ mcasp_set_reg(mcasp, DAVINCI_MCASP_DITCSRA_REG, mcasp->iec958_status);
18642 ++ mcasp_set_reg(mcasp, DAVINCI_MCASP_DITCSRB_REG, mcasp->iec958_status);
18643 ++
18644 ++ /* Enable the DIT */
18645 ++ mcasp_set_bits(mcasp, DAVINCI_MCASP_TXDITCTL_REG, DITEN);
18646 +
18647 + return 0;
18648 + }
18649 +@@ -1237,12 +1259,18 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
18650 + int slots = mcasp->tdm_slots;
18651 + int rate = params_rate(params);
18652 + int sbits = params_width(params);
18653 ++ unsigned int bclk_target;
18654 +
18655 + if (mcasp->slot_width)
18656 + sbits = mcasp->slot_width;
18657 +
18658 ++ if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE)
18659 ++ bclk_target = rate * sbits * slots;
18660 ++ else
18661 ++ bclk_target = rate * 128;
18662 ++
18663 + davinci_mcasp_calc_clk_div(mcasp, mcasp->sysclk_freq,
18664 +- rate * sbits * slots, true);
18665 ++ bclk_target, true);
18666 + }
18667 +
18668 + ret = mcasp_common_hw_param(mcasp, substream->stream,
18669 +@@ -1598,6 +1626,77 @@ static const struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
18670 + .set_tdm_slot = davinci_mcasp_set_tdm_slot,
18671 + };
18672 +
18673 ++static int davinci_mcasp_iec958_info(struct snd_kcontrol *kcontrol,
18674 ++ struct snd_ctl_elem_info *uinfo)
18675 ++{
18676 ++ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
18677 ++ uinfo->count = 1;
18678 ++
18679 ++ return 0;
18680 ++}
18681 ++
18682 ++static int davinci_mcasp_iec958_get(struct snd_kcontrol *kcontrol,
18683 ++ struct snd_ctl_elem_value *uctl)
18684 ++{
18685 ++ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
18686 ++ struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
18687 ++
18688 ++ memcpy(uctl->value.iec958.status, &mcasp->iec958_status,
18689 ++ sizeof(mcasp->iec958_status));
18690 ++
18691 ++ return 0;
18692 ++}
18693 ++
18694 ++static int davinci_mcasp_iec958_put(struct snd_kcontrol *kcontrol,
18695 ++ struct snd_ctl_elem_value *uctl)
18696 ++{
18697 ++ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
18698 ++ struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
18699 ++
18700 ++ memcpy(&mcasp->iec958_status, uctl->value.iec958.status,
18701 ++ sizeof(mcasp->iec958_status));
18702 ++
18703 ++ return 0;
18704 ++}
18705 ++
18706 ++static int davinci_mcasp_iec958_con_mask_get(struct snd_kcontrol *kcontrol,
18707 ++ struct snd_ctl_elem_value *ucontrol)
18708 ++{
18709 ++ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
18710 ++ struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
18711 ++
18712 ++ memset(ucontrol->value.iec958.status, 0xff, sizeof(mcasp->iec958_status));
18713 ++ return 0;
18714 ++}
18715 ++
18716 ++static const struct snd_kcontrol_new davinci_mcasp_iec958_ctls[] = {
18717 ++ {
18718 ++ .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
18719 ++ SNDRV_CTL_ELEM_ACCESS_VOLATILE),
18720 ++ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
18721 ++ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
18722 ++ .info = davinci_mcasp_iec958_info,
18723 ++ .get = davinci_mcasp_iec958_get,
18724 ++ .put = davinci_mcasp_iec958_put,
18725 ++ }, {
18726 ++ .access = SNDRV_CTL_ELEM_ACCESS_READ,
18727 ++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
18728 ++ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK),
18729 ++ .info = davinci_mcasp_iec958_info,
18730 ++ .get = davinci_mcasp_iec958_con_mask_get,
18731 ++ },
18732 ++};
18733 ++
18734 ++static void davinci_mcasp_init_iec958_status(struct davinci_mcasp *mcasp)
18735 ++{
18736 ++ unsigned char *cs = (u8 *)&mcasp->iec958_status;
18737 ++
18738 ++ cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE;
18739 ++ cs[1] = IEC958_AES1_CON_PCM_CODER;
18740 ++ cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC | IEC958_AES2_CON_CHANNEL_UNSPEC;
18741 ++ cs[3] = IEC958_AES3_CON_CLOCK_1000PPM;
18742 ++}
18743 ++
18744 + static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai)
18745 + {
18746 + struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
18747 +@@ -1605,6 +1704,12 @@ static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai)
18748 + dai->playback_dma_data = &mcasp->dma_data[SNDRV_PCM_STREAM_PLAYBACK];
18749 + dai->capture_dma_data = &mcasp->dma_data[SNDRV_PCM_STREAM_CAPTURE];
18750 +
18751 ++ if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE) {
18752 ++ davinci_mcasp_init_iec958_status(mcasp);
18753 ++ snd_soc_add_dai_controls(dai, davinci_mcasp_iec958_ctls,
18754 ++ ARRAY_SIZE(davinci_mcasp_iec958_ctls));
18755 ++ }
18756 ++
18757 + return 0;
18758 + }
18759 +
18760 +@@ -1651,7 +1756,8 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
18761 + .channels_min = 1,
18762 + .channels_max = 384,
18763 + .rates = DAVINCI_MCASP_RATES,
18764 +- .formats = DAVINCI_MCASP_PCM_FMTS,
18765 ++ .formats = SNDRV_PCM_FMTBIT_S16_LE |
18766 ++ SNDRV_PCM_FMTBIT_S24_LE,
18767 + },
18768 + .ops = &davinci_mcasp_dai_ops,
18769 + },
18770 +@@ -1871,6 +1977,8 @@ out:
18771 + } else {
18772 + mcasp->tdm_slots = pdata->tdm_slots;
18773 + }
18774 ++ } else {
18775 ++ mcasp->tdm_slots = 32;
18776 + }
18777 +
18778 + mcasp->num_serializer = pdata->num_serializer;
18779 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
18780 +index 2234d5c33177a..d27e017ebfbea 100644
18781 +--- a/tools/lib/bpf/libbpf.c
18782 ++++ b/tools/lib/bpf/libbpf.c
18783 +@@ -3894,6 +3894,42 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
18784 + return 0;
18785 + }
18786 +
18787 ++static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
18788 ++{
18789 ++ char file[PATH_MAX], buff[4096];
18790 ++ FILE *fp;
18791 ++ __u32 val;
18792 ++ int err;
18793 ++
18794 ++ snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
18795 ++ memset(info, 0, sizeof(*info));
18796 ++
18797 ++ fp = fopen(file, "r");
18798 ++ if (!fp) {
18799 ++ err = -errno;
18800 ++ pr_warn("failed to open %s: %d. No procfs support?\n", file,
18801 ++ err);
18802 ++ return err;
18803 ++ }
18804 ++
18805 ++ while (fgets(buff, sizeof(buff), fp)) {
18806 ++ if (sscanf(buff, "map_type:\t%u", &val) == 1)
18807 ++ info->type = val;
18808 ++ else if (sscanf(buff, "key_size:\t%u", &val) == 1)
18809 ++ info->key_size = val;
18810 ++ else if (sscanf(buff, "value_size:\t%u", &val) == 1)
18811 ++ info->value_size = val;
18812 ++ else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
18813 ++ info->max_entries = val;
18814 ++ else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
18815 ++ info->map_flags = val;
18816 ++ }
18817 ++
18818 ++ fclose(fp);
18819 ++
18820 ++ return 0;
18821 ++}
18822 ++
18823 + int bpf_map__reuse_fd(struct bpf_map *map, int fd)
18824 + {
18825 + struct bpf_map_info info = {};
18826 +@@ -3902,6 +3938,8 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
18827 + char *new_name;
18828 +
18829 + err = bpf_obj_get_info_by_fd(fd, &info, &len);
18830 ++ if (err && errno == EINVAL)
18831 ++ err = bpf_get_map_info_from_fdinfo(fd, &info);
18832 + if (err)
18833 + return libbpf_err(err);
18834 +
18835 +@@ -4381,12 +4419,16 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
18836 + struct bpf_map_info map_info = {};
18837 + char msg[STRERR_BUFSIZE];
18838 + __u32 map_info_len;
18839 ++ int err;
18840 +
18841 + map_info_len = sizeof(map_info);
18842 +
18843 +- if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
18844 +- pr_warn("failed to get map info for map FD %d: %s\n",
18845 +- map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
18846 ++ err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
18847 ++ if (err && errno == EINVAL)
18848 ++ err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
18849 ++ if (err) {
18850 ++ pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
18851 ++ libbpf_strerror_r(errno, msg, sizeof(msg)));
18852 + return false;
18853 + }
18854 +
18855 +@@ -4614,10 +4656,13 @@ bpf_object__create_maps(struct bpf_object *obj)
18856 + char *cp, errmsg[STRERR_BUFSIZE];
18857 + unsigned int i, j;
18858 + int err;
18859 ++ bool retried;
18860 +
18861 + for (i = 0; i < obj->nr_maps; i++) {
18862 + map = &obj->maps[i];
18863 +
18864 ++ retried = false;
18865 ++retry:
18866 + if (map->pin_path) {
18867 + err = bpf_object__reuse_map(map);
18868 + if (err) {
18869 +@@ -4625,6 +4670,12 @@ bpf_object__create_maps(struct bpf_object *obj)
18870 + map->name);
18871 + goto err_out;
18872 + }
18873 ++ if (retried && map->fd < 0) {
18874 ++ pr_warn("map '%s': cannot find pinned map\n",
18875 ++ map->name);
18876 ++ err = -ENOENT;
18877 ++ goto err_out;
18878 ++ }
18879 + }
18880 +
18881 + if (map->fd >= 0) {
18882 +@@ -4658,9 +4709,13 @@ bpf_object__create_maps(struct bpf_object *obj)
18883 + if (map->pin_path && !map->pinned) {
18884 + err = bpf_map__pin(map, NULL);
18885 + if (err) {
18886 ++ zclose(map->fd);
18887 ++ if (!retried && err == -EEXIST) {
18888 ++ retried = true;
18889 ++ goto retry;
18890 ++ }
18891 + pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
18892 + map->name, map->pin_path, err);
18893 +- zclose(map->fd);
18894 + goto err_out;
18895 + }
18896 + }
18897 +diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
18898 +index f50ac31920d13..0328a1e08f659 100644
18899 +--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
18900 ++++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
18901 +@@ -298,7 +298,7 @@ int mte_default_setup(void)
18902 + int ret;
18903 +
18904 + if (!(hwcaps2 & HWCAP2_MTE)) {
18905 +- ksft_print_msg("FAIL: MTE features unavailable\n");
18906 ++ ksft_print_msg("SKIP: MTE features unavailable\n");
18907 + return KSFT_SKIP;
18908 + }
18909 + /* Get current mte mode */
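
Reporting SKIP here (the function already returned KSFT_SKIP) keeps CI dashboards honest: missing hardware is an environment limitation, not a test failure. The gating pattern in isolation, as a sketch (the kselftest.h include path and the test body are assumptions; HWCAP2_MTE is bit 18 in the arm64 uapi headers):

    #include <sys/auxv.h>
    #include "kselftest.h"		/* assumed include path for the harness helpers */

    #ifndef HWCAP2_MTE
    #define HWCAP2_MTE (1UL << 18)	/* arm64 uapi value, defined here if headers are old */
    #endif

    int main(void)
    {
    	if (!(getauxval(AT_HWCAP2) & HWCAP2_MTE)) {
    		ksft_print_msg("SKIP: MTE features unavailable\n");
    		return KSFT_SKIP;	/* counted as skipped, not failed */
    	}
    	/* ... real MTE checks would run here ... */
    	return KSFT_PASS;
    }
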
18910 +diff --git a/tools/testing/selftests/arm64/pauth/pac.c b/tools/testing/selftests/arm64/pauth/pac.c
18911 +index 592fe538506e3..b743daa772f55 100644
18912 +--- a/tools/testing/selftests/arm64/pauth/pac.c
18913 ++++ b/tools/testing/selftests/arm64/pauth/pac.c
18914 +@@ -25,13 +25,15 @@
18915 + do { \
18916 + unsigned long hwcaps = getauxval(AT_HWCAP); \
18917 + /* data key instructions are not in NOP space. This prevents a SIGILL */ \
18918 +- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled"); \
18919 ++ if (!(hwcaps & HWCAP_PACA)) \
18920 ++ SKIP(return, "PAUTH not enabled"); \
18921 + } while (0)
18922 + #define ASSERT_GENERIC_PAUTH_ENABLED() \
18923 + do { \
18924 + unsigned long hwcaps = getauxval(AT_HWCAP); \
18925 + /* generic key instructions are not in NOP space. This prevents a SIGILL */ \
18926 +- ASSERT_NE(0, hwcaps & HWCAP_PACG) TH_LOG("Generic PAUTH not enabled"); \
18927 ++ if (!(hwcaps & HWCAP_PACG)) \
18928 ++ SKIP(return, "Generic PAUTH not enabled"); \
18929 + } while (0)
18930 +
18931 + void sign_specific(struct signatures *sign, size_t val)
18932 +@@ -256,7 +258,7 @@ TEST(single_thread_different_keys)
18933 + unsigned long hwcaps = getauxval(AT_HWCAP);
18934 +
18935 + /* generic and data key instructions are not in NOP space. This prevents a SIGILL */
18936 +- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
18937 ++ ASSERT_PAUTH_ENABLED();
18938 + if (!(hwcaps & HWCAP_PACG)) {
18939 + TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
18940 + nkeys = NKEYS - 1;
18941 +@@ -299,7 +301,7 @@ TEST(exec_changed_keys)
18942 + unsigned long hwcaps = getauxval(AT_HWCAP);
18943 +
18944 + /* generic and data key instructions are not in NOP space. This prevents a SIGILL */
18945 +- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
18946 ++ ASSERT_PAUTH_ENABLED();
18947 + if (!(hwcaps & HWCAP_PACG)) {
18948 + TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
18949 + nkeys = NKEYS - 1;
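
The pauth tests use kselftest_harness.h rather than the plain helpers, so the equivalent conversion is from ASSERT_NE(), which fails the test, to the SKIP() macro, whose first argument is the statement executed after recording the skip (here return, ending the TEST body). The last two hunks also deduplicate: open-coded hwcap checks become the ASSERT_PAUTH_ENABLED() macro defined above. The idiom, sketched (test name and the HWCAP_PACA fallback define are assumptions):

    #include <sys/auxv.h>
    #include "kselftest_harness.h"	/* assumed include path */

    #ifndef HWCAP_PACA
    #define HWCAP_PACA (1UL << 30)	/* arm64 uapi value, defined here if headers are old */
    #endif

    TEST(data_key_present)
    {
    	if (!(getauxval(AT_HWCAP) & HWCAP_PACA))
    		SKIP(return, "PAUTH not enabled");	/* skip instead of SIGILL or FAIL */

    	/* ... sign/authenticate a pointer with the data key here ... */
    }

    TEST_HARNESS_MAIN
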
18950 +diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
18951 +index 023cc532992d3..839f7ddaec16c 100644
18952 +--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
18953 ++++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
18954 +@@ -1,5 +1,7 @@
18955 + // SPDX-License-Identifier: GPL-2.0
18956 + #include <test_progs.h>
18957 ++#include <sys/time.h>
18958 ++#include <sys/resource.h>
18959 + #include "test_send_signal_kern.skel.h"
18960 +
18961 + int sigusr1_received = 0;
18962 +@@ -41,12 +43,23 @@ static void test_send_signal_common(struct perf_event_attr *attr,
18963 + }
18964 +
18965 + if (pid == 0) {
18966 ++ int old_prio;
18967 ++
18968 + /* install signal handler and notify parent */
18969 + signal(SIGUSR1, sigusr1_handler);
18970 +
18971 + close(pipe_c2p[0]); /* close read */
18972 + close(pipe_p2c[1]); /* close write */
18973 +
18974 ++ /* boost to a high priority so we have a higher chance
18975 ++ * that, if an interrupt happens, the underlying task
18976 ++ * is this process.
18977 ++ */
18978 ++ errno = 0;
18979 ++ old_prio = getpriority(PRIO_PROCESS, 0);
18980 ++ ASSERT_OK(errno, "getpriority");
18981 ++ ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
18982 ++
18983 + /* notify parent signal handler is installed */
18984 + CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
18985 +
18986 +@@ -62,6 +75,9 @@ static void test_send_signal_common(struct perf_event_attr *attr,
18987 + /* wait for parent notification and exit */
18988 + CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
18989 +
18990 ++ /* restore the old priority */
18991 ++ ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
18992 ++
18993 + close(pipe_c2p[1]);
18994 + close(pipe_p2c[0]);
18995 + exit(0);
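
Boosting the child to nice -20 makes it more likely that the perf interrupt fires while this task is current, which the test needs in order to deliver the signal to itself. Note the errno = 0 dance: getpriority() can legitimately return -1 as a value, so success is detected by errno remaining 0. That pattern in isolation, as a sketch:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
    	int old_prio;

    	errno = 0;			/* must clear: -1 is a valid return value */
    	old_prio = getpriority(PRIO_PROCESS, 0);
    	if (errno != 0) {
    		perror("getpriority");
    		return 1;
    	}
    	if (setpriority(PRIO_PROCESS, 0, -20))	/* raising priority needs CAP_SYS_NICE */
    		perror("setpriority");
    	/* ... timing-sensitive work ... */
    	if (setpriority(PRIO_PROCESS, 0, old_prio))
    		perror("setpriority(restore)");
    	return 0;
    }
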
18996 +diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
18997 +index ec281b0363b82..86f97681ad898 100644
18998 +--- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
18999 ++++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
19000 +@@ -195,8 +195,10 @@ static void run_test(int cgroup_fd)
19001 +
19002 + pthread_mutex_lock(&server_started_mtx);
19003 + if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
19004 +- (void *)&server_fd)))
19005 ++ (void *)&server_fd))) {
19006 ++ pthread_mutex_unlock(&server_started_mtx);
19007 + goto close_server_fd;
19008 ++ }
19009 + pthread_cond_wait(&server_started, &server_started_mtx);
19010 + pthread_mutex_unlock(&server_started_mtx);
19011 +
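
The sockopt_inherit fix addresses a classic condition-variable bug: the mutex is taken before pthread_create(), and the old error path jumped to cleanup with it still held, so anyone later contending on server_started_mtx could block forever. The safe shape of the handshake, sketched (names are hypothetical; the flag-guarded wait is extra hardening against spurious wakeups that the test itself does not have):

    #include <pthread.h>

    static pthread_mutex_t started_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t started = PTHREAD_COND_INITIALIZER;
    static int started_flag;

    static void *server(void *arg)
    {
    	(void)arg;
    	pthread_mutex_lock(&started_mtx);
    	started_flag = 1;
    	pthread_cond_signal(&started);
    	pthread_mutex_unlock(&started_mtx);
    	/* ... serve ... */
    	return NULL;
    }

    static int start_server(pthread_t *tid)
    {
    	pthread_mutex_lock(&started_mtx);
    	if (pthread_create(tid, NULL, server, NULL)) {
    		pthread_mutex_unlock(&started_mtx);	/* the fix: never leave locked */
    		return -1;
    	}
    	while (!started_flag)
    		pthread_cond_wait(&started, &started_mtx);
    	pthread_mutex_unlock(&started_mtx);
    	return 0;
    }
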
19012 +diff --git a/tools/testing/selftests/bpf/progs/xdp_tx.c b/tools/testing/selftests/bpf/progs/xdp_tx.c
19013 +index 94e6c2b281cb6..5f725c720e008 100644
19014 +--- a/tools/testing/selftests/bpf/progs/xdp_tx.c
19015 ++++ b/tools/testing/selftests/bpf/progs/xdp_tx.c
19016 +@@ -3,7 +3,7 @@
19017 + #include <linux/bpf.h>
19018 + #include <bpf/bpf_helpers.h>
19019 +
19020 +-SEC("tx")
19021 ++SEC("xdp")
19022 + int xdp_tx(struct xdp_md *xdp)
19023 + {
19024 + return XDP_TX;
19025 +diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
19026 +index abdfc41f7685a..4fd01450a4089 100644
19027 +--- a/tools/testing/selftests/bpf/test_maps.c
19028 ++++ b/tools/testing/selftests/bpf/test_maps.c
19029 +@@ -985,7 +985,7 @@ static void test_sockmap(unsigned int tasks, void *data)
19030 +
19031 + FD_ZERO(&w);
19032 + FD_SET(sfd[3], &w);
19033 +- to.tv_sec = 1;
19034 ++ to.tv_sec = 30;
19035 + to.tv_usec = 0;
19036 + s = select(sfd[3] + 1, &w, NULL, NULL, &to);
19037 + if (s == -1) {
19038 +diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
19039 +index 6f103106a39bb..bfbf2277b61a6 100644
19040 +--- a/tools/testing/selftests/bpf/test_progs.c
19041 ++++ b/tools/testing/selftests/bpf/test_progs.c
19042 +@@ -148,18 +148,18 @@ void test__end_subtest()
19043 + struct prog_test_def *test = env.test;
19044 + int sub_error_cnt = test->error_cnt - test->old_error_cnt;
19045 +
19046 +- if (sub_error_cnt)
19047 +- env.fail_cnt++;
19048 +- else if (test->skip_cnt == 0)
19049 +- env.sub_succ_cnt++;
19050 +- skip_account();
19051 +-
19052 + dump_test_log(test, sub_error_cnt);
19053 +
19054 + fprintf(env.stdout, "#%d/%d %s:%s\n",
19055 + test->test_num, test->subtest_num, test->subtest_name,
19056 + sub_error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
19057 +
19058 ++ if (sub_error_cnt)
19059 ++ env.fail_cnt++;
19060 ++ else if (test->skip_cnt == 0)
19061 ++ env.sub_succ_cnt++;
19062 ++ skip_account();
19063 ++
19064 + free(test->subtest_name);
19065 + test->subtest_name = NULL;
19066 + }
19067 +@@ -786,17 +786,18 @@ int main(int argc, char **argv)
19068 + test__end_subtest();
19069 +
19070 + test->tested = true;
19071 +- if (test->error_cnt)
19072 +- env.fail_cnt++;
19073 +- else
19074 +- env.succ_cnt++;
19075 +- skip_account();
19076 +
19077 + dump_test_log(test, test->error_cnt);
19078 +
19079 + fprintf(env.stdout, "#%d %s:%s\n",
19080 + test->test_num, test->test_name,
19081 +- test->error_cnt ? "FAIL" : "OK");
19082 ++ test->error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
19083 ++
19084 ++ if (test->error_cnt)
19085 ++ env.fail_cnt++;
19086 ++ else
19087 ++ env.succ_cnt++;
19088 ++ skip_account();
19089 +
19090 + reset_affinity();
19091 + restore_netns();
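
Both test_progs.c hunks fix the same ordering bug: skip_account() drains test->skip_cnt into the global counter and zeroes it, so when accounting ran before the status line, the test->skip_cnt check always saw zero and skipped (sub)tests printed as OK. A toy reduction of the bug, assuming nothing from test_progs:

    #include <stdio.h>

    static int skip_cnt = 1;	/* pretend one subtest was skipped */
    static int total_skipped;

    static void skip_account(void)
    {
    	total_skipped += skip_cnt;
    	skip_cnt = 0;		/* drained: later readers see zero */
    }

    int main(void)
    {
    	/* Old order: account first, print second -> prints "OK" (wrong). */
    	skip_account();
    	printf("%s\n", skip_cnt ? "SKIP" : "OK");
    	return 0;
    }
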
19092 +diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh
19093 +index ba8ffcdaac302..995278e684b6e 100755
19094 +--- a/tools/testing/selftests/bpf/test_xdp_veth.sh
19095 ++++ b/tools/testing/selftests/bpf/test_xdp_veth.sh
19096 +@@ -108,7 +108,7 @@ ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
19097 + ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
19098 +
19099 + ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy
19100 +-ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec tx
19101 ++ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp
19102 + ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy
19103 +
19104 + trap cleanup EXIT
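
These two changes travel together: libbpf expects the ELF section name to carry a recognized program-type prefix, so the section becomes "xdp" instead of the arbitrary "tx", and the ip invocation's sec argument must name that same section. For reference, the program after the change is essentially just the following (license line assumed from the rest of the file):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")			/* section name selects BPF_PROG_TYPE_XDP */
    int xdp_tx(struct xdp_md *xdp)
    {
    	return XDP_TX;		/* reflect the frame back out its ingress device */
    }

    char _license[] SEC("license") = "GPL";
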
19105 +diff --git a/tools/testing/selftests/firmware/fw_namespace.c b/tools/testing/selftests/firmware/fw_namespace.c
19106 +index 0e393cb5f42de..4c6f0cd83c5b0 100644
19107 +--- a/tools/testing/selftests/firmware/fw_namespace.c
19108 ++++ b/tools/testing/selftests/firmware/fw_namespace.c
19109 +@@ -129,7 +129,8 @@ int main(int argc, char **argv)
19110 + die("mounting tmpfs to /lib/firmware failed\n");
19111 +
19112 + sys_path = argv[1];
19113 +- asprintf(&fw_path, "/lib/firmware/%s", fw_name);
19114 ++ if (asprintf(&fw_path, "/lib/firmware/%s", fw_name) < 0)
19115 ++ die("error: failed to build full fw_path\n");
19116 +
19117 + setup_fw(fw_path);
19118 +
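
Checking asprintf() matters because, on failure, glibc leaves the target pointer's contents undefined, so using or freeing fw_path after an unchecked failure would be a second bug. The pattern in isolation (the firmware name here is a placeholder):

    #define _GNU_SOURCE		/* asprintf() is a GNU extension */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
    	char *fw_path;

    	if (asprintf(&fw_path, "/lib/firmware/%s", "some-firmware.bin") < 0) {
    		fprintf(stderr, "failed to build fw_path\n");
    		return 1;	/* fw_path is undefined here; do not free it */
    	}
    	puts(fw_path);
    	free(fw_path);
    	return 0;
    }
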
19119 +diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
19120 +index a6fac927ee82f..0cee6b067a374 100644
19121 +--- a/tools/testing/selftests/ftrace/test.d/functions
19122 ++++ b/tools/testing/selftests/ftrace/test.d/functions
19123 +@@ -115,7 +115,7 @@ check_requires() { # Check required files and tracers
19124 + echo "Required tracer $t is not configured."
19125 + exit_unsupported
19126 + fi
19127 +- elif [ $r != $i ]; then
19128 ++ elif [ "$r" != "$i" ]; then
19129 + if ! grep -Fq "$r" README ; then
19130 + echo "Required feature pattern \"$r\" is not in README."
19131 + exit_unsupported
19132 +diff --git a/tools/testing/selftests/nci/nci_dev.c b/tools/testing/selftests/nci/nci_dev.c
19133 +index 57b505cb15618..acd4125ff39fe 100644
19134 +--- a/tools/testing/selftests/nci/nci_dev.c
19135 ++++ b/tools/testing/selftests/nci/nci_dev.c
19136 +@@ -110,11 +110,11 @@ static int send_cmd_mt_nla(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
19137 + na->nla_type = nla_type[cnt];
19138 + na->nla_len = nla_len[cnt] + NLA_HDRLEN;
19139 +
19140 +- if (nla_len > 0)
19141 ++ if (nla_len[cnt] > 0)
19142 + memcpy(NLA_DATA(na), nla_data[cnt], nla_len[cnt]);
19143 +
19144 +- msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
19145 +- prv_len = na->nla_len;
19146 ++ prv_len = NLA_ALIGN(nla_len[cnt]) + NLA_HDRLEN;
19147 ++ msg.n.nlmsg_len += prv_len;
19148 + }
19149 +
19150 + buf = (char *)&msg;
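
Two bugs in one nci hunk: the memcpy was gated on the array pointer nla_len (always non-NULL) rather than the element nla_len[cnt], and the running nlmsg_len added the unaligned attribute length. Netlink attributes are padded to 4-byte boundaries, so each one really occupies NLA_HDRLEN plus NLA_ALIGN(payload): a 5-byte payload costs 4 + 8 = 12 bytes, not 9. The arithmetic, sketched:

    #include <linux/netlink.h>
    #include <stdio.h>

    /* Bytes one attribute occupies inside a netlink message. */
    static unsigned int nla_total(unsigned int payload)
    {
    	return NLA_HDRLEN + NLA_ALIGN(payload);
    }

    int main(void)
    {
    	printf("5-byte payload -> %u bytes on the wire\n", nla_total(5)); /* 12 */
    	return 0;
    }
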
19151 +diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
19152 +index 9db867df76794..610334f86f631 100644
19153 +--- a/tools/thermal/tmon/Makefile
19154 ++++ b/tools/thermal/tmon/Makefile
19155 +@@ -10,7 +10,7 @@ override CFLAGS+= $(call cc-option,-O3,-O1) ${WARNFLAGS}
19156 + # Add "-fstack-protector" only if toolchain supports it.
19157 + override CFLAGS+= $(call cc-option,-fstack-protector-strong)
19158 + CC?= $(CROSS_COMPILE)gcc
19159 +-PKG_CONFIG?= pkg-config
19160 ++PKG_CONFIG?= $(CROSS_COMPILE)pkg-config
19161 +
19162 + override CFLAGS+=-D VERSION=\"$(VERSION)\"
19163 + LDFLAGS+=