
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.13 commit in: /
Date: Sat, 18 Sep 2021 16:08:57
Message-Id: 1631981319.9721ae214fbeef46bcc22f4e89853505717a4596.mpagano@gentoo
commit: 9721ae214fbeef46bcc22f4e89853505717a4596
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 18 16:08:39 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 18 16:08:39 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9721ae21

Linux patch 5.13.19

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 8 +
1018_linux-5.13.19.patch | 17110 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 17118 insertions(+)

diff --git a/0000_README b/0000_README
index ab01fc2..2dcc6e6 100644
--- a/0000_README
+++ b/0000_README
@@ -111,6 +111,14 @@ Patch: 1016_linux-5.13.17.patch
From: http://www.kernel.org
Desc: Linux 5.13.17

+Patch: 1017_linux-5.13.18.patch
+From: http://www.kernel.org
+Desc: Linux 5.13.18
+
+Patch: 1018_linux-5.13.19.patch
+From: http://www.kernel.org
+Desc: Linux 5.13.19
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1018_linux-5.13.19.patch b/1018_linux-5.13.19.patch
new file mode 100644
index 0000000..5f873ad
--- /dev/null
+++ b/1018_linux-5.13.19.patch
@@ -0,0 +1,17110 @@
+diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
+index 9c2be821c2254..922c23bb4372a 100644
+--- a/Documentation/admin-guide/devices.txt
++++ b/Documentation/admin-guide/devices.txt
+@@ -2993,10 +2993,10 @@
+ 65 = /dev/infiniband/issm1 Second InfiniBand IsSM device
+ ...
+ 127 = /dev/infiniband/issm63 63rd InfiniBand IsSM device
+- 128 = /dev/infiniband/uverbs0 First InfiniBand verbs device
+- 129 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
++ 192 = /dev/infiniband/uverbs0 First InfiniBand verbs device
++ 193 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
+ ...
+- 159 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
++ 223 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
+
+ 232 char Biometric Devices
+ 0 = /dev/biometric/sensor0/fingerprint first fingerprint sensor on first device
+diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
+index 38dc56a577604..ecec514b31550 100644
+--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
+@@ -43,19 +43,19 @@ group emmc_nb
+
+ group pwm0
+ - pin 11 (GPIO1-11)
+- - functions pwm, gpio
++ - functions pwm, led, gpio
+
+ group pwm1
+ - pin 12
+- - functions pwm, gpio
++ - functions pwm, led, gpio
+
+ group pwm2
+ - pin 13
+- - functions pwm, gpio
++ - functions pwm, led, gpio
+
+ group pwm3
+ - pin 14
+- - functions pwm, gpio
++ - functions pwm, led, gpio
+
+ group pmic1
+ - pin 7
+diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
+index 53d396650afbe..7f52d9079d764 100644
+--- a/Documentation/filesystems/f2fs.rst
++++ b/Documentation/filesystems/f2fs.rst
+@@ -185,6 +185,7 @@ fault_type=%d Support configuring fault injection type, should be
+ FAULT_KVMALLOC 0x000000002
+ FAULT_PAGE_ALLOC 0x000000004
+ FAULT_PAGE_GET 0x000000008
++ FAULT_ALLOC_BIO 0x000000010 (obsolete)
+ FAULT_ALLOC_NID 0x000000020
+ FAULT_ORPHAN 0x000000040
+ FAULT_BLOCK 0x000000080
+@@ -289,6 +290,9 @@ compress_mode=%s Control file compression mode. This supports "fs" and "user"
+ choosing the target file and the timing. The user can do manual
+ compression/decompression on the compression enabled files using
+ ioctls.
++compress_cache Support to use address space of a filesystem managed inode to
++ cache compressed block, in order to improve cache hit ratio of
++ random read.
+ inlinecrypt When possible, encrypt/decrypt the contents of encrypted
+ files using the blk-crypto framework rather than
+ filesystem-layer encryption. This allows the use of
+diff --git a/Makefile b/Makefile
+index ddbd64b92a723..528a5c37bc8d2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 13
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = Opossums on Parade
+
+@@ -404,6 +404,11 @@ ifeq ($(ARCH),sparc64)
+ SRCARCH := sparc
+ endif
+
++# Additional ARCH settings for parisc
++ifeq ($(ARCH),parisc64)
++ SRCARCH := parisc
++endif
++
+ export cross_compiling :=
+ ifneq ($(SRCARCH),$(SUBARCH))
+ cross_compiling := 1
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index 8eb70c1febce3..356f70cfcd3bb 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -85,6 +85,8 @@ compress-$(CONFIG_KERNEL_LZ4) = lz4
+ libfdt_objs := fdt_rw.o fdt_ro.o fdt_wip.o fdt.o
+
+ ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
++CFLAGS_REMOVE_atags_to_fdt.o += -Wframe-larger-than=${CONFIG_FRAME_WARN}
++CFLAGS_atags_to_fdt.o += -Wframe-larger-than=1280
+ OBJS += $(libfdt_objs) atags_to_fdt.o
+ endif
+ ifeq ($(CONFIG_USE_OF),y)
+diff --git a/arch/arm/boot/dts/at91-kizbox3_common.dtsi b/arch/arm/boot/dts/at91-kizbox3_common.dtsi
+index c4b3750495da8..abe27adfa4d65 100644
+--- a/arch/arm/boot/dts/at91-kizbox3_common.dtsi
++++ b/arch/arm/boot/dts/at91-kizbox3_common.dtsi
+@@ -336,7 +336,7 @@
+ };
+
+ &shutdown_controller {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+ atmel,wakeup-rtc-timer;
+
+ input@0 {
+diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
+index ebbc9b23aef1c..b1068cca42287 100644
+--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
++++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
+@@ -662,7 +662,7 @@
+ };
+
+ &shutdown_controller {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+ status = "okay";
+
+ input@0 {
+diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+index a9e6fee55a2a8..8034e5dacc808 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+@@ -138,7 +138,7 @@
+ };
+
+ shdwc@f8048010 {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+ atmel,wakeup-rtc-timer;
+
+ input@0 {
+diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+index ff83967fd0082..c145c4e5ef582 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+@@ -205,7 +205,7 @@
+ };
+
+ &shutdown_controller {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+ atmel,wakeup-rtc-timer;
+
+ input@0 {
+diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+index bd64721fa23ca..34faca597c352 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+@@ -693,7 +693,7 @@
+ };
+
+ &shutdown_controller {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+ atmel,wakeup-rtc-timer;
+
+ input@0 {
+diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+index dfd150eb0fd86..3f972a4086c37 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+@@ -203,7 +203,7 @@
+ };
+
+ shdwc@f8048010 {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+
+ input@0 {
+ reg = <0>;
+diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+index 509c732a0d8b4..627b7bf88d83b 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+@@ -347,7 +347,7 @@
+ };
+
+ shdwc@f8048010 {
+- atmel,shdwc-debouncer = <976>;
++ debounce-delay-us = <976>;
+ atmel,wakeup-rtc-timer;
+
+ input@0 {
+diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
+index be040b6a02fa8..1f3ee60fb102f 100644
+--- a/arch/arm/boot/dts/imx53-ppd.dts
++++ b/arch/arm/boot/dts/imx53-ppd.dts
+@@ -70,6 +70,12 @@
+ clock-frequency = <11289600>;
+ };
+
++ achc_24M: achc-clock {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24000000>;
++ };
++
+ sgtlsound: sound {
+ compatible = "fsl,imx53-cpuvo-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+@@ -314,16 +320,13 @@
+ &gpio4 12 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+- spidev0: spi@0 {
+- compatible = "ge,achc";
+- reg = <0>;
+- spi-max-frequency = <1000000>;
+- };
+-
+- spidev1: spi@1 {
+- compatible = "ge,achc";
+- reg = <1>;
+- spi-max-frequency = <1000000>;
++ spidev0: spi@1 {
++ compatible = "ge,achc", "nxp,kinetis-k20";
++ reg = <1>, <0>;
++ vdd-supply = <&reg_3v3>;
++ vdda-supply = <&reg_3v3>;
++ clocks = <&achc_24M>;
++ reset-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
+ };
+
+ gpioxra0: gpio@2 {
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index 2687c4e890ba8..e36d590e83732 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -1262,9 +1262,9 @@
+ <&mmcc DSI1_BYTE_CLK>,
+ <&mmcc DSI_PIXEL_CLK>,
+ <&mmcc DSI1_ESC_CLK>;
+- clock-names = "iface_clk", "bus_clk", "core_mmss_clk",
+- "src_clk", "byte_clk", "pixel_clk",
+- "core_clk";
++ clock-names = "iface", "bus", "core_mmss",
++ "src", "byte", "pixel",
++ "core";
+
+ assigned-clocks = <&mmcc DSI1_BYTE_SRC>,
+ <&mmcc DSI1_ESC_SRC>,
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+index 6cf1c8b4c6e28..c9577ba2973d3 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+@@ -172,15 +172,15 @@
+ sgtl5000_tx_endpoint: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&sai2a_endpoint>;
+- frame-master;
+- bitclock-master;
++ frame-master = <&sgtl5000_tx_endpoint>;
++ bitclock-master = <&sgtl5000_tx_endpoint>;
+ };
+
+ sgtl5000_rx_endpoint: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&sai2b_endpoint>;
+- frame-master;
+- bitclock-master;
++ frame-master = <&sgtl5000_rx_endpoint>;
++ bitclock-master = <&sgtl5000_rx_endpoint>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+index 64dca5b7f748d..6885948f3024e 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+@@ -220,8 +220,8 @@
+ &i2c4 {
+ hdmi-transmitter@3d {
+ compatible = "adi,adv7513";
+- reg = <0x3d>, <0x2d>, <0x4d>, <0x5d>;
+- reg-names = "main", "cec", "edid", "packet";
++ reg = <0x3d>, <0x4d>, <0x2d>, <0x5d>;
++ reg-names = "main", "edid", "cec", "packet";
+ clocks = <&cec_clock>;
+ clock-names = "cec";
+
+@@ -239,8 +239,6 @@
+ adi,input-depth = <8>;
+ adi,input-colorspace = "rgb";
+ adi,input-clock = "1x";
+- adi,input-style = <1>;
+- adi,input-justification = "evenly";
+
+ ports {
+ #address-cells = <1>;
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+index 59f18846cf5d0..586aac8a998c0 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+@@ -220,15 +220,15 @@
+ cs42l51_tx_endpoint: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&sai2a_endpoint>;
+- frame-master;
+- bitclock-master;
++ frame-master = <&cs42l51_tx_endpoint>;
++ bitclock-master = <&cs42l51_tx_endpoint>;
+ };
+
+ cs42l51_rx_endpoint: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&sai2b_endpoint>;
+- frame-master;
+- bitclock-master;
++ frame-master = <&cs42l51_rx_endpoint>;
++ bitclock-master = <&cs42l51_rx_endpoint>;
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
+index 14cd3238355b7..2c74993f1a9e8 100644
+--- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
++++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
+@@ -716,7 +716,6 @@
+ nvidia,xcvr-setup-use-fuses;
+ nvidia,xcvr-lsfslew = <2>;
+ nvidia,xcvr-lsrslew = <2>;
+- vbus-supply = <&vdd_vbus1>;
+ };
+
+ usb@c5008000 {
+@@ -728,7 +727,7 @@
+ nvidia,xcvr-setup-use-fuses;
+ nvidia,xcvr-lsfslew = <2>;
+ nvidia,xcvr-lsrslew = <2>;
+- vbus-supply = <&vdd_vbus3>;
++ vbus-supply = <&vdd_5v0_sys>;
+ };
+
+ brcm_wifi_pwrseq: wifi-pwrseq {
+@@ -988,28 +987,6 @@
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
+- vdd_vbus1: regulator@4 {
+- compatible = "regulator-fixed";
+- regulator-name = "vdd_usb1_vbus";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- regulator-always-on;
+- gpio = <&gpio TEGRA_GPIO(D, 0) GPIO_ACTIVE_HIGH>;
+- enable-active-high;
+- vin-supply = <&vdd_5v0_sys>;
+- };
+-
+- vdd_vbus3: regulator@5 {
+- compatible = "regulator-fixed";
+- regulator-name = "vdd_usb3_vbus";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- regulator-always-on;
+- gpio = <&gpio TEGRA_GPIO(D, 3) GPIO_ACTIVE_HIGH>;
+- enable-active-high;
+- vin-supply = <&vdd_5v0_sys>;
+- };
+-
+ sound {
+ compatible = "nvidia,tegra-audio-wm8903-picasso",
+ "nvidia,tegra-audio-wm8903";
+diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
+index 95e6bccdb4f6e..dd4d506683de7 100644
+--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
++++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
+@@ -185,8 +185,9 @@
+ nvidia,pins = "ata", "atb", "atc", "atd", "ate",
+ "cdev1", "cdev2", "dap1", "dtb", "gma",
+ "gmb", "gmc", "gmd", "gme", "gpu7",
+- "gpv", "i2cp", "pta", "rm", "slxa",
+- "slxk", "spia", "spib", "uac";
++ "gpv", "i2cp", "irrx", "irtx", "pta",
++ "rm", "slxa", "slxk", "spia", "spib",
++ "uac";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+@@ -211,7 +212,7 @@
+ conf_ddc {
+ nvidia,pins = "ddc", "dta", "dtd", "kbca",
+ "kbcb", "kbcc", "kbcd", "kbce", "kbcf",
+- "sdc";
++ "sdc", "uad", "uca";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+@@ -221,10 +222,9 @@
+ "lvp0", "owc", "sdb";
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ };
+- conf_irrx {
+- nvidia,pins = "irrx", "irtx", "sdd", "spic",
+- "spie", "spih", "uaa", "uab", "uad",
+- "uca", "ucb";
++ conf_sdd {
++ nvidia,pins = "sdd", "spic", "spie", "spih",
++ "uaa", "uab", "ucb";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ };
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
+index be81330db14f6..02641191682e0 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
+@@ -32,14 +32,14 @@
+ };
+ };
+
+- reg_vcc3v3: vcc3v3 {
++ reg_vcc3v3: regulator-vcc3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+- reg_vdd_cpu_gpu: vdd-cpu-gpu {
++ reg_vdd_cpu_gpu: regulator-vdd-cpu-gpu {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-cpu-gpu";
+ regulator-min-microvolt = <1135000>;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
+index db3d303093f61..6d22efbd645cb 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
+@@ -83,15 +83,9 @@
+ };
+
+ eeprom@52 {
+- compatible = "atmel,24c512";
++ compatible = "onnn,cat24c04", "atmel,24c04";
+ reg = <0x52>;
+ };
+-
+- eeprom@53 {
+- compatible = "atmel,24c512";
+- reg = <0x53>;
+- };
+-
+ };
+ };
+ };
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+index 60acdf0b689ee..7025aad8ae897 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+@@ -59,14 +59,9 @@
+ };
+
+ eeprom@52 {
+- compatible = "atmel,24c512";
++ compatible = "onnn,cat24c05", "atmel,24c04";
+ reg = <0x52>;
+ };
+-
+- eeprom@53 {
+- compatible = "atmel,24c512";
+- reg = <0x53>;
+- };
+ };
+
+ &i2c3 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi
+index c769fadbd008f..00f86cada30d2 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi
+@@ -278,70 +278,86 @@
+
+ pmic@69 {
+ compatible = "mps,mp5416";
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_pmic>;
+ reg = <0x69>;
+
+ regulators {
++ /* vdd_0p95: DRAM/GPU/VPU */
+ buck1 {
+- regulator-name = "vdd_0p95";
+- regulator-min-microvolt = <805000>;
++ regulator-name = "buck1";
++ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1000000>;
+- regulator-max-microamp = <2500000>;
++ regulator-min-microamp = <3800000>;
++ regulator-max-microamp = <6800000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+
++ /* vdd_soc */
+ buck2 {
+- regulator-name = "vdd_soc";
+- regulator-min-microvolt = <805000>;
++ regulator-name = "buck2";
++ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <900000>;
+- regulator-max-microamp = <1000000>;
++ regulator-min-microamp = <2200000>;
++ regulator-max-microamp = <5200000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+
++ /* vdd_arm */
+ buck3_reg: buck3 {
+- regulator-name = "vdd_arm";
+- regulator-min-microvolt = <805000>;
++ regulator-name = "buck3";
++ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1000000>;
+- regulator-max-microamp = <2200000>;
+- regulator-boot-on;
++ regulator-min-microamp = <3800000>;
++ regulator-max-microamp = <6800000>;
++ regulator-always-on;
+ };
+
++ /* vdd_1p8 */
+ buck4 {
+- regulator-name = "vdd_1p8";
++ regulator-name = "buck4";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+- regulator-max-microamp = <500000>;
++ regulator-min-microamp = <2200000>;
++ regulator-max-microamp = <5200000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+
++ /* nvcc_snvs_1p8 */
+ ldo1 {
+- regulator-name = "nvcc_snvs_1p8";
++ regulator-name = "ldo1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+- regulator-max-microamp = <300000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+
++ /* vdd_snvs_0p8 */
+ ldo2 {
+- regulator-name = "vdd_snvs_0p8";
++ regulator-name = "ldo2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+
++ /* vdd_0p9 */
+ ldo3 {
+- regulator-name = "vdd_0p95";
+- regulator-min-microvolt = <800000>;
+- regulator-max-microvolt = <800000>;
++ regulator-name = "ldo3";
++ regulator-min-microvolt = <900000>;
++ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+
++ /* vdd_1p8 */
+ ldo4 {
+- regulator-name = "vdd_1p8";
++ regulator-name = "ldo4";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
++ regulator-always-on;
+ };
+ };
+ };
+@@ -426,12 +442,6 @@
+ >;
+ };
+
+- pinctrl_pmic: pmicgrp {
+- fsl,pins = <
+- MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x41
+- >;
+- };
+-
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+index 905b68a3daa5a..8e4a0ce99790b 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+@@ -46,7 +46,7 @@
+ pinctrl-0 = <&pinctrl_reg_usb1_en>;
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg1_vbus";
+- gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
++ gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+@@ -156,7 +156,8 @@
+
+ pinctrl_reg_usb1_en: regusb1grp {
+ fsl,pins = <
+- MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x41
++ MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x41
++ MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x141
+ MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41
+ >;
+ };
+diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+index 9928a87f593a5..b0bcda8cc51f4 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+@@ -1227,13 +1227,13 @@
+
+ cpu@0 {
+ device_type = "cpu";
+- compatible = "nvidia,denver";
++ compatible = "nvidia,tegra132-denver";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+- compatible = "nvidia,denver";
++ compatible = "nvidia,tegra132-denver";
+ reg = <1>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+index 2e40b60472833..203318aa660f7 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+@@ -2005,7 +2005,7 @@
+ };
+
+ pcie_ep@14160000 {
+- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
++ compatible = "nvidia,tegra194-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>;
+ reg = <0x00 0x14160000 0x0 0x00020000>, /* appl registers (128K) */
+ <0x00 0x36040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
+@@ -2037,7 +2037,7 @@
+ };
+
+ pcie_ep@14180000 {
+- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
++ compatible = "nvidia,tegra194-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
+ reg = <0x00 0x14180000 0x0 0x00020000>, /* appl registers (128K) */
+ <0x00 0x38040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
+@@ -2069,7 +2069,7 @@
+ };
+
+ pcie_ep@141a0000 {
+- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
++ compatible = "nvidia,tegra194-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
+ reg = <0x00 0x141a0000 0x0 0x00020000>, /* appl registers (128K) */
+ <0x00 0x3a040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index 9fa5b028e4f39..23ee1bfa43189 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -151,7 +151,7 @@
+ #size-cells = <2>;
+ ranges;
+
+- rpm_msg_ram: memory@0x60000 {
++ rpm_msg_ram: memory@60000 {
+ reg = <0x0 0x60000 0x0 0x6000>;
+ no-map;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+index e8c37a1693d3b..cc08dc4eb56a5 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
++++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+@@ -20,7 +20,7 @@
+ stdout-path = "serial0";
+ };
+
+- memory {
++ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0x0 0x20000000>;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index a32e5e79ab0b7..e8db62470b230 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -567,10 +567,10 @@
+
+ pcie1: pci@10000000 {
+ compatible = "qcom,pcie-ipq8074";
+- reg = <0x10000000 0xf1d
+- 0x10000f20 0xa8
+- 0x00088000 0x2000
+- 0x10100000 0x1000>;
++ reg = <0x10000000 0xf1d>,
++ <0x10000f20 0xa8>,
++ <0x00088000 0x2000>,
++ <0x10100000 0x1000>;
+ reg-names = "dbi", "elbi", "parf", "config";
+ device_type = "pci";
+ linux,pci-domain = <1>;
+@@ -629,10 +629,10 @@
+
+ pcie0: pci@20000000 {
+ compatible = "qcom,pcie-ipq8074";
+- reg = <0x20000000 0xf1d
+- 0x20000f20 0xa8
+- 0x00080000 0x2000
+- 0x20100000 0x1000>;
++ reg = <0x20000000 0xf1d>,
++ <0x20000f20 0xa8>,
++ <0x00080000 0x2000>,
++ <0x20100000 0x1000>;
+ reg-names = "dbi", "elbi", "parf", "config";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+index f9f0b5aa6a266..87a3217e88efa 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+@@ -15,16 +15,18 @@
+ chosen { };
+
+ clocks {
+- xo_board: xo_board {
++ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
++ clock-output-names = "xo_board";
+ };
+
+- sleep_clk: sleep_clk {
++ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
++ clock-output-names = "sleep_clk";
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index ce430ba9c1183..957487f84eadc 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -17,14 +17,14 @@
+ chosen { };
+
+ clocks {
+- xo_board: xo_board {
++ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+- sleep_clk: sleep_clk {
++ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+index f91a928466c3b..06a0ae773ad50 100644
+--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+@@ -17,14 +17,14 @@
+ chosen { };
+
+ clocks {
+- xo_board: xo_board {
++ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+- sleep_clk: sleep_clk {
++ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+@@ -343,10 +343,19 @@
+ };
+
+ qhee_code: qhee-code@85800000 {
+- reg = <0x0 0x85800000 0x0 0x3700000>;
++ reg = <0x0 0x85800000 0x0 0x600000>;
+ no-map;
+ };
+
++ rmtfs_mem: memory@85e00000 {
++ compatible = "qcom,rmtfs-mem";
++ reg = <0x0 0x85e00000 0x0 0x200000>;
++ no-map;
++
++ qcom,client-id = <1>;
++ qcom,vmid = <15>;
++ };
++
+ smem_region: smem-mem@86000000 {
+ reg = <0 0x86000000 0 0x200000>;
+ no-map;
+@@ -357,58 +366,44 @@
+ no-map;
+ };
+
+- modem_fw_mem: modem-fw-region@8ac00000 {
++ mpss_region: mpss@8ac00000 {
+ reg = <0x0 0x8ac00000 0x0 0x7e00000>;
+ no-map;
+ };
+
+- adsp_fw_mem: adsp-fw-region@92a00000 {
++ adsp_region: adsp@92a00000 {
+ reg = <0x0 0x92a00000 0x0 0x1e00000>;
+ no-map;
+ };
+
+- pil_mba_mem: pil-mba-region@94800000 {
++ mba_region: mba@94800000 {
+ reg = <0x0 0x94800000 0x0 0x200000>;
+ no-map;
+ };
+
+- buffer_mem: buffer-region@94a00000 {
++ buffer_mem: tzbuffer@94a00000 {
+ reg = <0x0 0x94a00000 0x0 0x100000>;
+ no-map;
+ };
+
+- venus_fw_mem: venus-fw-region@9f800000 {
++ venus_region: venus@9f800000 {
+ reg = <0x0 0x9f800000 0x0 0x800000>;
+ no-map;
+ };
+
+- secure_region2: secure-region2@f7c00000 {
+- reg = <0x0 0xf7c00000 0x0 0x5c00000>;
+- no-map;
+- };
+-
+ adsp_mem: adsp-region@f6000000 {
+ reg = <0x0 0xf6000000 0x0 0x800000>;
+ no-map;
+ };
+
+- qseecom_ta_mem: qseecom-ta-region@fec00000 {
+- reg = <0x0 0xfec00000 0x0 0x1000000>;
+- no-map;
+- };
+-
+ qseecom_mem: qseecom-region@f6800000 {
+ reg = <0x0 0xf6800000 0x0 0x1400000>;
+ no-map;
+ };
+
+- secure_display_memory: secure-region@f5c00000 {
+- reg = <0x0 0xf5c00000 0x0 0x5c00000>;
+- no-map;
+- };
+-
+- cont_splash_mem: cont-splash-region@9d400000 {
+- reg = <0x0 0x9d400000 0x0 0x23ff000>;
++ zap_shader_region: gpu@fed00000 {
++ compatible = "shared-dma-pool";
++ reg = <0x0 0xfed00000 0x0 0xa00000>;
+ no-map;
+ };
+ };
+@@ -527,14 +522,18 @@
+ reg = <0x01f40000 0x20000>;
+ };
+
+- tlmm: pinctrl@3000000 {
++ tlmm: pinctrl@3100000 {
+ compatible = "qcom,sdm630-pinctrl";
+- reg = <0x03000000 0xc00000>;
++ reg = <0x03100000 0x400000>,
++ <0x03500000 0x400000>,
++ <0x03900000 0x400000>;
++ reg-names = "south", "center", "north";
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+- #gpio-cells = <0x2>;
++ gpio-ranges = <&tlmm 0 0 114>;
++ #gpio-cells = <2>;
+ interrupt-controller;
+- #interrupt-cells = <0x2>;
++ #interrupt-cells = <2>;
+
+ blsp1_uart1_default: blsp1-uart1-default {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+@@ -554,40 +553,48 @@
+ bias-disable;
+ };
+
+- blsp2_uart1_tx_active: blsp2-uart1-tx-active {
+- pins = "gpio16";
+- drive-strength = <2>;
+- bias-disable;
+- };
+-
+- blsp2_uart1_tx_sleep: blsp2-uart1-tx-sleep {
+- pins = "gpio16";
+- drive-strength = <2>;
+- bias-pull-up;
+- };
++ blsp2_uart1_default: blsp2-uart1-active {
++ tx-rts {
++ pins = "gpio16", "gpio19";
++ function = "blsp_uart5";
++ drive-strength = <2>;
++ bias-disable;
++ };
+
+- blsp2_uart1_rxcts_active: blsp2-uart1-rxcts-active {
+- pins = "gpio17", "gpio18";
+- drive-strength = <2>;
+- bias-disable;
+- };
++ rx {
++ /*
++ * Avoid garbage data while BT module
++ * is powered off or not driving signal
++ */
++ pins = "gpio17";
++ function = "blsp_uart5";
++ drive-strength = <2>;
++ bias-pull-up;
++ };
+
+- blsp2_uart1_rxcts_sleep: blsp2-uart1-rxcts-sleep {
+- pins = "gpio17", "gpio18";
+- drive-strength = <2>;
+- bias-no-pull;
++ cts {
++ /* Match the pull of the BT module */
++ pins = "gpio18";
++ function = "blsp_uart5";
++ drive-strength = <2>;
++ bias-pull-down;
++ };
+ };
+
+- blsp2_uart1_rfr_active: blsp2-uart1-rfr-active {
+- pins = "gpio19";
+- drive-strength = <2>;
+- bias-disable;
+- };
++ blsp2_uart1_sleep: blsp2-uart1-sleep {
++ tx {
++ pins = "gpio16";
++ function = "gpio";
++ drive-strength = <2>;
++ bias-pull-up;
++ };
+
+- blsp2_uart1_rfr_sleep: blsp2-uart1-rfr-sleep {
+- pins = "gpio19";
+- drive-strength = <2>;
+- bias-no-pull;
++ rx-cts-rts {
++ pins = "gpio17", "gpio18", "gpio19";
++ function = "gpio";
++ drive-strength = <2>;
++ bias-no-pull;
++ };
+ };
+
+ i2c1_default: i2c1-default {
+@@ -686,50 +693,106 @@
+ bias-pull-up;
+ };
+
+- sdc1_clk_on: sdc1-clk-on {
+- pins = "sdc1_clk";
+- bias-disable;
+- drive-strength = <16>;
+- };
++ sdc1_state_on: sdc1-on {
++ clk {
++ pins = "sdc1_clk";
++ bias-disable;
++ drive-strength = <16>;
++ };
+
+- sdc1_clk_off: sdc1-clk-off {
+- pins = "sdc1_clk";
+- bias-disable;
+- drive-strength = <2>;
+- };
++ cmd {
++ pins = "sdc1_cmd";
++ bias-pull-up;
++ drive-strength = <10>;
++ };
+
+- sdc1_cmd_on: sdc1-cmd-on {
+- pins = "sdc1_cmd";
+- bias-pull-up;
+- drive-strength = <10>;
+- };
++ data {
++ pins = "sdc1_data";
++ bias-pull-up;
++ drive-strength = <10>;
++ };
+
+- sdc1_cmd_off: sdc1-cmd-off {
+- pins = "sdc1_cmd";
+- bias-pull-up;
+- drive-strength = <2>;
++ rclk {
++ pins = "sdc1_rclk";
++ bias-pull-down;
++ };
+ };
+
+- sdc1_data_on: sdc1-data-on {
+- pins = "sdc1_data";
+- bias-pull-up;
+- drive-strength = <8>;
+- };
++ sdc1_state_off: sdc1-off {
++ clk {
++ pins = "sdc1_clk";
++ bias-disable;
++ drive-strength = <2>;
++ };
+
+- sdc1_data_off: sdc1-data-off {
+- pins = "sdc1_data";
+- bias-pull-up;
+- drive-strength = <2>;
++ cmd {
++ pins = "sdc1_cmd";
++ bias-pull-up;
++ drive-strength = <2>;
++ };
++
++ data {
++ pins = "sdc1_data";
++ bias-pull-up;
++ drive-strength = <2>;
++ };
++
++ rclk {
++ pins = "sdc1_rclk";
++ bias-pull-down;
++ };
+ };
+
+- sdc1_rclk_on: sdc1-rclk-on {
+- pins = "sdc1_rclk";
+- bias-pull-down;
++ sdc2_state_on: sdc2-on {
++ clk {
++ pins = "sdc2_clk";
++ bias-disable;
++ drive-strength = <16>;
++ };
++
++ cmd {
++ pins = "sdc2_cmd";
++ bias-pull-up;
++ drive-strength = <10>;
++ };
++
++ data {
++ pins = "sdc2_data";
++ bias-pull-up;
++ drive-strength = <10>;
++ };
++
++ sd-cd {
++ pins = "gpio54";
++ bias-pull-up;
++ drive-strength = <2>;
++ };
+ };
+
+- sdc1_rclk_off: sdc1-rclk-off {
+- pins = "sdc1_rclk";
+- bias-pull-down;
++ sdc2_state_off: sdc2-off {
++ clk {
++ pins = "sdc2_clk";
++ bias-disable;
++ drive-strength = <2>;
++ };
++
++ cmd {
++ pins = "sdc2_cmd";
++ bias-pull-up;
++ drive-strength = <2>;
++ };
++
++ data {
++ pins = "sdc2_data";
++ bias-pull-up;
++ drive-strength = <2>;
++ };
++
++ sd-cd {
++ pins = "gpio54";
++ bias-disable;
++ drive-strength = <2>;
++ };
+ };
+ };
+
+@@ -823,8 +886,8 @@
+ clock-names = "core", "iface", "xo", "ice";
+
+ pinctrl-names = "default", "sleep";
+- pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+- pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
++ pinctrl-0 = <&sdc1_state_on>;
++ pinctrl-1 = <&sdc1_state_off>;
+
+ bus-width = <8>;
+ non-removable;
+@@ -969,10 +1032,8 @@
+ dmas = <&blsp2_dma 0>, <&blsp2_dma 1>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+- pinctrl-0 = <&blsp2_uart1_tx_active &blsp2_uart1_rxcts_active
+- &blsp2_uart1_rfr_active>;
+- pinctrl-1 = <&blsp2_uart1_tx_sleep &blsp2_uart1_rxcts_sleep
+- &blsp2_uart1_rfr_sleep>;
++ pinctrl-0 = <&blsp2_uart1_default>;
++ pinctrl-1 = <&blsp2_uart1_sleep>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 1316bea3eab52..6d28bfd9a8f59 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -3773,7 +3773,7 @@
+ };
+ };
+
+- epss_l3: interconnect@18591000 {
++ epss_l3: interconnect@18590000 {
+ compatible = "qcom,sm8250-epss-l3";
+ reg = <0 0x18590000 0 0x1000>;
+
+diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
+index b83fb24954b77..3198acb2aad8c 100644
+--- a/arch/arm64/include/asm/el2_setup.h
++++ b/arch/arm64/include/asm/el2_setup.h
+@@ -149,8 +149,17 @@
+ ubfx x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
+ cbz x1, .Lskip_fgt_\@
+
+- msr_s SYS_HDFGRTR_EL2, xzr
+- msr_s SYS_HDFGWTR_EL2, xzr
++ mov x0, xzr
++ mrs x1, id_aa64dfr0_el1
++ ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
++ cmp x1, #3
++ b.lt .Lset_fgt_\@
++ /* Disable PMSNEVFR_EL1 read and write traps */
++ orr x0, x0, #(1 << 62)
++
++.Lset_fgt_\@:
++ msr_s SYS_HDFGRTR_EL2, x0
++ msr_s SYS_HDFGWTR_EL2, x0
+ msr_s SYS_HFGRTR_EL2, xzr
+ msr_s SYS_HFGWTR_EL2, xzr
+ msr_s SYS_HFGITR_EL2, xzr
+diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
+index d44df9d62fc9c..a54ce2646cba2 100644
+--- a/arch/arm64/include/asm/kernel-pgtable.h
++++ b/arch/arm64/include/asm/kernel-pgtable.h
+@@ -65,8 +65,8 @@
+ #define EARLY_KASLR (0)
+ #endif
+
+-#define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \
+- - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
++#define EARLY_ENTRIES(vstart, vend, shift) \
++ ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+
+ #define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
+
+diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
+index 75beffe2ee8a8..e9c30859f80cd 100644
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -27,11 +27,32 @@ typedef struct {
+ } mm_context_t;
+
+ /*
+- * This macro is only used by the TLBI and low-level switch_mm() code,
+- * neither of which can race with an ASID change. We therefore don't
+- * need to reload the counter using atomic64_read().
++ * We use atomic64_read() here because the ASID for an 'mm_struct' can
++ * be reallocated when scheduling one of its threads following a
++ * rollover event (see new_context() and flush_context()). In this case,
++ * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
++ * may use a stale ASID. This is fine in principle as the new ASID is
++ * guaranteed to be clean in the TLB, but the TLBI routines have to take
++ * care to handle the following race:
++ *
++ * CPU 0 CPU 1 CPU 2
++ *
++ * // ptep_clear_flush(mm)
++ * xchg_relaxed(pte, 0)
++ * DSB ISHST
++ * old = ASID(mm)
++ * | <rollover>
++ * | new = new_context(mm)
++ * \-----------------> atomic_set(mm->context.id, new)
++ * cpu_switch_mm(mm)
++ * // Hardware walk of pte using new ASID
++ * TLBI(old)
++ *
++ * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
++ * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
++ * written by CPU 0.
+ */
+-#define ASID(mm) ((mm)->context.id.counter & 0xffff)
++#define ASID(mm) (atomic64_read(&(mm)->context.id) & 0xffff)
+
+ static inline bool arm64_kernel_unmapped_at_el0(void)
+ {
+diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
+index cc3f5a33ff9c5..36f02892e1df8 100644
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -245,9 +245,10 @@ static inline void flush_tlb_all(void)
+
+ static inline void flush_tlb_mm(struct mm_struct *mm)
+ {
+- unsigned long asid = __TLBI_VADDR(0, ASID(mm));
++ unsigned long asid;
+
+ dsb(ishst);
++ asid = __TLBI_VADDR(0, ASID(mm));
+ __tlbi(aside1is, asid);
+ __tlbi_user(aside1is, asid);
+ dsb(ish);
+@@ -256,9 +257,10 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
+ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
+ unsigned long uaddr)
+ {
+- unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
++ unsigned long addr;
+
+ dsb(ishst);
++ addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
+ __tlbi(vale1is, addr);
+ __tlbi_user(vale1is, addr);
+ }
+@@ -283,9 +285,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
+ {
+ int num = 0;
+ int scale = 0;
+- unsigned long asid = ASID(vma->vm_mm);
+- unsigned long addr;
+- unsigned long pages;
++ unsigned long asid, addr, pages;
+
+ start = round_down(start, stride);
+ end = round_up(end, stride);
+@@ -305,6 +305,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
+ }
+
+ dsb(ishst);
++ asid = ASID(vma->vm_mm);
+
+ /*
+ * When the CPU does not support TLB range operations, flush the TLB
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 96873dfa67fd5..3374bbd18fc66 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -176,7 +176,7 @@ SYM_CODE_END(preserve_boot_args)
+ * to be composed of multiple pages. (This effectively scales the end index).
+ *
+ * vstart: virtual address of start of range
+- * vend: virtual address of end of range
++ * vend: virtual address of end of range - we map [vstart, vend]
+ * shift: shift used to transform virtual address into index
+ * ptrs: number of entries in page table
+ * istart: index in table corresponding to vstart
+@@ -213,17 +213,18 @@ SYM_CODE_END(preserve_boot_args)
+ *
+ * tbl: location of page table
+ * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
+- * vstart: start address to map
+- * vend: end address to map - we map [vstart, vend]
++ * vstart: virtual address of start of range
++ * vend: virtual address of end of range - we map [vstart, vend - 1]
+ * flags: flags to use to map last level entries
+ * phys: physical address corresponding to vstart - physical memory is contiguous
+ * pgds: the number of pgd entries
+ *
+ * Temporaries: istart, iend, tmp, count, sv - these need to be different registers
+- * Preserves: vstart, vend, flags
+- * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv
++ * Preserves: vstart, flags
++ * Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv
+ */
+ .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
++ sub \vend, \vend, #1
+ add \rtbl, \tbl, #PAGE_SIZE
+ mov \sv, \rtbl
+ mov \count, #0
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index 709d2c433c5e9..f6b1a88245db2 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -181,6 +181,8 @@ SECTIONS
+ /* everything from this point to __init_begin will be marked RO NX */
+ RO_DATA(PAGE_SIZE)
+
++ HYPERVISOR_DATA_SECTIONS
++
+ idmap_pg_dir = .;
+ . += IDMAP_DIR_SIZE;
+ idmap_pg_end = .;
+@@ -260,8 +262,6 @@ SECTIONS
+ _sdata = .;
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
+
+- HYPERVISOR_DATA_SECTIONS
+-
+ /*
+ * Data written with the MMU off but read with the MMU on requires
+ * cache lines to be invalidated, discarding up to a Cache Writeback
+diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
+index f1be832e2b746..d1e93a39cd3bc 100644
+--- a/arch/m68k/Kconfig.bus
++++ b/arch/m68k/Kconfig.bus
+@@ -63,7 +63,7 @@ source "drivers/zorro/Kconfig"
+
+ endif
+
+-if !MMU
++if COLDFIRE
+
+ config ISA_DMA_API
+ def_bool !M5272
+diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c
+index 0ddf03df62688..f451268f6c384 100644
+--- a/arch/mips/mti-malta/malta-dtshim.c
++++ b/arch/mips/mti-malta/malta-dtshim.c
+@@ -22,7 +22,7 @@
+ #define ROCIT_CONFIG_GEN1_MEMMAP_SHIFT 8
+ #define ROCIT_CONFIG_GEN1_MEMMAP_MASK (0xf << 8)
+
+-static unsigned char fdt_buf[16 << 10] __initdata;
++static unsigned char fdt_buf[16 << 10] __initdata __aligned(8);
+
+ /* determined physical memory size, not overridden by command line args */
+ extern unsigned long physical_memsize;
+diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
+index bc657e55c15f8..98e4f97db5159 100644
+--- a/arch/openrisc/kernel/entry.S
++++ b/arch/openrisc/kernel/entry.S
+@@ -547,6 +547,7 @@ EXCEPTION_ENTRY(_external_irq_handler)
+ l.bnf 1f // ext irq enabled, all ok.
+ l.nop
+
++#ifdef CONFIG_PRINTK
+ l.addi r1,r1,-0x8
+ l.movhi r3,hi(42f)
+ l.ori r3,r3,lo(42f)
+@@ -560,6 +561,7 @@ EXCEPTION_ENTRY(_external_irq_handler)
+ .string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r"
+ .align 4
+ .previous
++#endif
+
+ l.ori r4,r4,SPR_SR_IEE // fix the bug
+ // l.sw PT_SR(r1),r4
+diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
+index aed8ea29268bb..2d019aa73b8f0 100644
+--- a/arch/parisc/Makefile
++++ b/arch/parisc/Makefile
+@@ -25,18 +25,18 @@ CHECKFLAGS += -D__hppa__=1
+ ifdef CONFIG_64BIT
+ UTS_MACHINE := parisc64
+ CHECKFLAGS += -D__LP64__=1
+-CC_ARCHES = hppa64
+ LD_BFD := elf64-hppa-linux
+ else # 32-bit
+-CC_ARCHES = hppa hppa2.0 hppa1.1
+ LD_BFD := elf32-hppa-linux
+ endif
+
+ # select defconfig based on actual architecture
+-ifeq ($(shell uname -m),parisc64)
++ifeq ($(ARCH),parisc64)
+ KBUILD_DEFCONFIG := generic-64bit_defconfig
++ CC_ARCHES := hppa64
+ else
+ KBUILD_DEFCONFIG := generic-32bit_defconfig
++ CC_ARCHES := hppa hppa2.0 hppa1.1
+ endif
+
+ export LD_BFD
+diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
+index fb1e94a3982bc..db1a47cf424dd 100644
+--- a/arch/parisc/kernel/signal.c
++++ b/arch/parisc/kernel/signal.c
+@@ -237,6 +237,12 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
+ #endif
+
+ usp = (regs->gr[30] & ~(0x01UL));
++#ifdef CONFIG_64BIT
++ if (is_compat_task()) {
++ /* The gcc alloca implementation leaves garbage in the upper 32 bits of sp */
++ usp = (compat_uint_t)usp;
++ }
++#endif
+ /*FIXME: frame_size parameter is unused, remove it. */
+ frame = get_sigframe(&ksig->ka, usp, sizeof(*frame));
+
+diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
+index 949ff9ccda5e7..dbf3ff8adc654 100644
+--- a/arch/powerpc/configs/mpc885_ads_defconfig
++++ b/arch/powerpc/configs/mpc885_ads_defconfig
+@@ -34,6 +34,7 @@ CONFIG_MTD_CFI_GEOMETRY=y
+ # CONFIG_MTD_CFI_I2 is not set
+ CONFIG_MTD_CFI_I4=y
+ CONFIG_MTD_CFI_AMDSTD=y
++CONFIG_MTD_PHYSMAP=y
+ CONFIG_MTD_PHYSMAP_OF=y
+ # CONFIG_BLK_DEV is not set
+ CONFIG_NETDEVICES=y
+diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
+index c6bbe9778d3cd..3c09109e708ef 100644
+--- a/arch/powerpc/include/asm/pmc.h
++++ b/arch/powerpc/include/asm/pmc.h
+@@ -34,6 +34,13 @@ static inline void ppc_set_pmu_inuse(int inuse)
+ #endif
+ }
+
++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
++static inline int ppc_get_pmu_inuse(void)
++{
++ return get_paca()->pmcregs_in_use;
++}
++#endif
++
+ extern void power4_enable_pmcs(void);
+
+ #else /* CONFIG_PPC64 */
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index df6b468976d53..fe505d8ed55bc 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -1085,7 +1085,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ }
+
+ if (cpu_to_chip_id(boot_cpuid) != -1) {
+- int idx = num_possible_cpus() / threads_per_core;
++ int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
+
+ /*
+ * All threads of a core will all belong to the same core,
+@@ -1503,6 +1503,7 @@ static void add_cpu_to_masks(int cpu)
+ * add it to it's own thread sibling mask.
+ */
+ cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
++ cpumask_set_cpu(cpu, cpu_core_mask(cpu));
+
+ for (i = first_thread; i < first_thread + threads_per_core; i++)
+ if (cpu_online(i))
+@@ -1520,11 +1521,6 @@ static void add_cpu_to_masks(int cpu)
+ if (chip_id_lookup_table && ret)
+ chip_id = cpu_to_chip_id(cpu);
+
+- if (chip_id == -1) {
+- cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
+- goto out;
+- }
+-
+ if (shared_caches)
+ submask_fn = cpu_l2_cache_mask;
+
+@@ -1534,6 +1530,10 @@ static void add_cpu_to_masks(int cpu)
+ /* Skip all CPUs already part of current CPU core mask */
+ cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
+
++ /* If chip_id is -1; limit the cpu_core_mask to within DIE*/
++ if (chip_id == -1)
++ cpumask_and(mask, mask, cpu_cpu_mask(cpu));
++
+ for_each_cpu(i, mask) {
+ if (chip_id == cpu_to_chip_id(i)) {
+ or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
+@@ -1543,7 +1543,6 @@ static void add_cpu_to_masks(int cpu)
+ }
+ }
+
+-out:
+ free_cpumask_var(mask);
+ }
+
+diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
+index ea0d9c36e177c..b64b734a5030f 100644
+--- a/arch/powerpc/kernel/stacktrace.c
++++ b/arch/powerpc/kernel/stacktrace.c
+@@ -8,6 +8,7 @@
+ * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
+ */
+
++#include <linux/delay.h>
+ #include <linux/export.h>
+ #include <linux/kallsyms.h>
+ #include <linux/module.h>
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
+index d909c069363e0..e7924664a9445 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
+@@ -64,10 +64,12 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
+ }
+ isync();
+
++ pagefault_disable();
+ if (is_load)
+- ret = copy_from_user_nofault(to, (const void __user *)from, n);
++ ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
+ else
+- ret = copy_to_user_nofault((void __user *)to, from, n);
++ ret = __copy_to_user_inatomic((void __user *)to, from, n);
++ pagefault_enable();
+
+ /* switch the pid first to avoid running host with unallocated pid */
+ if (quadrant == 1 && pid != old_pid)
+diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
+index 083a4e037718d..e5ba96c41f3fc 100644
+--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
+@@ -173,10 +173,13 @@ static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
+ idx -= stt->offset;
+ page = stt->pages[idx / TCES_PER_PAGE];
+ /*
+- * page must not be NULL in real mode,
+- * kvmppc_rm_ioba_validate() must have taken care of this.
++ * kvmppc_rm_ioba_validate() allows pages not be allocated if TCE is
++ * being cleared, otherwise it returns H_TOO_HARD and we skip this.
+ */
+- WARN_ON_ONCE_RM(!page);
++ if (!page) {
++ WARN_ON_ONCE_RM(tce != 0);
++ return;
++ }
+ tbl = kvmppc_page_address(page);
+
+ tbl[idx % TCES_PER_PAGE] = tce;
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 395f98158e81e..890fbf4baf15e 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -59,6 +59,7 @@
+ #include <asm/kvm_book3s.h>
+ #include <asm/mmu_context.h>
+ #include <asm/lppaca.h>
++#include <asm/pmc.h>
+ #include <asm/processor.h>
+ #include <asm/cputhreads.h>
+ #include <asm/page.h>
+@@ -3687,6 +3688,18 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+
++#ifdef CONFIG_PPC_PSERIES
++ if (kvmhv_on_pseries()) {
++ barrier();
++ if (vcpu->arch.vpa.pinned_addr) {
++ struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
++ get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
++ } else {
++ get_lppaca()->pmcregs_in_use = 1;
++ }
++ barrier();
++ }
++#endif
+ kvmhv_load_guest_pmu(vcpu);
+
+ msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
+@@ -3823,6 +3836,13 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ save_pmu |= nesting_enabled(vcpu->kvm);
+
+ kvmhv_save_guest_pmu(vcpu, save_pmu);
++#ifdef CONFIG_PPC_PSERIES
++ if (kvmhv_on_pseries()) {
++ barrier();
++ get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
++ barrier();
++ }
++#endif
+
+ vc->entry_exit_map = 0x101;
+ vc->in_guest = 0;
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index f2bf98bdcea28..094a1076fd1fe 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -893,7 +893,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
+ static void __init find_possible_nodes(void)
+ {
+ struct device_node *rtas;
1674 +- const __be32 *domains;
1675 ++ const __be32 *domains = NULL;
1676 + int prop_length, max_nodes;
1677 + u32 i;
1678 +
1679 +@@ -909,9 +909,14 @@ static void __init find_possible_nodes(void)
1680 + * it doesn't exist, then fallback on ibm,max-associativity-domains.
1681 + * Current denotes what the platform can support compared to max
1682 + * which denotes what the Hypervisor can support.
1683 ++ *
1684 ++ * If the LPAR is migratable, new nodes might be activated after a LPM,
1685 ++ * so we should consider the max number in that case.
1686 + */
1687 +- domains = of_get_property(rtas, "ibm,current-associativity-domains",
1688 +- &prop_length);
1689 ++ if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
1690 ++ domains = of_get_property(rtas,
1691 ++ "ibm,current-associativity-domains",
1692 ++ &prop_length);
1693 + if (!domains) {
1694 + domains = of_get_property(rtas, "ibm,max-associativity-domains",
1695 + &prop_length);
1696 +@@ -920,6 +925,8 @@ static void __init find_possible_nodes(void)
1697 + }
1698 +
1699 + max_nodes = of_read_number(&domains[min_common_depth], 1);
1700 ++ pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);
1701 ++
1702 + for (i = 0; i < max_nodes; i++) {
1703 + if (!node_possible(i))
1704 + node_set(i, node_possible_map);
1705 +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
1706 +index 51622411a7ccd..35658b963d5ab 100644
1707 +--- a/arch/powerpc/perf/core-book3s.c
1708 ++++ b/arch/powerpc/perf/core-book3s.c
1709 +@@ -2251,18 +2251,10 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
1710 + */
1711 + unsigned long perf_instruction_pointer(struct pt_regs *regs)
1712 + {
1713 +- bool use_siar = regs_use_siar(regs);
1714 + unsigned long siar = mfspr(SPRN_SIAR);
1715 +
1716 +- if (ppmu && (ppmu->flags & PPMU_P10_DD1)) {
1717 +- if (siar)
1718 +- return siar;
1719 +- else
1720 +- return regs->nip;
1721 +- } else if (use_siar && siar_valid(regs))
1722 +- return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
1723 +- else if (use_siar)
1724 +- return 0; // no valid instruction pointer
1725 ++ if (regs_use_siar(regs) && siar_valid(regs) && siar)
1726 ++ return siar + perf_ip_adjust(regs);
1727 + else
1728 + return regs->nip;
1729 + }
1730 +diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
1731 +index d48413e28c39e..c756228a081fb 100644
1732 +--- a/arch/powerpc/perf/hv-gpci.c
1733 ++++ b/arch/powerpc/perf/hv-gpci.c
1734 +@@ -175,7 +175,7 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
1735 + */
1736 + count = 0;
1737 + for (i = offset; i < offset + length; i++)
1738 +- count |= arg->bytes[i] << (i - offset);
1739 ++ count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);
1740 +
1741 + *value = count;
1742 + out:
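[Editor's note, not part of the patch] The hv-gpci hunk above fixes how single_gpci_request() assembles a counter value from the hypervisor's result buffer: the old loop shifted each byte by its index in bits (0, 1, 2, ...) rather than bytes, and without a 64-bit cast, so multi-byte counters collapsed onto each other. The buffer is big-endian, so the first byte belongs in the most significant position. A minimal stand-alone sketch of the corrected assembly (illustrative only; be_bytes_to_u64 is a hypothetical name, not a kernel function):

	#include <stdint.h>
	#include <stddef.h>

	/* Assemble 'length' big-endian bytes (length <= 8), starting at
	 * 'offset', into a host-order 64-bit value -- the same shift the
	 * corrected loop in single_gpci_request() uses. */
	static uint64_t be_bytes_to_u64(const uint8_t *bytes, size_t offset,
					size_t length)
	{
		uint64_t count = 0;
		size_t i;

		for (i = offset; i < offset + length; i++)
			count |= (uint64_t)bytes[i] << ((length - 1 - (i - offset)) * 8);
		return count;
	}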
1743 +diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
1744 +index 3e388fa208d4f..519d517fedf4e 100644
1745 +--- a/arch/s390/include/asm/setup.h
1746 ++++ b/arch/s390/include/asm/setup.h
1747 +@@ -36,6 +36,7 @@
1748 + #define MACHINE_FLAG_NX BIT(15)
1749 + #define MACHINE_FLAG_GS BIT(16)
1750 + #define MACHINE_FLAG_SCC BIT(17)
1751 ++#define MACHINE_FLAG_PCI_MIO BIT(18)
1752 +
1753 + #define LPP_MAGIC BIT(31)
1754 + #define LPP_PID_MASK _AC(0xffffffff, UL)
1755 +@@ -109,6 +110,7 @@ extern unsigned long mio_wb_bit_mask;
1756 + #define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
1757 + #define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
1758 + #define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
1759 ++#define MACHINE_HAS_PCI_MIO (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
1760 +
1761 + /*
1762 + * Console mode. Override with conmode=
1763 +diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
1764 +index e317fd4866c15..f16f4d054ae25 100644
1765 +--- a/arch/s390/include/asm/smp.h
1766 ++++ b/arch/s390/include/asm/smp.h
1767 +@@ -18,6 +18,7 @@ extern struct mutex smp_cpu_state_mutex;
1768 + extern unsigned int smp_cpu_mt_shift;
1769 + extern unsigned int smp_cpu_mtid;
1770 + extern __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
1771 ++extern cpumask_t cpu_setup_mask;
1772 +
1773 + extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
1774 +
1775 +diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
1776 +index a361d2e70025c..661585587cbee 100644
1777 +--- a/arch/s390/kernel/early.c
1778 ++++ b/arch/s390/kernel/early.c
1779 +@@ -236,6 +236,10 @@ static __init void detect_machine_facilities(void)
1780 + clock_comparator_max = -1ULL >> 1;
1781 + __ctl_set_bit(0, 53);
1782 + }
1783 ++ if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
1784 ++ S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
1785 ++ /* the control bit is set during PCI initialization */
1786 ++ }
1787 + }
1788 +
1789 + static inline void save_vector_registers(void)
1790 +diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
1791 +index ab584e8e35275..9156653b56f69 100644
1792 +--- a/arch/s390/kernel/jump_label.c
1793 ++++ b/arch/s390/kernel/jump_label.c
1794 +@@ -36,7 +36,7 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
1795 + unsigned char *ipe = (unsigned char *)expected;
1796 + unsigned char *ipn = (unsigned char *)new;
1797 +
1798 +- pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
1799 ++ pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc);
1800 + pr_emerg("Found: %6ph\n", ipc);
1801 + pr_emerg("Expected: %6ph\n", ipe);
1802 + pr_emerg("New: %6ph\n", ipn);
1803 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
1804 +index 1fb483e06a647..926ba86f645e3 100644
1805 +--- a/arch/s390/kernel/smp.c
1806 ++++ b/arch/s390/kernel/smp.c
1807 +@@ -96,6 +96,7 @@ __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
1808 + #endif
1809 +
1810 + static unsigned int smp_max_threads __initdata = -1U;
1811 ++cpumask_t cpu_setup_mask;
1812 +
1813 + static int __init early_nosmt(char *s)
1814 + {
1815 +@@ -883,13 +884,14 @@ static void smp_init_secondary(void)
1816 + vtime_init();
1817 + vdso_getcpu_init();
1818 + pfault_init();
1819 ++ cpumask_set_cpu(cpu, &cpu_setup_mask);
1820 ++ update_cpu_masks();
1821 + notify_cpu_starting(cpu);
1822 + if (topology_cpu_dedicated(cpu))
1823 + set_cpu_flag(CIF_DEDICATED_CPU);
1824 + else
1825 + clear_cpu_flag(CIF_DEDICATED_CPU);
1826 + set_cpu_online(cpu, true);
1827 +- update_cpu_masks();
1828 + inc_irq_stat(CPU_RST);
1829 + local_irq_enable();
1830 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1831 +@@ -945,10 +947,13 @@ early_param("possible_cpus", _setup_possible_cpus);
1832 + int __cpu_disable(void)
1833 + {
1834 + unsigned long cregs[16];
1835 ++ int cpu;
1836 +
1837 + /* Handle possible pending IPIs */
1838 + smp_handle_ext_call();
1839 +- set_cpu_online(smp_processor_id(), false);
1840 ++ cpu = smp_processor_id();
1841 ++ set_cpu_online(cpu, false);
1842 ++ cpumask_clear_cpu(cpu, &cpu_setup_mask);
1843 + update_cpu_masks();
1844 + /* Disable pseudo page faults on this cpu. */
1845 + pfault_fini();
1846 +diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
1847 +index 26aa2614ee352..eb4047c9da9a3 100644
1848 +--- a/arch/s390/kernel/topology.c
1849 ++++ b/arch/s390/kernel/topology.c
1850 +@@ -67,7 +67,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
1851 + static cpumask_t mask;
1852 +
1853 + cpumask_clear(&mask);
1854 +- if (!cpu_online(cpu))
1855 ++ if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
1856 + goto out;
1857 + cpumask_set_cpu(cpu, &mask);
1858 + switch (topology_mode) {
1859 +@@ -88,7 +88,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
1860 + case TOPOLOGY_MODE_SINGLE:
1861 + break;
1862 + }
1863 +- cpumask_and(&mask, &mask, cpu_online_mask);
1864 ++ cpumask_and(&mask, &mask, &cpu_setup_mask);
1865 + out:
1866 + cpumask_copy(dst, &mask);
1867 + }
1868 +@@ -99,16 +99,16 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
1869 + int i;
1870 +
1871 + cpumask_clear(&mask);
1872 +- if (!cpu_online(cpu))
1873 ++ if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
1874 + goto out;
1875 + cpumask_set_cpu(cpu, &mask);
1876 + if (topology_mode != TOPOLOGY_MODE_HW)
1877 + goto out;
1878 + cpu -= cpu % (smp_cpu_mtid + 1);
1879 +- for (i = 0; i <= smp_cpu_mtid; i++)
1880 +- if (cpu_present(cpu + i))
1881 ++ for (i = 0; i <= smp_cpu_mtid; i++) {
1882 ++ if (cpumask_test_cpu(cpu + i, &cpu_setup_mask))
1883 + cpumask_set_cpu(cpu + i, &mask);
1884 +- cpumask_and(&mask, &mask, cpu_online_mask);
1885 ++ }
1886 + out:
1887 + cpumask_copy(dst, &mask);
1888 + }
1889 +@@ -569,6 +569,7 @@ void __init topology_init_early(void)
1890 + alloc_masks(info, &book_info, 2);
1891 + alloc_masks(info, &drawer_info, 3);
1892 + out:
1893 ++ cpumask_set_cpu(0, &cpu_setup_mask);
1894 + __arch_update_cpu_topology();
1895 + __arch_update_dedicated_flag(NULL);
1896 + }
1897 +diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
1898 +index 8ac710de1ab1b..07bbee9b7320d 100644
1899 +--- a/arch/s390/mm/init.c
1900 ++++ b/arch/s390/mm/init.c
1901 +@@ -186,9 +186,9 @@ static void pv_init(void)
1902 + return;
1903 +
1904 + /* make sure bounce buffers are shared */
1905 ++ swiotlb_force = SWIOTLB_FORCE;
1906 + swiotlb_init(1);
1907 + swiotlb_update_mem_attributes();
1908 +- swiotlb_force = SWIOTLB_FORCE;
1909 + }
1910 +
1911 + void __init mem_init(void)
1912 +diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
1913 +index 77cd965cffefa..34839bad33e4d 100644
1914 +--- a/arch/s390/pci/pci.c
1915 ++++ b/arch/s390/pci/pci.c
1916 +@@ -893,7 +893,6 @@ static void zpci_mem_exit(void)
1917 + }
1918 +
1919 + static unsigned int s390_pci_probe __initdata = 1;
1920 +-static unsigned int s390_pci_no_mio __initdata;
1921 + unsigned int s390_pci_force_floating __initdata;
1922 + static unsigned int s390_pci_initialized;
1923 +
1924 +@@ -904,7 +903,7 @@ char * __init pcibios_setup(char *str)
1925 + return NULL;
1926 + }
1927 + if (!strcmp(str, "nomio")) {
1928 +- s390_pci_no_mio = 1;
1929 ++ S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
1930 + return NULL;
1931 + }
1932 + if (!strcmp(str, "force_floating")) {
1933 +@@ -935,7 +934,7 @@ static int __init pci_base_init(void)
1934 + return 0;
1935 + }
1936 +
1937 +- if (test_facility(153) && !s390_pci_no_mio) {
1938 ++ if (MACHINE_HAS_PCI_MIO) {
1939 + static_branch_enable(&have_mio);
1940 + ctl_set_bit(2, 5);
1941 + }
1942 +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
1943 +index 4fa0a42808951..ea87d9ed77e97 100644
1944 +--- a/arch/x86/kernel/cpu/mshyperv.c
1945 ++++ b/arch/x86/kernel/cpu/mshyperv.c
1946 +@@ -370,8 +370,6 @@ static void __init ms_hyperv_init_platform(void)
1947 + if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
1948 + wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, 0x1);
1949 + setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
1950 +- } else {
1951 +- mark_tsc_unstable("running on Hyper-V");
1952 + }
1953 +
1954 + /*
1955 +@@ -432,6 +430,13 @@ static void __init ms_hyperv_init_platform(void)
1956 + /* Register Hyper-V specific clocksource */
1957 + hv_init_clocksource();
1958 + #endif
1959 ++ /*
1960 ++ * TSC should be marked as unstable only after Hyper-V
1961 ++ * clocksource has been initialized. This ensures that the
1962 ++ * stability of the sched_clock is not altered.
1963 ++ */
1964 ++ if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
1965 ++ mark_tsc_unstable("running on Hyper-V");
1966 + }
1967 +
1968 + static bool __init ms_hyperv_x2apic_available(void)
1969 +diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
1970 +index ac06ca32e9ef7..5e6e236977c75 100644
1971 +--- a/arch/x86/xen/p2m.c
1972 ++++ b/arch/x86/xen/p2m.c
1973 +@@ -618,8 +618,8 @@ int xen_alloc_p2m_entry(unsigned long pfn)
1974 + }
1975 +
1976 + /* Expanded the p2m? */
1977 +- if (pfn > xen_p2m_last_pfn) {
1978 +- xen_p2m_last_pfn = pfn;
1979 ++ if (pfn >= xen_p2m_last_pfn) {
1980 ++ xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
1981 + HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
1982 + }
1983 +
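[Editor's note, not part of the patch] The xen/p2m hunk above rounds xen_p2m_last_pfn up to the next P2M_PER_PAGE boundary instead of recording the exact pfn: the p2m is extended a whole page of entries at a time, so the recorded limit must cover the entire newly mapped page, and the comparison becomes >= so a pfn landing exactly on the old limit still updates it. A sketch of the ALIGN() rounding involved (illustrative; the kernel's macro lives in include/linux/align.h in recent kernels and assumes a power-of-two boundary):

	#include <stdint.h>

	/* Round x up to the next multiple of the power-of-two 'a'. */
	#define ALIGN_UP(x, a) (((x) + ((uint64_t)(a) - 1)) & ~((uint64_t)(a) - 1))

	/* With P2M_PER_PAGE == 512 (4 KiB pages, 8-byte entries):
	 * ALIGN_UP(1000 + 1, 512) == 1024, covering the whole 512-entry
	 * page just added rather than only the pfn that triggered it. */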
1984 +diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
1985 +index a3dda25a4e45e..eed02cf3d6b03 100644
1986 +--- a/arch/xtensa/platforms/iss/console.c
1987 ++++ b/arch/xtensa/platforms/iss/console.c
1988 +@@ -143,9 +143,13 @@ static const struct tty_operations serial_ops = {
1989 +
1990 + static int __init rs_init(void)
1991 + {
1992 +- tty_port_init(&serial_port);
1993 ++ int ret;
1994 +
1995 + serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
1996 ++ if (!serial_driver)
1997 ++ return -ENOMEM;
1998 ++
1999 ++ tty_port_init(&serial_port);
2000 +
2001 + /* Initialize the tty_driver structure */
2002 +
2003 +@@ -163,8 +167,15 @@ static int __init rs_init(void)
2004 + tty_set_operations(serial_driver, &serial_ops);
2005 + tty_port_link_device(&serial_port, serial_driver, 0);
2006 +
2007 +- if (tty_register_driver(serial_driver))
2008 +- panic("Couldn't register serial driver\n");
2009 ++ ret = tty_register_driver(serial_driver);
2010 ++ if (ret) {
2011 ++ pr_err("Couldn't register serial driver\n");
2012 ++ tty_driver_kref_put(serial_driver);
2013 ++ tty_port_destroy(&serial_port);
2014 ++
2015 ++ return ret;
2016 ++ }
2017 ++
2018 + return 0;
2019 + }
2020 +
2021 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
2022 +index 4df33cc08eee0..6dfda57349cc0 100644
2023 +--- a/block/bfq-iosched.c
2024 ++++ b/block/bfq-iosched.c
2025 +@@ -5258,7 +5258,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
2026 + if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
2027 + pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
2028 + bfqq->new_ioprio);
2029 +- bfqq->new_ioprio = IOPRIO_BE_NR;
2030 ++ bfqq->new_ioprio = IOPRIO_BE_NR - 1;
2031 + }
2032 +
2033 + bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
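[Editor's note, not part of the patch] The bfq hunk above fixes an off-by-one clamp: valid best-effort I/O priority levels run from 0 to IOPRIO_BE_NR - 1, and clamping a bad value to IOPRIO_BE_NR itself would let bfq_ioprio_to_weight() compute a weight of 0, which is not a valid queue weight. A tiny sketch of the corrected clamp (illustrative only; clamp_be_level is a hypothetical helper):

	/* Valid levels are 0 .. nr_levels - 1; clamp overflow to the last. */
	static inline int clamp_be_level(int level, int nr_levels)
	{
		return level >= nr_levels ? nr_levels - 1 : level;
	}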
2034 +diff --git a/block/blk-zoned.c b/block/blk-zoned.c
2035 +index 250cb76ee6153..457eceabed2ec 100644
2036 +--- a/block/blk-zoned.c
2037 ++++ b/block/blk-zoned.c
2038 +@@ -288,9 +288,6 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
2039 + if (!blk_queue_is_zoned(q))
2040 + return -ENOTTY;
2041 +
2042 +- if (!capable(CAP_SYS_ADMIN))
2043 +- return -EACCES;
2044 +-
2045 + if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
2046 + return -EFAULT;
2047 +
2048 +@@ -349,9 +346,6 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
2049 + if (!blk_queue_is_zoned(q))
2050 + return -ENOTTY;
2051 +
2052 +- if (!capable(CAP_SYS_ADMIN))
2053 +- return -EACCES;
2054 +-
2055 + if (!(mode & FMODE_WRITE))
2056 + return -EBADF;
2057 +
2058 +diff --git a/block/bsg.c b/block/bsg.c
2059 +index bd10922d5cbb4..4d0ad5846ccfa 100644
2060 +--- a/block/bsg.c
2061 ++++ b/block/bsg.c
2062 +@@ -371,10 +371,13 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2063 + case SG_GET_RESERVED_SIZE:
2064 + case SG_SET_RESERVED_SIZE:
2065 + case SG_EMULATED_HOST:
2066 +- case SCSI_IOCTL_SEND_COMMAND:
2067 + return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
2068 + case SG_IO:
2069 + return bsg_sg_io(bd->queue, file->f_mode, uarg);
2070 ++ case SCSI_IOCTL_SEND_COMMAND:
2071 ++ pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
2072 ++ current->comm);
2073 ++ return -EINVAL;
2074 + default:
2075 + return -ENOTTY;
2076 + }
2077 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
2078 +index 44f434acfce08..0e6e73b8023fc 100644
2079 +--- a/drivers/ata/libata-core.c
2080 ++++ b/drivers/ata/libata-core.c
2081 +@@ -3950,6 +3950,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
2082 + ATA_HORKAGE_ZERO_AFTER_TRIM, },
2083 + { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2084 + ATA_HORKAGE_ZERO_AFTER_TRIM, },
2085 ++ { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2086 ++ ATA_HORKAGE_ZERO_AFTER_TRIM, },
2087 ++ { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2088 ++ ATA_HORKAGE_ZERO_AFTER_TRIM, },
2089 + { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
2090 + ATA_HORKAGE_ZERO_AFTER_TRIM, },
2091 +
2092 +diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
2093 +index f0ef844428bb4..338c2e50f7591 100644
2094 +--- a/drivers/ata/sata_dwc_460ex.c
2095 ++++ b/drivers/ata/sata_dwc_460ex.c
2096 +@@ -1259,24 +1259,20 @@ static int sata_dwc_probe(struct platform_device *ofdev)
2097 + irq = irq_of_parse_and_map(np, 0);
2098 + if (irq == NO_IRQ) {
2099 + dev_err(&ofdev->dev, "no SATA DMA irq\n");
2100 +- err = -ENODEV;
2101 +- goto error_out;
2102 ++ return -ENODEV;
2103 + }
2104 +
2105 + #ifdef CONFIG_SATA_DWC_OLD_DMA
2106 + if (!of_find_property(np, "dmas", NULL)) {
2107 + err = sata_dwc_dma_init_old(ofdev, hsdev);
2108 + if (err)
2109 +- goto error_out;
2110 ++ return err;
2111 + }
2112 + #endif
2113 +
2114 + hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
2115 +- if (IS_ERR(hsdev->phy)) {
2116 +- err = PTR_ERR(hsdev->phy);
2117 +- hsdev->phy = NULL;
2118 +- goto error_out;
2119 +- }
2120 ++ if (IS_ERR(hsdev->phy))
2121 ++ return PTR_ERR(hsdev->phy);
2122 +
2123 + err = phy_init(hsdev->phy);
2124 + if (err)
2125 +diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
2126 +index 380ad1fdb7456..57f78d1cc9d84 100644
2127 +--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
2128 ++++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
2129 +@@ -67,6 +67,8 @@ struct fsl_mc_addr_translation_range {
2130 + #define MC_FAPR_PL BIT(18)
2131 + #define MC_FAPR_BMT BIT(17)
2132 +
2133 ++static phys_addr_t mc_portal_base_phys_addr;
2134 ++
2135 + /**
2136 + * fsl_mc_bus_match - device to driver matching callback
2137 + * @dev: the fsl-mc device to match against
2138 +@@ -219,7 +221,7 @@ static int scan_fsl_mc_bus(struct device *dev, void *data)
2139 + root_mc_dev = to_fsl_mc_device(dev);
2140 + root_mc_bus = to_fsl_mc_bus(root_mc_dev);
2141 + mutex_lock(&root_mc_bus->scan_mutex);
2142 +- dprc_scan_objects(root_mc_dev, NULL);
2143 ++ dprc_scan_objects(root_mc_dev, false);
2144 + mutex_unlock(&root_mc_bus->scan_mutex);
2145 +
2146 + exit:
2147 +@@ -702,14 +704,30 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
2148 + * If base address is in the region_desc use it otherwise
2149 + * revert to old mechanism
2150 + */
2151 +- if (region_desc.base_address)
2152 ++ if (region_desc.base_address) {
2153 + regions[i].start = region_desc.base_address +
2154 + region_desc.base_offset;
2155 +- else
2156 ++ } else {
2157 + error = translate_mc_addr(mc_dev, mc_region_type,
2158 + region_desc.base_offset,
2159 + &regions[i].start);
2160 +
2161 ++ /*
2162 ++ * Some versions of the MC firmware wrongly report
2163 ++ * 0 for register base address of the DPMCP associated
2164 ++ * with child DPRC objects thus rendering them unusable.
2165 ++ * This is particularly troublesome in ACPI boot
2166 ++ * scenarios where the legacy way of extracting this
2167 ++ * base address from the device tree does not apply.
2168 ++ * Given that DPMCPs share the same base address,
2169 ++ * workaround this by using the base address extracted
2170 ++ * from the root DPRC container.
2171 ++ */
2172 ++ if (is_fsl_mc_bus_dprc(mc_dev) &&
2173 ++ regions[i].start == region_desc.base_offset)
2174 ++ regions[i].start += mc_portal_base_phys_addr;
2175 ++ }
2176 ++
2177 + if (error < 0) {
2178 + dev_err(parent_dev,
2179 + 			"Invalid MC offset: %#x (for %s.%d's region %d)\n",
2180 +@@ -1125,6 +1143,8 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
2181 + plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2182 + mc_portal_phys_addr = plat_res->start;
2183 + mc_portal_size = resource_size(plat_res);
2184 ++ mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
2185 ++
2186 + error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
2187 + mc_portal_size, NULL,
2188 + FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
2189 +diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
2190 +index b4fc8d71daf20..b656d25a97678 100644
2191 +--- a/drivers/clk/at91/clk-generated.c
2192 ++++ b/drivers/clk/at91/clk-generated.c
2193 +@@ -128,6 +128,12 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
2194 + int i;
2195 + u32 div;
2196 +
2197 ++ /* do not look for a rate that is outside of our range */
2198 ++ if (gck->range.max && req->rate > gck->range.max)
2199 ++ req->rate = gck->range.max;
2200 ++ if (gck->range.min && req->rate < gck->range.min)
2201 ++ req->rate = gck->range.min;
2202 ++
2203 + for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
2204 + if (gck->chg_pid == i)
2205 + continue;
2206 +diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
2207 +index 2c309e3dc8e34..04e728538cefe 100644
2208 +--- a/drivers/clk/imx/clk-composite-8m.c
2209 ++++ b/drivers/clk/imx/clk-composite-8m.c
2210 +@@ -216,7 +216,8 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
2211 + div->width = PCG_PREDIV_WIDTH;
2212 + divider_ops = &imx8m_clk_composite_divider_ops;
2213 + mux_ops = &clk_mux_ops;
2214 +- flags |= CLK_SET_PARENT_GATE;
2215 ++ if (!(composite_flags & IMX_COMPOSITE_FW_MANAGED))
2216 ++ flags |= CLK_SET_PARENT_GATE;
2217 + }
2218 +
2219 + div->lock = &imx_ccm_lock;
2220 +diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
2221 +index f1919fafb1247..e92621fa8b9cd 100644
2222 +--- a/drivers/clk/imx/clk-imx8mm.c
2223 ++++ b/drivers/clk/imx/clk-imx8mm.c
2224 +@@ -407,10 +407,10 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
2225 + hws[IMX8MM_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
2226 + hws[IMX8MM_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
2227 +
2228 +- hws[IMX8MM_CLK_CLKOUT1_SEL] = imx_clk_hw_mux("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
2229 ++ hws[IMX8MM_CLK_CLKOUT1_SEL] = imx_clk_hw_mux2("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
2230 + hws[IMX8MM_CLK_CLKOUT1_DIV] = imx_clk_hw_divider("clkout1_div", "clkout1_sel", base + 0x128, 0, 4);
2231 + hws[IMX8MM_CLK_CLKOUT1] = imx_clk_hw_gate("clkout1", "clkout1_div", base + 0x128, 8);
2232 +- hws[IMX8MM_CLK_CLKOUT2_SEL] = imx_clk_hw_mux("clkout2_sel", base + 0x128, 20, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
2233 ++ hws[IMX8MM_CLK_CLKOUT2_SEL] = imx_clk_hw_mux2("clkout2_sel", base + 0x128, 20, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
2234 + hws[IMX8MM_CLK_CLKOUT2_DIV] = imx_clk_hw_divider("clkout2_div", "clkout2_sel", base + 0x128, 16, 4);
2235 + hws[IMX8MM_CLK_CLKOUT2] = imx_clk_hw_gate("clkout2", "clkout2_div", base + 0x128, 24);
2236 +
2237 +@@ -470,10 +470,11 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
2238 +
2239 + /*
2240 + * DRAM clocks are manipulated from TF-A outside clock framework.
2241 +- * Mark with GET_RATE_NOCACHE to always read div value from hardware
2242 ++ * The fw_managed helper sets GET_RATE_NOCACHE and clears SET_PARENT_GATE
2243 ++ * as div value should always be read from hardware
2244 + */
2245 +- hws[IMX8MM_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
2246 +- hws[IMX8MM_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mm_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
2247 ++ hws[IMX8MM_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000);
2248 ++ hws[IMX8MM_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mm_dram_apb_sels, base + 0xa080);
2249 +
2250 + /* IP */
2251 + hws[IMX8MM_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
2252 +diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
2253 +index 88f6630cd472f..0a76f969b28b3 100644
2254 +--- a/drivers/clk/imx/clk-imx8mn.c
2255 ++++ b/drivers/clk/imx/clk-imx8mn.c
2256 +@@ -453,10 +453,11 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
2257 +
2258 + /*
2259 + * DRAM clocks are manipulated from TF-A outside clock framework.
2260 +- * Mark with GET_RATE_NOCACHE to always read div value from hardware
2261 ++ * The fw_managed helper sets GET_RATE_NOCACHE and clears SET_PARENT_GATE
2262 ++ * as div value should always be read from hardware
2263 + */
2264 +- hws[IMX8MN_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
2265 +- hws[IMX8MN_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mn_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
2266 ++ hws[IMX8MN_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000);
2267 ++ hws[IMX8MN_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mn_dram_apb_sels, base + 0xa080);
2268 +
2269 + hws[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_hw_composite("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500);
2270 + hws[IMX8MN_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mn_sai2_sels, base + 0xa600);
2271 +diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
2272 +index c491bc9c61ce7..83cc2b1c32947 100644
2273 +--- a/drivers/clk/imx/clk-imx8mq.c
2274 ++++ b/drivers/clk/imx/clk-imx8mq.c
2275 +@@ -449,11 +449,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
2276 +
2277 + /*
2278 + * DRAM clocks are manipulated from TF-A outside clock framework.
2279 +- * Mark with GET_RATE_NOCACHE to always read div value from hardware
2280 ++ * The fw_managed helper sets GET_RATE_NOCACHE and clears SET_PARENT_GATE
2281 ++ * as div value should always be read from hardware
2282 + */
2283 + hws[IMX8MQ_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mq_dram_core_sels, ARRAY_SIZE(imx8mq_dram_core_sels), CLK_IS_CRITICAL);
2284 +- hws[IMX8MQ_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
2285 +- hws[IMX8MQ_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mq_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
2286 ++ hws[IMX8MQ_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000);
2287 ++ hws[IMX8MQ_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mq_dram_apb_sels, base + 0xa080);
2288 +
2289 + /* IP */
2290 + hws[IMX8MQ_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mq_vpu_g1_sels, base + 0xa100);
2291 +diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
2292 +index 7571603bee23b..e144f983fd8ce 100644
2293 +--- a/drivers/clk/imx/clk.h
2294 ++++ b/drivers/clk/imx/clk.h
2295 +@@ -530,8 +530,9 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
2296 + struct clk *div, struct clk *mux, struct clk *pll,
2297 + struct clk *step);
2298 +
2299 +-#define IMX_COMPOSITE_CORE BIT(0)
2300 +-#define IMX_COMPOSITE_BUS BIT(1)
2301 ++#define IMX_COMPOSITE_CORE BIT(0)
2302 ++#define IMX_COMPOSITE_BUS BIT(1)
2303 ++#define IMX_COMPOSITE_FW_MANAGED BIT(2)
2304 +
2305 + struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
2306 + const char * const *parent_names,
2307 +@@ -567,6 +568,17 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
2308 + ARRAY_SIZE(parent_names), reg, 0, \
2309 + flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
2310 +
2311 ++#define __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, flags) \
2312 ++ imx8m_clk_hw_composite_flags(name, parent_names, \
2313 ++ ARRAY_SIZE(parent_names), reg, IMX_COMPOSITE_FW_MANAGED, \
2314 ++ flags | CLK_GET_RATE_NOCACHE | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
2315 ++
2316 ++#define imx8m_clk_hw_fw_managed_composite(name, parent_names, reg) \
2317 ++ __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, 0)
2318 ++
2319 ++#define imx8m_clk_hw_fw_managed_composite_critical(name, parent_names, reg) \
2320 ++ __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, CLK_IS_CRITICAL)
2321 ++
2322 + #define __imx8m_clk_composite(name, parent_names, reg, flags) \
2323 + to_clk(__imx8m_clk_hw_composite(name, parent_names, reg, flags))
2324 +
2325 +diff --git a/drivers/clk/ralink/clk-mt7621.c b/drivers/clk/ralink/clk-mt7621.c
2326 +index 857da1e274be9..a2c045390f008 100644
2327 +--- a/drivers/clk/ralink/clk-mt7621.c
2328 ++++ b/drivers/clk/ralink/clk-mt7621.c
2329 +@@ -131,14 +131,7 @@ static int mt7621_gate_ops_init(struct device *dev,
2330 + struct mt7621_gate *sclk)
2331 + {
2332 + struct clk_init_data init = {
2333 +- /*
2334 +- * Until now no clock driver existed so
2335 +- * these SoC drivers are not prepared
2336 +- * yet for the clock. We don't want kernel to
2337 +- * disable anything so we add CLK_IS_CRITICAL
2338 +- * flag here.
2339 +- */
2340 +- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
2341 ++ .flags = CLK_SET_RATE_PARENT,
2342 + .num_parents = 1,
2343 + .parent_names = &sclk->parent_name,
2344 + .ops = &mt7621_gate_ops,
2345 +diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
2346 +index fe937bcdb4876..f7827b3b7fc1c 100644
2347 +--- a/drivers/clk/rockchip/clk-pll.c
2348 ++++ b/drivers/clk/rockchip/clk-pll.c
2349 +@@ -940,7 +940,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
2350 + switch (pll_type) {
2351 + case pll_rk3036:
2352 + case pll_rk3328:
2353 +- if (!pll->rate_table || IS_ERR(ctx->grf))
2354 ++ if (!pll->rate_table)
2355 + init.ops = &rockchip_rk3036_pll_clk_norate_ops;
2356 + else
2357 + init.ops = &rockchip_rk3036_pll_clk_ops;
2358 +diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
2359 +index 1cb21ea79c640..242e94c0cf8a3 100644
2360 +--- a/drivers/clk/socfpga/clk-agilex.c
2361 ++++ b/drivers/clk/socfpga/clk-agilex.c
2362 +@@ -107,10 +107,10 @@ static const struct clk_parent_data gpio_db_free_mux[] = {
2363 + };
2364 +
2365 + static const struct clk_parent_data psi_ref_free_mux[] = {
2366 +- { .fw_name = "main_pll_c3",
2367 +- .name = "main_pll_c3", },
2368 +- { .fw_name = "peri_pll_c3",
2369 +- .name = "peri_pll_c3", },
2370 ++ { .fw_name = "main_pll_c2",
2371 ++ .name = "main_pll_c2", },
2372 ++ { .fw_name = "peri_pll_c2",
2373 ++ .name = "peri_pll_c2", },
2374 + { .fw_name = "osc1",
2375 + .name = "osc1", },
2376 + { .fw_name = "cb-intosc-hs-div2-clk",
2377 +@@ -195,6 +195,13 @@ static const struct clk_parent_data sdmmc_mux[] = {
2378 + .name = "boot_clk", },
2379 + };
2380 +
2381 ++static const struct clk_parent_data s2f_user0_mux[] = {
2382 ++ { .fw_name = "s2f_user0_free_clk",
2383 ++ .name = "s2f_user0_free_clk", },
2384 ++ { .fw_name = "boot_clk",
2385 ++ .name = "boot_clk", },
2386 ++};
2387 ++
2388 + static const struct clk_parent_data s2f_user1_mux[] = {
2389 + { .fw_name = "s2f_user1_free_clk",
2390 + .name = "s2f_user1_free_clk", },
2391 +@@ -273,7 +280,7 @@ static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = {
2392 + { AGILEX_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
2393 + ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0, 0},
2394 + { AGILEX_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", NULL, s2f_usr0_free_mux,
2395 +- ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0, 0},
2396 ++ ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0x30, 2},
2397 + { AGILEX_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, s2f_usr1_free_mux,
2398 + ARRAY_SIZE(s2f_usr1_free_mux), 0, 0xEC, 0, 0x88, 5},
2399 + { AGILEX_PSI_REF_FREE_CLK, "psi_ref_free_clk", NULL, psi_ref_free_mux,
2400 +@@ -319,6 +326,8 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
2401 + 4, 0x98, 0, 16, 0x88, 3, 0},
2402 + { AGILEX_SDMMC_CLK, "sdmmc_clk", NULL, sdmmc_mux, ARRAY_SIZE(sdmmc_mux), 0, 0x7C,
2403 + 5, 0, 0, 0, 0x88, 4, 4},
2404 ++ { AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_user0_mux, ARRAY_SIZE(s2f_user0_mux), 0, 0x24,
2405 ++ 6, 0, 0, 0, 0x30, 2, 0},
2406 + { AGILEX_S2F_USER1_CLK, "s2f_user1_clk", NULL, s2f_user1_mux, ARRAY_SIZE(s2f_user1_mux), 0, 0x7C,
2407 + 6, 0, 0, 0, 0x88, 5, 0},
2408 + { AGILEX_PSI_REF_CLK, "psi_ref_clk", NULL, psi_mux, ARRAY_SIZE(psi_mux), 0, 0x7C,
2409 +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
2410 +index e439b43c19ebe..8977e4de59157 100644
2411 +--- a/drivers/cpufreq/powernv-cpufreq.c
2412 ++++ b/drivers/cpufreq/powernv-cpufreq.c
2413 +@@ -36,6 +36,7 @@
2414 + #define MAX_PSTATE_SHIFT 32
2415 + #define LPSTATE_SHIFT 48
2416 + #define GPSTATE_SHIFT 56
2417 ++#define MAX_NR_CHIPS 32
2418 +
2419 + #define MAX_RAMP_DOWN_TIME 5120
2420 + /*
2421 +@@ -1051,12 +1052,20 @@ static int init_chip_info(void)
2422 + unsigned int *chip;
2423 + unsigned int cpu, i;
2424 + unsigned int prev_chip_id = UINT_MAX;
2425 ++ cpumask_t *chip_cpu_mask;
2426 + int ret = 0;
2427 +
2428 + chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
2429 + if (!chip)
2430 + return -ENOMEM;
2431 +
2432 ++ /* Allocate a chip cpu mask large enough to fit mask for all chips */
2433 ++ chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
2434 ++ if (!chip_cpu_mask) {
2435 ++ ret = -ENOMEM;
2436 ++ goto free_and_return;
2437 ++ }
2438 ++
2439 + for_each_possible_cpu(cpu) {
2440 + unsigned int id = cpu_to_chip_id(cpu);
2441 +
2442 +@@ -1064,22 +1073,25 @@ static int init_chip_info(void)
2443 + prev_chip_id = id;
2444 + chip[nr_chips++] = id;
2445 + }
2446 ++ cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
2447 + }
2448 +
2449 + chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
2450 + if (!chips) {
2451 + ret = -ENOMEM;
2452 +- goto free_and_return;
2453 ++ goto out_free_chip_cpu_mask;
2454 + }
2455 +
2456 + for (i = 0; i < nr_chips; i++) {
2457 + chips[i].id = chip[i];
2458 +- cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
2459 ++ cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]);
2460 + INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
2461 + for_each_cpu(cpu, &chips[i].mask)
2462 + per_cpu(chip_info, cpu) = &chips[i];
2463 + }
2464 +
2465 ++out_free_chip_cpu_mask:
2466 ++ kfree(chip_cpu_mask);
2467 + free_and_return:
2468 + kfree(chip);
2469 + return ret;
2470 +diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
2471 +index a2b5c6f60cf0e..ff164dec8422e 100644
2472 +--- a/drivers/cpuidle/cpuidle-pseries.c
2473 ++++ b/drivers/cpuidle/cpuidle-pseries.c
2474 +@@ -402,7 +402,7 @@ static void __init fixup_cede0_latency(void)
2475 + * pseries_idle_probe()
2476 + * Choose state table for shared versus dedicated partition
2477 + */
2478 +-static int pseries_idle_probe(void)
2479 ++static int __init pseries_idle_probe(void)
2480 + {
2481 +
2482 + if (cpuidle_disable != IDLE_NO_OVERRIDE)
2483 +@@ -419,7 +419,21 @@ static int pseries_idle_probe(void)
2484 + cpuidle_state_table = shared_states;
2485 + max_idle_state = ARRAY_SIZE(shared_states);
2486 + } else {
2487 +- fixup_cede0_latency();
2488 ++ /*
2489 ++ * Use firmware provided latency values
2490 ++ * starting with POWER10 platforms. In the
2491 ++ * case that we are running on a POWER10
2492 ++ * platform but in an earlier compat mode, we
2493 ++ * can still use the firmware provided values.
2494 ++ *
2495 ++ * However, on platforms prior to POWER10, we
2496 ++ * cannot rely on the accuracy of the firmware
2497 ++ * provided latency values. On such platforms,
2498 ++ * go with the conservative default estimate
2499 ++ * of 10us.
2500 ++ */
2501 ++ if (cpu_has_feature(CPU_FTR_ARCH_31) || pvr_version_is(PVR_POWER10))
2502 ++ fixup_cede0_latency();
2503 + cpuidle_state_table = dedicated_states;
2504 + max_idle_state = NR_DEDICATED_STATES;
2505 + }
2506 +diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
2507 +index 91808402e0bf2..2ecb0e1f65d8d 100644
2508 +--- a/drivers/crypto/ccp/sev-dev.c
2509 ++++ b/drivers/crypto/ccp/sev-dev.c
2510 +@@ -300,6 +300,9 @@ static int __sev_platform_shutdown_locked(int *error)
2511 + struct sev_device *sev = psp_master->sev_data;
2512 + int ret;
2513 +
2514 ++ if (sev->state == SEV_STATE_UNINIT)
2515 ++ return 0;
2516 ++
2517 + ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
2518 + if (ret)
2519 + return ret;
2520 +@@ -1019,6 +1022,20 @@ e_err:
2521 + return ret;
2522 + }
2523 +
2524 ++static void sev_firmware_shutdown(struct sev_device *sev)
2525 ++{
2526 ++ sev_platform_shutdown(NULL);
2527 ++
2528 ++ if (sev_es_tmr) {
2529 ++ /* The TMR area was encrypted, flush it from the cache */
2530 ++ wbinvd_on_all_cpus();
2531 ++
2532 ++ free_pages((unsigned long)sev_es_tmr,
2533 ++ get_order(SEV_ES_TMR_SIZE));
2534 ++ sev_es_tmr = NULL;
2535 ++ }
2536 ++}
2537 ++
2538 + void sev_dev_destroy(struct psp_device *psp)
2539 + {
2540 + struct sev_device *sev = psp->sev_data;
2541 +@@ -1026,6 +1043,8 @@ void sev_dev_destroy(struct psp_device *psp)
2542 + if (!sev)
2543 + return;
2544 +
2545 ++ sev_firmware_shutdown(sev);
2546 ++
2547 + if (sev->misc)
2548 + kref_put(&misc_dev->refcount, sev_exit);
2549 +
2550 +@@ -1056,21 +1075,6 @@ void sev_pci_init(void)
2551 + if (sev_get_api_version())
2552 + goto err;
2553 +
2554 +- /*
2555 +- * If platform is not in UNINIT state then firmware upgrade and/or
2556 +- * platform INIT command will fail. These command require UNINIT state.
2557 +- *
2558 +- * In a normal boot we should never run into case where the firmware
2559 +- * is not in UNINIT state on boot. But in case of kexec boot, a reboot
2560 +- * may not go through a typical shutdown sequence and may leave the
2561 +- * firmware in INIT or WORKING state.
2562 +- */
2563 +-
2564 +- if (sev->state != SEV_STATE_UNINIT) {
2565 +- sev_platform_shutdown(NULL);
2566 +- sev->state = SEV_STATE_UNINIT;
2567 +- }
2568 +-
2569 + if (sev_version_greater_or_equal(0, 15) &&
2570 + sev_update_firmware(sev->dev) == 0)
2571 + sev_get_api_version();
2572 +@@ -1115,17 +1119,10 @@ err:
2573 +
2574 + void sev_pci_exit(void)
2575 + {
2576 +- if (!psp_master->sev_data)
2577 +- return;
2578 +-
2579 +- sev_platform_shutdown(NULL);
2580 ++ struct sev_device *sev = psp_master->sev_data;
2581 +
2582 +- if (sev_es_tmr) {
2583 +- /* The TMR area was encrypted, flush it from the cache */
2584 +- wbinvd_on_all_cpus();
2585 ++ if (!sev)
2586 ++ return;
2587 +
2588 +- free_pages((unsigned long)sev_es_tmr,
2589 +- get_order(SEV_ES_TMR_SIZE));
2590 +- sev_es_tmr = NULL;
2591 +- }
2592 ++ sev_firmware_shutdown(sev);
2593 + }
2594 +diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
2595 +index 6fb6ba35f89d4..9bcc1884c06a1 100644
2596 +--- a/drivers/crypto/ccp/sp-pci.c
2597 ++++ b/drivers/crypto/ccp/sp-pci.c
2598 +@@ -241,6 +241,17 @@ e_err:
2599 + return ret;
2600 + }
2601 +
2602 ++static void sp_pci_shutdown(struct pci_dev *pdev)
2603 ++{
2604 ++ struct device *dev = &pdev->dev;
2605 ++ struct sp_device *sp = dev_get_drvdata(dev);
2606 ++
2607 ++ if (!sp)
2608 ++ return;
2609 ++
2610 ++ sp_destroy(sp);
2611 ++}
2612 ++
2613 + static void sp_pci_remove(struct pci_dev *pdev)
2614 + {
2615 + struct device *dev = &pdev->dev;
2616 +@@ -371,6 +382,7 @@ static struct pci_driver sp_pci_driver = {
2617 + .id_table = sp_pci_table,
2618 + .probe = sp_pci_probe,
2619 + .remove = sp_pci_remove,
2620 ++ .shutdown = sp_pci_shutdown,
2621 + .driver.pm = &sp_pci_pm_ops,
2622 + };
2623 +
2624 +diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
2625 +index f397cc5bf1021..d19e5ffb5104b 100644
2626 +--- a/drivers/crypto/mxs-dcp.c
2627 ++++ b/drivers/crypto/mxs-dcp.c
2628 +@@ -300,21 +300,20 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
2629 +
2630 + struct scatterlist *dst = req->dst;
2631 + struct scatterlist *src = req->src;
2632 +- const int nents = sg_nents(req->src);
2633 ++ int dst_nents = sg_nents(dst);
2634 +
2635 + const int out_off = DCP_BUF_SZ;
2636 + uint8_t *in_buf = sdcp->coh->aes_in_buf;
2637 + uint8_t *out_buf = sdcp->coh->aes_out_buf;
2638 +
2639 +- uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
2640 + uint32_t dst_off = 0;
2641 ++ uint8_t *src_buf = NULL;
2642 + uint32_t last_out_len = 0;
2643 +
2644 + uint8_t *key = sdcp->coh->aes_key;
2645 +
2646 + int ret = 0;
2647 +- int split = 0;
2648 +- unsigned int i, len, clen, rem = 0, tlen = 0;
2649 ++ unsigned int i, len, clen, tlen = 0;
2650 + int init = 0;
2651 + bool limit_hit = false;
2652 +
2653 +@@ -332,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
2654 + memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
2655 + }
2656 +
2657 +- for_each_sg(req->src, src, nents, i) {
2658 ++ for_each_sg(req->src, src, sg_nents(src), i) {
2659 + src_buf = sg_virt(src);
2660 + len = sg_dma_len(src);
2661 + tlen += len;
2662 +@@ -357,34 +356,17 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
2663 + * submit the buffer.
2664 + */
2665 + if (actx->fill == out_off || sg_is_last(src) ||
2666 +- limit_hit) {
2667 ++ limit_hit) {
2668 + ret = mxs_dcp_run_aes(actx, req, init);
2669 + if (ret)
2670 + return ret;
2671 + init = 0;
2672 +
2673 +- out_tmp = out_buf;
2674 ++ sg_pcopy_from_buffer(dst, dst_nents, out_buf,
2675 ++ actx->fill, dst_off);
2676 ++ dst_off += actx->fill;
2677 + last_out_len = actx->fill;
2678 +- while (dst && actx->fill) {
2679 +- if (!split) {
2680 +- dst_buf = sg_virt(dst);
2681 +- dst_off = 0;
2682 +- }
2683 +- rem = min(sg_dma_len(dst) - dst_off,
2684 +- actx->fill);
2685 +-
2686 +- memcpy(dst_buf + dst_off, out_tmp, rem);
2687 +- out_tmp += rem;
2688 +- dst_off += rem;
2689 +- actx->fill -= rem;
2690 +-
2691 +- if (dst_off == sg_dma_len(dst)) {
2692 +- dst = sg_next(dst);
2693 +- split = 0;
2694 +- } else {
2695 +- split = 1;
2696 +- }
2697 +- }
2698 ++ actx->fill = 0;
2699 + }
2700 + } while (len);
2701 +
2702 +diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
2703 +index d5590c08db51e..1c636d287112e 100644
2704 +--- a/drivers/dma/imx-sdma.c
2705 ++++ b/drivers/dma/imx-sdma.c
2706 +@@ -379,7 +379,6 @@ struct sdma_channel {
2707 + unsigned long watermark_level;
2708 + u32 shp_addr, per_addr;
2709 + enum dma_status status;
2710 +- bool context_loaded;
2711 + struct imx_dma_data data;
2712 + struct work_struct terminate_worker;
2713 + };
2714 +@@ -954,9 +953,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
2715 + int ret;
2716 + unsigned long flags;
2717 +
2718 +- if (sdmac->context_loaded)
2719 +- return 0;
2720 +-
2721 + if (sdmac->direction == DMA_DEV_TO_MEM)
2722 + load_address = sdmac->pc_from_device;
2723 + else if (sdmac->direction == DMA_DEV_TO_DEV)
2724 +@@ -999,8 +995,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
2725 +
2726 + spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
2727 +
2728 +- sdmac->context_loaded = true;
2729 +-
2730 + return ret;
2731 + }
2732 +
2733 +@@ -1039,7 +1033,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
2734 + vchan_get_all_descriptors(&sdmac->vc, &head);
2735 + spin_unlock_irqrestore(&sdmac->vc.lock, flags);
2736 + vchan_dma_desc_free_list(&sdmac->vc, &head);
2737 +- sdmac->context_loaded = false;
2738 + }
2739 +
2740 + static int sdma_terminate_all(struct dma_chan *chan)
2741 +@@ -1114,7 +1107,6 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
2742 + static int sdma_config_channel(struct dma_chan *chan)
2743 + {
2744 + struct sdma_channel *sdmac = to_sdma_chan(chan);
2745 +- int ret;
2746 +
2747 + sdma_disable_channel(chan);
2748 +
2749 +@@ -1154,9 +1146,7 @@ static int sdma_config_channel(struct dma_chan *chan)
2750 + sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
2751 + }
2752 +
2753 +- ret = sdma_load_context(sdmac);
2754 +-
2755 +- return ret;
2756 ++ return 0;
2757 + }
2758 +
2759 + static int sdma_set_channel_priority(struct sdma_channel *sdmac,
2760 +@@ -1307,7 +1297,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
2761 +
2762 + sdmac->event_id0 = 0;
2763 + sdmac->event_id1 = 0;
2764 +- sdmac->context_loaded = false;
2765 +
2766 + sdma_set_channel_priority(sdmac, 0);
2767 +
2768 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
2769 +index 311bcdc59eda6..3f4b03a2588b6 100644
2770 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
2771 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
2772 +@@ -277,21 +277,18 @@ retry:
2773 + r = amdgpu_gem_object_create(adev, size, args->in.alignment,
2774 + initial_domain,
2775 + flags, ttm_bo_type_device, resv, &gobj);
2776 +- if (r) {
2777 +- if (r != -ERESTARTSYS) {
2778 +- if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
2779 +- flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2780 +- goto retry;
2781 +- }
2782 ++ if (r && r != -ERESTARTSYS) {
2783 ++ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
2784 ++ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2785 ++ goto retry;
2786 ++ }
2787 +
2788 +- if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
2789 +- initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
2790 +- goto retry;
2791 +- }
2792 +- DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
2793 +- size, initial_domain, args->in.alignment, r);
2794 ++ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
2795 ++ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
2796 ++ goto retry;
2797 + }
2798 +- return r;
2799 ++ DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
2800 ++ size, initial_domain, args->in.alignment, r);
2801 + }
2802 +
2803 + if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
2804 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
2805 +index bca4dddd5a15b..82608df433964 100644
2806 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
2807 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
2808 +@@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
2809 + void
2810 + amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
2811 + {
2812 +- u8 val;
2813 ++ u8 val = 0;
2814 +
2815 + if (!amdgpu_connector->router.ddc_valid)
2816 + return;
2817 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
2818 +index 3933a42f8d811..67b6eda21529e 100644
2819 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
2820 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
2821 +@@ -202,7 +202,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
2822 + c++;
2823 + }
2824 +
2825 +- BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
2826 ++ BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
2827 +
2828 + placement->num_placement = c;
2829 + placement->placement = places;
2830 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
2831 +index f40c871da0c62..fb701c4fd5c5f 100644
2832 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
2833 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
2834 +@@ -321,7 +321,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
2835 + return ret;
2836 + }
2837 +
2838 +- __decode_table_header_from_buff(hdr, &buff[2]);
2839 ++ __decode_table_header_from_buff(hdr, buff);
2840 +
2841 + if (hdr->header == EEPROM_TABLE_HDR_VAL) {
2842 + control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
2843 +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
2844 +index 27b1ced145d2c..14ae2bfad59da 100644
2845 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
2846 ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
2847 +@@ -119,7 +119,7 @@ static int vcn_v1_0_sw_init(void *handle)
2848 + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
2849 + adev->firmware.fw_size +=
2850 + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
2851 +- DRM_INFO("PSP loading VCN firmware\n");
2852 ++ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
2853 + }
2854 +
2855 + r = amdgpu_vcn_resume(adev);
2856 +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
2857 +index 8af567c546dbc..f4686e918e0d1 100644
2858 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
2859 ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
2860 +@@ -122,7 +122,7 @@ static int vcn_v2_0_sw_init(void *handle)
2861 + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
2862 + adev->firmware.fw_size +=
2863 + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
2864 +- DRM_INFO("PSP loading VCN firmware\n");
2865 ++ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
2866 + }
2867 +
2868 + r = amdgpu_vcn_resume(adev);
2869 +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
2870 +index 888b17d84691c..e0c0c3734432e 100644
2871 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
2872 ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
2873 +@@ -152,7 +152,7 @@ static int vcn_v2_5_sw_init(void *handle)
2874 + adev->firmware.fw_size +=
2875 + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
2876 + }
2877 +- DRM_INFO("PSP loading VCN firmware\n");
2878 ++ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
2879 + }
2880 +
2881 + r = amdgpu_vcn_resume(adev);
2882 +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
2883 +index 3b23de996db22..c2c5c4af51d2e 100644
2884 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
2885 ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
2886 +@@ -152,7 +152,7 @@ static int vcn_v3_0_sw_init(void *handle)
2887 + adev->firmware.fw_size +=
2888 + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
2889 + }
2890 +- DRM_INFO("PSP loading VCN firmware\n");
2891 ++ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
2892 + }
2893 +
2894 + r = amdgpu_vcn_resume(adev);
2895 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
2896 +index 88813dad731fa..c021519af8106 100644
2897 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
2898 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
2899 +@@ -98,36 +98,78 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
2900 + uint32_t *se_mask)
2901 + {
2902 + struct kfd_cu_info cu_info;
2903 +- uint32_t cu_per_se[KFD_MAX_NUM_SE] = {0};
2904 +- int i, se, sh, cu = 0;
2905 +-
2906 ++ uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
2907 ++ int i, se, sh, cu;
2908 + amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
2909 +
2910 + if (cu_mask_count > cu_info.cu_active_number)
2911 + cu_mask_count = cu_info.cu_active_number;
2912 +
2913 ++ /* Exceeding these bounds corrupts the stack and indicates a coding error.
2914 ++ * Returning with no CU's enabled will hang the queue, which should be
2915 ++ * attention grabbing.
2916 ++ */
2917 ++ if (cu_info.num_shader_engines > KFD_MAX_NUM_SE) {
2918 ++ pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n", cu_info.num_shader_engines);
2919 ++ return;
2920 ++ }
2921 ++ if (cu_info.num_shader_arrays_per_engine > KFD_MAX_NUM_SH_PER_SE) {
2922 ++ pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
2923 ++ cu_info.num_shader_arrays_per_engine * cu_info.num_shader_engines);
2924 ++ return;
2925 ++ }
2926 ++ /* Count active CUs per SH.
2927 ++ *
2928 ++ * Some CUs in an SH may be disabled. HW expects disabled CUs to be
2929 ++ * represented in the high bits of each SH's enable mask (the upper and lower
2930 ++ * 16 bits of se_mask) and will take care of the actual distribution of
2931 ++ * disabled CUs within each SH automatically.
2932 ++ * Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1.
2933 ++ *
2934 ++ * See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
2935 ++ */
2936 + for (se = 0; se < cu_info.num_shader_engines; se++)
2937 + for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
2938 +- cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
2939 +-
2940 +- /* Symmetrically map cu_mask to all SEs:
2941 +- * cu_mask[0] bit0 -> se_mask[0] bit0;
2942 +- * cu_mask[0] bit1 -> se_mask[1] bit0;
2943 +- * ... (if # SE is 4)
2944 +- * cu_mask[0] bit4 -> se_mask[0] bit1;
2945 ++ cu_per_sh[se][sh] = hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
2946 ++
2947 ++ /* Symmetrically map cu_mask to all SEs & SHs:
2948 ++ * se_mask programs up to 2 SH in the upper and lower 16 bits.
2949 ++ *
2950 ++ * Examples
2951 ++ * Assuming 1 SH/SE, 4 SEs:
2952 ++ * cu_mask[0] bit0 -> se_mask[0] bit0
2953 ++ * cu_mask[0] bit1 -> se_mask[1] bit0
2954 ++ * ...
2955 ++ * cu_mask[0] bit4 -> se_mask[0] bit1
2956 ++ * ...
2957 ++ *
2958 ++ * Assuming 2 SH/SE, 4 SEs
2959 ++ * cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
2960 ++ * cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
2961 ++ * ...
2962 ++ * cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
2963 ++ * cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
2964 ++ * ...
2965 ++ * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
2966 + * ...
2967 ++ *
2968 ++ * First ensure all CUs are disabled, then enable user specified CUs.
2969 + */
2970 +- se = 0;
2971 +- for (i = 0; i < cu_mask_count; i++) {
2972 +- if (cu_mask[i / 32] & (1 << (i % 32)))
2973 +- se_mask[se] |= 1 << cu;
2974 +-
2975 +- do {
2976 +- se++;
2977 +- if (se == cu_info.num_shader_engines) {
2978 +- se = 0;
2979 +- cu++;
2980 ++ for (i = 0; i < cu_info.num_shader_engines; i++)
2981 ++ se_mask[i] = 0;
2982 ++
2983 ++ i = 0;
2984 ++ for (cu = 0; cu < 16; cu++) {
2985 ++ for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
2986 ++ for (se = 0; se < cu_info.num_shader_engines; se++) {
2987 ++ if (cu_per_sh[se][sh] > cu) {
2988 ++ if (cu_mask[i / 32] & (1 << (i % 32)))
2989 ++ se_mask[se] |= 1 << (cu + sh * 16);
2990 ++ i++;
2991 ++ if (i == cu_mask_count)
2992 ++ return;
2993 ++ }
2994 + }
2995 +- } while (cu >= cu_per_se[se] && cu < 32);
2996 ++ }
2997 + }
2998 + }
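[Editor's note, not part of the patch] The kfd_mqd_manager hunk above replaces a per-SE CU count with a per-SE, per-SH count and distributes the user's linear CU mask round-robin across engines, one CU slot at a time, with SH1 bits going into the upper half of each 32-bit se_mask entry. A small self-contained sketch of that distribution for a toy configuration (illustrative only; fixed at 2 SEs, 1 SH per SE, 4 CUs per SH, 8 mask bits):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const int num_se = 2, num_sh = 1;
		const int cu_per_sh[2][1] = { { 4 }, { 4 } };
		const uint32_t cu_mask = 0x2F;	/* user-requested CUs */
		const int cu_mask_count = 8;
		uint32_t se_mask[2] = { 0, 0 };
		int i = 0, cu, sh, se;

		/* Same nesting as the patch: innermost over SEs, so bit i of
		 * cu_mask lands on engine (i % num_se) for each CU slot. */
		for (cu = 0; cu < 16; cu++)
			for (sh = 0; sh < num_sh; sh++)
				for (se = 0; se < num_se; se++)
					if (cu_per_sh[se][sh] > cu && i < cu_mask_count) {
						if (cu_mask & (1u << i))
							se_mask[se] |= 1u << (cu + sh * 16);
						i++;
					}

		printf("se_mask[0]=0x%x se_mask[1]=0x%x\n",
		       se_mask[0], se_mask[1]);	/* prints 0x3 and 0x7 */
		return 0;
	}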
2999 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
3000 +index b5e2ea7550d41..6e6918ccedfdb 100644
3001 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
3002 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
3003 +@@ -27,6 +27,7 @@
3004 + #include "kfd_priv.h"
3005 +
3006 + #define KFD_MAX_NUM_SE 8
3007 ++#define KFD_MAX_NUM_SH_PER_SE 2
3008 +
3009 + /**
3010 + * struct mqd_manager
3011 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3012 +index ed221f815a1fa..8c345f0319b84 100644
3013 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3014 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3015 +@@ -1176,7 +1176,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
3016 + dc_hardware_init(adev->dm.dc);
3017 +
3018 + #if defined(CONFIG_DRM_AMD_DC_DCN)
3019 +- if (adev->apu_flags) {
3020 ++ if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
3021 + struct dc_phy_addr_space_config pa_config;
3022 +
3023 + mmhub_read_system_context(adev, &pa_config);
3024 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
3025 +index 1b6b15708b96a..08ff1166ffc89 100644
3026 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
3027 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
3028 +@@ -197,29 +197,29 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
3029 +
3030 + rd_buf_ptr = rd_buf;
3031 +
3032 +- str_len = strlen("Current: %d %d %d ");
3033 +- snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
3034 ++ str_len = strlen("Current: %d 0x%x %d ");
3035 ++ snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ",
3036 + link->cur_link_settings.lane_count,
3037 + link->cur_link_settings.link_rate,
3038 + link->cur_link_settings.link_spread);
3039 + rd_buf_ptr += str_len;
3040 +
3041 +- str_len = strlen("Verified: %d %d %d ");
3042 +- snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
3043 ++ str_len = strlen("Verified: %d 0x%x %d ");
3044 ++ snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ",
3045 + link->verified_link_cap.lane_count,
3046 + link->verified_link_cap.link_rate,
3047 + link->verified_link_cap.link_spread);
3048 + rd_buf_ptr += str_len;
3049 +
3050 +- str_len = strlen("Reported: %d %d %d ");
3051 +- snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
3052 ++ str_len = strlen("Reported: %d 0x%x %d ");
3053 ++ snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ",
3054 + link->reported_link_cap.lane_count,
3055 + link->reported_link_cap.link_rate,
3056 + link->reported_link_cap.link_spread);
3057 + rd_buf_ptr += str_len;
3058 +
3059 +- str_len = strlen("Preferred: %d %d %d ");
3060 +- snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
3061 ++ str_len = strlen("Preferred: %d 0x%x %d ");
3062 ++ snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n",
3063 + link->preferred_link_setting.lane_count,
3064 + link->preferred_link_setting.link_rate,
3065 + link->preferred_link_setting.link_spread);
3066 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
3067 +index 7c939c0a977b3..29f61a8d3e291 100644
3068 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
3069 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
3070 +@@ -3938,13 +3938,12 @@ enum dc_status dcn10_set_clock(struct dc *dc,
3071 + struct dc_clock_config clock_cfg = {0};
3072 + struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3073 +
3074 +- if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3075 +- dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3076 +- context, clock_type, &clock_cfg);
3077 +-
3078 +- if (!dc->clk_mgr->funcs->get_clock)
3079 ++ if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3080 + return DC_FAIL_UNSUPPORTED_1;
3081 +
3082 ++ dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3083 ++ context, clock_type, &clock_cfg);
3084 ++
3085 + if (clk_khz > clock_cfg.max_clock_khz)
3086 + return DC_FAIL_CLK_EXCEED_MAX;
3087 +
3088 +@@ -3962,7 +3961,7 @@ enum dc_status dcn10_set_clock(struct dc *dc,
3089 + else
3090 + return DC_ERROR_UNEXPECTED;
3091 +
3092 +- if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3093 ++ if (dc->clk_mgr->funcs->update_clocks)
3094 + dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3095 + context, true);
3096 + return DC_OK;
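
The reordering above fixes a NULL dereference: the old code guarded the get_clock call itself, but its follow-up test !dc->clk_mgr->funcs->get_clock dereferenced dc->clk_mgr unconditionally. Hoisting one short-circuiting check to the top makes every later use safe, which is also why the update_clocks test can drop its own clk_mgr check. The guard pattern in isolation (struct names invented for illustration):

    #include <stdio.h>
    #include <stddef.h>

    struct funcs { int (*get_clock)(void); };
    struct mgr   { struct funcs *funcs; };

    static int query(struct mgr *m)
    {
        /* && short-circuits: m->funcs is never read when m is NULL.
         * (funcs itself is assumed non-NULL once m exists, mirroring
         * the driver's invariant.) */
        if (!m || !m->funcs->get_clock)
            return -1;                  /* unsupported */
        return m->funcs->get_clock();
    }

    int main(void)
    {
        printf("%d\n", query(NULL));    /* -1 instead of a crash */
        return 0;
    }
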
3097 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
3098 +index 793554e61c520..03b941e76de2a 100644
3099 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
3100 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
3101 +@@ -1703,13 +1703,15 @@ void dcn20_program_front_end_for_ctx(
3102 + dcn20_program_pipe(dc, pipe, context);
3103 + pipe = pipe->bottom_pipe;
3104 + }
3105 +- /* Program secondary blending tree and writeback pipes */
3106 +- pipe = &context->res_ctx.pipe_ctx[i];
3107 +- if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
3108 +- && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
3109 +- && hws->funcs.program_all_writeback_pipes_in_tree)
3110 +- hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
3111 + }
3112 ++ /* Program secondary blending tree and writeback pipes */
3113 ++ pipe = &context->res_ctx.pipe_ctx[i];
3114 ++ if (!pipe->top_pipe && !pipe->prev_odm_pipe
3115 ++ && pipe->stream && pipe->stream->num_wb_info > 0
3116 ++ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
3117 ++ || pipe->stream->update_flags.raw)
3118 ++ && hws->funcs.program_all_writeback_pipes_in_tree)
3119 ++ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
3120 + }
3121 + }
3122 +
3123 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
3124 +index 81f583733fa87..12e92f6204833 100644
3125 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
3126 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
3127 +@@ -2461,7 +2461,7 @@ void dcn20_set_mcif_arb_params(
3128 + wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
3129 + wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
3130 + }
3131 +- wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */
3132 ++ wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */
3133 + wb_arb_params->slice_lines = 32;
3134 + wb_arb_params->arbitration_slice = 2;
3135 + wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
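
The time_per_pixel fix above is a pure rescale: 16.0 * 1000 / (clk / 1000) equals 16.0e6 / clk, a factor of 10^6 larger than the old 16.0 / clk. Assuming phy_pix_clk is in kHz, a 148500 kHz (148.5 MHz) stream gives about 0.000108 under the old formula (which collapses to zero in a field with only a 4-bit fraction) versus about 107.7 under the new one. A one-liner to check the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        double clk = 148500.0;    /* example pixel clock in kHz (assumed unit) */
        printf("old: %.6f  new: %.3f\n",
               16.0 / clk, 16.0 * 1000 / (clk / 1000));
        return 0;
    }
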
3136 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
3137 +index 3fe9e41e4dbd7..6a3d3a0ec0a36 100644
3138 +--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
3139 ++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
3140 +@@ -49,6 +49,11 @@
3141 + static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
3142 + struct dcn3_xfer_func_reg *reg)
3143 + {
3144 ++ reg->shifts.field_region_start_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
3145 ++ reg->masks.field_region_start_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
3146 ++ reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
3147 ++ reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
3148 ++
3149 + reg->shifts.exp_region0_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
3150 + reg->masks.exp_region0_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
3151 + reg->shifts.exp_region0_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
3152 +@@ -66,8 +71,6 @@ static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
3153 + reg->masks.field_region_end_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;
3154 + reg->shifts.field_region_linear_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
3155 + reg->masks.field_region_linear_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
3156 +- reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
3157 +- reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
3158 + reg->shifts.exp_region_start = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_B;
3159 + reg->masks.exp_region_start = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_B;
3160 + reg->shifts.exp_resion_start_segment = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
3161 +@@ -147,18 +150,19 @@ static enum dc_lut_mode dwb3_get_ogam_current(
3162 + uint32_t state_mode;
3163 + uint32_t ram_select;
3164 +
3165 +- REG_GET(DWB_OGAM_CONTROL,
3166 +- DWB_OGAM_MODE, &state_mode);
3167 +- REG_GET(DWB_OGAM_CONTROL,
3168 +- DWB_OGAM_SELECT, &ram_select);
3169 ++ REG_GET_2(DWB_OGAM_CONTROL,
3170 ++ DWB_OGAM_MODE_CURRENT, &state_mode,
3171 ++ DWB_OGAM_SELECT_CURRENT, &ram_select);
3172 +
3173 + if (state_mode == 0) {
3174 + mode = LUT_BYPASS;
3175 + } else if (state_mode == 2) {
3176 + if (ram_select == 0)
3177 + mode = LUT_RAM_A;
3178 +- else
3179 ++ else if (ram_select == 1)
3180 + mode = LUT_RAM_B;
3181 ++ else
3182 ++ mode = LUT_BYPASS;
3183 + } else {
3184 + // Reserved value
3185 + mode = LUT_BYPASS;
3186 +@@ -172,10 +176,10 @@ static void dwb3_configure_ogam_lut(
3187 + struct dcn30_dwbc *dwbc30,
3188 + bool is_ram_a)
3189 + {
3190 +- REG_UPDATE(DWB_OGAM_LUT_CONTROL,
3191 +- DWB_OGAM_LUT_READ_COLOR_SEL, 7);
3192 +- REG_UPDATE(DWB_OGAM_CONTROL,
3193 +- DWB_OGAM_SELECT, is_ram_a == true ? 0 : 1);
3194 ++ REG_UPDATE_2(DWB_OGAM_LUT_CONTROL,
3195 ++ DWB_OGAM_LUT_WRITE_COLOR_MASK, 7,
3196 ++ DWB_OGAM_LUT_HOST_SEL, (is_ram_a == true) ? 0 : 1);
3197 ++
3198 + REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
3199 + }
3200 +
3201 +@@ -185,17 +189,45 @@ static void dwb3_program_ogam_pwl(struct dcn30_dwbc *dwbc30,
3202 + {
3203 + uint32_t i;
3204 +
3205 +- // triple base implementation
3206 +- for (i = 0; i < num/2; i++) {
3207 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].red_reg);
3208 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].green_reg);
3209 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].blue_reg);
3210 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].red_reg);
3211 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].green_reg);
3212 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].blue_reg);
3213 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].red_reg);
3214 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].green_reg);
3215 +- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].blue_reg);
3216 ++ uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
3217 ++ uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
3218 ++ uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
3219 ++
3220 ++ if (is_rgb_equal(rgb, num)) {
3221 ++ for (i = 0 ; i < num; i++)
3222 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);
3223 ++
3224 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);
3225 ++
3226 ++ } else {
3227 ++
3228 ++ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
3229 ++ DWB_OGAM_LUT_WRITE_COLOR_MASK, 4);
3230 ++
3231 ++ for (i = 0 ; i < num; i++)
3232 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);
3233 ++
3234 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);
3235 ++
3236 ++ REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
3237 ++
3238 ++ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
3239 ++ DWB_OGAM_LUT_WRITE_COLOR_MASK, 2);
3240 ++
3241 ++ for (i = 0 ; i < num; i++)
3242 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].green_reg);
3243 ++
3244 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_green);
3245 ++
3246 ++ REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
3247 ++
3248 ++ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
3249 ++ DWB_OGAM_LUT_WRITE_COLOR_MASK, 1);
3250 ++
3251 ++ for (i = 0 ; i < num; i++)
3252 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].blue_reg);
3253 ++
3254 ++ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_blue);
3255 + }
3256 + }
3257 +
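
The rewritten PWL programming takes a single-pass fast path when all three channels carry identical curves, and otherwise selects one channel at a time through the write-color mask (4 = red, 2 = green, 1 = blue), rewinding the LUT index between passes; the base of the final segment is extrapolated as last value + delta. The per-channel selection loop, reduced to a standalone sketch with the register macros stubbed out:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM 4
    static const uint32_t red[NUM]   = {0, 10, 20, 30};
    static const uint32_t green[NUM] = {0, 11, 21, 31};
    static const uint32_t blue[NUM]  = {0, 12, 22, 32};

    static void select_channel(unsigned mask) { printf("mask %u, index -> 0\n", mask); }
    static void write_data(uint32_t v)        { printf("  data %u\n", v); }

    int main(void)
    {
        const unsigned masks[3] = {4, 2, 1};              /* red, green, blue */
        const uint32_t *chan[3] = {red, green, blue};

        for (int c = 0; c < 3; c++) {
            select_channel(masks[c]);   /* set write mask, rewind LUT index */
            for (int i = 0; i < NUM; i++)
                write_data(chan[c][i]);
        }
        return 0;
    }
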
3258 +@@ -211,6 +243,8 @@ static bool dwb3_program_ogam_lut(
3259 + return false;
3260 + }
3261 +
3262 ++ REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
3263 ++
3264 + current_mode = dwb3_get_ogam_current(dwbc30);
3265 + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
3266 + next_mode = LUT_RAM_B;
3267 +@@ -227,8 +261,7 @@ static bool dwb3_program_ogam_lut(
3268 + dwb3_program_ogam_pwl(
3269 + dwbc30, params->rgb_resulted, params->hw_points_num);
3270 +
3271 +- REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
3272 +- REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
3273 ++ REG_UPDATE(DWB_OGAM_CONTROL, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
3274 +
3275 + return true;
3276 + }
3277 +@@ -271,14 +304,19 @@ static void dwb3_program_gamut_remap(
3278 +
3279 + struct color_matrices_reg gam_regs;
3280 +
3281 +- REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
3282 +-
3283 + if (regval == NULL || select == CM_GAMUT_REMAP_MODE_BYPASS) {
3284 + REG_SET(DWB_GAMUT_REMAP_MODE, 0,
3285 + DWB_GAMUT_REMAP_MODE, 0);
3286 + return;
3287 + }
3288 +
3289 ++ REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
3290 ++
3291 ++ gam_regs.shifts.csc_c11 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C11;
3292 ++ gam_regs.masks.csc_c11 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C11;
3293 ++ gam_regs.shifts.csc_c12 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C12;
3294 ++ gam_regs.masks.csc_c12 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C12;
3295 ++
3296 + switch (select) {
3297 + case CM_GAMUT_REMAP_MODE_RAMA_COEFF:
3298 + gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPA_C11_C12);
3299 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
3300 +index d53f8b39699b3..37944f94c6931 100644
3301 +--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
3302 ++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
3303 +@@ -396,12 +396,22 @@ void dcn30_program_all_writeback_pipes_in_tree(
3304 + for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
3305 + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
3306 +
3307 ++ if (!pipe_ctx->plane_state)
3308 ++ continue;
3309 ++
3310 + if (pipe_ctx->plane_state == wb_info.writeback_source_plane) {
3311 + wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
3312 + break;
3313 + }
3314 + }
3315 +- ASSERT(wb_info.mpcc_inst != -1);
3316 ++
3317 ++ if (wb_info.mpcc_inst == -1) {
3318 ++ /* Disable writeback pipe and disconnect from MPCC
3319 ++ * if source plane has been removed
3320 ++ */
3321 ++ dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
3322 ++ continue;
3323 ++ }
3324 +
3325 + ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
3326 + dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst];
3327 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
3328 +index a5a1cb62f967f..393447ebff6e7 100644
3329 +--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
3330 ++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
3331 +@@ -2398,16 +2398,37 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
3332 + dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
3333 +
3334 + if (bw_params->clk_table.entries[0].memclk_mhz) {
3335 ++ int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
3336 ++
3337 ++ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
3338 ++ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
3339 ++ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
3340 ++ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
3341 ++ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
3342 ++ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
3343 ++ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
3344 ++ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
3345 ++ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
3346 ++ }
3347 ++
3348 ++ if (!max_dcfclk_mhz)
3349 ++ max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
3350 ++ if (!max_dispclk_mhz)
3351 ++ max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
3352 ++ if (!max_dppclk_mhz)
3353 ++ max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
3354 ++ if (!max_phyclk_mhz)
3355 ++ max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
3356 +
3357 +- if (bw_params->clk_table.entries[1].dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
3358 ++ if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
3359 + // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
3360 +- dcfclk_sta_targets[num_dcfclk_sta_targets] = bw_params->clk_table.entries[1].dcfclk_mhz;
3361 ++ dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
3362 + num_dcfclk_sta_targets++;
3363 +- } else if (bw_params->clk_table.entries[1].dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
3364 ++ } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
3365 + // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
3366 + for (i = 0; i < num_dcfclk_sta_targets; i++) {
3367 +- if (dcfclk_sta_targets[i] > bw_params->clk_table.entries[1].dcfclk_mhz) {
3368 +- dcfclk_sta_targets[i] = bw_params->clk_table.entries[1].dcfclk_mhz;
3369 ++ if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
3370 ++ dcfclk_sta_targets[i] = max_dcfclk_mhz;
3371 + break;
3372 + }
3373 + }
3374 +@@ -2447,7 +2468,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
3375 + dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
3376 + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
3377 + } else {
3378 +- if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
3379 ++ if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
3380 + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
3381 + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
3382 + } else {
3383 +@@ -2462,11 +2483,12 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
3384 + }
3385 +
3386 + while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
3387 +- optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
3388 ++ optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
3389 + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
3390 + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
3391 + }
3392 +
3393 ++ dcn3_0_soc.num_states = num_states;
3394 + for (i = 0; i < dcn3_0_soc.num_states; i++) {
3395 + dcn3_0_soc.clock_limits[i].state = i;
3396 + dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
3397 +@@ -2474,9 +2496,9 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
3398 + dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
3399 +
3400 + /* Fill all states with max values of all other clocks */
3401 +- dcn3_0_soc.clock_limits[i].dispclk_mhz = bw_params->clk_table.entries[1].dispclk_mhz;
3402 +- dcn3_0_soc.clock_limits[i].dppclk_mhz = bw_params->clk_table.entries[1].dppclk_mhz;
3403 +- dcn3_0_soc.clock_limits[i].phyclk_mhz = bw_params->clk_table.entries[1].phyclk_mhz;
3404 ++ dcn3_0_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
3405 ++ dcn3_0_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
3406 ++ dcn3_0_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
3407 + dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
3408 + /* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
3409 + /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
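
Rather than trusting DPM entry [1] to hold the maximum clocks, the hunk above scans every populated level for per-clock maxima and falls back to the SoC defaults when a clock column is all zeroes. The scan-and-fallback shape, reduced to a standalone sketch (table size and values are illustrative):

    #include <stdio.h>

    #define MAX_NUM_DPM_LVL 8          /* illustrative table size */

    int main(void)
    {
        int dcfclk_mhz[MAX_NUM_DPM_LVL] = {400, 800, 1200, 0, 0, 0, 0, 0};
        int default_dcfclk = 506;      /* stand-in for the clock_limits[0] default */
        int max_dcfclk = 0;

        for (int i = 0; i < MAX_NUM_DPM_LVL; i++)
            if (dcfclk_mhz[i] > max_dcfclk)
                max_dcfclk = dcfclk_mhz[i];

        if (!max_dcfclk)               /* empty column: keep the SoC default */
            max_dcfclk = default_dcfclk;

        printf("max dcfclk: %d MHz\n", max_dcfclk);
        return 0;
    }
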
3410 +diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
3411 +index 911f9f4147741..39ca338eb80b3 100644
3412 +--- a/drivers/gpu/drm/ast/ast_drv.h
3413 ++++ b/drivers/gpu/drm/ast/ast_drv.h
3414 +@@ -337,6 +337,11 @@ int ast_mode_config_init(struct ast_private *ast);
3415 + #define AST_DP501_LINKRATE 0xf014
3416 + #define AST_DP501_EDID_DATA 0xf020
3417 +
3418 ++/* Defines for the SoC scratch registers */
3419 ++#define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6)
3420 ++//#define AST_VRAM_INIT_BY_BMC BIT(7)
3421 ++//#define AST_VRAM_INIT_READY BIT(6)
3422 ++
3423 + int ast_mm_init(struct ast_private *ast);
3424 +
3425 + /* ast post */
3426 +@@ -346,6 +351,7 @@ bool ast_is_vga_enabled(struct drm_device *dev);
3427 + void ast_post_gpu(struct drm_device *dev);
3428 + u32 ast_mindwm(struct ast_private *ast, u32 r);
3429 + void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
3430 ++void ast_patch_ahb_2500(struct ast_private *ast);
3431 + /* ast dp501 */
3432 + void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
3433 + bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
3434 +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
3435 +index 2aff2e6cf450c..79a3618679554 100644
3436 +--- a/drivers/gpu/drm/ast/ast_main.c
3437 ++++ b/drivers/gpu/drm/ast/ast_main.c
3438 +@@ -97,6 +97,11 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
3439 + jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
3440 + jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
3441 + if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
3442 ++ /* Patch AST2500 */
3443 ++ if (((pdev->revision & 0xF0) == 0x40)
3444 ++ && ((jregd0 & AST_VRAM_INIT_STATUS_MASK) == 0))
3445 ++ ast_patch_ahb_2500(ast);
3446 ++
3447 + /* Double check it's actually working */
3448 + data = ast_read32(ast, 0xf004);
3449 + if ((data != 0xFFFFFFFF) && (data != 0x00)) {
3450 +diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
3451 +index 0607658dde51b..b5d92f652fd85 100644
3452 +--- a/drivers/gpu/drm/ast/ast_post.c
3453 ++++ b/drivers/gpu/drm/ast/ast_post.c
3454 +@@ -2028,6 +2028,40 @@ static bool ast_dram_init_2500(struct ast_private *ast)
3455 + return true;
3456 + }
3457 +
3458 ++void ast_patch_ahb_2500(struct ast_private *ast)
3459 ++{
3460 ++ u32 data;
3461 ++
3462 ++ /* Clear bus lock condition */
3463 ++ ast_moutdwm(ast, 0x1e600000, 0xAEED1A03);
3464 ++ ast_moutdwm(ast, 0x1e600084, 0x00010000);
3465 ++ ast_moutdwm(ast, 0x1e600088, 0x00000000);
3466 ++ ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
3467 ++ data = ast_mindwm(ast, 0x1e6e2070);
3468 ++ if (data & 0x08000000) { /* check fast reset */
3469 ++ /*
3470 ++ * If "Fast restet" is enabled for ARM-ICE debugger,
3471 ++ * then WDT needs to enable, that
3472 ++ * WDT04 is WDT#1 Reload reg.
3473 ++ * WDT08 is WDT#1 counter restart reg to avoid system deadlock
3474 ++ * WDT0C is WDT#1 control reg
3475 ++ * [6:5]:= 01:Full chip
3476 ++ * [4]:= 1:1MHz clock source
3477 ++	 * [1]:= 1:WDT will be cleared and disabled after timeout occurs
3478 ++ * [0]:= 1:WDT enable
3479 ++ */
3480 ++ ast_moutdwm(ast, 0x1E785004, 0x00000010);
3481 ++ ast_moutdwm(ast, 0x1E785008, 0x00004755);
3482 ++ ast_moutdwm(ast, 0x1E78500c, 0x00000033);
3483 ++ udelay(1000);
3484 ++ }
3485 ++ do {
3486 ++ ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
3487 ++ data = ast_mindwm(ast, 0x1e6e2000);
3488 ++ } while (data != 1);
3489 ++ ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */
3490 ++}
3491 ++
3492 + void ast_post_chip_2500(struct drm_device *dev)
3493 + {
3494 + struct ast_private *ast = to_ast_private(dev);
3495 +@@ -2035,39 +2069,44 @@ void ast_post_chip_2500(struct drm_device *dev)
3496 + u8 reg;
3497 +
3498 + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
3499 +- if ((reg & 0x80) == 0) {/* vga only */
3500 ++ if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
3501 + /* Clear bus lock condition */
3502 +- ast_moutdwm(ast, 0x1e600000, 0xAEED1A03);
3503 +- ast_moutdwm(ast, 0x1e600084, 0x00010000);
3504 +- ast_moutdwm(ast, 0x1e600088, 0x00000000);
3505 +- ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
3506 +- ast_write32(ast, 0xf004, 0x1e6e0000);
3507 +- ast_write32(ast, 0xf000, 0x1);
3508 +- ast_write32(ast, 0x12000, 0x1688a8a8);
3509 +- while (ast_read32(ast, 0x12000) != 0x1)
3510 +- ;
3511 +-
3512 +- ast_write32(ast, 0x10000, 0xfc600309);
3513 +- while (ast_read32(ast, 0x10000) != 0x1)
3514 +- ;
3515 ++ ast_patch_ahb_2500(ast);
3516 ++
3517 ++ /* Disable watchdog */
3518 ++ ast_moutdwm(ast, 0x1E78502C, 0x00000000);
3519 ++ ast_moutdwm(ast, 0x1E78504C, 0x00000000);
3520 ++
3521 ++ /*
3522 ++ * Reset USB port to patch USB unknown device issue
3523 ++ * SCU90 is Multi-function Pin Control #5
3524 ++	 * [29]:= 1:Enable USB2.0 Host port#1 (the mutually shared USB2.0 Hub
3525 ++ * port).
3526 ++ * SCU94 is Multi-function Pin Control #6
3527 ++ * [14:13]:= 1x:USB2.0 Host2 controller
3528 ++ * SCU70 is Hardware Strap reg
3529 ++ * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by
3530 ++ * [18]: 0(24)/1(48) MHz)
3531 ++ * SCU7C is Write clear reg to SCU70
3532 ++	 * [23]:= write 1 and then SCU70[23] will be cleared to 0b.
3533 ++ */
3534 ++ ast_moutdwm(ast, 0x1E6E2090, 0x20000000);
3535 ++ ast_moutdwm(ast, 0x1E6E2094, 0x00004000);
3536 ++ if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) {
3537 ++ ast_moutdwm(ast, 0x1E6E207C, 0x00800000);
3538 ++ mdelay(100);
3539 ++ ast_moutdwm(ast, 0x1E6E2070, 0x00800000);
3540 ++ }
3541 ++ /* Modify eSPI reset pin */
3542 ++ temp = ast_mindwm(ast, 0x1E6E2070);
3543 ++ if (temp & 0x02000000)
3544 ++ ast_moutdwm(ast, 0x1E6E207C, 0x00004000);
3545 +
3546 + /* Slow down CPU/AHB CLK in VGA only mode */
3547 + temp = ast_read32(ast, 0x12008);
3548 + temp |= 0x73;
3549 + ast_write32(ast, 0x12008, temp);
3550 +
3551 +- /* Reset USB port to patch USB unknown device issue */
3552 +- ast_moutdwm(ast, 0x1e6e2090, 0x20000000);
3553 +- temp = ast_mindwm(ast, 0x1e6e2094);
3554 +- temp |= 0x00004000;
3555 +- ast_moutdwm(ast, 0x1e6e2094, temp);
3556 +- temp = ast_mindwm(ast, 0x1e6e2070);
3557 +- if (temp & 0x00800000) {
3558 +- ast_moutdwm(ast, 0x1e6e207c, 0x00800000);
3559 +- mdelay(100);
3560 +- ast_moutdwm(ast, 0x1e6e2070, 0x00800000);
3561 +- }
3562 +-
3563 + if (!ast_dram_init_2500(ast))
3564 + drm_err(dev, "DRAM init failed !\n");
3565 +
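
Both the new ast_patch_ahb_2500() and the reworked POST path poll the 0x1e6e2000 scratch register in an unbounded do/while until the unlock magic reads back as 1, matching the vendor bring-up sequence. Where a hang on wedged hardware is a concern, the same loop is often written with a retry bound; a hedged userspace sketch of that variant (the MMIO helpers are faked so it runs, and the bound is not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-ins for the driver's moutdwm()/mindwm() helpers. */
    static uint32_t scratch;
    static void moutdwm(uint32_t reg, uint32_t val)
    {
        (void)reg;
        scratch = (val == 0x1688A8A8);      /* unlock magic from the patch */
    }
    static uint32_t mindwm(uint32_t reg) { (void)reg; return scratch; }

    static bool wait_ahb_unlocked(void)
    {
        for (int tries = 0; tries < 1000; tries++) {
            moutdwm(0x1e6e2000, 0x1688A8A8);
            if (mindwm(0x1e6e2000) == 1)
                return true;
        }
        return false;       /* bail out instead of spinning forever */
    }

    int main(void)
    {
        printf("unlocked: %d\n", wait_ahb_unlocked());
        return 0;
    }
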
3566 +diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
3567 +index c65ca860712d2..6cac2e58cd15f 100644
3568 +--- a/drivers/gpu/drm/bridge/nwl-dsi.c
3569 ++++ b/drivers/gpu/drm/bridge/nwl-dsi.c
3570 +@@ -196,7 +196,7 @@ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
3571 + u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
3572 +
3573 + return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp,
3574 +- dsi->lanes * 8 * NSEC_PER_SEC);
3575 ++ dsi->lanes * 8ULL * NSEC_PER_SEC);
3576 + }
3577 +
3578 + /*
3579 +diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
3580 +index 232abbba36868..c7adbeaf10b1b 100644
3581 +--- a/drivers/gpu/drm/drm_auth.c
3582 ++++ b/drivers/gpu/drm/drm_auth.c
3583 +@@ -135,16 +135,18 @@ static void drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
3584 + static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
3585 + {
3586 + struct drm_master *old_master;
3587 ++ struct drm_master *new_master;
3588 +
3589 + lockdep_assert_held_once(&dev->master_mutex);
3590 +
3591 + WARN_ON(fpriv->is_master);
3592 + old_master = fpriv->master;
3593 +- fpriv->master = drm_master_create(dev);
3594 +- if (!fpriv->master) {
3595 +- fpriv->master = old_master;
3596 ++ new_master = drm_master_create(dev);
3597 ++ if (!new_master)
3598 + return -ENOMEM;
3599 +- }
3600 ++ spin_lock(&fpriv->master_lookup_lock);
3601 ++ fpriv->master = new_master;
3602 ++ spin_unlock(&fpriv->master_lookup_lock);
3603 +
3604 + fpriv->is_master = 1;
3605 + fpriv->authenticated = 1;
3606 +@@ -302,10 +304,13 @@ int drm_master_open(struct drm_file *file_priv)
3607 + /* if there is no current master make this fd it, but do not create
3608 + * any master object for render clients */
3609 + mutex_lock(&dev->master_mutex);
3610 +- if (!dev->master)
3611 ++ if (!dev->master) {
3612 + ret = drm_new_set_master(dev, file_priv);
3613 +- else
3614 ++ } else {
3615 ++ spin_lock(&file_priv->master_lookup_lock);
3616 + file_priv->master = drm_master_get(dev->master);
3617 ++ spin_unlock(&file_priv->master_lookup_lock);
3618 ++ }
3619 + mutex_unlock(&dev->master_mutex);
3620 +
3621 + return ret;
3622 +@@ -371,6 +376,31 @@ struct drm_master *drm_master_get(struct drm_master *master)
3623 + }
3624 + EXPORT_SYMBOL(drm_master_get);
3625 +
3626 ++/**
3627 ++ * drm_file_get_master - reference &drm_file.master of @file_priv
3628 ++ * @file_priv: DRM file private
3629 ++ *
3630 ++ * Increments the reference count of @file_priv's &drm_file.master and returns
3631 ++ * the &drm_file.master. If @file_priv has no &drm_file.master, returns NULL.
3632 ++ *
3633 ++ * Master pointers returned from this function should be unreferenced using
3634 ++ * drm_master_put().
3635 ++ */
3636 ++struct drm_master *drm_file_get_master(struct drm_file *file_priv)
3637 ++{
3638 ++ struct drm_master *master = NULL;
3639 ++
3640 ++ spin_lock(&file_priv->master_lookup_lock);
3641 ++ if (!file_priv->master)
3642 ++ goto unlock;
3643 ++ master = drm_master_get(file_priv->master);
3644 ++
3645 ++unlock:
3646 ++ spin_unlock(&file_priv->master_lookup_lock);
3647 ++ return master;
3648 ++}
3649 ++EXPORT_SYMBOL(drm_file_get_master);
3650 ++
3651 + static void drm_master_destroy(struct kref *kref)
3652 + {
3653 + struct drm_master *master = container_of(kref, struct drm_master, refcount);
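
drm_file_get_master() gives callers a counted reference in place of a raw peek at file_priv->master; the drm_lease.c conversions below all follow the same shape. Per the kerneldoc above, the calling pattern is (a fragment, not compilable on its own; the error handling is caller-specific):

    struct drm_master *master = drm_file_get_master(file_priv);

    if (!master)
            return -EINVAL;     /* the file has no master */
    /* ... use master ... */
    drm_master_put(&master);    /* drop the reference taken above */
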
3654 +diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
3655 +index 3d7182001004d..b0a8264894885 100644
3656 +--- a/drivers/gpu/drm/drm_debugfs.c
3657 ++++ b/drivers/gpu/drm/drm_debugfs.c
3658 +@@ -91,6 +91,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
3659 + mutex_lock(&dev->filelist_mutex);
3660 + list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
3661 + struct task_struct *task;
3662 ++ bool is_current_master = drm_is_current_master(priv);
3663 +
3664 + rcu_read_lock(); /* locks pid_task()->comm */
3665 + task = pid_task(priv->pid, PIDTYPE_PID);
3666 +@@ -99,7 +100,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
3667 + task ? task->comm : "<unknown>",
3668 + pid_vnr(priv->pid),
3669 + priv->minor->index,
3670 +- drm_is_current_master(priv) ? 'y' : 'n',
3671 ++ is_current_master ? 'y' : 'n',
3672 + priv->authenticated ? 'y' : 'n',
3673 + from_kuid_munged(seq_user_ns(m), uid),
3674 + priv->magic);
3675 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
3676 +index a68dc25a19c6d..04e7a8d20f259 100644
3677 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
3678 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
3679 +@@ -2867,11 +2867,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
3680 + idx += tosend + 1;
3681 +
3682 + ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
3683 +- if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
3684 +- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
3685 ++ if (ret) {
3686 ++ if (drm_debug_enabled(DRM_UT_DP)) {
3687 ++ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
3688 +
3689 +- drm_printf(&p, "sideband msg failed to send\n");
3690 +- drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
3691 ++ drm_printf(&p, "sideband msg failed to send\n");
3692 ++ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
3693 ++ }
3694 + return ret;
3695 + }
3696 +
3697 +diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
3698 +index 7efbccffc2eaf..c6feeb5651b0c 100644
3699 +--- a/drivers/gpu/drm/drm_file.c
3700 ++++ b/drivers/gpu/drm/drm_file.c
3701 +@@ -176,6 +176,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
3702 + init_waitqueue_head(&file->event_wait);
3703 + file->event_space = 4096; /* set aside 4k for event buffer */
3704 +
3705 ++ spin_lock_init(&file->master_lookup_lock);
3706 + mutex_init(&file->event_read_lock);
3707 +
3708 + if (drm_core_check_feature(dev, DRIVER_GEM))
3709 +diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
3710 +index da4f085fc09e7..aef22634005ef 100644
3711 +--- a/drivers/gpu/drm/drm_lease.c
3712 ++++ b/drivers/gpu/drm/drm_lease.c
3713 +@@ -107,10 +107,19 @@ static bool _drm_has_leased(struct drm_master *master, int id)
3714 + */
3715 + bool _drm_lease_held(struct drm_file *file_priv, int id)
3716 + {
3717 +- if (!file_priv || !file_priv->master)
3718 ++ bool ret;
3719 ++ struct drm_master *master;
3720 ++
3721 ++ if (!file_priv)
3722 + return true;
3723 +
3724 +- return _drm_lease_held_master(file_priv->master, id);
3725 ++ master = drm_file_get_master(file_priv);
3726 ++ if (!master)
3727 ++ return true;
3728 ++ ret = _drm_lease_held_master(master, id);
3729 ++ drm_master_put(&master);
3730 ++
3731 ++ return ret;
3732 + }
3733 +
3734 + /**
3735 +@@ -129,13 +138,22 @@ bool drm_lease_held(struct drm_file *file_priv, int id)
3736 + struct drm_master *master;
3737 + bool ret;
3738 +
3739 +- if (!file_priv || !file_priv->master || !file_priv->master->lessor)
3740 ++ if (!file_priv)
3741 + return true;
3742 +
3743 +- master = file_priv->master;
3744 ++ master = drm_file_get_master(file_priv);
3745 ++ if (!master)
3746 ++ return true;
3747 ++ if (!master->lessor) {
3748 ++ ret = true;
3749 ++ goto out;
3750 ++ }
3751 + mutex_lock(&master->dev->mode_config.idr_mutex);
3752 + ret = _drm_lease_held_master(master, id);
3753 + mutex_unlock(&master->dev->mode_config.idr_mutex);
3754 ++
3755 ++out:
3756 ++ drm_master_put(&master);
3757 + return ret;
3758 + }
3759 +
3760 +@@ -155,10 +173,16 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
3761 + int count_in, count_out;
3762 + uint32_t crtcs_out = 0;
3763 +
3764 +- if (!file_priv || !file_priv->master || !file_priv->master->lessor)
3765 ++ if (!file_priv)
3766 + return crtcs_in;
3767 +
3768 +- master = file_priv->master;
3769 ++ master = drm_file_get_master(file_priv);
3770 ++ if (!master)
3771 ++ return crtcs_in;
3772 ++ if (!master->lessor) {
3773 ++ crtcs_out = crtcs_in;
3774 ++ goto out;
3775 ++ }
3776 + dev = master->dev;
3777 +
3778 + count_in = count_out = 0;
3779 +@@ -177,6 +201,9 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
3780 + count_in++;
3781 + }
3782 + mutex_unlock(&master->dev->mode_config.idr_mutex);
3783 ++
3784 ++out:
3785 ++ drm_master_put(&master);
3786 + return crtcs_out;
3787 + }
3788 +
3789 +@@ -490,7 +517,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
3790 + size_t object_count;
3791 + int ret = 0;
3792 + struct idr leases;
3793 +- struct drm_master *lessor = lessor_priv->master;
3794 ++ struct drm_master *lessor;
3795 + struct drm_master *lessee = NULL;
3796 + struct file *lessee_file = NULL;
3797 + struct file *lessor_file = lessor_priv->filp;
3798 +@@ -502,12 +529,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
3799 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
3800 + return -EOPNOTSUPP;
3801 +
3802 +- /* Do not allow sub-leases */
3803 +- if (lessor->lessor) {
3804 +- DRM_DEBUG_LEASE("recursive leasing not allowed\n");
3805 +- return -EINVAL;
3806 +- }
3807 +-
3808 + /* need some objects */
3809 + if (cl->object_count == 0) {
3810 + DRM_DEBUG_LEASE("no objects in lease\n");
3811 +@@ -519,12 +540,22 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
3812 + return -EINVAL;
3813 + }
3814 +
3815 ++ lessor = drm_file_get_master(lessor_priv);
3816 ++ /* Do not allow sub-leases */
3817 ++ if (lessor->lessor) {
3818 ++ DRM_DEBUG_LEASE("recursive leasing not allowed\n");
3819 ++ ret = -EINVAL;
3820 ++ goto out_lessor;
3821 ++ }
3822 ++
3823 + object_count = cl->object_count;
3824 +
3825 + object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
3826 + array_size(object_count, sizeof(__u32)));
3827 +- if (IS_ERR(object_ids))
3828 +- return PTR_ERR(object_ids);
3829 ++ if (IS_ERR(object_ids)) {
3830 ++ ret = PTR_ERR(object_ids);
3831 ++ goto out_lessor;
3832 ++ }
3833 +
3834 + idr_init(&leases);
3835 +
3836 +@@ -535,14 +566,15 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
3837 + if (ret) {
3838 + DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
3839 + idr_destroy(&leases);
3840 +- return ret;
3841 ++ goto out_lessor;
3842 + }
3843 +
3844 + /* Allocate a file descriptor for the lease */
3845 + fd = get_unused_fd_flags(cl->flags & (O_CLOEXEC | O_NONBLOCK));
3846 + if (fd < 0) {
3847 + idr_destroy(&leases);
3848 +- return fd;
3849 ++ ret = fd;
3850 ++ goto out_lessor;
3851 + }
3852 +
3853 + DRM_DEBUG_LEASE("Creating lease\n");
3854 +@@ -578,6 +610,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
3855 + /* Hook up the fd */
3856 + fd_install(fd, lessee_file);
3857 +
3858 ++ drm_master_put(&lessor);
3859 + DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
3860 + return 0;
3861 +
3862 +@@ -587,6 +620,8 @@ out_lessee:
3863 + out_leases:
3864 + put_unused_fd(fd);
3865 +
3866 ++out_lessor:
3867 ++ drm_master_put(&lessor);
3868 + DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
3869 + return ret;
3870 + }
3871 +@@ -609,7 +644,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
3872 + struct drm_mode_list_lessees *arg = data;
3873 + __u32 __user *lessee_ids = (__u32 __user *) (uintptr_t) (arg->lessees_ptr);
3874 + __u32 count_lessees = arg->count_lessees;
3875 +- struct drm_master *lessor = lessor_priv->master, *lessee;
3876 ++ struct drm_master *lessor, *lessee;
3877 + int count;
3878 + int ret = 0;
3879 +
3880 +@@ -620,6 +655,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
3881 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
3882 + return -EOPNOTSUPP;
3883 +
3884 ++ lessor = drm_file_get_master(lessor_priv);
3885 + DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id);
3886 +
3887 + mutex_lock(&dev->mode_config.idr_mutex);
3888 +@@ -643,6 +679,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
3889 + arg->count_lessees = count;
3890 +
3891 + mutex_unlock(&dev->mode_config.idr_mutex);
3892 ++ drm_master_put(&lessor);
3893 +
3894 + return ret;
3895 + }
3896 +@@ -662,7 +699,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
3897 + struct drm_mode_get_lease *arg = data;
3898 + __u32 __user *object_ids = (__u32 __user *) (uintptr_t) (arg->objects_ptr);
3899 + __u32 count_objects = arg->count_objects;
3900 +- struct drm_master *lessee = lessee_priv->master;
3901 ++ struct drm_master *lessee;
3902 + struct idr *object_idr;
3903 + int count;
3904 + void *entry;
3905 +@@ -676,6 +713,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
3906 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
3907 + return -EOPNOTSUPP;
3908 +
3909 ++ lessee = drm_file_get_master(lessee_priv);
3910 + DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id);
3911 +
3912 + mutex_lock(&dev->mode_config.idr_mutex);
3913 +@@ -703,6 +741,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
3914 + arg->count_objects = count;
3915 +
3916 + mutex_unlock(&dev->mode_config.idr_mutex);
3917 ++ drm_master_put(&lessee);
3918 +
3919 + return ret;
3920 + }
3921 +@@ -721,7 +760,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
3922 + void *data, struct drm_file *lessor_priv)
3923 + {
3924 + struct drm_mode_revoke_lease *arg = data;
3925 +- struct drm_master *lessor = lessor_priv->master;
3926 ++ struct drm_master *lessor;
3927 + struct drm_master *lessee;
3928 + int ret = 0;
3929 +
3930 +@@ -731,6 +770,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
3931 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
3932 + return -EOPNOTSUPP;
3933 +
3934 ++ lessor = drm_file_get_master(lessor_priv);
3935 + mutex_lock(&dev->mode_config.idr_mutex);
3936 +
3937 + lessee = _drm_find_lessee(lessor, arg->lessee_id);
3938 +@@ -751,6 +791,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
3939 +
3940 + fail:
3941 + mutex_unlock(&dev->mode_config.idr_mutex);
3942 ++ drm_master_put(&lessor);
3943 +
3944 + return ret;
3945 + }
3946 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
3947 +index 0644936afee26..bf33c3084cb41 100644
3948 +--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
3949 ++++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
3950 +@@ -115,6 +115,8 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
3951 + EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
3952 + else if (IS_ENABLED(CONFIG_IOMMU_DMA))
3953 + mapping = iommu_get_domain_for_dev(priv->dma_dev);
3954 ++ else
3955 ++ mapping = ERR_PTR(-ENODEV);
3956 +
3957 + if (IS_ERR(mapping))
3958 + return PTR_ERR(mapping);
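
The added else branch fixes a path where mapping could reach the IS_ERR() check uninitialized when neither IOMMU configuration is enabled; seeding it with ERR_PTR(-ENODEV) turns that into a clean error return. A userspace rendition of the ERR_PTR encoding (simplified from the kernel's, which reserves the top MAX_ERRNO addresses for error codes):

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define PTR_ERR(ptr) ((long)(ptr))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        void *mapping = ERR_PTR(-19);   /* -ENODEV */

        if (IS_ERR(mapping))
            printf("error path taken, errno %ld\n", -PTR_ERR(mapping));
        return 0;
    }
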
3959 +diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
3960 +index 749a075fe9e4c..d1b51c133e27a 100644
3961 +--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
3962 ++++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
3963 +@@ -43,6 +43,22 @@
3964 + #define ATTR_INDEX 0x1fc0
3965 + #define ATTR_DATA 0x1fc1
3966 +
3967 ++#define WREG_MISC(v) \
3968 ++ WREG8(MGA_MISC_OUT, v)
3969 ++
3970 ++#define RREG_MISC(v) \
3971 ++ ((v) = RREG8(MGA_MISC_IN))
3972 ++
3973 ++#define WREG_MISC_MASKED(v, mask) \
3974 ++ do { \
3975 ++ u8 misc_; \
3976 ++ u8 mask_ = (mask); \
3977 ++ RREG_MISC(misc_); \
3978 ++ misc_ &= ~mask_; \
3979 ++ misc_ |= ((v) & mask_); \
3980 ++ WREG_MISC(misc_); \
3981 ++ } while (0)
3982 ++
3983 + #define WREG_ATTR(reg, v) \
3984 + do { \
3985 + RREG8(0x1fda); \
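
WREG_MISC_MASKED above is the classic read-modify-write: read the current MISC byte, clear the masked field, OR in the new bits, write the byte back. The same transform as a pure function, easy to sanity-check:

    #include <stdio.h>
    #include <stdint.h>

    static uint8_t rmw8(uint8_t cur, uint8_t val, uint8_t mask)
    {
        cur &= ~mask;           /* clear the field */
        cur |= (val & mask);    /* insert the new bits, touch nothing else */
        return cur;
    }

    int main(void)
    {
        /* CLKSEL lives in bits 3:2; select the MGA clock (0x3 << 2)
         * without disturbing the rest of the register. */
        uint8_t misc = 0xa1;
        printf("0x%02x -> 0x%02x\n", misc, rmw8(misc, 0x3 << 2, 0x3 << 2));
        return 0;
    }
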
3986 +diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
3987 +index cece3e57fb273..45d71d65b6d3e 100644
3988 +--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
3989 ++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
3990 +@@ -174,6 +174,8 @@ static int mgag200_g200_set_plls(struct mga_device *mdev, long clock)
3991 + drm_dbg_kms(dev, "clock: %ld vco: %ld m: %d n: %d p: %d s: %d\n",
3992 + clock, f_vco, m, n, p, s);
3993 +
3994 ++ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
3995 ++
3996 + WREG_DAC(MGA1064_PIX_PLLC_M, m);
3997 + WREG_DAC(MGA1064_PIX_PLLC_N, n);
3998 + WREG_DAC(MGA1064_PIX_PLLC_P, (p | (s << 3)));
3999 +@@ -289,6 +291,8 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
4000 + return 1;
4001 + }
4002 +
4003 ++ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
4004 ++
4005 + WREG_DAC(MGA1064_PIX_PLLC_M, m);
4006 + WREG_DAC(MGA1064_PIX_PLLC_N, n);
4007 + WREG_DAC(MGA1064_PIX_PLLC_P, p);
4008 +@@ -385,6 +389,8 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
4009 + }
4010 + }
4011 +
4012 ++ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
4013 ++
4014 + for (i = 0; i <= 32 && pll_locked == false; i++) {
4015 + if (i > 0) {
4016 + WREG8(MGAREG_CRTC_INDEX, 0x1e);
4017 +@@ -522,6 +528,8 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
4018 + }
4019 + }
4020 +
4021 ++ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
4022 ++
4023 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
4024 + tmp = RREG8(DAC_DATA);
4025 + tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
4026 +@@ -654,6 +662,9 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
4027 + }
4028 + }
4029 + }
4030 ++
4031 ++ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
4032 ++
4033 + for (i = 0; i <= 32 && pll_locked == false; i++) {
4034 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
4035 + tmp = RREG8(DAC_DATA);
4036 +@@ -754,6 +765,8 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
4037 + }
4038 + }
4039 +
4040 ++ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
4041 ++
4042 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
4043 + tmp = RREG8(DAC_DATA);
4044 + tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
4045 +@@ -787,8 +800,6 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
4046 +
4047 + static int mgag200_crtc_set_plls(struct mga_device *mdev, long clock)
4048 + {
4049 +- u8 misc;
4050 +-
4051 + switch(mdev->type) {
4052 + case G200_PCI:
4053 + case G200_AGP:
4054 +@@ -808,11 +819,6 @@ static int mgag200_crtc_set_plls(struct mga_device *mdev, long clock)
4055 + return mga_g200er_set_plls(mdev, clock);
4056 + }
4057 +
4058 +- misc = RREG8(MGA_MISC_IN);
4059 +- misc &= ~MGAREG_MISC_CLK_SEL_MASK;
4060 +- misc |= MGAREG_MISC_CLK_SEL_MGA_MSK;
4061 +- WREG8(MGA_MISC_OUT, misc);
4062 +-
4063 + return 0;
4064 + }
4065 +
4066 +diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
4067 +index 977be0565c061..60e705283fe84 100644
4068 +--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
4069 ++++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
4070 +@@ -222,11 +222,10 @@
4071 +
4072 + #define MGAREG_MISC_IOADSEL (0x1 << 0)
4073 + #define MGAREG_MISC_RAMMAPEN (0x1 << 1)
4074 +-#define MGAREG_MISC_CLK_SEL_MASK GENMASK(3, 2)
4075 +-#define MGAREG_MISC_CLK_SEL_VGA25 (0x0 << 2)
4076 +-#define MGAREG_MISC_CLK_SEL_VGA28 (0x1 << 2)
4077 +-#define MGAREG_MISC_CLK_SEL_MGA_PIX (0x2 << 2)
4078 +-#define MGAREG_MISC_CLK_SEL_MGA_MSK (0x3 << 2)
4079 ++#define MGAREG_MISC_CLKSEL_MASK GENMASK(3, 2)
4080 ++#define MGAREG_MISC_CLKSEL_VGA25 (0x0 << 2)
4081 ++#define MGAREG_MISC_CLKSEL_VGA28 (0x1 << 2)
4082 ++#define MGAREG_MISC_CLKSEL_MGA (0x3 << 2)
4083 + #define MGAREG_MISC_VIDEO_DIS (0x1 << 4)
4084 + #define MGAREG_MISC_HIGH_PG_SEL (0x1 << 5)
4085 + #define MGAREG_MISC_HSYNCPOL BIT(6)
4086 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
4087 +index 2daf81f630764..b057295574361 100644
4088 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
4089 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
4090 +@@ -902,6 +902,7 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
4091 + .amortizable_threshold = 25,
4092 + .min_prefill_lines = 24,
4093 + .danger_lut_tbl = {0xf, 0xffff, 0x0},
4094 ++ .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
4095 + .qos_lut_tbl = {
4096 + {.nentry = ARRAY_SIZE(sdm845_qos_linear),
4097 + .entries = sdm845_qos_linear
4098 +@@ -929,6 +930,7 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
4099 + .min_dram_ib = 1600000,
4100 + .min_prefill_lines = 24,
4101 + .danger_lut_tbl = {0xff, 0xffff, 0x0},
4102 ++ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
4103 + .qos_lut_tbl = {
4104 + {.nentry = ARRAY_SIZE(sc7180_qos_linear),
4105 + .entries = sc7180_qos_linear
4106 +@@ -956,6 +958,7 @@ static const struct dpu_perf_cfg sm8150_perf_data = {
4107 + .min_dram_ib = 800000,
4108 + .min_prefill_lines = 24,
4109 + .danger_lut_tbl = {0xf, 0xffff, 0x0},
4110 ++ .safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
4111 + .qos_lut_tbl = {
4112 + {.nentry = ARRAY_SIZE(sm8150_qos_linear),
4113 + .entries = sm8150_qos_linear
4114 +@@ -984,6 +987,7 @@ static const struct dpu_perf_cfg sm8250_perf_data = {
4115 + .min_dram_ib = 800000,
4116 + .min_prefill_lines = 35,
4117 + .danger_lut_tbl = {0xf, 0xffff, 0x0},
4118 ++ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
4119 + .qos_lut_tbl = {
4120 + {.nentry = ARRAY_SIZE(sc7180_qos_linear),
4121 + .entries = sc7180_qos_linear
4122 +@@ -1012,6 +1016,7 @@ static const struct dpu_perf_cfg sc7280_perf_data = {
4123 + .min_dram_ib = 1600000,
4124 + .min_prefill_lines = 24,
4125 + .danger_lut_tbl = {0xffff, 0xffff, 0x0},
4126 ++ .safe_lut_tbl = {0xff00, 0xff00, 0xffff},
4127 + .qos_lut_tbl = {
4128 + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
4129 + .entries = sc7180_qos_macrotile
4130 +diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
4131 +index 0712752742f4f..cdcaf470f1480 100644
4132 +--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
4133 ++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
4134 +@@ -89,13 +89,6 @@ static void mdp4_disable_commit(struct msm_kms *kms)
4135 +
4136 + static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
4137 + {
4138 +- int i;
4139 +- struct drm_crtc *crtc;
4140 +- struct drm_crtc_state *crtc_state;
4141 +-
4142 +- /* see 119ecb7fd */
4143 +- for_each_new_crtc_in_state(state, crtc, crtc_state, i)
4144 +- drm_crtc_vblank_get(crtc);
4145 + }
4146 +
4147 + static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
4148 +@@ -114,12 +107,6 @@ static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
4149 +
4150 + static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
4151 + {
4152 +- struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
4153 +- struct drm_crtc *crtc;
4154 +-
4155 +- /* see 119ecb7fd */
4156 +- for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
4157 +- drm_crtc_vblank_put(crtc);
4158 + }
4159 +
4160 + static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
4161 +@@ -412,6 +399,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
4162 + {
4163 + struct platform_device *pdev = to_platform_device(dev->dev);
4164 + struct mdp4_platform_config *config = mdp4_get_config(pdev);
4165 ++ struct msm_drm_private *priv = dev->dev_private;
4166 + struct mdp4_kms *mdp4_kms;
4167 + struct msm_kms *kms = NULL;
4168 + struct msm_gem_address_space *aspace;
4169 +@@ -431,7 +419,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
4170 + goto fail;
4171 + }
4172 +
4173 +- kms = &mdp4_kms->base.base;
4174 ++ priv->kms = &mdp4_kms->base.base;
4175 ++ kms = priv->kms;
4176 +
4177 + mdp4_kms->dev = dev;
4178 +
4179 +diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
4180 +index 6856223e91e12..c1514f2cb409c 100644
4181 +--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
4182 ++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
4183 +@@ -83,13 +83,6 @@ struct dp_ctrl_private {
4184 + struct completion video_comp;
4185 + };
4186 +
4187 +-struct dp_cr_status {
4188 +- u8 lane_0_1;
4189 +- u8 lane_2_3;
4190 +-};
4191 +-
4192 +-#define DP_LANE0_1_CR_DONE 0x11
4193 +-
4194 + static int dp_aux_link_configure(struct drm_dp_aux *aux,
4195 + struct dp_link_info *link)
4196 + {
4197 +@@ -1080,7 +1073,7 @@ static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
4198 + }
4199 +
4200 + static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
4201 +- struct dp_cr_status *cr, int *training_step)
4202 ++ int *training_step)
4203 + {
4204 + int tries, old_v_level, ret = 0;
4205 + u8 link_status[DP_LINK_STATUS_SIZE];
4206 +@@ -1109,9 +1102,6 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
4207 + if (ret)
4208 + return ret;
4209 +
4210 +- cr->lane_0_1 = link_status[0];
4211 +- cr->lane_2_3 = link_status[1];
4212 +-
4213 + if (drm_dp_clock_recovery_ok(link_status,
4214 + ctrl->link->link_params.num_lanes)) {
4215 + return 0;
4216 +@@ -1188,7 +1178,7 @@ static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
4217 + }
4218 +
4219 + static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
4220 +- struct dp_cr_status *cr, int *training_step)
4221 ++ int *training_step)
4222 + {
4223 + int tries = 0, ret = 0;
4224 + char pattern;
4225 +@@ -1204,10 +1194,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
4226 + else
4227 + pattern = DP_TRAINING_PATTERN_2;
4228 +
4229 +- ret = dp_ctrl_update_vx_px(ctrl);
4230 +- if (ret)
4231 +- return ret;
4232 +-
4233 + ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern);
4234 + if (ret)
4235 + return ret;
4236 +@@ -1220,8 +1206,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
4237 + ret = dp_ctrl_read_link_status(ctrl, link_status);
4238 + if (ret)
4239 + return ret;
4240 +- cr->lane_0_1 = link_status[0];
4241 +- cr->lane_2_3 = link_status[1];
4242 +
4243 + if (drm_dp_channel_eq_ok(link_status,
4244 + ctrl->link->link_params.num_lanes)) {
4245 +@@ -1241,7 +1225,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
4246 + static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl);
4247 +
4248 + static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
4249 +- struct dp_cr_status *cr, int *training_step)
4250 ++ int *training_step)
4251 + {
4252 + int ret = 0;
4253 + u8 encoding = DP_SET_ANSI_8B10B;
4254 +@@ -1257,7 +1241,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
4255 + drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
4256 + &encoding, 1);
4257 +
4258 +- ret = dp_ctrl_link_train_1(ctrl, cr, training_step);
4259 ++ ret = dp_ctrl_link_train_1(ctrl, training_step);
4260 + if (ret) {
4261 + DRM_ERROR("link training #1 failed. ret=%d\n", ret);
4262 + goto end;
4263 +@@ -1266,7 +1250,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
4264 + /* print success info as this is a result of user initiated action */
4265 + DRM_DEBUG_DP("link training #1 successful\n");
4266 +
4267 +- ret = dp_ctrl_link_train_2(ctrl, cr, training_step);
4268 ++ ret = dp_ctrl_link_train_2(ctrl, training_step);
4269 + if (ret) {
4270 + DRM_ERROR("link training #2 failed. ret=%d\n", ret);
4271 + goto end;
4272 +@@ -1282,7 +1266,7 @@ end:
4273 + }
4274 +
4275 + static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
4276 +- struct dp_cr_status *cr, int *training_step)
4277 ++ int *training_step)
4278 + {
4279 + int ret = 0;
4280 +
4281 +@@ -1297,7 +1281,7 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
4282 + * a link training pattern, we have to first do soft reset.
4283 + */
4284 +
4285 +- ret = dp_ctrl_link_train(ctrl, cr, training_step);
4286 ++ ret = dp_ctrl_link_train(ctrl, training_step);
4287 +
4288 + return ret;
4289 + }
4290 +@@ -1494,14 +1478,16 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
4291 + static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
4292 + {
4293 + int ret = 0;
4294 +- struct dp_cr_status cr;
4295 + int training_step = DP_TRAINING_NONE;
4296 +
4297 + dp_ctrl_push_idle(&ctrl->dp_ctrl);
4298 +
4299 ++ ctrl->link->phy_params.p_level = 0;
4300 ++ ctrl->link->phy_params.v_level = 0;
4301 ++
4302 + ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
4303 +
4304 +- ret = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
4305 ++ ret = dp_ctrl_setup_main_link(ctrl, &training_step);
4306 + if (ret)
4307 + goto end;
4308 +
4309 +@@ -1632,6 +1618,35 @@ void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
4310 + }
4311 + }
4312 +
4313 ++static bool dp_ctrl_clock_recovery_any_ok(
4314 ++ const u8 link_status[DP_LINK_STATUS_SIZE],
4315 ++ int lane_count)
4316 ++{
4317 ++ int reduced_cnt;
4318 ++
4319 ++ if (lane_count <= 1)
4320 ++ return false;
4321 ++
4322 ++ /*
4323 ++	 * only interested in the lane count after reduction:
4324 ++	 * lane_count = 4: check only the first 2 lanes
4325 ++	 * lane_count = 2: check only the first lane
4326 ++ */
4327 ++ reduced_cnt = lane_count >> 1;
4328 ++
4329 ++ return drm_dp_clock_recovery_ok(link_status, reduced_cnt);
4330 ++}
4331 ++
4332 ++static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl)
4333 ++{
4334 ++ u8 link_status[DP_LINK_STATUS_SIZE];
4335 ++ int num_lanes = ctrl->link->link_params.num_lanes;
4336 ++
4337 ++ dp_ctrl_read_link_status(ctrl, link_status);
4338 ++
4339 ++ return drm_dp_channel_eq_ok(link_status, num_lanes);
4340 ++}
4341 ++
4342 + int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
4343 + {
4344 + int rc = 0;
4345 +@@ -1639,7 +1654,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
4346 + u32 rate = 0;
4347 + int link_train_max_retries = 5;
4348 + u32 const phy_cts_pixel_clk_khz = 148500;
4349 +- struct dp_cr_status cr;
4350 ++ u8 link_status[DP_LINK_STATUS_SIZE];
4351 + unsigned int training_step;
4352 +
4353 + if (!dp_ctrl)
4354 +@@ -1666,6 +1681,9 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
4355 + ctrl->link->link_params.rate,
4356 + ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
4357 +
4358 ++ ctrl->link->phy_params.p_level = 0;
4359 ++ ctrl->link->phy_params.v_level = 0;
4360 ++
4361 + rc = dp_ctrl_enable_mainlink_clocks(ctrl);
4362 + if (rc)
4363 + return rc;
4364 +@@ -1679,19 +1697,21 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
4365 + }
4366 +
4367 + training_step = DP_TRAINING_NONE;
4368 +- rc = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
4369 ++ rc = dp_ctrl_setup_main_link(ctrl, &training_step);
4370 + if (rc == 0) {
4371 + /* training completed successfully */
4372 + break;
4373 + } else if (training_step == DP_TRAINING_1) {
4374 + /* link train_1 failed */
4375 +- if (!dp_catalog_link_is_connected(ctrl->catalog)) {
4376 ++ if (!dp_catalog_link_is_connected(ctrl->catalog))
4377 + break;
4378 +- }
4379 ++
4380 ++ dp_ctrl_read_link_status(ctrl, link_status);
4381 +
4382 + rc = dp_ctrl_link_rate_down_shift(ctrl);
4383 + if (rc < 0) { /* already in RBR = 1.6G */
4384 +- if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) {
4385 ++ if (dp_ctrl_clock_recovery_any_ok(link_status,
4386 ++ ctrl->link->link_params.num_lanes)) {
4387 + /*
4388 + * some lanes are ready,
4389 + * reduce lane number
4390 +@@ -1707,12 +1727,18 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
4391 + }
4392 + }
4393 + } else if (training_step == DP_TRAINING_2) {
4394 +- /* link train_2 failed, lower lane rate */
4395 +- if (!dp_catalog_link_is_connected(ctrl->catalog)) {
4396 ++ /* link train_2 failed */
4397 ++ if (!dp_catalog_link_is_connected(ctrl->catalog))
4398 + break;
4399 +- }
4400 +
4401 +- rc = dp_ctrl_link_lane_down_shift(ctrl);
4402 ++ dp_ctrl_read_link_status(ctrl, link_status);
4403 ++
4404 ++ if (!drm_dp_clock_recovery_ok(link_status,
4405 ++ ctrl->link->link_params.num_lanes))
4406 ++ rc = dp_ctrl_link_rate_down_shift(ctrl);
4407 ++ else
4408 ++ rc = dp_ctrl_link_lane_down_shift(ctrl);
4409 ++
4410 + if (rc < 0) {
4411 + /* end with failure */
4412 + break; /* lane == 1 already */
4413 +@@ -1723,17 +1749,19 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
4414 + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
4415 + return rc;
4416 +
4417 +- /* stop txing train pattern */
4418 +- dp_ctrl_clear_training_pattern(ctrl);
4419 ++ if (rc == 0) { /* link train successfully */
4420 ++ /*
4421 ++	 * do not stop the training pattern here;
4422 ++	 * link training is stopped in on_stream instead,
4423 ++	 * to pass the compliance tests
4424 ++ */
4425 ++ } else {
4426 ++ /*
4427 ++	 * link training failed;
4428 ++	 * stop transmitting the training pattern here
4429 ++ */
4430 ++ dp_ctrl_clear_training_pattern(ctrl);
4431 +
4432 +- /*
4433 +- * keep transmitting idle pattern until video ready
4434 +- * to avoid main link from loss of sync
4435 +- */
4436 +- if (rc == 0) /* link train successfully */
4437 +- dp_ctrl_push_idle(dp_ctrl);
4438 +- else {
4439 +- /* link training failed */
4440 + dp_ctrl_deinitialize_mainlink(ctrl);
4441 + rc = -ECONNRESET;
4442 + }
4443 +@@ -1741,9 +1769,15 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
4444 + return rc;
4445 + }
4446 +
4447 ++static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
4448 ++{
4449 ++ int training_step = DP_TRAINING_NONE;
4450 ++
4451 ++ return dp_ctrl_setup_main_link(ctrl, &training_step);
4452 ++}
4453 ++
4454 + int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
4455 + {
4456 +- u32 rate = 0;
4457 + int ret = 0;
4458 + bool mainlink_ready = false;
4459 + struct dp_ctrl_private *ctrl;
4460 +@@ -1753,10 +1787,6 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
4461 +
4462 + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
4463 +
4464 +- rate = ctrl->panel->link_info.rate;
4465 +-
4466 +- ctrl->link->link_params.rate = rate;
4467 +- ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes;
4468 + ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
4469 +
4470 + DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
4471 +@@ -1771,6 +1801,12 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
4472 + }
4473 + }
4474 +
4475 ++ if (!dp_ctrl_channel_eq_ok(ctrl))
4476 ++ dp_ctrl_link_retrain(ctrl);
4477 ++
4478 ++ /* stop txing train pattern to end link training */
4479 ++ dp_ctrl_clear_training_pattern(ctrl);
4480 ++
4481 + ret = dp_ctrl_enable_stream_clocks(ctrl);
4482 + if (ret) {
4483 + DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
4484 +diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
4485 +index 9cc8166636686..6eeb9a14b5846 100644
4486 +--- a/drivers/gpu/drm/msm/dp/dp_panel.c
4487 ++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
4488 +@@ -272,7 +272,7 @@ static u8 dp_panel_get_edid_checksum(struct edid *edid)
4489 + {
4490 + struct edid *last_block;
4491 + u8 *raw_edid;
4492 +- bool is_edid_corrupt;
4493 ++ bool is_edid_corrupt = false;
4494 +
4495 + if (!edid) {
4496 + DRM_ERROR("invalid edid input\n");
4497 +@@ -304,7 +304,12 @@ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
4498 + panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
4499 +
4500 + if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
4501 +- u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid);
4502 ++ u8 checksum;
4503 ++
4504 ++ if (dp_panel->edid)
4505 ++ checksum = dp_panel_get_edid_checksum(dp_panel->edid);
4506 ++ else
4507 ++ checksum = dp_panel->connector->real_edid_checksum;
4508 +
4509 + dp_link_send_edid_checksum(panel->link, checksum);
4510 + dp_link_send_test_response(panel->link);
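
[Note on the dp_panel.c hunks] Two defensive fixes: is_edid_corrupt is initialized because drm_edid_block_valid() is not guaranteed to write its out-flag on every path, and the EDID-checksum test reply no longer dereferences a missing EDID. A minimal sketch of the guarded reply, using only names from the hunk:

        u8 checksum;

        if (dp_panel->edid)
                checksum = dp_panel_get_edid_checksum(dp_panel->edid);
        else
                checksum = dp_panel->connector->real_edid_checksum;

        dp_link_send_edid_checksum(panel->link, checksum);

real_edid_checksum is the checksum drm core recorded from the last EDID block it parsed, so the sink's test request can still be answered after the cached EDID has been dropped.
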
4511 +diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
4512 +index f3f1c03c7db95..763f127e46213 100644
4513 +--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
4514 ++++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
4515 +@@ -154,7 +154,6 @@ static const struct msm_dsi_config sdm660_dsi_cfg = {
4516 + .reg_cfg = {
4517 + .num = 2,
4518 + .regs = {
4519 +- {"vdd", 73400, 32 }, /* 0.9 V */
4520 + {"vdda", 12560, 4 }, /* 1.2 V */
4521 + },
4522 + },
4523 +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
4524 +index 65d68eb9e3cb4..c96fd752fa1d7 100644
4525 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
4526 ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
4527 +@@ -1049,7 +1049,7 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
4528 + .reg_cfg = {
4529 + .num = 1,
4530 + .regs = {
4531 +- {"vcca", 17000, 32},
4532 ++ {"vcca", 73400, 32},
4533 + },
4534 + },
4535 + .ops = {
4536 +diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
4537 +index 801da917507d5..512af976b7e90 100644
4538 +--- a/drivers/gpu/drm/omapdrm/omap_plane.c
4539 ++++ b/drivers/gpu/drm/omapdrm/omap_plane.c
4540 +@@ -6,6 +6,7 @@
4541 +
4542 + #include <drm/drm_atomic.h>
4543 + #include <drm/drm_atomic_helper.h>
4544 ++#include <drm/drm_gem_atomic_helper.h>
4545 + #include <drm/drm_plane_helper.h>
4546 +
4547 + #include "omap_dmm_tiler.h"
4548 +@@ -29,6 +30,8 @@ static int omap_plane_prepare_fb(struct drm_plane *plane,
4549 + if (!new_state->fb)
4550 + return 0;
4551 +
4552 ++ drm_gem_plane_helper_prepare_fb(plane, new_state);
4553 ++
4554 + return omap_framebuffer_pin(new_state->fb);
4555 + }
4556 +
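
[Note on the omapdrm hunk] Wiring drm_gem_plane_helper_prepare_fb() into the plane's prepare_fb hook copies the implicit dma-buf fence from the framebuffer's GEM object into the new plane state, so an atomic commit waits for rendering to finish before scanout. The resulting hook, as in the hunk:

        #include <drm/drm_gem_atomic_helper.h>

        static int omap_plane_prepare_fb(struct drm_plane *plane,
                                         struct drm_plane_state *new_state)
        {
                if (!new_state->fb)
                        return 0;

                /* Pick up the exclusive fence from the GEM object. */
                drm_gem_plane_helper_prepare_fb(plane, new_state);

                return omap_framebuffer_pin(new_state->fb);
        }
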
4557 +diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
4558 +index 597cf1459b0a8..4c6bdea5537b9 100644
4559 +--- a/drivers/gpu/drm/panfrost/panfrost_device.h
4560 ++++ b/drivers/gpu/drm/panfrost/panfrost_device.h
4561 +@@ -120,8 +120,12 @@ struct panfrost_device {
4562 + };
4563 +
4564 + struct panfrost_mmu {
4565 ++ struct panfrost_device *pfdev;
4566 ++ struct kref refcount;
4567 + struct io_pgtable_cfg pgtbl_cfg;
4568 + struct io_pgtable_ops *pgtbl_ops;
4569 ++ struct drm_mm mm;
4570 ++ spinlock_t mm_lock;
4571 + int as;
4572 + atomic_t as_count;
4573 + struct list_head list;
4574 +@@ -132,9 +136,7 @@ struct panfrost_file_priv {
4575 +
4576 + struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
4577 +
4578 +- struct panfrost_mmu mmu;
4579 +- struct drm_mm mm;
4580 +- spinlock_t mm_lock;
4581 ++ struct panfrost_mmu *mmu;
4582 + };
4583 +
4584 + static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
4585 +diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
4586 +index 83a461bdeea84..b2aa8e0503147 100644
4587 +--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
4588 ++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
4589 +@@ -417,7 +417,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
4590 + * anyway, so let's not bother.
4591 + */
4592 + if (!list_is_singular(&bo->mappings.list) ||
4593 +- WARN_ON_ONCE(first->mmu != &priv->mmu)) {
4594 ++ WARN_ON_ONCE(first->mmu != priv->mmu)) {
4595 + ret = -EINVAL;
4596 + goto out_unlock_mappings;
4597 + }
4598 +@@ -449,32 +449,6 @@ int panfrost_unstable_ioctl_check(void)
4599 + return 0;
4600 + }
4601 +
4602 +-#define PFN_4G (SZ_4G >> PAGE_SHIFT)
4603 +-#define PFN_4G_MASK (PFN_4G - 1)
4604 +-#define PFN_16M (SZ_16M >> PAGE_SHIFT)
4605 +-
4606 +-static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
4607 +- unsigned long color,
4608 +- u64 *start, u64 *end)
4609 +-{
4610 +- /* Executable buffers can't start or end on a 4GB boundary */
4611 +- if (!(color & PANFROST_BO_NOEXEC)) {
4612 +- u64 next_seg;
4613 +-
4614 +- if ((*start & PFN_4G_MASK) == 0)
4615 +- (*start)++;
4616 +-
4617 +- if ((*end & PFN_4G_MASK) == 0)
4618 +- (*end)--;
4619 +-
4620 +- next_seg = ALIGN(*start, PFN_4G);
4621 +- if (next_seg - *start <= PFN_16M)
4622 +- *start = next_seg + 1;
4623 +-
4624 +- *end = min(*end, ALIGN(*start, PFN_4G) - 1);
4625 +- }
4626 +-}
4627 +-
4628 + static int
4629 + panfrost_open(struct drm_device *dev, struct drm_file *file)
4630 + {
4631 +@@ -489,15 +463,11 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
4632 + panfrost_priv->pfdev = pfdev;
4633 + file->driver_priv = panfrost_priv;
4634 +
4635 +- spin_lock_init(&panfrost_priv->mm_lock);
4636 +-
4637 +- /* 4G enough for now. can be 48-bit */
4638 +- drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
4639 +- panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust;
4640 +-
4641 +- ret = panfrost_mmu_pgtable_alloc(panfrost_priv);
4642 +- if (ret)
4643 +- goto err_pgtable;
4644 ++ panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
4645 ++ if (IS_ERR(panfrost_priv->mmu)) {
4646 ++ ret = PTR_ERR(panfrost_priv->mmu);
4647 ++ goto err_free;
4648 ++ }
4649 +
4650 + ret = panfrost_job_open(panfrost_priv);
4651 + if (ret)
4652 +@@ -506,9 +476,8 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
4653 + return 0;
4654 +
4655 + err_job:
4656 +- panfrost_mmu_pgtable_free(panfrost_priv);
4657 +-err_pgtable:
4658 +- drm_mm_takedown(&panfrost_priv->mm);
4659 ++ panfrost_mmu_ctx_put(panfrost_priv->mmu);
4660 ++err_free:
4661 + kfree(panfrost_priv);
4662 + return ret;
4663 + }
4664 +@@ -521,8 +490,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
4665 + panfrost_perfcnt_close(file);
4666 + panfrost_job_close(panfrost_priv);
4667 +
4668 +- panfrost_mmu_pgtable_free(panfrost_priv);
4669 +- drm_mm_takedown(&panfrost_priv->mm);
4670 ++ panfrost_mmu_ctx_put(panfrost_priv->mmu);
4671 + kfree(panfrost_priv);
4672 + }
4673 +
4674 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
4675 +index 3e0723bc36bda..23377481f4e31 100644
4676 +--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
4677 ++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
4678 +@@ -60,7 +60,7 @@ panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
4679 +
4680 + mutex_lock(&bo->mappings.lock);
4681 + list_for_each_entry(iter, &bo->mappings.list, node) {
4682 +- if (iter->mmu == &priv->mmu) {
4683 ++ if (iter->mmu == priv->mmu) {
4684 + kref_get(&iter->refcount);
4685 + mapping = iter;
4686 + break;
4687 +@@ -74,16 +74,13 @@ panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
4688 + static void
4689 + panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
4690 + {
4691 +- struct panfrost_file_priv *priv;
4692 +-
4693 + if (mapping->active)
4694 + panfrost_mmu_unmap(mapping);
4695 +
4696 +- priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
4697 +- spin_lock(&priv->mm_lock);
4698 ++ spin_lock(&mapping->mmu->mm_lock);
4699 + if (drm_mm_node_allocated(&mapping->mmnode))
4700 + drm_mm_remove_node(&mapping->mmnode);
4701 +- spin_unlock(&priv->mm_lock);
4702 ++ spin_unlock(&mapping->mmu->mm_lock);
4703 + }
4704 +
4705 + static void panfrost_gem_mapping_release(struct kref *kref)
4706 +@@ -94,6 +91,7 @@ static void panfrost_gem_mapping_release(struct kref *kref)
4707 +
4708 + panfrost_gem_teardown_mapping(mapping);
4709 + drm_gem_object_put(&mapping->obj->base.base);
4710 ++ panfrost_mmu_ctx_put(mapping->mmu);
4711 + kfree(mapping);
4712 + }
4713 +
4714 +@@ -143,11 +141,11 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
4715 + else
4716 + align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
4717 +
4718 +- mapping->mmu = &priv->mmu;
4719 +- spin_lock(&priv->mm_lock);
4720 +- ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
4721 ++ mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
4722 ++ spin_lock(&mapping->mmu->mm_lock);
4723 ++ ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
4724 + size >> PAGE_SHIFT, align, color, 0);
4725 +- spin_unlock(&priv->mm_lock);
4726 ++ spin_unlock(&mapping->mmu->mm_lock);
4727 + if (ret)
4728 + goto err;
4729 +
4730 +@@ -176,7 +174,7 @@ void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
4731 +
4732 + mutex_lock(&bo->mappings.lock);
4733 + list_for_each_entry(iter, &bo->mappings.list, node) {
4734 +- if (iter->mmu == &priv->mmu) {
4735 ++ if (iter->mmu == priv->mmu) {
4736 + mapping = iter;
4737 + list_del(&iter->node);
4738 + break;
4739 +diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
4740 +index 6003cfeb13221..682f2161b9999 100644
4741 +--- a/drivers/gpu/drm/panfrost/panfrost_job.c
4742 ++++ b/drivers/gpu/drm/panfrost/panfrost_job.c
4743 +@@ -165,7 +165,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
4744 + return;
4745 + }
4746 +
4747 +- cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
4748 ++ cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
4749 +
4750 + job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
4751 + job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
4752 +@@ -527,7 +527,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
4753 + if (job) {
4754 + pfdev->jobs[j] = NULL;
4755 +
4756 +- panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
4757 ++ panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
4758 + panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
4759 +
4760 + dma_fence_signal_locked(job->done_fence);
4761 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
4762 +index 0581186ebfb3a..eea6ade902cb4 100644
4763 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
4764 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
4765 +@@ -1,5 +1,8 @@
4766 + // SPDX-License-Identifier: GPL-2.0
4767 + /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@××××××.org> */
4768 ++
4769 ++#include <drm/panfrost_drm.h>
4770 ++
4771 + #include <linux/atomic.h>
4772 + #include <linux/bitfield.h>
4773 + #include <linux/delay.h>
4774 +@@ -52,25 +55,16 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
4775 + }
4776 +
4777 + static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
4778 +- u64 iova, size_t size)
4779 ++ u64 iova, u64 size)
4780 + {
4781 + u8 region_width;
4782 + u64 region = iova & PAGE_MASK;
4783 +- /*
4784 +- * fls returns:
4785 +- * 1 .. 32
4786 +- *
4787 +- * 10 + fls(num_pages)
4788 +- * results in the range (11 .. 42)
4789 +- */
4790 +-
4791 +- size = round_up(size, PAGE_SIZE);
4792 +
4793 +- region_width = 10 + fls(size >> PAGE_SHIFT);
4794 +- if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
4795 +- /* not pow2, so must go up to the next pow2 */
4796 +- region_width += 1;
4797 +- }
4798 ++ /* The size is encoded as ceil(log2) minus(1), which may be calculated
4799 ++ * with fls. The size must be clamped to hardware bounds.
4800 ++ */
4801 ++ size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
4802 ++ region_width = fls64(size - 1) - 1;
4803 + region |= region_width;
4804 +
4805 + /* Lock the region that needs to be updated */
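
[Note on the lock_region() hunk just above] The new encoding computes the region width as ceil(log2(size)) - 1 via fls64(size - 1) - 1, after clamping to the hardware minimum (AS_LOCK_REGION_MIN_SIZE, 1ULL << 15, added in the panfrost_regs.h hunk further down). Worked examples under that encoding: size = 64 KiB gives fls64(0xffff) - 1 = 15, i.e. a 2^16-byte region; a single 4 KiB page is first clamped to 32 KiB and gives width 14. A standalone sketch:

        #include <linux/bitops.h>
        #include <linux/minmax.h>

        static u8 lock_region_width(u64 size)
        {
                /* Clamp to the smallest lockable region, then take
                 * ceil(log2(size)) - 1: 64 KiB -> 15, 4 KiB -> 14.
                 */
                size = max_t(u64, size, 1ULL << 15);
                return fls64(size - 1) - 1;
        }

The old computation worked from a 32-bit page count, which is why the surrounding hunks also widen the size parameters from size_t to u64 and the flush constants from ~0UL to ~0ULL.
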
4806 +@@ -81,7 +75,7 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
4807 +
4808 +
4809 + static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
4810 +- u64 iova, size_t size, u32 op)
4811 ++ u64 iova, u64 size, u32 op)
4812 + {
4813 + if (as_nr < 0)
4814 + return 0;
4815 +@@ -98,7 +92,7 @@ static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
4816 +
4817 + static int mmu_hw_do_operation(struct panfrost_device *pfdev,
4818 + struct panfrost_mmu *mmu,
4819 +- u64 iova, size_t size, u32 op)
4820 ++ u64 iova, u64 size, u32 op)
4821 + {
4822 + int ret;
4823 +
4824 +@@ -115,7 +109,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
4825 + u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
4826 + u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
4827 +
4828 +- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
4829 ++ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
4830 +
4831 + mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
4832 + mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
4833 +@@ -131,7 +125,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
4834 +
4835 + static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
4836 + {
4837 +- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
4838 ++ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
4839 +
4840 + mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
4841 + mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
4842 +@@ -231,7 +225,7 @@ static size_t get_pgsize(u64 addr, size_t size)
4843 +
4844 + static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
4845 + struct panfrost_mmu *mmu,
4846 +- u64 iova, size_t size)
4847 ++ u64 iova, u64 size)
4848 + {
4849 + if (mmu->as < 0)
4850 + return;
4851 +@@ -337,7 +331,7 @@ static void mmu_tlb_inv_context_s1(void *cookie)
4852 +
4853 + static void mmu_tlb_sync_context(void *cookie)
4854 + {
4855 +- //struct panfrost_device *pfdev = cookie;
4856 ++ //struct panfrost_mmu *mmu = cookie;
4857 + // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
4858 + }
4859 +
4860 +@@ -352,57 +346,10 @@ static const struct iommu_flush_ops mmu_tlb_ops = {
4861 + .tlb_flush_walk = mmu_tlb_flush_walk,
4862 + };
4863 +
4864 +-int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
4865 +-{
4866 +- struct panfrost_mmu *mmu = &priv->mmu;
4867 +- struct panfrost_device *pfdev = priv->pfdev;
4868 +-
4869 +- INIT_LIST_HEAD(&mmu->list);
4870 +- mmu->as = -1;
4871 +-
4872 +- mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
4873 +- .pgsize_bitmap = SZ_4K | SZ_2M,
4874 +- .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
4875 +- .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
4876 +- .coherent_walk = pfdev->coherent,
4877 +- .tlb = &mmu_tlb_ops,
4878 +- .iommu_dev = pfdev->dev,
4879 +- };
4880 +-
4881 +- mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
4882 +- priv);
4883 +- if (!mmu->pgtbl_ops)
4884 +- return -EINVAL;
4885 +-
4886 +- return 0;
4887 +-}
4888 +-
4889 +-void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
4890 +-{
4891 +- struct panfrost_device *pfdev = priv->pfdev;
4892 +- struct panfrost_mmu *mmu = &priv->mmu;
4893 +-
4894 +- spin_lock(&pfdev->as_lock);
4895 +- if (mmu->as >= 0) {
4896 +- pm_runtime_get_noresume(pfdev->dev);
4897 +- if (pm_runtime_active(pfdev->dev))
4898 +- panfrost_mmu_disable(pfdev, mmu->as);
4899 +- pm_runtime_put_autosuspend(pfdev->dev);
4900 +-
4901 +- clear_bit(mmu->as, &pfdev->as_alloc_mask);
4902 +- clear_bit(mmu->as, &pfdev->as_in_use_mask);
4903 +- list_del(&mmu->list);
4904 +- }
4905 +- spin_unlock(&pfdev->as_lock);
4906 +-
4907 +- free_io_pgtable_ops(mmu->pgtbl_ops);
4908 +-}
4909 +-
4910 + static struct panfrost_gem_mapping *
4911 + addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
4912 + {
4913 + struct panfrost_gem_mapping *mapping = NULL;
4914 +- struct panfrost_file_priv *priv;
4915 + struct drm_mm_node *node;
4916 + u64 offset = addr >> PAGE_SHIFT;
4917 + struct panfrost_mmu *mmu;
4918 +@@ -415,11 +362,10 @@ addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
4919 + goto out;
4920 +
4921 + found_mmu:
4922 +- priv = container_of(mmu, struct panfrost_file_priv, mmu);
4923 +
4924 +- spin_lock(&priv->mm_lock);
4925 ++ spin_lock(&mmu->mm_lock);
4926 +
4927 +- drm_mm_for_each_node(node, &priv->mm) {
4928 ++ drm_mm_for_each_node(node, &mmu->mm) {
4929 + if (offset >= node->start &&
4930 + offset < (node->start + node->size)) {
4931 + mapping = drm_mm_node_to_panfrost_mapping(node);
4932 +@@ -429,7 +375,7 @@ found_mmu:
4933 + }
4934 + }
4935 +
4936 +- spin_unlock(&priv->mm_lock);
4937 ++ spin_unlock(&mmu->mm_lock);
4938 + out:
4939 + spin_unlock(&pfdev->as_lock);
4940 + return mapping;
4941 +@@ -542,6 +488,107 @@ err_bo:
4942 + return ret;
4943 + }
4944 +
4945 ++static void panfrost_mmu_release_ctx(struct kref *kref)
4946 ++{
4947 ++ struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
4948 ++ refcount);
4949 ++ struct panfrost_device *pfdev = mmu->pfdev;
4950 ++
4951 ++ spin_lock(&pfdev->as_lock);
4952 ++ if (mmu->as >= 0) {
4953 ++ pm_runtime_get_noresume(pfdev->dev);
4954 ++ if (pm_runtime_active(pfdev->dev))
4955 ++ panfrost_mmu_disable(pfdev, mmu->as);
4956 ++ pm_runtime_put_autosuspend(pfdev->dev);
4957 ++
4958 ++ clear_bit(mmu->as, &pfdev->as_alloc_mask);
4959 ++ clear_bit(mmu->as, &pfdev->as_in_use_mask);
4960 ++ list_del(&mmu->list);
4961 ++ }
4962 ++ spin_unlock(&pfdev->as_lock);
4963 ++
4964 ++ free_io_pgtable_ops(mmu->pgtbl_ops);
4965 ++ drm_mm_takedown(&mmu->mm);
4966 ++ kfree(mmu);
4967 ++}
4968 ++
4969 ++void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
4970 ++{
4971 ++ kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
4972 ++}
4973 ++
4974 ++struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
4975 ++{
4976 ++ kref_get(&mmu->refcount);
4977 ++
4978 ++ return mmu;
4979 ++}
4980 ++
4981 ++#define PFN_4G (SZ_4G >> PAGE_SHIFT)
4982 ++#define PFN_4G_MASK (PFN_4G - 1)
4983 ++#define PFN_16M (SZ_16M >> PAGE_SHIFT)
4984 ++
4985 ++static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
4986 ++ unsigned long color,
4987 ++ u64 *start, u64 *end)
4988 ++{
4989 ++ /* Executable buffers can't start or end on a 4GB boundary */
4990 ++ if (!(color & PANFROST_BO_NOEXEC)) {
4991 ++ u64 next_seg;
4992 ++
4993 ++ if ((*start & PFN_4G_MASK) == 0)
4994 ++ (*start)++;
4995 ++
4996 ++ if ((*end & PFN_4G_MASK) == 0)
4997 ++ (*end)--;
4998 ++
4999 ++ next_seg = ALIGN(*start, PFN_4G);
5000 ++ if (next_seg - *start <= PFN_16M)
5001 ++ *start = next_seg + 1;
5002 ++
5003 ++ *end = min(*end, ALIGN(*start, PFN_4G) - 1);
5004 ++ }
5005 ++}
5006 ++
5007 ++struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
5008 ++{
5009 ++ struct panfrost_mmu *mmu;
5010 ++
5011 ++ mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
5012 ++ if (!mmu)
5013 ++ return ERR_PTR(-ENOMEM);
5014 ++
5015 ++ mmu->pfdev = pfdev;
5016 ++ spin_lock_init(&mmu->mm_lock);
5017 ++
5018 ++ /* 4G enough for now. can be 48-bit */
5019 ++ drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
5020 ++ mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;
5021 ++
5022 ++ INIT_LIST_HEAD(&mmu->list);
5023 ++ mmu->as = -1;
5024 ++
5025 ++ mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
5026 ++ .pgsize_bitmap = SZ_4K | SZ_2M,
5027 ++ .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
5028 ++ .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
5029 ++ .coherent_walk = pfdev->coherent,
5030 ++ .tlb = &mmu_tlb_ops,
5031 ++ .iommu_dev = pfdev->dev,
5032 ++ };
5033 ++
5034 ++ mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
5035 ++ mmu);
5036 ++ if (!mmu->pgtbl_ops) {
5037 ++ kfree(mmu);
5038 ++ return ERR_PTR(-EINVAL);
5039 ++ }
5040 ++
5041 ++ kref_init(&mmu->refcount);
5042 ++
5043 ++ return mmu;
5044 ++}
5045 ++
5046 + static const char *access_type_name(struct panfrost_device *pfdev,
5047 + u32 fault_status)
5048 + {
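
[Note on the panfrost hunks above] The refactor moves the whole GPU address space (the drm_mm allocator, io-pgtable, and AS slot bookkeeping) out of panfrost_file_priv into a kref-counted panfrost_mmu context. GEM mappings now take their own reference via panfrost_mmu_ctx_get(), so a mapping can outlive the file that created it, and the final panfrost_mmu_ctx_put() tears down the page tables. A generic sketch of the lifetime pattern, independent of the driver details:

        #include <linux/kref.h>
        #include <linux/slab.h>

        struct ctx {
                struct kref refcount;
                /* address space, page tables, ... */
        };

        static void ctx_release(struct kref *kref)
        {
                struct ctx *c = container_of(kref, struct ctx, refcount);

                /* tear down the resources, then free the context */
                kfree(c);
        }

        static struct ctx *ctx_get(struct ctx *c)
        {
                kref_get(&c->refcount);
                return c;
        }

        static void ctx_put(struct ctx *c)
        {
                kref_put(&c->refcount, ctx_release);
        }

This also removes the container_of() tricks that assumed every panfrost_mmu was embedded in a panfrost_file_priv, which could leave jobs and mappings referencing a freed file context.
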
5049 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
5050 +index 44fc2edf63ce6..cc2a0d307febc 100644
5051 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
5052 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
5053 +@@ -18,7 +18,8 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev);
5054 + u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
5055 + void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
5056 +
5057 +-int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv);
5058 +-void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv);
5059 ++struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu);
5060 ++void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu);
5061 ++struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev);
5062 +
5063 + #endif
5064 +diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
5065 +index eddaa62ad8b0e..2ae3a4d301d39 100644
5066 +--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
5067 ++++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
5068 +@@ -318,6 +318,8 @@
5069 + #define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
5070 + #define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
5071 +
5072 ++#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)
5073 ++
5074 + #define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
5075 + #define gpu_read(dev, reg) readl(dev->iomem + reg)
5076 +
5077 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
5078 +index c22551c2facb1..2a06ec1cbefb0 100644
5079 +--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
5080 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
5081 +@@ -559,6 +559,13 @@ static int rcar_du_remove(struct platform_device *pdev)
5082 + return 0;
5083 + }
5084 +
5085 ++static void rcar_du_shutdown(struct platform_device *pdev)
5086 ++{
5087 ++ struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
5088 ++
5089 ++ drm_atomic_helper_shutdown(&rcdu->ddev);
5090 ++}
5091 ++
5092 + static int rcar_du_probe(struct platform_device *pdev)
5093 + {
5094 + struct rcar_du_device *rcdu;
5095 +@@ -615,6 +622,7 @@ error:
5096 + static struct platform_driver rcar_du_platform_driver = {
5097 + .probe = rcar_du_probe,
5098 + .remove = rcar_du_remove,
5099 ++ .shutdown = rcar_du_shutdown,
5100 + .driver = {
5101 + .name = "rcar-du",
5102 + .pm = &rcar_du_pm_ops,
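
[Note on the rcar-du hunks] The driver gains a platform .shutdown callback that runs drm_atomic_helper_shutdown(), so every CRTC is disabled and the hardware stops scanning out before reboot or kexec. The pattern generalizes to any atomic DRM platform driver; in this sketch my_drm_device and my_drm_shutdown are illustrative names:

        static void my_drm_shutdown(struct platform_device *pdev)
        {
                struct my_drm_device *priv = platform_get_drvdata(pdev);

                /* Quiesce the pipeline; safe even if nothing is enabled. */
                drm_atomic_helper_shutdown(&priv->ddev);
        }

The callback is then wired into the platform_driver alongside .probe and .remove, exactly as the hunk does for rcar_du_platform_driver.
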
5103 +diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
5104 +index edee565334d8e..155f305e7c4e5 100644
5105 +--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
5106 ++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
5107 +@@ -1205,7 +1205,9 @@ static int vc4_hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
5108 + HDMI_WRITE(HDMI_MAI_CTL,
5109 + VC4_SET_FIELD(vc4_hdmi->audio.channels,
5110 + VC4_HD_MAI_CTL_CHNUM) |
5111 +- VC4_HD_MAI_CTL_ENABLE);
5112 ++ VC4_HD_MAI_CTL_WHOLSMP |
5113 ++ VC4_HD_MAI_CTL_CHALIGN |
5114 ++ VC4_HD_MAI_CTL_ENABLE);
5115 + break;
5116 + case SNDRV_PCM_TRIGGER_STOP:
5117 + HDMI_WRITE(HDMI_MAI_CTL,
5118 +diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
5119 +index 6d310d31b75d4..1b10ab2b80a31 100644
5120 +--- a/drivers/gpu/drm/vkms/vkms_plane.c
5121 ++++ b/drivers/gpu/drm/vkms/vkms_plane.c
5122 +@@ -8,7 +8,6 @@
5123 + #include <drm/drm_gem_atomic_helper.h>
5124 + #include <drm/drm_gem_framebuffer_helper.h>
5125 + #include <drm/drm_plane_helper.h>
5126 +-#include <drm/drm_gem_shmem_helper.h>
5127 +
5128 + #include "vkms_drv.h"
5129 +
5130 +@@ -150,45 +149,10 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
5131 + return 0;
5132 + }
5133 +
5134 +-static int vkms_prepare_fb(struct drm_plane *plane,
5135 +- struct drm_plane_state *state)
5136 +-{
5137 +- struct drm_gem_object *gem_obj;
5138 +- struct dma_buf_map map;
5139 +- int ret;
5140 +-
5141 +- if (!state->fb)
5142 +- return 0;
5143 +-
5144 +- gem_obj = drm_gem_fb_get_obj(state->fb, 0);
5145 +- ret = drm_gem_shmem_vmap(gem_obj, &map);
5146 +- if (ret)
5147 +- DRM_ERROR("vmap failed: %d\n", ret);
5148 +-
5149 +- return drm_gem_plane_helper_prepare_fb(plane, state);
5150 +-}
5151 +-
5152 +-static void vkms_cleanup_fb(struct drm_plane *plane,
5153 +- struct drm_plane_state *old_state)
5154 +-{
5155 +- struct drm_gem_object *gem_obj;
5156 +- struct drm_gem_shmem_object *shmem_obj;
5157 +- struct dma_buf_map map;
5158 +-
5159 +- if (!old_state->fb)
5160 +- return;
5161 +-
5162 +- gem_obj = drm_gem_fb_get_obj(old_state->fb, 0);
5163 +- shmem_obj = to_drm_gem_shmem_obj(drm_gem_fb_get_obj(old_state->fb, 0));
5164 +- dma_buf_map_set_vaddr(&map, shmem_obj->vaddr);
5165 +- drm_gem_shmem_vunmap(gem_obj, &map);
5166 +-}
5167 +-
5168 + static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
5169 + .atomic_update = vkms_plane_atomic_update,
5170 + .atomic_check = vkms_plane_atomic_check,
5171 +- .prepare_fb = vkms_prepare_fb,
5172 +- .cleanup_fb = vkms_cleanup_fb,
5173 ++ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
5174 + };
5175 +
5176 + struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
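
[Note on the vkms hunk] The hand-rolled prepare_fb/cleanup_fb pair, which vmap'ed the shmem object itself and only logged vmap failures, is replaced by DRM_GEM_SHADOW_PLANE_HELPER_FUNCS from <drm/drm_gem_atomic_helper.h>. That macro fills in prepare_fb/cleanup_fb hooks that keep a CPU-visible mapping of the framebuffer in shadow-plane state for software rendering. A sketch of a plane wired this way; my_atomic_check and my_atomic_update are placeholders (vkms keeps its own):

        #include <drm/drm_gem_atomic_helper.h>

        static const struct drm_plane_helper_funcs helper_funcs = {
                .atomic_check = my_atomic_check,
                .atomic_update = my_atomic_update,
                DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
        };

Planes using the helper-funcs half are expected to manage shadow-plane state as well, e.g. through the matching DRM_GEM_SHADOW_PLANE_FUNCS entries in struct drm_plane_funcs.
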
5177 +diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c
5178 +index aeb0a22a2c347..edd17c30d5a51 100644
5179 +--- a/drivers/gpu/drm/vmwgfx/ttm_memory.c
5180 ++++ b/drivers/gpu/drm/vmwgfx/ttm_memory.c
5181 +@@ -435,8 +435,10 @@ int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev)
5182 +
5183 + si_meminfo(&si);
5184 +
5185 ++ spin_lock(&glob->lock);
5186 + /* set it as 0 by default to keep original behavior of OOM */
5187 + glob->lower_mem_limit = 0;
5188 ++ spin_unlock(&glob->lock);
5189 +
5190 + ret = ttm_mem_init_kernel_zone(glob, &si);
5191 + if (unlikely(ret != 0))
5192 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
5193 +index 81f525a82b77f..4e7de45407c81 100644
5194 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
5195 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
5196 +@@ -715,7 +715,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
5197 + * without checking which bindings actually need to be emitted
5198 + *
5199 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state
5200 +- * @bi: Pointer to where the binding info array is stored in @cbs
5201 ++ * @biv: Pointer to where the binding info array is stored in @cbs
5202 + * @max_num: Maximum number of entries in the @bi array.
5203 + *
5204 + * Scans the @bi array for bindings and builds a buffer of view id data.
5205 +@@ -725,11 +725,9 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
5206 + * contains the command data.
5207 + */
5208 + static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
5209 +- const struct vmw_ctx_bindinfo *bi,
5210 ++ const struct vmw_ctx_bindinfo_view *biv,
5211 + u32 max_num)
5212 + {
5213 +- const struct vmw_ctx_bindinfo_view *biv =
5214 +- container_of(bi, struct vmw_ctx_bindinfo_view, bi);
5215 + unsigned long i;
5216 +
5217 + cbs->bind_cmd_count = 0;
5218 +@@ -838,7 +836,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
5219 + */
5220 + static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
5221 + {
5222 +- const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
5223 ++ const struct vmw_ctx_bindinfo_view *loc = &cbs->render_targets[0];
5224 + struct {
5225 + SVGA3dCmdHeader header;
5226 + SVGA3dCmdDXSetRenderTargets body;
5227 +@@ -874,7 +872,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
5228 + * without checking which bindings actually need to be emitted
5229 + *
5230 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state
5231 +- * @bi: Pointer to where the binding info array is stored in @cbs
5232 ++ * @biso: Pointer to where the binding info array is stored in @cbs
5233 + * @max_num: Maximum number of entries in the @bi array.
5234 + *
5235 + * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
5236 +@@ -884,11 +882,9 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
5237 + * contains the command data.
5238 + */
5239 + static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
5240 +- const struct vmw_ctx_bindinfo *bi,
5241 ++ const struct vmw_ctx_bindinfo_so_target *biso,
5242 + u32 max_num)
5243 + {
5244 +- const struct vmw_ctx_bindinfo_so_target *biso =
5245 +- container_of(bi, struct vmw_ctx_bindinfo_so_target, bi);
5246 + unsigned long i;
5247 + SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
5248 +
5249 +@@ -919,7 +915,7 @@ static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
5250 + */
5251 + static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
5252 + {
5253 +- const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
5254 ++ const struct vmw_ctx_bindinfo_so_target *loc = &cbs->so_targets[0];
5255 + struct {
5256 + SVGA3dCmdHeader header;
5257 + SVGA3dCmdDXSetSOTargets body;
5258 +@@ -1066,7 +1062,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
5259 +
5260 + static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
5261 + {
5262 +- const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[0].views[0].bi;
5263 ++ const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[0].views[0];
5264 + struct {
5265 + SVGA3dCmdHeader header;
5266 + SVGA3dCmdDXSetUAViews body;
5267 +@@ -1096,7 +1092,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
5268 +
5269 + static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
5270 + {
5271 +- const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[1].views[0].bi;
5272 ++ const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[1].views[0];
5273 + struct {
5274 + SVGA3dCmdHeader header;
5275 + SVGA3dCmdDXSetCSUAViews body;
5276 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
5277 +index 2e23e537cdf52..dac4624c5dc16 100644
5278 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
5279 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
5280 +@@ -516,7 +516,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
5281 + struct vmw_cmdbuf_man *man =
5282 + container_of(work, struct vmw_cmdbuf_man, work);
5283 + struct vmw_cmdbuf_header *entry, *next;
5284 +- uint32_t dummy;
5285 ++ uint32_t dummy = 0;
5286 + bool send_fence = false;
5287 + struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
5288 + int i;
5289 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
5290 +index b262d61d839d5..9487faff52293 100644
5291 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
5292 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
5293 +@@ -159,6 +159,7 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
5294 + void vmw_cmdbuf_res_revert(struct list_head *list)
5295 + {
5296 + struct vmw_cmdbuf_res *entry, *next;
5297 ++ int ret;
5298 +
5299 + list_for_each_entry_safe(entry, next, list, head) {
5300 + switch (entry->state) {
5301 +@@ -166,7 +167,8 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
5302 + vmw_cmdbuf_res_free(entry->man, entry);
5303 + break;
5304 + case VMW_CMDBUF_RES_DEL:
5305 +- drm_ht_insert_item(&entry->man->resources, &entry->hash);
5306 ++ ret = drm_ht_insert_item(&entry->man->resources, &entry->hash);
5307 ++ BUG_ON(ret);
5308 + list_del(&entry->head);
5309 + list_add_tail(&entry->head, &entry->man->list);
5310 + entry->state = VMW_CMDBUF_RES_COMMITTED;
5311 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
5312 +index d6a6d8a3387a9..319ecca5d1cb8 100644
5313 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
5314 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
5315 +@@ -2546,6 +2546,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
5316 +
5317 + so_type = vmw_so_cmd_to_type(header->id);
5318 + res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
5319 ++ if (IS_ERR(res))
5320 ++ return PTR_ERR(res);
5321 + cmd = container_of(header, typeof(*cmd), header);
5322 + ret = vmw_cotable_notify(res, cmd->defined_id);
5323 +
5324 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
5325 +index f2d6254154585..2d8caf09f1727 100644
5326 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
5327 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
5328 +@@ -506,11 +506,13 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
5329 + {
5330 + unsigned long num_pt_pages = 0;
5331 + struct ttm_buffer_object *bo = mob->pt_bo;
5332 +- struct vmw_piter save_pt_iter;
5333 ++ struct vmw_piter save_pt_iter = {0};
5334 + struct vmw_piter pt_iter;
5335 + const struct vmw_sg_table *vsgt;
5336 + int ret;
5337 +
5338 ++ BUG_ON(num_data_pages == 0);
5339 ++
5340 + ret = ttm_bo_reserve(bo, false, true, NULL);
5341 + BUG_ON(ret != 0);
5342 +
5343 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
5344 +index 609269625468d..e90fd3d16697e 100644
5345 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
5346 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
5347 +@@ -154,6 +154,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
5348 + /* HB port can't access encrypted memory. */
5349 + if (hb && !mem_encrypt_active()) {
5350 + unsigned long bp = channel->cookie_high;
5351 ++ u32 channel_id = (channel->channel_id << 16);
5352 +
5353 + si = (uintptr_t) msg;
5354 + di = channel->cookie_low;
5355 +@@ -161,7 +162,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
5356 + VMW_PORT_HB_OUT(
5357 + (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
5358 + msg_len, si, di,
5359 +- VMWARE_HYPERVISOR_HB | (channel->channel_id << 16) |
5360 ++ VMWARE_HYPERVISOR_HB | channel_id |
5361 + VMWARE_HYPERVISOR_OUT,
5362 + VMW_HYPERVISOR_MAGIC, bp,
5363 + eax, ebx, ecx, edx, si, di);
5364 +@@ -209,6 +210,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
5365 + /* HB port can't access encrypted memory */
5366 + if (hb && !mem_encrypt_active()) {
5367 + unsigned long bp = channel->cookie_low;
5368 ++ u32 channel_id = (channel->channel_id << 16);
5369 +
5370 + si = channel->cookie_high;
5371 + di = (uintptr_t) reply;
5372 +@@ -216,7 +218,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
5373 + VMW_PORT_HB_IN(
5374 + (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
5375 + reply_len, si, di,
5376 +- VMWARE_HYPERVISOR_HB | (channel->channel_id << 16),
5377 ++ VMWARE_HYPERVISOR_HB | channel_id,
5378 + VMW_HYPERVISOR_MAGIC, bp,
5379 + eax, ebx, ecx, edx, si, di);
5380 +
5381 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
5382 +index 35f02958ee2cc..f275a08999ef1 100644
5383 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
5384 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
5385 +@@ -114,6 +114,7 @@ static void vmw_resource_release(struct kref *kref)
5386 + container_of(kref, struct vmw_resource, kref);
5387 + struct vmw_private *dev_priv = res->dev_priv;
5388 + int id;
5389 ++ int ret;
5390 + struct idr *idr = &dev_priv->res_idr[res->func->res_type];
5391 +
5392 + spin_lock(&dev_priv->resource_lock);
5393 +@@ -122,7 +123,8 @@ static void vmw_resource_release(struct kref *kref)
5394 + if (res->backup) {
5395 + struct ttm_buffer_object *bo = &res->backup->base;
5396 +
5397 +- ttm_bo_reserve(bo, false, false, NULL);
5398 ++ ret = ttm_bo_reserve(bo, false, false, NULL);
5399 ++ BUG_ON(ret);
5400 + if (vmw_resource_mob_attached(res) &&
5401 + res->func->unbind != NULL) {
5402 + struct ttm_validate_buffer val_buf;
5403 +@@ -1002,7 +1004,9 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
5404 + if (res->backup) {
5405 + vbo = res->backup;
5406 +
5407 +- ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
5408 ++ ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
5409 ++ if (ret)
5410 ++ goto out_no_validate;
5411 + if (!vbo->base.pin_count) {
5412 + ret = ttm_bo_validate
5413 + (&vbo->base,
5414 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
5415 +index 2877c7b43bd78..615bf9ca03d78 100644
5416 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
5417 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
5418 +@@ -539,7 +539,8 @@ const SVGACOTableType vmw_so_cotables[] = {
5419 + [vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
5420 + [vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
5421 + [vmw_so_ss] = SVGA_COTABLE_SAMPLER,
5422 +- [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
5423 ++ [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT,
5424 ++ [vmw_so_max]= SVGA_COTABLE_MAX
5425 + };
5426 +
5427 +
5428 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
5429 +index beab3e19d8e21..0c62cd400b64c 100644
5430 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
5431 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
5432 +@@ -869,7 +869,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
5433 + user_srf->prime.base.shareable = false;
5434 + user_srf->prime.base.tfile = NULL;
5435 + if (drm_is_primary_client(file_priv))
5436 +- user_srf->master = drm_master_get(file_priv->master);
5437 ++ user_srf->master = drm_file_get_master(file_priv);
5438 +
5439 + /**
5440 + * From this point, the generic resource management functions
5441 +@@ -1540,7 +1540,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
5442 +
5443 + user_srf = container_of(srf, struct vmw_user_surface, srf);
5444 + if (drm_is_primary_client(file_priv))
5445 +- user_srf->master = drm_master_get(file_priv->master);
5446 ++ user_srf->master = drm_file_get_master(file_priv);
5447 +
5448 + ret = ttm_read_lock(&dev_priv->reservation_sem, true);
5449 + if (unlikely(ret != 0))
5450 +@@ -1883,7 +1883,6 @@ static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
5451 + static int vmw_surface_dirty_sync(struct vmw_resource *res)
5452 + {
5453 + struct vmw_private *dev_priv = res->dev_priv;
5454 +- bool has_dx = 0;
5455 + u32 i, num_dirty;
5456 + struct vmw_surface_dirty *dirty =
5457 + (struct vmw_surface_dirty *) res->dirty;
5458 +@@ -1910,7 +1909,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
5459 + if (!num_dirty)
5460 + goto out;
5461 +
5462 +- alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2));
5463 ++ alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2));
5464 + cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
5465 + if (!cmd)
5466 + return -ENOMEM;
5467 +@@ -1928,7 +1927,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
5468 + * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
5469 + * UPDATE_GB_IMAGE is not.
5470 + */
5471 +- if (has_dx) {
5472 ++ if (has_sm4_context(dev_priv)) {
5473 + cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
5474 + cmd1->header.size = sizeof(cmd1->body);
5475 + cmd1->body.sid = res->id;
5476 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
5477 +index e7570f422400d..bf20ca9f3a245 100644
5478 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
5479 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
5480 +@@ -586,13 +586,13 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
5481 + container_of(entry->base.bo, typeof(*vbo), base);
5482 +
5483 + if (entry->cpu_blit) {
5484 +- struct ttm_operation_ctx ctx = {
5485 ++ struct ttm_operation_ctx ttm_ctx = {
5486 + .interruptible = intr,
5487 + .no_wait_gpu = false
5488 + };
5489 +
5490 + ret = ttm_bo_validate(entry->base.bo,
5491 +- &vmw_nonfixed_placement, &ctx);
5492 ++ &vmw_nonfixed_placement, &ttm_ctx);
5493 + } else {
5494 + ret = vmw_validation_bo_validate_single
5495 + (entry->base.bo, intr, entry->as_mob);
5496 +diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
5497 +index 109d627968ac0..01c6ce7784ddb 100644
5498 +--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
5499 ++++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
5500 +@@ -1452,9 +1452,10 @@ zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
5501 + struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
5502 + int ret, vrefresh;
5503 +
5504 ++ pm_runtime_get_sync(disp->dev);
5505 ++
5506 + zynqmp_disp_crtc_setup_clock(crtc, adjusted_mode);
5507 +
5508 +- pm_runtime_get_sync(disp->dev);
5509 + ret = clk_prepare_enable(disp->pclk);
5510 + if (ret) {
5511 + dev_err(disp->dev, "failed to enable a pixel clock\n");
5512 +diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
5513 +index 59d1fb017da01..13811332b349f 100644
5514 +--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
5515 ++++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
5516 +@@ -402,10 +402,6 @@ static int zynqmp_dp_phy_init(struct zynqmp_dp *dp)
5517 + }
5518 + }
5519 +
5520 +- ret = zynqmp_dp_reset(dp, false);
5521 +- if (ret < 0)
5522 +- return ret;
5523 +-
5524 + zynqmp_dp_clr(dp, ZYNQMP_DP_PHY_RESET, ZYNQMP_DP_PHY_RESET_ALL_RESET);
5525 +
5526 + /*
5527 +@@ -441,8 +437,6 @@ static void zynqmp_dp_phy_exit(struct zynqmp_dp *dp)
5528 + ret);
5529 + }
5530 +
5531 +- zynqmp_dp_reset(dp, true);
5532 +-
5533 + for (i = 0; i < dp->num_lanes; i++) {
5534 + ret = phy_exit(dp->phy[i]);
5535 + if (ret)
5536 +@@ -1682,9 +1676,13 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
5537 + return PTR_ERR(dp->reset);
5538 + }
5539 +
5540 ++ ret = zynqmp_dp_reset(dp, false);
5541 ++ if (ret < 0)
5542 ++ return ret;
5543 ++
5544 + ret = zynqmp_dp_phy_probe(dp);
5545 + if (ret)
5546 +- return ret;
5547 ++ goto err_reset;
5548 +
5549 + /* Initialize the hardware. */
5550 + zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN,
5551 +@@ -1696,7 +1694,7 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
5552 +
5553 + ret = zynqmp_dp_phy_init(dp);
5554 + if (ret)
5555 +- return ret;
5556 ++ goto err_reset;
5557 +
5558 + zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 1);
5559 +
5560 +@@ -1708,15 +1706,18 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
5561 + zynqmp_dp_irq_handler, IRQF_ONESHOT,
5562 + dev_name(dp->dev), dp);
5563 + if (ret < 0)
5564 +- goto error;
5565 ++ goto err_phy_exit;
5566 +
5567 + dev_dbg(dp->dev, "ZynqMP DisplayPort Tx probed with %u lanes\n",
5568 + dp->num_lanes);
5569 +
5570 + return 0;
5571 +
5572 +-error:
5573 ++err_phy_exit:
5574 + zynqmp_dp_phy_exit(dp);
5575 ++err_reset:
5576 ++ zynqmp_dp_reset(dp, true);
5577 ++
5578 + return ret;
5579 + }
5580 +
5581 +@@ -1734,4 +1735,5 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub)
5582 + zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, 0xffffffff);
5583 +
5584 + zynqmp_dp_phy_exit(dp);
5585 ++ zynqmp_dp_reset(dp, true);
5586 + }
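
[Note on the zynqmp_dp hunks] The rework pulls the reset deassert to the front of probe and converts the single error label into an ordered unwind, so each failure releases exactly what was acquired, and remove() re-asserts the reset last. The general shape, with illustrative names (acquire_*/release_* stand in for the reset, PHY, and IRQ steps):

        static int probe_sketch(struct device *dev)
        {
                int ret;

                ret = acquire_a(dev);           /* e.g. deassert reset */
                if (ret)
                        return ret;

                ret = acquire_b(dev);           /* e.g. PHY init */
                if (ret)
                        goto err_a;

                ret = acquire_c(dev);           /* e.g. request IRQ */
                if (ret)
                        goto err_b;

                return 0;

        err_b:
                release_b(dev);
        err_a:
                release_a(dev);                 /* re-assert reset */
                return ret;
        }
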
5587 +diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
5588 +index 1ea1a7c0b20fe..e29efcb1c0402 100644
5589 +--- a/drivers/hid/Makefile
5590 ++++ b/drivers/hid/Makefile
5591 +@@ -115,7 +115,6 @@ obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
5592 + obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
5593 + obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
5594 + obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o hid-thrustmaster.o
5595 +-obj-$(CONFIG_HID_TMINIT) += hid-tminit.o
5596 + obj-$(CONFIG_HID_TIVO) += hid-tivo.o
5597 + obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
5598 + obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
5599 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
5600 +index 3589d9945da1c..9c7b64e5357ad 100644
5601 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
5602 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
5603 +@@ -186,7 +186,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
5604 + rc = -ENOMEM;
5605 + goto cleanup;
5606 + }
5607 +- info.period = msecs_to_jiffies(AMD_SFH_IDLE_LOOP);
5608 ++ info.period = AMD_SFH_IDLE_LOOP;
5609 + info.sensor_idx = cl_idx;
5610 + info.dma_address = cl_data->sensor_dma_addr[i];
5611 +
5612 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
5613 +index 68c8644234a4a..f43b40450e97c 100644
5614 +--- a/drivers/hid/hid-input.c
5615 ++++ b/drivers/hid/hid-input.c
5616 +@@ -419,8 +419,6 @@ static int hidinput_get_battery_property(struct power_supply *psy,
5617 +
5618 + if (dev->battery_status == HID_BATTERY_UNKNOWN)
5619 + val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
5620 +- else if (dev->battery_capacity == 100)
5621 +- val->intval = POWER_SUPPLY_STATUS_FULL;
5622 + else
5623 + val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
5624 + break;
5625 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
5626 +index 51b39bda9a9d2..2e104682c22b9 100644
5627 +--- a/drivers/hid/hid-quirks.c
5628 ++++ b/drivers/hid/hid-quirks.c
5629 +@@ -662,8 +662,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
5630 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
5631 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
5632 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
5633 +-#endif
5634 +-#if IS_ENABLED(CONFIG_HID_TMINIT)
5635 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65d) },
5636 + #endif
5637 + #if IS_ENABLED(CONFIG_HID_TIVO)
5638 +diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
5639 +index 46474612e73c6..517141138b007 100644
5640 +--- a/drivers/hid/i2c-hid/i2c-hid-core.c
5641 ++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
5642 +@@ -171,8 +171,6 @@ static const struct i2c_hid_quirks {
5643 + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
5644 + { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
5645 + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
5646 +- { USB_VENDOR_ID_ELAN, HID_ANY_ID,
5647 +- I2C_HID_QUIRK_BOGUS_IRQ },
5648 + { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
5649 + I2C_HID_QUIRK_RESET_ON_RESUME },
5650 + { I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393,
5651 +@@ -183,7 +181,8 @@ static const struct i2c_hid_quirks {
5652 + * Sending the wakeup after reset actually break ELAN touchscreen controller
5653 + */
5654 + { USB_VENDOR_ID_ELAN, HID_ANY_ID,
5655 +- I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
5656 ++ I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET |
5657 ++ I2C_HID_QUIRK_BOGUS_IRQ },
5658 + { 0, 0 }
5659 + };
5660 +
5661 +diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
5662 +index 5668d8305b78e..df712ce4b164d 100644
5663 +--- a/drivers/hwmon/pmbus/ibm-cffps.c
5664 ++++ b/drivers/hwmon/pmbus/ibm-cffps.c
5665 +@@ -50,9 +50,9 @@
5666 + #define CFFPS_MFR_VAUX_FAULT BIT(6)
5667 + #define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
5668 +
5669 +-#define CFFPS_LED_BLINK BIT(0)
5670 +-#define CFFPS_LED_ON BIT(1)
5671 +-#define CFFPS_LED_OFF BIT(2)
5672 ++#define CFFPS_LED_BLINK (BIT(0) | BIT(6))
5673 ++#define CFFPS_LED_ON (BIT(1) | BIT(6))
5674 ++#define CFFPS_LED_OFF (BIT(2) | BIT(6))
5675 + #define CFFPS_BLINK_RATE_MS 250
5676 +
5677 + enum {
5678 +diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
5679 +index 9bde869829121..530529feebb51 100644
5680 +--- a/drivers/iio/dac/ad5624r_spi.c
5681 ++++ b/drivers/iio/dac/ad5624r_spi.c
5682 +@@ -229,7 +229,7 @@ static int ad5624r_probe(struct spi_device *spi)
5683 + if (!indio_dev)
5684 + return -ENOMEM;
5685 + st = iio_priv(indio_dev);
5686 +- st->reg = devm_regulator_get(&spi->dev, "vcc");
5687 ++ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
5688 + if (!IS_ERR(st->reg)) {
5689 + ret = regulator_enable(st->reg);
5690 + if (ret)
5691 +@@ -240,6 +240,22 @@ static int ad5624r_probe(struct spi_device *spi)
5692 + goto error_disable_reg;
5693 +
5694 + voltage_uv = ret;
5695 ++ } else {
5696 ++ if (PTR_ERR(st->reg) != -ENODEV)
5697 ++ return PTR_ERR(st->reg);
5698 ++ /* Backwards compatibility. This naming is not correct */
5699 ++ st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
5700 ++ if (!IS_ERR(st->reg)) {
5701 ++ ret = regulator_enable(st->reg);
5702 ++ if (ret)
5703 ++ return ret;
5704 ++
5705 ++ ret = regulator_get_voltage(st->reg);
5706 ++ if (ret < 0)
5707 ++ goto error_disable_reg;
5708 ++
5709 ++ voltage_uv = ret;
5710 ++ }
5711 + }
5712 +
5713 + spi_set_drvdata(spi, indio_dev);
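
[Note on the ad5624r hunk] Probe now asks for the correctly named "vref" supply with devm_regulator_get_optional() and falls back to the historical, misnamed "vcc" supply only when "vref" is absent; any error other than -ENODEV (notably -EPROBE_DEFER) is propagated. The lookup-with-fallback in isolation:

        #include <linux/regulator/consumer.h>

        /* Prefer the correct supply name; keep old DTs working. */
        reg = devm_regulator_get_optional(dev, "vref");
        if (IS_ERR(reg)) {
                if (PTR_ERR(reg) != -ENODEV)
                        return PTR_ERR(reg);
                reg = devm_regulator_get_optional(dev, "vcc");
        }

The hunk then runs the same enable/get_voltage sequence for whichever supply was found.
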
5714 +diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
5715 +index 3b5ba26d7d867..3b4a0e60e6059 100644
5716 +--- a/drivers/iio/temperature/ltc2983.c
5717 ++++ b/drivers/iio/temperature/ltc2983.c
5718 +@@ -89,6 +89,8 @@
5719 +
5720 + #define LTC2983_STATUS_START_MASK BIT(7)
5721 + #define LTC2983_STATUS_START(x) FIELD_PREP(LTC2983_STATUS_START_MASK, x)
5722 ++#define LTC2983_STATUS_UP_MASK GENMASK(7, 6)
5723 ++#define LTC2983_STATUS_UP(reg) FIELD_GET(LTC2983_STATUS_UP_MASK, reg)
5724 +
5725 + #define LTC2983_STATUS_CHAN_SEL_MASK GENMASK(4, 0)
5726 + #define LTC2983_STATUS_CHAN_SEL(x) \
5727 +@@ -1362,17 +1364,16 @@ put_child:
5728 +
5729 + static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio)
5730 + {
5731 +- u32 iio_chan_t = 0, iio_chan_v = 0, chan, iio_idx = 0;
5732 ++ u32 iio_chan_t = 0, iio_chan_v = 0, chan, iio_idx = 0, status;
5733 + int ret;
5734 +- unsigned long time;
5735 +-
5736 +- /* make sure the device is up */
5737 +- time = wait_for_completion_timeout(&st->completion,
5738 +- msecs_to_jiffies(250));
5739 +
5740 +- if (!time) {
5741 ++ /* make sure the device is up: start bit (7) is 0 and done bit (6) is 1 */
5742 ++ ret = regmap_read_poll_timeout(st->regmap, LTC2983_STATUS_REG, status,
5743 ++ LTC2983_STATUS_UP(status) == 1, 25000,
5744 ++ 25000 * 10);
5745 ++ if (ret) {
5746 + dev_err(&st->spi->dev, "Device startup timed out\n");
5747 +- return -ETIMEDOUT;
5748 ++ return ret;
5749 + }
5750 +
5751 + st->iio_chan = devm_kzalloc(&st->spi->dev,
5752 +@@ -1492,10 +1493,11 @@ static int ltc2983_probe(struct spi_device *spi)
5753 + ret = ltc2983_parse_dt(st);
5754 + if (ret)
5755 + return ret;
5756 +- /*
5757 +- * let's request the irq now so it is used to sync the device
5758 +- * startup in ltc2983_setup()
5759 +- */
5760 ++
5761 ++ ret = ltc2983_setup(st, true);
5762 ++ if (ret)
5763 ++ return ret;
5764 ++
5765 + ret = devm_request_irq(&spi->dev, spi->irq, ltc2983_irq_handler,
5766 + IRQF_TRIGGER_RISING, name, st);
5767 + if (ret) {
5768 +@@ -1503,10 +1505,6 @@ static int ltc2983_probe(struct spi_device *spi)
5769 + return ret;
5770 + }
5771 +
5772 +- ret = ltc2983_setup(st, true);
5773 +- if (ret)
5774 +- return ret;
5775 +-
5776 + indio_dev->name = name;
5777 + indio_dev->num_channels = st->iio_channels;
5778 + indio_dev->channels = st->iio_chan;
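
[Note on the ltc2983 hunks] The startup wait changes from an IRQ-driven completion, which forced the interrupt to be requested before setup, to polling the status register until the start bit (7) reads 0 and the done bit (6) reads 1. regmap_read_poll_timeout() re-evaluates the condition after each read, sleeping 25 ms between reads, for up to 250 ms:

        #include <linux/regmap.h>

        u32 status;
        int ret;

        /* Wait for bits [7:6] of STATUS to read 0b01 (start=0, done=1). */
        ret = regmap_read_poll_timeout(st->regmap, LTC2983_STATUS_REG, status,
                                       LTC2983_STATUS_UP(status) == 1,
                                       25000, 25000 * 10);
        if (ret)
                return ret;     /* -ETIMEDOUT if the device never came up */

With the wait decoupled from the IRQ, probe can run ltc2983_setup() first and request the interrupt afterwards, as the reordered hunk does.
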
5779 +diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
5780 +index da8adadf47559..75b6da00065a3 100644
5781 +--- a/drivers/infiniband/core/iwcm.c
5782 ++++ b/drivers/infiniband/core/iwcm.c
5783 +@@ -1187,29 +1187,34 @@ static int __init iw_cm_init(void)
5784 +
5785 + ret = iwpm_init(RDMA_NL_IWCM);
5786 + if (ret)
5787 +- pr_err("iw_cm: couldn't init iwpm\n");
5788 +- else
5789 +- rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
5790 ++ return ret;
5791 ++
5792 + iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
5793 + if (!iwcm_wq)
5794 +- return -ENOMEM;
5795 ++ goto err_alloc;
5796 +
5797 + iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
5798 + iwcm_ctl_table);
5799 + if (!iwcm_ctl_table_hdr) {
5800 + pr_err("iw_cm: couldn't register sysctl paths\n");
5801 +- destroy_workqueue(iwcm_wq);
5802 +- return -ENOMEM;
5803 ++ goto err_sysctl;
5804 + }
5805 +
5806 ++ rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
5807 + return 0;
5808 ++
5809 ++err_sysctl:
5810 ++ destroy_workqueue(iwcm_wq);
5811 ++err_alloc:
5812 ++ iwpm_exit(RDMA_NL_IWCM);
5813 ++ return -ENOMEM;
5814 + }
5815 +
5816 + static void __exit iw_cm_cleanup(void)
5817 + {
5818 ++ rdma_nl_unregister(RDMA_NL_IWCM);
5819 + unregister_net_sysctl_table(iwcm_ctl_table_hdr);
5820 + destroy_workqueue(iwcm_wq);
5821 +- rdma_nl_unregister(RDMA_NL_IWCM);
5822 + iwpm_exit(RDMA_NL_IWCM);
5823 + }
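
[Note on the iwcm hunks] iw_cm_init() is restructured so the module only becomes externally reachable as the last step: iwpm first, then the workqueue and sysctl table, and rdma_nl_register() only once everything is in place; iw_cm_cleanup() now unregisters netlink first and tears the rest down in strict reverse order. A compressed sketch of that shape, with illustrative names:

        static int __init subsys_init(void)
        {
                int ret;

                ret = init_dependency();        /* iwpm_init */
                if (ret)
                        return ret;

                ret = init_resources();         /* workqueue, sysctl */
                if (ret)
                        goto err_dependency;

                publish_interface();            /* rdma_nl_register, last */
                return 0;

        err_dependency:
                exit_dependency();
                return ret;
        }

Registering last closes the window where a netlink callback could arrive while the workqueue it dispatches to does not exist yet.
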
5824 +
5825 +diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
5826 +index 51572f1dc6111..72621ecd81f70 100644
5827 +--- a/drivers/infiniband/hw/efa/efa_verbs.c
5828 ++++ b/drivers/infiniband/hw/efa/efa_verbs.c
5829 +@@ -717,7 +717,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
5830 +
5831 + qp->qp_handle = create_qp_resp.qp_handle;
5832 + qp->ibqp.qp_num = create_qp_resp.qp_num;
5833 +- qp->ibqp.qp_type = init_attr->qp_type;
5834 + qp->max_send_wr = init_attr->cap.max_send_wr;
5835 + qp->max_recv_wr = init_attr->cap.max_recv_wr;
5836 + qp->max_send_sge = init_attr->cap.max_send_sge;
5837 +diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
5838 +index e3a8a420c0455..c076eed9c3b77 100644
5839 +--- a/drivers/infiniband/hw/hfi1/init.c
5840 ++++ b/drivers/infiniband/hw/hfi1/init.c
5841 +@@ -650,12 +650,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
5842 +
5843 + ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
5844 + ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
5845 +-
5846 +- if (loopback) {
5847 +- dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
5848 +- !default_pkey_idx);
5849 +- ppd->pkeys[!default_pkey_idx] = 0x8001;
5850 +- }
5851 ++ ppd->pkeys[0] = 0x8001;
5852 +
5853 + INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
5854 + INIT_WORK(&ppd->link_up_work, handle_link_up);
5855 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5856 +index dcbe5e28a4f7a..90945e664f5da 100644
5857 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5858 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5859 +@@ -4735,8 +4735,10 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
5860 + spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
5861 +
5862 + list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
5863 +- if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16))
5864 ++ if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
5865 ++ *dip_idx = hr_dip->dip_idx;
5866 + goto out;
5867 ++ }
5868 + }
5869 +
5870 + /* If no dgid is found, a new dip and a mapping between dgid and
5871 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
5872 +index 23cf2f6bc7a54..d4da840dbc2ef 100644
5873 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
5874 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
5875 +@@ -1784,7 +1784,7 @@ struct hns_roce_eq_context {
5876 +
5877 + struct hns_roce_dip {
5878 + u8 dgid[GID_LEN_V2];
5879 +- u8 dip_idx;
5880 ++ u32 dip_idx;
5881 + struct list_head node; /* all dips are on a list */
5882 + };
5883 +
5884 +diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
5885 +index b8454dcb03183..39a085f8e6055 100644
5886 +--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
5887 ++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
5888 +@@ -361,7 +361,9 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
5889 + free_cmd_mbox:
5890 + hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5891 +
5892 +- return ERR_PTR(ret);
5893 ++ if (ret)
5894 ++ return ERR_PTR(ret);
5895 ++ return NULL;
5896 + }
5897 +
5898 + int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
5899 +diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
5900 +index 230a909ba9bcd..5d5dd0b5d5075 100644
5901 +--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
5902 ++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
5903 +@@ -835,7 +835,6 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
5904 + goto err_out;
5905 + }
5906 + hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
5907 +- resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
5908 + }
5909 +
5910 + if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
5911 +@@ -848,7 +847,6 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
5912 + goto err_sdb;
5913 + }
5914 + hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
5915 +- resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
5916 + }
5917 + } else {
5918 + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
5919 +@@ -1060,6 +1058,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
5920 + }
5921 +
5922 + if (udata) {
5923 ++ resp.cap_flags = hr_qp->en_flags;
5924 + ret = ib_copy_to_udata(udata, &resp,
5925 + min(udata->outlen, sizeof(resp)));
5926 + if (ret) {
5927 +@@ -1158,14 +1157,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
5928 + if (!hr_qp)
5929 + return ERR_PTR(-ENOMEM);
5930 +
5931 +- if (init_attr->qp_type == IB_QPT_XRC_INI)
5932 +- init_attr->recv_cq = NULL;
5933 +-
5934 +- if (init_attr->qp_type == IB_QPT_XRC_TGT) {
5935 ++ if (init_attr->qp_type == IB_QPT_XRC_TGT)
5936 + hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
5937 +- init_attr->recv_cq = NULL;
5938 +- init_attr->send_cq = NULL;
5939 +- }
5940 +
5941 + if (init_attr->qp_type == IB_QPT_GSI) {
5942 + hr_qp->port = init_attr->port_num - 1;
5943 +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
5944 +index 5851486c0d930..2471f48ea5f39 100644
5945 +--- a/drivers/infiniband/hw/mlx5/qp.c
5946 ++++ b/drivers/infiniband/hw/mlx5/qp.c
5947 +@@ -1896,7 +1896,6 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
5948 + static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
5949 + struct mlx5_create_qp_params *params)
5950 + {
5951 +- struct mlx5_ib_create_qp *ucmd = params->ucmd;
5952 + struct ib_qp_init_attr *attr = params->attr;
5953 + u32 uidx = params->uidx;
5954 + struct mlx5_ib_resources *devr = &dev->devr;
5955 +@@ -1916,8 +1915,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
5956 + if (!in)
5957 + return -ENOMEM;
5958 +
5959 +- if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
5960 +- MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
5961 + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
5962 +
5963 + MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
5964 +diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
5965 +index dc4a240f44895..3c84deefa327d 100644
5966 +--- a/drivers/input/mouse/elan_i2c.h
5967 ++++ b/drivers/input/mouse/elan_i2c.h
5968 +@@ -55,8 +55,9 @@
5969 + #define ETP_FW_PAGE_SIZE_512 512
5970 + #define ETP_FW_SIGNATURE_SIZE 6
5971 +
5972 +-#define ETP_PRODUCT_ID_DELBIN 0x00C2
5973 ++#define ETP_PRODUCT_ID_WHITEBOX 0x00B8
5974 + #define ETP_PRODUCT_ID_VOXEL 0x00BF
5975 ++#define ETP_PRODUCT_ID_DELBIN 0x00C2
5976 + #define ETP_PRODUCT_ID_MAGPIE 0x0120
5977 + #define ETP_PRODUCT_ID_BOBBA 0x0121
5978 +
5979 +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
5980 +index dad22c1ea6a0f..47af62c122672 100644
5981 +--- a/drivers/input/mouse/elan_i2c_core.c
5982 ++++ b/drivers/input/mouse/elan_i2c_core.c
5983 +@@ -105,6 +105,7 @@ static u32 elan_i2c_lookup_quirks(u16 ic_type, u16 product_id)
5984 + u32 quirks;
5985 + } elan_i2c_quirks[] = {
5986 + { 0x0D, ETP_PRODUCT_ID_DELBIN, ETP_QUIRK_QUICK_WAKEUP },
5987 ++ { 0x0D, ETP_PRODUCT_ID_WHITEBOX, ETP_QUIRK_QUICK_WAKEUP },
5988 + { 0x10, ETP_PRODUCT_ID_VOXEL, ETP_QUIRK_QUICK_WAKEUP },
5989 + { 0x14, ETP_PRODUCT_ID_MAGPIE, ETP_QUIRK_QUICK_WAKEUP },
5990 + { 0x14, ETP_PRODUCT_ID_BOBBA, ETP_QUIRK_QUICK_WAKEUP },
5991 +diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
5992 +index c11bc8b833b8e..d5552e2c160d2 100644
5993 +--- a/drivers/iommu/intel/pasid.h
5994 ++++ b/drivers/iommu/intel/pasid.h
5995 +@@ -28,12 +28,12 @@
5996 + #define VCMD_CMD_ALLOC 0x1
5997 + #define VCMD_CMD_FREE 0x2
5998 + #define VCMD_VRSP_IP 0x1
5999 +-#define VCMD_VRSP_SC(e) (((e) >> 1) & 0x3)
6000 ++#define VCMD_VRSP_SC(e) (((e) & 0xff) >> 1)
6001 + #define VCMD_VRSP_SC_SUCCESS 0
6002 +-#define VCMD_VRSP_SC_NO_PASID_AVAIL 2
6003 +-#define VCMD_VRSP_SC_INVALID_PASID 2
6004 +-#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 8) & 0xfffff)
6005 +-#define VCMD_CMD_OPERAND(e) ((e) << 8)
6006 ++#define VCMD_VRSP_SC_NO_PASID_AVAIL 16
6007 ++#define VCMD_VRSP_SC_INVALID_PASID 16
6008 ++#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 16) & 0xfffff)
6009 ++#define VCMD_CMD_OPERAND(e) ((e) << 16)
6010 + /*
6011 + * Domain ID reserved for pasid entries programmed for first-level
6012 + * only and pass-through transfer modes.
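The hunk above rewrites the VT-d virtual-command response macros: the status code field widens from bits 2:1 to bits 7:1, the PASID result and command operand move from bit 8 to bit 16, and the error status codes become 16 rather than 2 to match. A minimal user-space sketch of the corrected field extraction, with the macro bodies copied from the hunk and a fabricated response value (the register layout here is taken from this diff, not independently verified against the VT-d spec):

    #include <stdio.h>
    #include <stdint.h>

    #define VCMD_VRSP_SC(e)           (((e) & 0xff) >> 1)     /* status: bits 7:1 */
    #define VCMD_VRSP_RESULT_PASID(e) (((e) >> 16) & 0xfffff) /* pasid: bits 35:16 */

    int main(void)
    {
        /* Fabricated response: success status, PASID 0x1234. */
        uint64_t vrsp = (uint64_t)0x1234 << 16;

        printf("status=%llu pasid=%#llx\n",
               (unsigned long long)VCMD_VRSP_SC(vrsp),
               (unsigned long long)VCMD_VRSP_RESULT_PASID(vrsp));
        return 0;
    }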
6013 +diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
6014 +index 5665b6ea8119f..75378e35c3d66 100644
6015 +--- a/drivers/mailbox/mtk-cmdq-mailbox.c
6016 ++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
6017 +@@ -168,7 +168,8 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
6018 + dma_sync_single_for_cpu(dev, prev_task->pa_base,
6019 + prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
6020 + prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
6021 +- (u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base;
6022 ++ (u64)CMDQ_JUMP_BY_PA << 32 |
6023 ++ (task->pa_base >> task->cmdq->shift_pa);
6024 + dma_sync_single_for_device(dev, prev_task->pa_base,
6025 + prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
6026 +
6027 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
6028 +index b0ab080f25676..85f3a1a4fbb39 100644
6029 +--- a/drivers/md/dm-crypt.c
6030 ++++ b/drivers/md/dm-crypt.c
6031 +@@ -2661,7 +2661,12 @@ static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
6032 + struct crypt_config *cc = pool_data;
6033 + struct page *page;
6034 +
6035 +- if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
6036 ++ /*
6037 ++ * Note, percpu_counter_read_positive() may over (and under) estimate
6038 ++ * the current usage by at most (batch - 1) * num_online_cpus() pages,
6039 ++ * but avoids potential spinlock contention of an exact result.
6040 ++ */
6041 ++ if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
6042 + likely(gfp_mask & __GFP_NORETRY))
6043 + return NULL;
6044 +
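The dm-crypt change above swaps an exact percpu_counter_compare() for the cheaper percpu_counter_read_positive(), accepting the bounded error the new comment describes in exchange for never taking the counter's spinlock on this allocation path. A rough user-space analog of that trade-off, with per-thread deltas folded into the shared total only once per batch (all names and the batch size are invented for illustration):

    #include <stdio.h>
    #include <pthread.h>

    #define BATCH 32

    static long total;                  /* shared, lock-protected */
    static pthread_mutex_t total_lock = PTHREAD_MUTEX_INITIALIZER;
    static __thread long local_count;   /* per-thread delta, lock-free */

    static void counter_add(long n)
    {
        local_count += n;
        if (local_count >= BATCH || local_count <= -BATCH) {
            pthread_mutex_lock(&total_lock);
            total += local_count;       /* fold the batch into the total */
            pthread_mutex_unlock(&total_lock);
            local_count = 0;
        }
    }

    /* Cheap read: may be off by up to (BATCH - 1) per thread,
     * mirroring the bound quoted in the comment above. */
    static long counter_read_approx(void)
    {
        return total;
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            counter_add(1);
        printf("approx=%ld (exact is 100)\n", counter_read_approx());
        return 0;
    }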
6045 +diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c
6046 +index ea4b1ebfca991..0ffd89712536b 100644
6047 +--- a/drivers/media/cec/platform/stm32/stm32-cec.c
6048 ++++ b/drivers/media/cec/platform/stm32/stm32-cec.c
6049 +@@ -305,14 +305,16 @@ static int stm32_cec_probe(struct platform_device *pdev)
6050 +
6051 + cec->clk_hdmi_cec = devm_clk_get(&pdev->dev, "hdmi-cec");
6052 + if (IS_ERR(cec->clk_hdmi_cec) &&
6053 +- PTR_ERR(cec->clk_hdmi_cec) == -EPROBE_DEFER)
6054 +- return -EPROBE_DEFER;
6055 ++ PTR_ERR(cec->clk_hdmi_cec) == -EPROBE_DEFER) {
6056 ++ ret = -EPROBE_DEFER;
6057 ++ goto err_unprepare_cec_clk;
6058 ++ }
6059 +
6060 + if (!IS_ERR(cec->clk_hdmi_cec)) {
6061 + ret = clk_prepare(cec->clk_hdmi_cec);
6062 + if (ret) {
6063 + dev_err(&pdev->dev, "Can't prepare hdmi-cec clock\n");
6064 +- return ret;
6065 ++ goto err_unprepare_cec_clk;
6066 + }
6067 + }
6068 +
6069 +@@ -324,19 +326,27 @@ static int stm32_cec_probe(struct platform_device *pdev)
6070 + CEC_NAME, caps, CEC_MAX_LOG_ADDRS);
6071 + ret = PTR_ERR_OR_ZERO(cec->adap);
6072 + if (ret)
6073 +- return ret;
6074 ++ goto err_unprepare_hdmi_cec_clk;
6075 +
6076 + ret = cec_register_adapter(cec->adap, &pdev->dev);
6077 +- if (ret) {
6078 +- cec_delete_adapter(cec->adap);
6079 +- return ret;
6080 +- }
6081 ++ if (ret)
6082 ++ goto err_delete_adapter;
6083 +
6084 + cec_hw_init(cec);
6085 +
6086 + platform_set_drvdata(pdev, cec);
6087 +
6088 + return 0;
6089 ++
6090 ++err_delete_adapter:
6091 ++ cec_delete_adapter(cec->adap);
6092 ++
6093 ++err_unprepare_hdmi_cec_clk:
6094 ++ clk_unprepare(cec->clk_hdmi_cec);
6095 ++
6096 ++err_unprepare_cec_clk:
6097 ++ clk_unprepare(cec->clk_cec);
6098 ++ return ret;
6099 + }
6100 +
6101 + static int stm32_cec_remove(struct platform_device *pdev)
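The stm32-cec hunk above replaces early returns with a single unwind ladder so that every failure path releases whatever was prepared before it (including clk_cec, which probe prepares before the lines shown here). The general shape of that pattern, as a self-contained sketch with placeholder resource names:

    #include <stdio.h>

    static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
    static void release(const char *what) { printf("release %s\n", what); }

    static int probe(void)
    {
        int ret;

        ret = acquire("clk_cec");
        if (ret)
            return ret;

        ret = acquire("clk_hdmi_cec");
        if (ret)
            goto err_release_cec;

        ret = acquire("adapter");
        if (ret)
            goto err_release_hdmi;

        return 0;

        /* Unwind in reverse order of acquisition. */
    err_release_hdmi:
        release("clk_hdmi_cec");
    err_release_cec:
        release("clk_cec");
        return ret;
    }

    int main(void) { return probe(); }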
6102 +diff --git a/drivers/media/cec/platform/tegra/tegra_cec.c b/drivers/media/cec/platform/tegra/tegra_cec.c
6103 +index 1ac0c70a59818..5e907395ca2e5 100644
6104 +--- a/drivers/media/cec/platform/tegra/tegra_cec.c
6105 ++++ b/drivers/media/cec/platform/tegra/tegra_cec.c
6106 +@@ -366,7 +366,11 @@ static int tegra_cec_probe(struct platform_device *pdev)
6107 + return -ENOENT;
6108 + }
6109 +
6110 +- clk_prepare_enable(cec->clk);
6111 ++ ret = clk_prepare_enable(cec->clk);
6112 ++ if (ret) {
6113 ++ dev_err(&pdev->dev, "Unable to prepare clock for CEC\n");
6114 ++ return ret;
6115 ++ }
6116 +
6117 + /* set context info. */
6118 + cec->dev = &pdev->dev;
6119 +@@ -446,9 +450,7 @@ static int tegra_cec_resume(struct platform_device *pdev)
6120 +
6121 + dev_notice(&pdev->dev, "Resuming\n");
6122 +
6123 +- clk_prepare_enable(cec->clk);
6124 +-
6125 +- return 0;
6126 ++ return clk_prepare_enable(cec->clk);
6127 + }
6128 + #endif
6129 +
6130 +diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
6131 +index 082796534b0ae..bb02354a48b81 100644
6132 +--- a/drivers/media/dvb-frontends/dib8000.c
6133 ++++ b/drivers/media/dvb-frontends/dib8000.c
6134 +@@ -2107,32 +2107,55 @@ static void dib8000_load_ana_fe_coefs(struct dib8000_state *state, const s16 *an
6135 + dib8000_write_word(state, 117 + mode, ana_fe[mode]);
6136 + }
6137 +
6138 +-static const u16 lut_prbs_2k[14] = {
6139 +- 0, 0x423, 0x009, 0x5C7, 0x7A6, 0x3D8, 0x527, 0x7FF, 0x79B, 0x3D6, 0x3A2, 0x53B, 0x2F4, 0x213
6140 ++static const u16 lut_prbs_2k[13] = {
6141 ++ 0x423, 0x009, 0x5C7,
6142 ++ 0x7A6, 0x3D8, 0x527,
6143 ++ 0x7FF, 0x79B, 0x3D6,
6144 ++ 0x3A2, 0x53B, 0x2F4,
6145 ++ 0x213
6146 + };
6147 +-static const u16 lut_prbs_4k[14] = {
6148 +- 0, 0x208, 0x0C3, 0x7B9, 0x423, 0x5C7, 0x3D8, 0x7FF, 0x3D6, 0x53B, 0x213, 0x029, 0x0D0, 0x48E
6149 ++
6150 ++static const u16 lut_prbs_4k[13] = {
6151 ++ 0x208, 0x0C3, 0x7B9,
6152 ++ 0x423, 0x5C7, 0x3D8,
6153 ++ 0x7FF, 0x3D6, 0x53B,
6154 ++ 0x213, 0x029, 0x0D0,
6155 ++ 0x48E
6156 + };
6157 +-static const u16 lut_prbs_8k[14] = {
6158 +- 0, 0x740, 0x069, 0x7DD, 0x208, 0x7B9, 0x5C7, 0x7FF, 0x53B, 0x029, 0x48E, 0x4C4, 0x367, 0x684
6159 ++
6160 ++static const u16 lut_prbs_8k[13] = {
6161 ++ 0x740, 0x069, 0x7DD,
6162 ++ 0x208, 0x7B9, 0x5C7,
6163 ++ 0x7FF, 0x53B, 0x029,
6164 ++ 0x48E, 0x4C4, 0x367,
6165 ++ 0x684
6166 + };
6167 +
6168 + static u16 dib8000_get_init_prbs(struct dib8000_state *state, u16 subchannel)
6169 + {
6170 + int sub_channel_prbs_group = 0;
6171 ++ int prbs_group;
6172 +
6173 +- sub_channel_prbs_group = (subchannel / 3) + 1;
6174 +- dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]);
6175 ++ sub_channel_prbs_group = subchannel / 3;
6176 ++ if (sub_channel_prbs_group >= ARRAY_SIZE(lut_prbs_2k))
6177 ++ return 0;
6178 +
6179 + switch (state->fe[0]->dtv_property_cache.transmission_mode) {
6180 + case TRANSMISSION_MODE_2K:
6181 +- return lut_prbs_2k[sub_channel_prbs_group];
6182 ++ prbs_group = lut_prbs_2k[sub_channel_prbs_group];
6183 ++ break;
6184 + case TRANSMISSION_MODE_4K:
6185 +- return lut_prbs_4k[sub_channel_prbs_group];
6186 ++ prbs_group = lut_prbs_4k[sub_channel_prbs_group];
6187 ++ break;
6188 + default:
6189 + case TRANSMISSION_MODE_8K:
6190 +- return lut_prbs_8k[sub_channel_prbs_group];
6191 ++ prbs_group = lut_prbs_8k[sub_channel_prbs_group];
6192 + }
6193 ++
6194 ++	dprintk("sub_channel_prbs_group = %d, subchannel = %d, prbs = 0x%04x\n",
6195 ++ sub_channel_prbs_group, subchannel, prbs_group);
6196 ++
6197 ++ return prbs_group;
6198 + }
6199 +
6200 + static void dib8000_set_13seg_channel(struct dib8000_state *state)
6201 +@@ -2409,10 +2432,8 @@ static void dib8000_set_isdbt_common_channel(struct dib8000_state *state, u8 seq
6202 + /* TSB or ISDBT ? apply it now */
6203 + if (c->isdbt_sb_mode) {
6204 + dib8000_set_sb_channel(state);
6205 +- if (c->isdbt_sb_subchannel < 14)
6206 +- init_prbs = dib8000_get_init_prbs(state, c->isdbt_sb_subchannel);
6207 +- else
6208 +- init_prbs = 0;
6209 ++ init_prbs = dib8000_get_init_prbs(state,
6210 ++ c->isdbt_sb_subchannel);
6211 + } else {
6212 + dib8000_set_13seg_channel(state);
6213 + init_prbs = 0xfff;
6214 +@@ -3004,6 +3025,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
6215 +
6216 + unsigned long *timeout = &state->timeout;
6217 + unsigned long now = jiffies;
6218 ++ u16 init_prbs;
6219 + #ifdef DIB8000_AGC_FREEZE
6220 + u16 agc1, agc2;
6221 + #endif
6222 +@@ -3302,8 +3324,10 @@ static int dib8000_tune(struct dvb_frontend *fe)
6223 + break;
6224 +
6225 + case CT_DEMOD_STEP_11: /* 41 : init prbs autosearch */
6226 +- if (state->subchannel <= 41) {
6227 +- dib8000_set_subchannel_prbs(state, dib8000_get_init_prbs(state, state->subchannel));
6228 ++ init_prbs = dib8000_get_init_prbs(state, state->subchannel);
6229 ++
6230 ++ if (init_prbs) {
6231 ++ dib8000_set_subchannel_prbs(state, init_prbs);
6232 + *tune_state = CT_DEMOD_STEP_9;
6233 + } else {
6234 + *tune_state = CT_DEMOD_STOP;
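The dib8000 changes above drop the unused leading zero from the PRBS tables and move the range check into the lookup itself, returning 0 as an "invalid subchannel" sentinel that both callers now test. A compact sketch of that pattern, reusing the 2k table values from the hunk:

    #include <stdio.h>
    #include <stdint.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const uint16_t lut_prbs_2k[13] = {
        0x423, 0x009, 0x5C7, 0x7A6, 0x3D8, 0x527, 0x7FF,
        0x79B, 0x3D6, 0x3A2, 0x53B, 0x2F4, 0x213
    };

    /* Returns 0 when the subchannel is out of range; callers treat 0
     * as "no valid PRBS", exactly as the patched driver does. */
    static uint16_t get_init_prbs(unsigned int subchannel)
    {
        unsigned int group = subchannel / 3;

        if (group >= ARRAY_SIZE(lut_prbs_2k))
            return 0;
        return lut_prbs_2k[group];
    }

    int main(void)
    {
        printf("subchannel 4  -> %#x\n", get_init_prbs(4));  /* group 1 */
        printf("subchannel 42 -> %#x\n", get_init_prbs(42)); /* out of range */
        return 0;
    }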
6235 +diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
6236 +index a017ec4e0f504..cdeaaec318791 100644
6237 +--- a/drivers/media/i2c/imx258.c
6238 ++++ b/drivers/media/i2c/imx258.c
6239 +@@ -23,7 +23,7 @@
6240 + #define IMX258_CHIP_ID 0x0258
6241 +
6242 + /* V_TIMING internal */
6243 +-#define IMX258_VTS_30FPS 0x0c98
6244 ++#define IMX258_VTS_30FPS 0x0c50
6245 + #define IMX258_VTS_30FPS_2K 0x0638
6246 + #define IMX258_VTS_30FPS_VGA 0x034c
6247 + #define IMX258_VTS_MAX 0xffff
6248 +@@ -47,7 +47,7 @@
6249 + /* Analog gain control */
6250 + #define IMX258_REG_ANALOG_GAIN 0x0204
6251 + #define IMX258_ANA_GAIN_MIN 0
6252 +-#define IMX258_ANA_GAIN_MAX 0x1fff
6253 ++#define IMX258_ANA_GAIN_MAX 480
6254 + #define IMX258_ANA_GAIN_STEP 1
6255 + #define IMX258_ANA_GAIN_DEFAULT 0x0
6256 +
6257 +diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
6258 +index 9554c8348c020..17cc69c3227f8 100644
6259 +--- a/drivers/media/i2c/tda1997x.c
6260 ++++ b/drivers/media/i2c/tda1997x.c
6261 +@@ -1695,14 +1695,15 @@ static int tda1997x_query_dv_timings(struct v4l2_subdev *sd,
6262 + struct v4l2_dv_timings *timings)
6263 + {
6264 + struct tda1997x_state *state = to_state(sd);
6265 ++ int ret;
6266 +
6267 + v4l_dbg(1, debug, state->client, "%s\n", __func__);
6268 + memset(timings, 0, sizeof(struct v4l2_dv_timings));
6269 + mutex_lock(&state->lock);
6270 +- tda1997x_detect_std(state, timings);
6271 ++ ret = tda1997x_detect_std(state, timings);
6272 + mutex_unlock(&state->lock);
6273 +
6274 +- return 0;
6275 ++ return ret;
6276 + }
6277 +
6278 + static const struct v4l2_subdev_video_ops tda1997x_video_ops = {
6279 +diff --git a/drivers/media/platform/ti-vpe/cal-camerarx.c b/drivers/media/platform/ti-vpe/cal-camerarx.c
6280 +index cbe6114908de7..63d13bcc83b47 100644
6281 +--- a/drivers/media/platform/ti-vpe/cal-camerarx.c
6282 ++++ b/drivers/media/platform/ti-vpe/cal-camerarx.c
6283 +@@ -842,7 +842,9 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
6284 + if (ret)
6285 + goto error;
6286 +
6287 +- cal_camerarx_sd_init_cfg(sd, NULL);
6288 ++ ret = cal_camerarx_sd_init_cfg(sd, NULL);
6289 ++ if (ret)
6290 ++ goto error;
6291 +
6292 + ret = v4l2_device_register_subdev(&cal->v4l2_dev, sd);
6293 + if (ret)
6294 +diff --git a/drivers/media/platform/ti-vpe/cal-video.c b/drivers/media/platform/ti-vpe/cal-video.c
6295 +index 7b7436a355ee3..b9405f70af9f5 100644
6296 +--- a/drivers/media/platform/ti-vpe/cal-video.c
6297 ++++ b/drivers/media/platform/ti-vpe/cal-video.c
6298 +@@ -694,7 +694,7 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
6299 +
6300 + spin_lock_irq(&ctx->dma.lock);
6301 + buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, list);
6302 +- ctx->dma.pending = buf;
6303 ++ ctx->dma.active = buf;
6304 + list_del(&buf->list);
6305 + spin_unlock_irq(&ctx->dma.lock);
6306 +
6307 +diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
6308 +index 1ba3f96ffa7dc..40ab66c850f23 100644
6309 +--- a/drivers/media/rc/rc-loopback.c
6310 ++++ b/drivers/media/rc/rc-loopback.c
6311 +@@ -42,7 +42,7 @@ static int loop_set_tx_mask(struct rc_dev *dev, u32 mask)
6312 +
6313 + if ((mask & (RXMASK_REGULAR | RXMASK_LEARNING)) != mask) {
6314 + dprintk("invalid tx mask: %u\n", mask);
6315 +- return -EINVAL;
6316 ++ return 2;
6317 + }
6318 +
6319 + dprintk("setting tx mask: %u\n", mask);
6320 +diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
6321 +index 252136cc885ce..6acb8013de08b 100644
6322 +--- a/drivers/media/usb/uvc/uvc_v4l2.c
6323 ++++ b/drivers/media/usb/uvc/uvc_v4l2.c
6324 +@@ -899,8 +899,8 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
6325 + {
6326 + struct uvc_fh *handle = fh;
6327 + struct uvc_video_chain *chain = handle->chain;
6328 ++ u8 *buf;
6329 + int ret;
6330 +- u8 i;
6331 +
6332 + if (chain->selector == NULL ||
6333 + (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
6334 +@@ -908,22 +908,27 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
6335 + return 0;
6336 + }
6337 +
6338 ++ buf = kmalloc(1, GFP_KERNEL);
6339 ++ if (!buf)
6340 ++ return -ENOMEM;
6341 ++
6342 + ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, chain->selector->id,
6343 + chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
6344 +- &i, 1);
6345 +- if (ret < 0)
6346 +- return ret;
6347 ++ buf, 1);
6348 ++ if (!ret)
6349 ++ *input = *buf - 1;
6350 +
6351 +- *input = i - 1;
6352 +- return 0;
6353 ++ kfree(buf);
6354 ++
6355 ++ return ret;
6356 + }
6357 +
6358 + static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
6359 + {
6360 + struct uvc_fh *handle = fh;
6361 + struct uvc_video_chain *chain = handle->chain;
6362 ++ u8 *buf;
6363 + int ret;
6364 +- u32 i;
6365 +
6366 + ret = uvc_acquire_privileges(handle);
6367 + if (ret < 0)
6368 +@@ -939,10 +944,17 @@ static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
6369 + if (input >= chain->selector->bNrInPins)
6370 + return -EINVAL;
6371 +
6372 +- i = input + 1;
6373 +- return uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
6374 +- chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
6375 +- &i, 1);
6376 ++ buf = kmalloc(1, GFP_KERNEL);
6377 ++ if (!buf)
6378 ++ return -ENOMEM;
6379 ++
6380 ++ *buf = input + 1;
6381 ++ ret = uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
6382 ++ chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
6383 ++ buf, 1);
6384 ++ kfree(buf);
6385 ++
6386 ++ return ret;
6387 + }
6388 +
6389 + static int uvc_ioctl_queryctrl(struct file *file, void *fh,
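Both uvc hunks above stop passing the address of a stack variable to uvc_query_ctrl(): the control payload can end up in a USB transfer that is DMA-mapped, and DMA to on-stack memory is not allowed, so the one-byte buffer moves to kmalloc(). The shape of the change as a user-space sketch, where query_ctrl() is only a stand-in for the real USB query:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Stand-in for uvc_query_ctrl(): pretends the device reports input 2. */
    static int query_ctrl(uint8_t *data, size_t len)
    {
        if (len != 1)
            return -1;
        *data = 2;
        return 0;
    }

    static int get_input(unsigned int *input)
    {
        uint8_t *buf;
        int ret;

        buf = malloc(1);       /* kmalloc() in the driver: heap, DMA-safe */
        if (!buf)
            return -1;

        ret = query_ctrl(buf, 1);
        if (!ret)
            *input = *buf - 1; /* the device numbers inputs from 1 */

        free(buf);
        return ret;
    }

    int main(void)
    {
        unsigned int input;

        if (!get_input(&input))
            printf("current input: %u\n", input);
        return 0;
    }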
6390 +diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
6391 +index 230d65a642178..af48705c704f8 100644
6392 +--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
6393 ++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
6394 +@@ -196,7 +196,7 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
6395 + if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
6396 + return false;
6397 +
6398 +- for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
6399 ++ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
6400 + if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
6401 + fnc, fnc_handle) &&
6402 + v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
6403 +@@ -218,7 +218,7 @@ bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
6404 + {
6405 + unsigned int i;
6406 +
6407 +- for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
6408 ++ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
6409 + const struct v4l2_bt_timings *bt =
6410 + &v4l2_dv_timings_presets[i].bt;
6411 +
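Both v4l2-dv-timings hunks fix the same loop condition: v4l2_dv_timings_presets[] ends in a zero-width sentinel entry, so the loop must test the current entry's width, not compare the index against it. A minimal reproduction of the difference:

    #include <stdio.h>

    struct preset { unsigned int width, height; };

    /* Sentinel-terminated, like v4l2_dv_timings_presets[]. */
    static const struct preset presets[] = {
        { 1920, 1080 }, { 1280, 720 }, { 720, 576 }, { 0, 0 }
    };

    int main(void)
    {
        /* Correct: stop at the zero-width sentinel. */
        for (unsigned int i = 0; presets[i].width; i++)
            printf("preset %u: %ux%u\n", i, presets[i].width,
                   presets[i].height);

        /* The old condition, "i < presets[i].width", compared the loop
         * index against a pixel width. It terminated only by accident:
         * at the all-zero sentinel "i < 0" is false, and every real
         * width happens to dwarf the index. */
        return 0;
    }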
6412 +diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
6413 +index 046ce4ecc1959..4a32505644428 100644
6414 +--- a/drivers/misc/pvpanic/pvpanic-pci.c
6415 ++++ b/drivers/misc/pvpanic/pvpanic-pci.c
6416 +@@ -119,4 +119,6 @@ static struct pci_driver pvpanic_pci_driver = {
6417 + },
6418 + };
6419 +
6420 ++MODULE_DEVICE_TABLE(pci, pvpanic_pci_id_tbl);
6421 ++
6422 + module_pci_driver(pvpanic_pci_driver);
6423 +diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
6424 +index 880c33ab9f47b..94ebf7f3fd58a 100644
6425 +--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
6426 ++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
6427 +@@ -2243,7 +2243,8 @@ int vmci_qp_broker_map(struct vmci_handle handle,
6428 +
6429 + result = VMCI_SUCCESS;
6430 +
6431 +- if (context_id != VMCI_HOST_CONTEXT_ID) {
6432 ++ if (context_id != VMCI_HOST_CONTEXT_ID &&
6433 ++ !QPBROKERSTATE_HAS_MEM(entry)) {
6434 + struct vmci_qp_page_store page_store;
6435 +
6436 + page_store.pages = guest_mem;
6437 +@@ -2350,7 +2351,8 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
6438 + goto out;
6439 + }
6440 +
6441 +- if (context_id != VMCI_HOST_CONTEXT_ID) {
6442 ++ if (context_id != VMCI_HOST_CONTEXT_ID &&
6443 ++ QPBROKERSTATE_HAS_MEM(entry)) {
6444 + qp_acquire_queue_mutex(entry->produce_q);
6445 + result = qp_save_headers(entry);
6446 + if (result < VMCI_SUCCESS)
6447 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
6448 +index 2518bc0856596..d47829b9fc0ff 100644
6449 +--- a/drivers/mmc/core/block.c
6450 ++++ b/drivers/mmc/core/block.c
6451 +@@ -542,6 +542,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
6452 + return mmc_sanitize(card, idata->ic.cmd_timeout_ms);
6453 +
6454 + mmc_wait_for_req(card->host, &mrq);
6455 ++ memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
6456 +
6457 + if (cmd.error) {
6458 + dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
6459 +@@ -591,8 +592,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
6460 + if (idata->ic.postsleep_min_us)
6461 + usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
6462 +
6463 +- memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
6464 +-
6465 + if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
6466 + /*
6467 + * Ensure RPMB/R1B command has completed by polling CMD13
6468 +diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
6469 +index 4ca9374157348..58cfaffa3c2d8 100644
6470 +--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
6471 ++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
6472 +@@ -542,9 +542,22 @@ static int sd_write_long_data(struct realtek_pci_sdmmc *host,
6473 + return 0;
6474 + }
6475 +
6476 ++static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
6477 ++{
6478 ++ rtsx_pci_write_register(host->pcr, SD_CFG1,
6479 ++ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
6480 ++}
6481 ++
6482 ++static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
6483 ++{
6484 ++ rtsx_pci_write_register(host->pcr, SD_CFG1,
6485 ++ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
6486 ++}
6487 ++
6488 + static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
6489 + {
6490 + struct mmc_data *data = mrq->data;
6491 ++ int err;
6492 +
6493 + if (host->sg_count < 0) {
6494 + data->error = host->sg_count;
6495 +@@ -553,22 +566,19 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
6496 + return data->error;
6497 + }
6498 +
6499 +- if (data->flags & MMC_DATA_READ)
6500 +- return sd_read_long_data(host, mrq);
6501 ++ if (data->flags & MMC_DATA_READ) {
6502 ++ if (host->initial_mode)
6503 ++ sd_disable_initial_mode(host);
6504 +
6505 +- return sd_write_long_data(host, mrq);
6506 +-}
6507 ++ err = sd_read_long_data(host, mrq);
6508 +
6509 +-static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
6510 +-{
6511 +- rtsx_pci_write_register(host->pcr, SD_CFG1,
6512 +- SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
6513 +-}
6514 ++ if (host->initial_mode)
6515 ++ sd_enable_initial_mode(host);
6516 +
6517 +-static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
6518 +-{
6519 +- rtsx_pci_write_register(host->pcr, SD_CFG1,
6520 +- SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
6521 ++ return err;
6522 ++ }
6523 ++
6524 ++ return sd_write_long_data(host, mrq);
6525 + }
6526 +
6527 + static void sd_normal_rw(struct realtek_pci_sdmmc *host,
6528 +diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
6529 +index 839965f7c717f..9a630ba37484e 100644
6530 +--- a/drivers/mmc/host/sdhci-of-arasan.c
6531 ++++ b/drivers/mmc/host/sdhci-of-arasan.c
6532 +@@ -159,6 +159,12 @@ struct sdhci_arasan_data {
6533 + /* Controller immediately reports SDHCI_CLOCK_INT_STABLE after enabling the
6534 + * internal clock even when the clock isn't stable */
6535 + #define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1)
6536 ++/*
6537 ++ * Some of the Arasan variations might not have timing requirements
6538 ++ * met at 25MHz for Default Speed mode; those controllers work at
6539 ++ * 19MHz instead.
6540 ++ */
6541 ++#define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2)
6542 + };
6543 +
6544 + struct sdhci_arasan_of_data {
6545 +@@ -267,7 +273,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
6546 + * through low speeds without power cycling.
6547 + */
6548 + sdhci_set_clock(host, host->max_clk);
6549 +- phy_power_on(sdhci_arasan->phy);
6550 ++ if (phy_power_on(sdhci_arasan->phy)) {
6551 ++ pr_err("%s: Cannot power on phy.\n",
6552 ++ mmc_hostname(host->mmc));
6553 ++ return;
6554 ++ }
6555 ++
6556 + sdhci_arasan->is_phy_on = true;
6557 +
6558 + /*
6559 +@@ -290,6 +301,16 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
6560 + sdhci_arasan->is_phy_on = false;
6561 + }
6562 +
6563 ++ if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN) {
6564 ++ /*
6565 ++ * Some of the Arasan variations might not have timing
6566 ++ * requirements met at 25MHz for Default Speed mode;
6567 ++ * those controllers work at 19MHz instead.
6568 ++ */
6569 ++ if (clock == DEFAULT_SPEED_MAX_DTR)
6570 ++ clock = (DEFAULT_SPEED_MAX_DTR * 19) / 25;
6571 ++ }
6572 ++
6573 + /* Set the Input and Output Clock Phase Delays */
6574 + if (clk_data->set_clk_delays)
6575 + clk_data->set_clk_delays(host);
6576 +@@ -307,7 +328,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
6577 + msleep(20);
6578 +
6579 + if (ctrl_phy) {
6580 +- phy_power_on(sdhci_arasan->phy);
6581 ++ if (phy_power_on(sdhci_arasan->phy)) {
6582 ++ pr_err("%s: Cannot power on phy.\n",
6583 ++ mmc_hostname(host->mmc));
6584 ++ return;
6585 ++ }
6586 ++
6587 + sdhci_arasan->is_phy_on = true;
6588 + }
6589 + }
6590 +@@ -463,7 +489,9 @@ static int sdhci_arasan_suspend(struct device *dev)
6591 + ret = phy_power_off(sdhci_arasan->phy);
6592 + if (ret) {
6593 + dev_err(dev, "Cannot power off phy.\n");
6594 +- sdhci_resume_host(host);
6595 ++ if (sdhci_resume_host(host))
6596 ++ dev_err(dev, "Cannot resume host.\n");
6597 ++
6598 + return ret;
6599 + }
6600 + sdhci_arasan->is_phy_on = false;
6601 +@@ -1598,6 +1626,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
6602 + if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
6603 + host->mmc_host_ops.execute_tuning =
6604 + arasan_zynqmp_execute_tuning;
6605 ++
6606 ++ sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN;
6607 + }
6608 +
6609 + arasan_dt_parse_clk_phases(dev, &sdhci_arasan->clk_data);
6610 +diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
6611 +index 8b49fd56cf964..29e8a546dcd60 100644
6612 +--- a/drivers/mtd/nand/raw/intel-nand-controller.c
6613 ++++ b/drivers/mtd/nand/raw/intel-nand-controller.c
6614 +@@ -631,19 +631,26 @@ static int ebu_nand_probe(struct platform_device *pdev)
6615 + ebu_host->clk_rate = clk_get_rate(ebu_host->clk);
6616 +
6617 + ebu_host->dma_tx = dma_request_chan(dev, "tx");
6618 +- if (IS_ERR(ebu_host->dma_tx))
6619 +- return dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
6620 +- "failed to request DMA tx chan!.\n");
6621 ++ if (IS_ERR(ebu_host->dma_tx)) {
6622 ++ ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
6623 ++ "failed to request DMA tx chan!\n");
6624 ++ goto err_disable_unprepare_clk;
6625 ++ }
6626 +
6627 + ebu_host->dma_rx = dma_request_chan(dev, "rx");
6628 +- if (IS_ERR(ebu_host->dma_rx))
6629 +- return dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx),
6630 +- "failed to request DMA rx chan!.\n");
6631 ++ if (IS_ERR(ebu_host->dma_rx)) {
6632 ++ ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx),
6633 ++ "failed to request DMA rx chan!\n");
6634 ++ ebu_host->dma_rx = NULL;
6635 ++ goto err_cleanup_dma;
6636 ++ }
6637 +
6638 + resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
6639 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
6640 +- if (!res)
6641 +- return -EINVAL;
6642 ++ if (!res) {
6643 ++ ret = -EINVAL;
6644 ++ goto err_cleanup_dma;
6645 ++ }
6646 + ebu_host->cs[cs].addr_sel = res->start;
6647 + writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN,
6648 + ebu_host->ebu + EBU_ADDR_SEL(cs));
6649 +@@ -653,7 +660,8 @@ static int ebu_nand_probe(struct platform_device *pdev)
6650 + mtd = nand_to_mtd(&ebu_host->chip);
6651 + if (!mtd->name) {
6652 + dev_err(ebu_host->dev, "NAND label property is mandatory\n");
6653 +- return -EINVAL;
6654 ++ ret = -EINVAL;
6655 ++ goto err_cleanup_dma;
6656 + }
6657 +
6658 + mtd->dev.parent = dev;
6659 +@@ -681,6 +689,7 @@ err_clean_nand:
6660 + nand_cleanup(&ebu_host->chip);
6661 + err_cleanup_dma:
6662 + ebu_dma_cleanup(ebu_host);
6663 ++err_disable_unprepare_clk:
6664 + clk_disable_unprepare(ebu_host->clk);
6665 +
6666 + return ret;
6667 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
6668 +index 9a184c99fbe44..3476ef2237360 100644
6669 +--- a/drivers/net/bonding/bond_main.c
6670 ++++ b/drivers/net/bonding/bond_main.c
6671 +@@ -2245,7 +2245,6 @@ static int __bond_release_one(struct net_device *bond_dev,
6672 + /* recompute stats just before removing the slave */
6673 + bond_get_stats(bond->dev, &bond->bond_stats);
6674 +
6675 +- bond_upper_dev_unlink(bond, slave);
6676 + /* unregister rx_handler early so bond_handle_frame wouldn't be called
6677 + * for this slave anymore.
6678 + */
6679 +@@ -2254,6 +2253,8 @@ static int __bond_release_one(struct net_device *bond_dev,
6680 + if (BOND_MODE(bond) == BOND_MODE_8023AD)
6681 + bond_3ad_unbind_slave(slave);
6682 +
6683 ++ bond_upper_dev_unlink(bond, slave);
6684 ++
6685 + if (bond_mode_can_use_xmit_hash(bond))
6686 + bond_update_slave_arr(bond, slave);
6687 +
6688 +diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
6689 +index e78026ef6d8cc..64d6dfa831220 100644
6690 +--- a/drivers/net/dsa/lantiq_gswip.c
6691 ++++ b/drivers/net/dsa/lantiq_gswip.c
6692 +@@ -843,7 +843,8 @@ static int gswip_setup(struct dsa_switch *ds)
6693 +
6694 + gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
6695 + GSWIP_MAC_CTRL_2p(cpu_port));
6696 +- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
6697 ++ gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
6698 ++ GSWIP_MAC_FLEN);
6699 + gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
6700 + GSWIP_BM_QUEUE_GCTRL);
6701 +
6702 +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
6703 +index 58964d22cb17d..e3a3499ba7a23 100644
6704 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
6705 ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
6706 +@@ -3231,12 +3231,6 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
6707 + &ethsw->fq[i].napi, dpaa2_switch_poll,
6708 + NAPI_POLL_WEIGHT);
6709 +
6710 +- err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
6711 +- if (err) {
6712 +- dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
6713 +- goto err_free_netdev;
6714 +- }
6715 +-
6716 + /* Setup IRQs */
6717 + err = dpaa2_switch_setup_irqs(sw_dev);
6718 + if (err)
6719 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
6720 +index 38b601031db46..95343f6d15e12 100644
6721 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
6722 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
6723 +@@ -10,7 +10,14 @@
6724 +
6725 + static u16 hclge_errno_to_resp(int errno)
6726 + {
6727 +- return abs(errno);
6728 ++ int resp = abs(errno);
6729 ++
6730 ++ /* The status for pf to vf msg cmd is u16, constrainted by HW.
6731 ++ * We need to keep the same type with it.
6732 ++ * The intput errno is the stander error code, it's safely to
6733 ++ * use a u16 to store the abs(errno).
6734 ++ */
6735 ++ return (u16)resp;
6736 + }
6737 +
6738 + /* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
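hclge_errno_to_resp() above truncates deliberately: per the new comment, the PF-to-VF mailbox status field is 16 bits wide in hardware, and Linux errno values are small, so (u16)abs(errno) loses nothing in practice. A minimal illustration (the errno values shown are the standard Linux ones):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Mirrors hclge_errno_to_resp(): the status field is u16. */
    static uint16_t errno_to_resp(int err)
    {
        int resp = abs(err);

        return (uint16_t)resp;
    }

    int main(void)
    {
        printf("-EIO (-5)         -> %u\n", errno_to_resp(-5));
        printf("-EOPNOTSUPP (-95) -> %u\n", errno_to_resp(-95));
        return 0;
    }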
6739 +diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
6740 +index 90793b36126e6..68c80f04113c8 100644
6741 +--- a/drivers/net/ethernet/intel/iavf/iavf.h
6742 ++++ b/drivers/net/ethernet/intel/iavf/iavf.h
6743 +@@ -186,12 +186,6 @@ enum iavf_state_t {
6744 + __IAVF_RUNNING, /* opened, working */
6745 + };
6746 +
6747 +-enum iavf_critical_section_t {
6748 +- __IAVF_IN_CRITICAL_TASK, /* cannot be interrupted */
6749 +- __IAVF_IN_CLIENT_TASK,
6750 +- __IAVF_IN_REMOVE_TASK, /* device being removed */
6751 +-};
6752 +-
6753 + #define IAVF_CLOUD_FIELD_OMAC 0x01
6754 + #define IAVF_CLOUD_FIELD_IMAC 0x02
6755 + #define IAVF_CLOUD_FIELD_IVLAN 0x04
6756 +@@ -236,6 +230,9 @@ struct iavf_adapter {
6757 + struct iavf_q_vector *q_vectors;
6758 + struct list_head vlan_filter_list;
6759 + struct list_head mac_filter_list;
6760 ++ struct mutex crit_lock;
6761 ++ struct mutex client_lock;
6762 ++ struct mutex remove_lock;
6763 + /* Lock to protect accesses to MAC and VLAN lists */
6764 + spinlock_t mac_vlan_list_lock;
6765 + char misc_vector_name[IFNAMSIZ + 9];
6766 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
6767 +index af43fbd8cb75e..edbeb27213f83 100644
6768 +--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
6769 ++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
6770 +@@ -1352,8 +1352,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
6771 + if (!fltr)
6772 + return -ENOMEM;
6773 +
6774 +- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
6775 +- &adapter->crit_section)) {
6776 ++ while (!mutex_trylock(&adapter->crit_lock)) {
6777 + if (--count == 0) {
6778 + kfree(fltr);
6779 + return -EINVAL;
6780 +@@ -1378,7 +1377,7 @@ ret:
6781 + if (err && fltr)
6782 + kfree(fltr);
6783 +
6784 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
6785 ++ mutex_unlock(&adapter->crit_lock);
6786 + return err;
6787 + }
6788 +
6789 +@@ -1563,8 +1562,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
6790 + return -EINVAL;
6791 + }
6792 +
6793 +- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
6794 +- &adapter->crit_section)) {
6795 ++ while (!mutex_trylock(&adapter->crit_lock)) {
6796 + if (--count == 0) {
6797 + kfree(rss_new);
6798 + return -EINVAL;
6799 +@@ -1600,7 +1598,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
6800 + if (!err)
6801 + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
6802 +
6803 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
6804 ++ mutex_unlock(&adapter->crit_lock);
6805 +
6806 + if (!rss_new_add)
6807 + kfree(rss_new);
6808 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
6809 +index 606a01ce40739..23762a7ef740b 100644
6810 +--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
6811 ++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
6812 +@@ -131,6 +131,27 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
6813 + return 0;
6814 + }
6815 +
6816 ++/**
6817 ++ * iavf_lock_timeout - try to lock mutex but give up after timeout
6818 ++ * @lock: mutex that should be locked
6819 ++ * @msecs: timeout in msecs
6820 ++ *
6821 ++ * Returns 0 on success, negative on failure
6822 ++ **/
6823 ++static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
6824 ++{
6825 ++ unsigned int wait, delay = 10;
6826 ++
6827 ++ for (wait = 0; wait < msecs; wait += delay) {
6828 ++ if (mutex_trylock(lock))
6829 ++ return 0;
6830 ++
6831 ++ msleep(delay);
6832 ++ }
6833 ++
6834 ++ return -1;
6835 ++}
6836 ++
6837 + /**
6838 + * iavf_schedule_reset - Set the flags and schedule a reset event
6839 + * @adapter: board private structure
6840 +@@ -1916,7 +1937,7 @@ static void iavf_watchdog_task(struct work_struct *work)
6841 + struct iavf_hw *hw = &adapter->hw;
6842 + u32 reg_val;
6843 +
6844 +- if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
6845 ++ if (!mutex_trylock(&adapter->crit_lock))
6846 + goto restart_watchdog;
6847 +
6848 + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
6849 +@@ -1934,8 +1955,7 @@ static void iavf_watchdog_task(struct work_struct *work)
6850 + adapter->state = __IAVF_STARTUP;
6851 + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
6852 + queue_delayed_work(iavf_wq, &adapter->init_task, 10);
6853 +- clear_bit(__IAVF_IN_CRITICAL_TASK,
6854 +- &adapter->crit_section);
6855 ++ mutex_unlock(&adapter->crit_lock);
6856 + /* Don't reschedule the watchdog, since we've restarted
6857 + * the init task. When init_task contacts the PF and
6858 + * gets everything set up again, it'll restart the
6859 +@@ -1945,14 +1965,13 @@ static void iavf_watchdog_task(struct work_struct *work)
6860 + }
6861 + adapter->aq_required = 0;
6862 + adapter->current_op = VIRTCHNL_OP_UNKNOWN;
6863 +- clear_bit(__IAVF_IN_CRITICAL_TASK,
6864 +- &adapter->crit_section);
6865 ++ mutex_unlock(&adapter->crit_lock);
6866 + queue_delayed_work(iavf_wq,
6867 + &adapter->watchdog_task,
6868 + msecs_to_jiffies(10));
6869 + goto watchdog_done;
6870 + case __IAVF_RESETTING:
6871 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
6872 ++ mutex_unlock(&adapter->crit_lock);
6873 + queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
6874 + return;
6875 + case __IAVF_DOWN:
6876 +@@ -1975,7 +1994,7 @@ static void iavf_watchdog_task(struct work_struct *work)
6877 + }
6878 + break;
6879 + case __IAVF_REMOVE:
6880 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
6881 ++ mutex_unlock(&adapter->crit_lock);
6882 + return;
6883 + default:
6884 + goto restart_watchdog;
6885 +@@ -1984,7 +2003,6 @@ static void iavf_watchdog_task(struct work_struct *work)
6886 + /* check for hw reset */
6887 + reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
6888 + if (!reg_val) {
6889 +- adapter->state = __IAVF_RESETTING;
6890 + adapter->flags |= IAVF_FLAG_RESET_PENDING;
6891 + adapter->aq_required = 0;
6892 + adapter->current_op = VIRTCHNL_OP_UNKNOWN;
6893 +@@ -1998,7 +2016,7 @@ watchdog_done:
6894 + if (adapter->state == __IAVF_RUNNING ||
6895 + adapter->state == __IAVF_COMM_FAILED)
6896 + iavf_detect_recover_hung(&adapter->vsi);
6897 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
6898 ++ mutex_unlock(&adapter->crit_lock);
6899 + restart_watchdog:
6900 + if (adapter->aq_required)
6901 + queue_delayed_work(iavf_wq, &adapter->watchdog_task,
6902 +@@ -2062,7 +2080,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
6903 + memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
6904 + iavf_shutdown_adminq(&adapter->hw);
6905 + adapter->netdev->flags &= ~IFF_UP;
6906 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
6907 ++ mutex_unlock(&adapter->crit_lock);
6908 + adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
6909 + adapter->state = __IAVF_DOWN;
6910 + wake_up(&adapter->down_waitqueue);
6911 +@@ -2095,11 +2113,14 @@ static void iavf_reset_task(struct work_struct *work)
6912 + /* When device is being removed it doesn't make sense to run the reset
6913 + * task, just return in such a case.
6914 + */
6915 +- if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
6916 ++ if (mutex_is_locked(&adapter->remove_lock))
6917 + return;
6918 +
6919 +- while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
6920 +- &adapter->crit_section))
6921 ++ if (iavf_lock_timeout(&adapter->crit_lock, 200)) {
6922 ++ schedule_work(&adapter->reset_task);
6923 ++ return;
6924 ++ }
6925 ++ while (!mutex_trylock(&adapter->client_lock))
6926 + usleep_range(500, 1000);
6927 + if (CLIENT_ENABLED(adapter)) {
6928 + adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
6929 +@@ -2151,7 +2172,7 @@ static void iavf_reset_task(struct work_struct *work)
6930 + dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
6931 + reg_val);
6932 + iavf_disable_vf(adapter);
6933 +- clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
6934 ++ mutex_unlock(&adapter->client_lock);
6935 + return; /* Do not attempt to reinit. It's dead, Jim. */
6936 + }
6937 +
6938 +@@ -2278,13 +2299,13 @@ continue_reset:
6939 + adapter->state = __IAVF_DOWN;
6940 + wake_up(&adapter->down_waitqueue);
6941 + }
6942 +- clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
6943 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
6944 ++ mutex_unlock(&adapter->client_lock);
6945 ++ mutex_unlock(&adapter->crit_lock);
6946 +
6947 + return;
6948 + reset_err:
6949 +- clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
6950 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
6951 ++ mutex_unlock(&adapter->client_lock);
6952 ++ mutex_unlock(&adapter->crit_lock);
6953 + dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
6954 + iavf_close(netdev);
6955 + }
6956 +@@ -2312,6 +2333,8 @@ static void iavf_adminq_task(struct work_struct *work)
6957 + if (!event.msg_buf)
6958 + goto out;
6959 +
6960 ++ if (iavf_lock_timeout(&adapter->crit_lock, 200))
6961 ++ goto freedom;
6962 + do {
6963 + ret = iavf_clean_arq_element(hw, &event, &pending);
6964 + v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
6965 +@@ -2325,6 +2348,7 @@ static void iavf_adminq_task(struct work_struct *work)
6966 + if (pending != 0)
6967 + memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
6968 + } while (pending);
6969 ++ mutex_unlock(&adapter->crit_lock);
6970 +
6971 + if ((adapter->flags &
6972 + (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
6973 +@@ -2391,7 +2415,7 @@ static void iavf_client_task(struct work_struct *work)
6974 + * later.
6975 + */
6976 +
6977 +- if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
6978 ++ if (!mutex_trylock(&adapter->client_lock))
6979 + return;
6980 +
6981 + if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
6982 +@@ -2414,7 +2438,7 @@ static void iavf_client_task(struct work_struct *work)
6983 + adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
6984 + }
6985 + out:
6986 +- clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
6987 ++ mutex_unlock(&adapter->client_lock);
6988 + }
6989 +
6990 + /**
6991 +@@ -3017,8 +3041,7 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
6992 + if (!filter)
6993 + return -ENOMEM;
6994 +
6995 +- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
6996 +- &adapter->crit_section)) {
6997 ++ while (!mutex_trylock(&adapter->crit_lock)) {
6998 + if (--count == 0)
6999 + goto err;
7000 + udelay(1);
7001 +@@ -3049,7 +3072,7 @@ err:
7002 + if (err)
7003 + kfree(filter);
7004 +
7005 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
7006 ++ mutex_unlock(&adapter->crit_lock);
7007 + return err;
7008 + }
7009 +
7010 +@@ -3196,8 +3219,7 @@ static int iavf_open(struct net_device *netdev)
7011 + return -EIO;
7012 + }
7013 +
7014 +- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
7015 +- &adapter->crit_section))
7016 ++ while (!mutex_trylock(&adapter->crit_lock))
7017 + usleep_range(500, 1000);
7018 +
7019 + if (adapter->state != __IAVF_DOWN) {
7020 +@@ -3232,7 +3254,7 @@ static int iavf_open(struct net_device *netdev)
7021 +
7022 + iavf_irq_enable(adapter, true);
7023 +
7024 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
7025 ++ mutex_unlock(&adapter->crit_lock);
7026 +
7027 + return 0;
7028 +
7029 +@@ -3244,7 +3266,7 @@ err_setup_rx:
7030 + err_setup_tx:
7031 + iavf_free_all_tx_resources(adapter);
7032 + err_unlock:
7033 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
7034 ++ mutex_unlock(&adapter->crit_lock);
7035 +
7036 + return err;
7037 + }
7038 +@@ -3268,8 +3290,7 @@ static int iavf_close(struct net_device *netdev)
7039 + if (adapter->state <= __IAVF_DOWN_PENDING)
7040 + return 0;
7041 +
7042 +- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
7043 +- &adapter->crit_section))
7044 ++ while (!mutex_trylock(&adapter->crit_lock))
7045 + usleep_range(500, 1000);
7046 +
7047 + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
7048 +@@ -3280,7 +3301,7 @@ static int iavf_close(struct net_device *netdev)
7049 + adapter->state = __IAVF_DOWN_PENDING;
7050 + iavf_free_traffic_irqs(adapter);
7051 +
7052 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
7053 ++ mutex_unlock(&adapter->crit_lock);
7054 +
7055 + /* We explicitly don't free resources here because the hardware is
7056 + * still active and can DMA into memory. Resources are cleared in
7057 +@@ -3629,6 +3650,10 @@ static void iavf_init_task(struct work_struct *work)
7058 + init_task.work);
7059 + struct iavf_hw *hw = &adapter->hw;
7060 +
7061 ++ if (iavf_lock_timeout(&adapter->crit_lock, 5000)) {
7062 ++ dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
7063 ++ return;
7064 ++ }
7065 + switch (adapter->state) {
7066 + case __IAVF_STARTUP:
7067 + if (iavf_startup(adapter) < 0)
7068 +@@ -3641,14 +3666,14 @@ static void iavf_init_task(struct work_struct *work)
7069 + case __IAVF_INIT_GET_RESOURCES:
7070 + if (iavf_init_get_resources(adapter) < 0)
7071 + goto init_failed;
7072 +- return;
7073 ++ goto out;
7074 + default:
7075 + goto init_failed;
7076 + }
7077 +
7078 + queue_delayed_work(iavf_wq, &adapter->init_task,
7079 + msecs_to_jiffies(30));
7080 +- return;
7081 ++ goto out;
7082 + init_failed:
7083 + if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
7084 + dev_err(&adapter->pdev->dev,
7085 +@@ -3657,9 +3682,11 @@ init_failed:
7086 + iavf_shutdown_adminq(hw);
7087 + adapter->state = __IAVF_STARTUP;
7088 + queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
7089 +- return;
7090 ++ goto out;
7091 + }
7092 + queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
7093 ++out:
7094 ++ mutex_unlock(&adapter->crit_lock);
7095 + }
7096 +
7097 + /**
7098 +@@ -3676,9 +3703,12 @@ static void iavf_shutdown(struct pci_dev *pdev)
7099 + if (netif_running(netdev))
7100 + iavf_close(netdev);
7101 +
7102 ++ if (iavf_lock_timeout(&adapter->crit_lock, 5000))
7103 ++ dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
7104 + /* Prevent the watchdog from running. */
7105 + adapter->state = __IAVF_REMOVE;
7106 + adapter->aq_required = 0;
7107 ++ mutex_unlock(&adapter->crit_lock);
7108 +
7109 + #ifdef CONFIG_PM
7110 + pci_save_state(pdev);
7111 +@@ -3772,6 +3802,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7112 + /* set up the locks for the AQ, do this only once in probe
7113 + * and destroy them only once in remove
7114 + */
7115 ++ mutex_init(&adapter->crit_lock);
7116 ++ mutex_init(&adapter->client_lock);
7117 ++ mutex_init(&adapter->remove_lock);
7118 + mutex_init(&hw->aq.asq_mutex);
7119 + mutex_init(&hw->aq.arq_mutex);
7120 +
7121 +@@ -3823,8 +3856,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
7122 +
7123 + netif_device_detach(netdev);
7124 +
7125 +- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
7126 +- &adapter->crit_section))
7127 ++ while (!mutex_trylock(&adapter->crit_lock))
7128 + usleep_range(500, 1000);
7129 +
7130 + if (netif_running(netdev)) {
7131 +@@ -3835,7 +3867,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
7132 + iavf_free_misc_irq(adapter);
7133 + iavf_reset_interrupt_capability(adapter);
7134 +
7135 +- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
7136 ++ mutex_unlock(&adapter->crit_lock);
7137 +
7138 + return 0;
7139 + }
7140 +@@ -3897,7 +3929,7 @@ static void iavf_remove(struct pci_dev *pdev)
7141 + struct iavf_hw *hw = &adapter->hw;
7142 + int err;
7143 + /* Indicate we are in remove and not to run reset_task */
7144 +- set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
7145 ++ mutex_lock(&adapter->remove_lock);
7146 + cancel_delayed_work_sync(&adapter->init_task);
7147 + cancel_work_sync(&adapter->reset_task);
7148 + cancel_delayed_work_sync(&adapter->client_task);
7149 +@@ -3912,10 +3944,6 @@ static void iavf_remove(struct pci_dev *pdev)
7150 + err);
7151 + }
7152 +
7153 +- /* Shut down all the garbage mashers on the detention level */
7154 +- adapter->state = __IAVF_REMOVE;
7155 +- adapter->aq_required = 0;
7156 +- adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
7157 + iavf_request_reset(adapter);
7158 + msleep(50);
7159 + /* If the FW isn't responding, kick it once, but only once. */
7160 +@@ -3923,6 +3951,13 @@ static void iavf_remove(struct pci_dev *pdev)
7161 + iavf_request_reset(adapter);
7162 + msleep(50);
7163 + }
7164 ++ if (iavf_lock_timeout(&adapter->crit_lock, 5000))
7165 ++ dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
7166 ++
7167 ++ /* Shut down all the garbage mashers on the detention level */
7168 ++ adapter->state = __IAVF_REMOVE;
7169 ++ adapter->aq_required = 0;
7170 ++ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
7171 + iavf_free_all_tx_resources(adapter);
7172 + iavf_free_all_rx_resources(adapter);
7173 + iavf_misc_irq_disable(adapter);
7174 +@@ -3942,6 +3977,11 @@ static void iavf_remove(struct pci_dev *pdev)
7175 + /* destroy the locks only once, here */
7176 + mutex_destroy(&hw->aq.arq_mutex);
7177 + mutex_destroy(&hw->aq.asq_mutex);
7178 ++ mutex_destroy(&adapter->client_lock);
7179 ++ mutex_unlock(&adapter->crit_lock);
7180 ++ mutex_destroy(&adapter->crit_lock);
7181 ++ mutex_unlock(&adapter->remove_lock);
7182 ++ mutex_destroy(&adapter->remove_lock);
7183 +
7184 + iounmap(hw->hw_addr);
7185 + pci_release_regions(pdev);
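The iavf changes above replace the open-coded test_and_set_bit() critical sections with proper mutexes, and the new iavf_lock_timeout() polls mutex_trylock() so long-running work items can give up instead of blocking forever. A user-space pthreads analog of that helper (the 10ms polling step matches the kernel version; everything else is illustrative):

    #include <stdio.h>
    #include <pthread.h>
    #include <unistd.h>

    /* Try to take 'lock' for up to 'msecs' milliseconds; 0 on success. */
    static int lock_timeout(pthread_mutex_t *lock, unsigned int msecs)
    {
        unsigned int wait, delay = 10;

        for (wait = 0; wait < msecs; wait += delay) {
            if (pthread_mutex_trylock(lock) == 0)
                return 0;
            usleep(delay * 1000);
        }
        return -1;
    }

    int main(void)
    {
        pthread_mutex_t crit = PTHREAD_MUTEX_INITIALIZER;

        if (lock_timeout(&crit, 200) == 0) {
            printf("acquired\n");
            pthread_mutex_unlock(&crit);
        } else {
            printf("timed out\n");
        }
        return 0;
    }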
7186 +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
7187 +index 9b85fdf012977..3e301c5c5270a 100644
7188 +--- a/drivers/net/ethernet/intel/igc/igc_main.c
7189 ++++ b/drivers/net/ethernet/intel/igc/igc_main.c
7190 +@@ -4402,6 +4402,7 @@ static irqreturn_t igc_msix_ring(int irq, void *data)
7191 + */
7192 + static int igc_request_msix(struct igc_adapter *adapter)
7193 + {
7194 ++ unsigned int num_q_vectors = adapter->num_q_vectors;
7195 + int i = 0, err = 0, vector = 0, free_vector = 0;
7196 + struct net_device *netdev = adapter->netdev;
7197 +
7198 +@@ -4410,7 +4411,13 @@ static int igc_request_msix(struct igc_adapter *adapter)
7199 + if (err)
7200 + goto err_out;
7201 +
7202 +- for (i = 0; i < adapter->num_q_vectors; i++) {
7203 ++ if (num_q_vectors > MAX_Q_VECTORS) {
7204 ++ num_q_vectors = MAX_Q_VECTORS;
7205 ++ dev_warn(&adapter->pdev->dev,
7206 ++ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
7207 ++ adapter->num_q_vectors, MAX_Q_VECTORS);
7208 ++ }
7209 ++ for (i = 0; i < num_q_vectors; i++) {
7210 + struct igc_q_vector *q_vector = adapter->q_vector[i];
7211 +
7212 + vector++;
7213 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
7214 +index e0d1af9e7770d..6c64fdbef0df1 100644
7215 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
7216 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
7217 +@@ -1201,7 +1201,22 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
7218 + /* Enable backpressure for RQ aura */
7219 + if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
7220 + aq->aura.bp_ena = 0;
7221 ++ /* If NIX1 LF is attached then specify NIX1_RX.
7222 ++ *
7223 ++ * Below NPA_AURA_S[BP_ENA] is set according to the
7224 ++ * NPA_BPINTF_E enumeration given as:
7225 ++ * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
7226 ++ * NIX0_RX is 0x0 + 0*0x1 = 0
7227 ++ * NIX1_RX is 0x0 + 1*0x1 = 1
7228 ++ * The HRM, however, states that
7229 ++ * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
7230 ++ * NIX-RX based on [BP] level. One bit per NIX-RX; index
7231 ++ * enumerated by NPA_BPINTF_E."
7232 ++ */
7233 ++ if (pfvf->nix_blkaddr == BLKADDR_NIX1)
7234 ++ aq->aura.bp_ena = 1;
7235 + aq->aura.nix0_bpid = pfvf->bpid[0];
7236 ++
7237 + /* Set backpressure level for RQ's Aura */
7238 + aq->aura.bp = RQ_BP_LVL_AURA;
7239 + }
7240 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
7241 +index 9d79c5ec31e9f..db5dfff585c99 100644
7242 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
7243 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
7244 +@@ -877,7 +877,7 @@ static void cb_timeout_handler(struct work_struct *work)
7245 + ent->ret = -ETIMEDOUT;
7246 + mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
7247 + ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
7248 +- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
7249 ++ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
7250 +
7251 + out:
7252 + cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */
7253 +@@ -994,7 +994,7 @@ static void cmd_work_handler(struct work_struct *work)
7254 + MLX5_SET(mbox_out, ent->out, status, status);
7255 + MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
7256 +
7257 +- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
7258 ++ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
7259 + return;
7260 + }
7261 +
7262 +@@ -1008,7 +1008,7 @@ static void cmd_work_handler(struct work_struct *work)
7263 + poll_timeout(ent);
7264 + /* make sure we read the descriptor after ownership is SW */
7265 + rmb();
7266 +- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
7267 ++ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
7268 + }
7269 + }
7270 +
7271 +@@ -1068,7 +1068,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
7272 + mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
7273 +
7274 + ent->ret = -ETIMEDOUT;
7275 +- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
7276 ++ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
7277 + }
7278 +
7279 + static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
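The mlx5 hunk above widens the shift constant because mlx5_cmd_comp_handler() takes a u64 completion vector: on 32-bit builds, 'unsigned long' is 32 bits, so a mask built with 1UL can never cover bits above 31. The width difference, shown with fixed-width types to stay well-defined everywhere:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int idx = 40;  /* a hypothetical slot above bit 31 */

        /* What a 32-bit 'unsigned long' can represent vs. what a u64
         * completion vector needs. */
        uint32_t mask32 = (idx < 32) ? (UINT32_C(1) << idx) : 0;
        uint64_t mask64 = UINT64_C(1) << idx;  /* the 1ULL version */

        printf("32-bit mask: %#x\n", mask32);
        printf("64-bit mask: %#llx\n", (unsigned long long)mask64);
        return 0;
    }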
7280 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
7281 +index 43356fad53deb..ffdfb5a94b14b 100644
7282 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
7283 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
7284 +@@ -846,9 +846,9 @@ again:
7285 + new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
7286 + ste_location, send_ste_list);
7287 + if (!new_htbl) {
7288 +- mlx5dr_htbl_put(cur_htbl);
7289 + mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
7290 + cur_htbl->chunk_size);
7291 ++ mlx5dr_htbl_put(cur_htbl);
7292 + } else {
7293 + cur_htbl = new_htbl;
7294 + }
7295 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
7296 +index 9df0e73d1c358..69b49deb66b22 100644
7297 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
7298 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
7299 +@@ -620,6 +620,7 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
7300 +
7301 + MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
7302 + MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
7303 ++ MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */
7304 +
7305 + MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
7306 + MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
7307 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
7308 +index eeb30680b4dcf..0a0a26376bea0 100644
7309 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
7310 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
7311 +@@ -1697,7 +1697,7 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
7312 + case NFP_NET_META_RESYNC_INFO:
7313 + if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
7314 + pkt_len))
7315 +- return NULL;
7316 ++ return false;
7317 + data += sizeof(struct nfp_net_tls_resync_req);
7318 + break;
7319 + default:
7320 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
7321 +index 28dd0ed85a824..f7dc8458cde86 100644
7322 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
7323 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
7324 +@@ -289,10 +289,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
7325 + val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
7326 + break;
7327 + default:
7328 +- dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
7329 +- phy_modes(gmac->phy_mode));
7330 +- err = -EINVAL;
7331 +- goto err_remove_config_dt;
7332 ++ goto err_unsupported_phy;
7333 + }
7334 + regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
7335 +
7336 +@@ -309,10 +306,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
7337 + NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
7338 + break;
7339 + default:
7340 +- dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
7341 +- phy_modes(gmac->phy_mode));
7342 +- err = -EINVAL;
7343 +- goto err_remove_config_dt;
7344 ++ goto err_unsupported_phy;
7345 + }
7346 + regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
7347 +
7348 +@@ -329,8 +323,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
7349 + NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
7350 + break;
7351 + default:
7352 +- /* We don't get here; the switch above will have errored out */
7353 +- unreachable();
7354 ++ goto err_unsupported_phy;
7355 + }
7356 + regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
7357 +
7358 +@@ -361,6 +354,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
7359 +
7360 + return 0;
7361 +
7362 ++err_unsupported_phy:
7363 ++ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
7364 ++ phy_modes(gmac->phy_mode));
7365 ++ err = -EINVAL;
7366 ++
7367 + err_remove_config_dt:
7368 + stmmac_remove_config_dt(pdev, plat_dat);
7369 +
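
All three switch statements in the probe path now share one err_unsupported_phy label, which prints the message once, sets err, and falls through into the existing err_remove_config_dt cleanup. A compact sketch of this goto-cleanup idiom (generic names, not the driver's):

    #include <stdio.h>

    static int probe(int mode)
    {
        int err;

        switch (mode) {
        case 0:
            break;          /* supported */
        default:
            goto err_unsupported;
        }

        /* ... further setup, each failure jumping to cleanup ... */
        return 0;

    err_unsupported:
        fprintf(stderr, "Unsupported mode: %d\n", mode);
        err = -22;          /* -EINVAL */
        /* falls through into the shared cleanup of anything
         * acquired earlier in the function */
        return err;
    }

    int main(void)
    {
        return probe(7) ? 1 : 0;
    }
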
7370 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7371 +index a5285a8a9eaeb..4d92fcfe703c9 100644
7372 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7373 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7374 +@@ -5358,7 +5358,7 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
7375 + struct stmmac_channel *ch =
7376 + container_of(napi, struct stmmac_channel, rxtx_napi);
7377 + struct stmmac_priv *priv = ch->priv_data;
7378 +- int rx_done, tx_done;
7379 ++ int rx_done, tx_done, rxtx_done;
7380 + u32 chan = ch->index;
7381 +
7382 + priv->xstats.napi_poll++;
7383 +@@ -5368,14 +5368,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
7384 +
7385 + rx_done = stmmac_rx_zc(priv, budget, chan);
7386 +
7387 ++ rxtx_done = max(tx_done, rx_done);
7388 ++
7389 + /* If either TX or RX work is not complete, return budget
7390 + * and keep polling
7391 + */
7392 +- if (tx_done >= budget || rx_done >= budget)
7393 ++ if (rxtx_done >= budget)
7394 + return budget;
7395 +
7396 + /* all work done, exit the polling mode */
7397 +- if (napi_complete_done(napi, rx_done)) {
7398 ++ if (napi_complete_done(napi, rxtx_done)) {
7399 + unsigned long flags;
7400 +
7401 + spin_lock_irqsave(&ch->lock, flags);
7402 +@@ -5386,7 +5388,7 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
7403 + spin_unlock_irqrestore(&ch->lock, flags);
7404 + }
7405 +
7406 +- return min(rx_done, budget - 1);
7407 ++ return min(rxtx_done, budget - 1);
7408 + }
7409 +
7410 + /**
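
The rxtx poller has to hand napi_complete_done() a single work count; completing with only rx_done, or exiting only when one direction hit the budget, could park the queue while the other direction still had work. Taking max(tx_done, rx_done) keeps NAPI scheduled until both are idle. The accounting, schematically (assumes both counts are already clamped to budget):

    #include <stdio.h>

    /* Returns budget to stay in polling mode, otherwise the combined
     * work count clamped strictly below budget, as NAPI requires. */
    static int poll_rxtx(int budget, int tx_done, int rx_done)
    {
        int rxtx_done = tx_done > rx_done ? tx_done : rx_done;

        if (rxtx_done >= budget)
            return budget;      /* more work pending: keep polling */

        return rxtx_done < budget - 1 ? rxtx_done : budget - 1;
    }

    int main(void)
    {
        printf("%d\n", poll_rxtx(64, 3, 10));   /* 10: exits polling */
        printf("%d\n", poll_rxtx(64, 64, 3));   /* 64: stays scheduled */
        return 0;
    }
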
7411 +diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
7412 +index ec5db481c9cd0..15e13d6dc5db5 100644
7413 +--- a/drivers/net/ethernet/wiznet/w5100.c
7414 ++++ b/drivers/net/ethernet/wiznet/w5100.c
7415 +@@ -1052,6 +1052,8 @@ static int w5100_mmio_probe(struct platform_device *pdev)
7416 + mac_addr = data->mac_addr;
7417 +
7418 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
7419 ++ if (!mem)
7420 ++ return -EINVAL;
7421 + if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
7422 + ops = &w5100_mmio_indirect_ops;
7423 + else
7424 +diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
7425 +index 525cdf28d9ea7..1e43748dcb401 100644
7426 +--- a/drivers/net/ipa/ipa_cmd.c
7427 ++++ b/drivers/net/ipa/ipa_cmd.c
7428 +@@ -159,35 +159,45 @@ static void ipa_cmd_validate_build(void)
7429 + BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
7430 + #undef TABLE_COUNT_MAX
7431 + #undef TABLE_SIZE
7432 +-}
7433 +
7434 +-#ifdef IPA_VALIDATE
7435 ++ /* Hashed and non-hashed fields are assumed to be the same size */
7436 ++ BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
7437 ++ field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
7438 ++ BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
7439 ++ field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));
7440 ++}
7441 +
7442 + /* Validate a memory region holding a table */
7443 +-bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
7444 +- bool route, bool ipv6, bool hashed)
7445 ++bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
7446 + {
7447 ++ u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
7448 ++ u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
7449 ++ const char *table = route ? "route" : "filter";
7450 + struct device *dev = &ipa->pdev->dev;
7451 +- u32 offset_max;
7452 +
7453 +- offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
7454 +- : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
7455 ++ /* Size must fit in the immediate command field that holds it */
7456 ++ if (mem->size > size_max) {
7457 ++ dev_err(dev, "%s table region size too large\n", table);
7458 ++ dev_err(dev, " (0x%04x > 0x%04x)\n",
7459 ++ mem->size, size_max);
7460 ++
7461 ++ return false;
7462 ++ }
7463 ++
7464 ++ /* Offset must fit in the immediate command field that holds it */
7465 + if (mem->offset > offset_max ||
7466 + ipa->mem_offset > offset_max - mem->offset) {
7467 +- dev_err(dev, "IPv%c %s%s table region offset too large\n",
7468 +- ipv6 ? '6' : '4', hashed ? "hashed " : "",
7469 +- route ? "route" : "filter");
7470 ++ dev_err(dev, "%s table region offset too large\n", table);
7471 + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
7472 + ipa->mem_offset, mem->offset, offset_max);
7473 +
7474 + return false;
7475 + }
7476 +
7477 ++ /* Entire memory range must fit within IPA-local memory */
7478 + if (mem->offset > ipa->mem_size ||
7479 + mem->size > ipa->mem_size - mem->offset) {
7480 +- dev_err(dev, "IPv%c %s%s table region out of range\n",
7481 +- ipv6 ? '6' : '4', hashed ? "hashed " : "",
7482 +- route ? "route" : "filter");
7483 ++ dev_err(dev, "%s table region out of range\n", table);
7484 + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
7485 + mem->offset, mem->size, ipa->mem_size);
7486 +
7487 +@@ -197,6 +207,8 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
7488 + return true;
7489 + }
7490 +
7491 ++#ifdef IPA_VALIDATE
7492 ++
7493 + /* Validate the memory region that holds headers */
7494 + static bool ipa_cmd_header_valid(struct ipa *ipa)
7495 + {
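
The new BUILD_BUG_ON()s pin down the assumption that lets ipa_cmd_table_valid() drop its hashed and ipv6 parameters: the hashed and non-hashed immediate-command fields have identical widths, so validating against the non-hashed limits covers both. In plain C11 the same compile-time check is a _Static_assert (the masks below are hypothetical stand-ins):

    #include <stdint.h>

    #define HASH_SIZE_FMASK   UINT32_C(0x7fff)  /* hypothetical masks */
    #define NHASH_SIZE_FMASK  UINT32_C(0x7fff)

    /* Compilation fails the moment the layouts diverge, which is
     * exactly when single-limit validation would become wrong. */
    _Static_assert(HASH_SIZE_FMASK == NHASH_SIZE_FMASK,
                   "hashed and non-hashed size fields must match");

    int main(void) { return 0; }
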
7496 +diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
7497 +index b99262281f41c..ea723419c826b 100644
7498 +--- a/drivers/net/ipa/ipa_cmd.h
7499 ++++ b/drivers/net/ipa/ipa_cmd.h
7500 +@@ -57,20 +57,18 @@ struct ipa_cmd_info {
7501 + enum dma_data_direction direction;
7502 + };
7503 +
7504 +-#ifdef IPA_VALIDATE
7505 +-
7506 + /**
7507 + * ipa_cmd_table_valid() - Validate a memory region holding a table
7508 + * @ipa: - IPA pointer
7509 + * @mem: - IPA memory region descriptor
7510 + * @route: - Whether the region holds a route or filter table
7511 +- * @ipv6: - Whether the table is for IPv6 or IPv4
7512 +- * @hashed: - Whether the table is hashed or non-hashed
7513 + *
7514 + * Return: true if region is valid, false otherwise
7515 + */
7516 + bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
7517 +- bool route, bool ipv6, bool hashed);
7518 ++ bool route);
7519 ++
7520 ++#ifdef IPA_VALIDATE
7521 +
7522 + /**
7523 + * ipa_cmd_data_valid() - Validate command-related configuration is valid
7524 +@@ -82,13 +80,6 @@ bool ipa_cmd_data_valid(struct ipa *ipa);
7525 +
7526 + #else /* !IPA_VALIDATE */
7527 +
7528 +-static inline bool ipa_cmd_table_valid(struct ipa *ipa,
7529 +- const struct ipa_mem *mem, bool route,
7530 +- bool ipv6, bool hashed)
7531 +-{
7532 +- return true;
7533 +-}
7534 +-
7535 + static inline bool ipa_cmd_data_valid(struct ipa *ipa)
7536 + {
7537 + return true;
7538 +diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c
7539 +index 05806ceae8b54..157f8d47058b5 100644
7540 +--- a/drivers/net/ipa/ipa_data-v4.11.c
7541 ++++ b/drivers/net/ipa/ipa_data-v4.11.c
7542 +@@ -346,18 +346,13 @@ static const struct ipa_mem_data ipa_mem_data = {
7543 + static const struct ipa_interconnect_data ipa_interconnect_data[] = {
7544 + {
7545 + .name = "memory",
7546 +- .peak_bandwidth = 465000, /* 465 MBps */
7547 +- .average_bandwidth = 80000, /* 80 MBps */
7548 +- },
7549 +- /* Average rate is unused for the next two interconnects */
7550 +- {
7551 +- .name = "imem",
7552 +- .peak_bandwidth = 68570, /* 68.57 MBps */
7553 +- .average_bandwidth = 80000, /* 80 MBps (unused?) */
7554 ++ .peak_bandwidth = 600000, /* 600 MBps */
7555 ++ .average_bandwidth = 150000, /* 150 MBps */
7556 + },
7557 ++ /* Average rate is unused for the next interconnect */
7558 + {
7559 + .name = "config",
7560 +- .peak_bandwidth = 30000, /* 30 MBps */
7561 ++ .peak_bandwidth = 74000, /* 74 MBps */
7562 + .average_bandwidth = 0, /* unused */
7563 + },
7564 + };
7565 +diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c
7566 +index e41be790f45e5..75b50a50e3487 100644
7567 +--- a/drivers/net/ipa/ipa_data-v4.9.c
7568 ++++ b/drivers/net/ipa/ipa_data-v4.9.c
7569 +@@ -392,18 +392,13 @@ static const struct ipa_mem_data ipa_mem_data = {
7570 + /* Interconnect rates are in 1000 byte/second units */
7571 + static const struct ipa_interconnect_data ipa_interconnect_data[] = {
7572 + {
7573 +- .name = "ipa_to_llcc",
7574 ++ .name = "memory",
7575 + .peak_bandwidth = 600000, /* 600 MBps */
7576 + .average_bandwidth = 150000, /* 150 MBps */
7577 + },
7578 +- {
7579 +- .name = "llcc_to_ebi1",
7580 +- .peak_bandwidth = 1804000, /* 1.804 GBps */
7581 +- .average_bandwidth = 150000, /* 150 MBps */
7582 +- },
7583 + /* Average rate is unused for the next interconnect */
7584 + {
7585 +- .name = "appss_to_ipa",
7586 ++ .name = "config",
7587 + .peak_bandwidth = 74000, /* 74 MBps */
7588 + .average_bandwidth = 0, /* unused */
7589 + },
7590 +diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
7591 +index 3168d72f42450..618a84cf669ac 100644
7592 +--- a/drivers/net/ipa/ipa_table.c
7593 ++++ b/drivers/net/ipa/ipa_table.c
7594 +@@ -174,7 +174,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
7595 + size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
7596 + }
7597 +
7598 +- if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
7599 ++ if (!ipa_cmd_table_valid(ipa, mem, route))
7600 + return false;
7601 +
7602 + /* mem->size >= size is sufficient, but we'll demand more */
7603 +diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
7604 +index f7a2ec150e542..211b5476a6f51 100644
7605 +--- a/drivers/net/phy/dp83822.c
7606 ++++ b/drivers/net/phy/dp83822.c
7607 +@@ -326,11 +326,9 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
7608 +
7609 + static int dp8382x_disable_wol(struct phy_device *phydev)
7610 + {
7611 +- int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
7612 +- DP83822_WOL_SECURE_ON;
7613 +-
7614 +- return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
7615 +- MII_DP83822_WOL_CFG, value);
7616 ++ return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
7617 ++ DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
7618 ++ DP83822_WOL_SECURE_ON);
7619 + }
7620 +
7621 + static int dp83822_read_status(struct phy_device *phydev)
7622 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
7623 +index b4885a700296e..b0a4ca3559fd8 100644
7624 +--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
7625 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
7626 +@@ -3351,7 +3351,8 @@ found:
7627 + "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
7628 + cptr, code, reference, length, major, minor);
7629 + if ((!AR_SREV_9485(ah) && length >= 1024) ||
7630 +- (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
7631 ++ (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485) ||
7632 ++ (length > cptr)) {
7633 + ath_dbg(common, EEPROM, "Skipping bad header\n");
7634 + cptr -= COMP_HDR_LEN;
7635 + continue;
7636 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
7637 +index 2ca3b86714a9d..172081ffe4774 100644
7638 +--- a/drivers/net/wireless/ath/ath9k/hw.c
7639 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
7640 +@@ -1621,7 +1621,6 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
7641 + ath9k_hw_gpio_request_out(ah, i, NULL,
7642 + AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
7643 + ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
7644 +- ath9k_hw_gpio_free(ah, i);
7645 + }
7646 + }
7647 +
7648 +@@ -2728,14 +2727,17 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
7649 + static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
7650 + const char *label)
7651 + {
7652 ++ int err;
7653 ++
7654 + if (ah->caps.gpio_requested & BIT(gpio))
7655 + return;
7656 +
7657 +- /* may be requested by BSP, free anyway */
7658 +- gpio_free(gpio);
7659 +-
7660 +- if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label))
7661 ++ err = gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label);
7662 ++ if (err) {
7663 ++ ath_err(ath9k_hw_common(ah), "request GPIO%d failed:%d\n",
7664 ++ gpio, err);
7665 + return;
7666 ++ }
7667 +
7668 + ah->caps.gpio_requested |= BIT(gpio);
7669 + }
7670 +diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
7671 +index dabed4e3ca457..e8c772a671764 100644
7672 +--- a/drivers/net/wireless/ath/wcn36xx/main.c
7673 ++++ b/drivers/net/wireless/ath/wcn36xx/main.c
7674 +@@ -405,13 +405,14 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
7675 + wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
7676 + ch);
7677 +
7678 +- if (wcn->sw_scan_opchannel == ch) {
7679 ++ if (wcn->sw_scan_opchannel == ch && wcn->sw_scan_channel) {
7680 + /* If channel is the initial operating channel, we may
7681 + * want to receive/transmit regular data packets, then
7682 + * simply stop the scan session and exit PS mode.
7683 + */
7684 + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
7685 + wcn->sw_scan_vif);
7686 ++ wcn->sw_scan_channel = 0;
7687 + } else if (wcn->sw_scan) {
7688 + /* A scan is ongoing, do not change the operating
7689 + * channel, but start a scan session on the channel.
7690 +@@ -419,6 +420,7 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
7691 + wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN,
7692 + wcn->sw_scan_vif);
7693 + wcn36xx_smd_start_scan(wcn, ch);
7694 ++ wcn->sw_scan_channel = ch;
7695 + } else {
7696 + wcn36xx_change_opchannel(wcn, ch);
7697 + }
7698 +@@ -699,6 +701,7 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
7699 +
7700 + wcn->sw_scan = true;
7701 + wcn->sw_scan_vif = vif;
7702 ++ wcn->sw_scan_channel = 0;
7703 + if (vif_priv->sta_assoc)
7704 + wcn->sw_scan_opchannel = WCN36XX_HW_CHANNEL(wcn);
7705 + else
7706 +diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
7707 +index 1b831157ede17..cab196bb38cd4 100644
7708 +--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
7709 ++++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
7710 +@@ -287,6 +287,10 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
7711 + status.rate_idx = 0;
7712 + }
7713 +
7714 ++ if (ieee80211_is_beacon(hdr->frame_control) ||
7715 ++ ieee80211_is_probe_resp(hdr->frame_control))
7716 ++ status.boottime_ns = ktime_get_boottime_ns();
7717 ++
7718 + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
7719 +
7720 + if (ieee80211_is_beacon(hdr->frame_control)) {
7721 +diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
7722 +index 71fa9992b118c..d0fcce86903ae 100644
7723 +--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
7724 ++++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
7725 +@@ -232,6 +232,7 @@ struct wcn36xx {
7726 + struct cfg80211_scan_request *scan_req;
7727 + bool sw_scan;
7728 + u8 sw_scan_opchannel;
7729 ++ u8 sw_scan_channel;
7730 + struct ieee80211_vif *sw_scan_vif;
7731 + struct mutex scan_lock;
7732 + bool scan_aborted;
7733 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
7734 +index b2605aefc2909..8b200379f7c20 100644
7735 +--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
7736 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
7737 +@@ -1,6 +1,6 @@
7738 + /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
7739 + /*
7740 +- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
7741 ++ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
7742 + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
7743 + * Copyright (C) 2016-2017 Intel Deutschland GmbH
7744 + */
7745 +@@ -874,7 +874,7 @@ struct iwl_scan_probe_params_v3 {
7746 + u8 reserved;
7747 + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
7748 + __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
7749 +- u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE];
7750 ++ u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN];
7751 + } __packed; /* SCAN_PROBE_PARAMS_API_S_VER_3 */
7752 +
7753 + /**
7754 +@@ -894,7 +894,7 @@ struct iwl_scan_probe_params_v4 {
7755 + __le16 reserved;
7756 + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
7757 + __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
7758 +- u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE];
7759 ++ u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN];
7760 + } __packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */
7761 +
7762 + #define SCAN_MAX_NUM_CHANS_V3 67
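
Both probe-params hunks swap the array dimensions: the layout wanted is SCAN_BSSID_MAX_SIZE consecutive 6-byte MACs, but [ETH_ALEN][SCAN_BSSID_MAX_SIZE] gives 6 rows of 16 bytes, so every entry past the first lands at the wrong offset even though the total size is identical. A standalone illustration:

    #include <stdio.h>

    #define ETH_ALEN   6
    #define MAX_BSSID  16   /* stands in for SCAN_BSSID_MAX_SIZE */

    int main(void)
    {
        unsigned char right[MAX_BSSID][ETH_ALEN];  /* entry i at i * 6 */
        unsigned char wrong[ETH_ALEN][MAX_BSSID];  /* "entry" i at i * 16 */

        /* Same footprint, different strides: indexing the wrong one
         * by BSSID scatters the MAC bytes across the buffer. */
        printf("right stride: %zu\n", sizeof(right[0]));   /* 6 */
        printf("wrong stride: %zu\n", sizeof(wrong[0]));   /* 16 */
        return 0;
    }
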
7763 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
7764 +index cc4e18ca95662..a27849419d29e 100644
7765 +--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
7766 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
7767 +@@ -2314,7 +2314,7 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
7768 + return;
7769 +
7770 + if (dump_data->monitor_only)
7771 +- dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
7772 ++ dump_mask &= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR);
7773 +
7774 + fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
7775 + file_len = le32_to_cpu(dump_file->file_len);
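
IWL_FW_ERROR_DUMP_FW_MONITOR is an index into the dump mask, not a mask itself, so anding with the raw enum value kept an unrelated bit; BIT(index) builds the intended single-bit mask. In miniature:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    enum dump_type { DUMP_CSR = 1, DUMP_FW_MONITOR = 2, DUMP_MEM = 3 };

    int main(void)
    {
        unsigned int mask = BIT(DUMP_CSR) | BIT(DUMP_FW_MONITOR) | BIT(DUMP_MEM);

        /* Wrong: '& 2' keeps bit 1, which is DUMP_CSR's bit. */
        printf("raw enum:  %#x\n", mask & DUMP_FW_MONITOR);
        /* Right: keep only the monitor entry. */
        printf("BIT(enum): %#x\n", mask & BIT(DUMP_FW_MONITOR));
        return 0;
    }
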
7776 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
7777 +index fd5e089616515..7f0c821898082 100644
7778 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
7779 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
7780 +@@ -1005,8 +1005,10 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
7781 + return -ENOMEM;
7782 +
7783 + #ifdef CONFIG_IWLWIFI_DEBUGFS
7784 +- if (mvm->beacon_inject_active)
7785 ++ if (mvm->beacon_inject_active) {
7786 ++ dev_kfree_skb(beacon);
7787 + return -EBUSY;
7788 ++ }
7789 + #endif
7790 +
7791 + ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
7792 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
7793 +index 141d9fc299b01..6981608ef165a 100644
7794 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
7795 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
7796 +@@ -2987,16 +2987,20 @@ static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
7797 + void *_data)
7798 + {
7799 + struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data;
7800 ++ const struct cfg80211_bss_ies *ies;
7801 + const struct element *elem;
7802 +
7803 +- elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, bss->ies->data,
7804 +- bss->ies->len);
7805 ++ rcu_read_lock();
7806 ++ ies = rcu_dereference(bss->ies);
7807 ++ elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
7808 ++ ies->len);
7809 +
7810 + if (!elem || elem->datalen < 10 ||
7811 + !(elem->data[10] &
7812 + WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) {
7813 + data->tolerated = false;
7814 + }
7815 ++ rcu_read_unlock();
7816 + }
7817 +
7818 + static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
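
bss->ies is an RCU-managed pointer, so reading it without rcu_read_lock() races with an update that frees the old element array; the fix brackets the whole access, including the field reads, inside the read-side critical section. The shape of the pattern in kernel-style C (struct blob is a hypothetical stand-in, not the cfg80211 type, and this fragment only builds in kernel context):

    #include <linux/rcupdate.h>

    struct blob { int len; };

    struct holder { struct blob __rcu *blob; };

    static int read_len(struct holder *h)
    {
        struct blob *b;
        int len;

        rcu_read_lock();                /* pin the current blob */
        b = rcu_dereference(h->blob);   /* valid only inside the lock */
        len = b ? b->len : 0;           /* finish all reads first */
        rcu_read_unlock();              /* b must not be used past here */

        return len;
    }
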
7819 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
7820 +index ebed82c590e56..31611542e1aa0 100644
7821 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
7822 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
7823 +@@ -754,10 +754,26 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
7824 +
7825 + mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
7826 +
7827 +- mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
7828 +- mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
7829 +- mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
7830 +- mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
7831 ++ if (iwl_mvm_has_new_tx_api(mvm)) {
7832 ++ /*
7833 ++ * If we have the new TX/queue allocation API initialize them
7834 ++ * all to invalid numbers. We'll rewrite the ones that we need
7835 ++ * later, but that doesn't happen for all of them all of the
7836 ++ * time (e.g. P2P Device is optional), and if a dynamic queue
7837 ++ * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then
7838 ++ * iwl_mvm_is_static_queue() erroneously returns true, and we
7839 ++ * might have things getting stuck.
7840 ++ */
7841 ++ mvm->aux_queue = IWL_MVM_INVALID_QUEUE;
7842 ++ mvm->snif_queue = IWL_MVM_INVALID_QUEUE;
7843 ++ mvm->probe_queue = IWL_MVM_INVALID_QUEUE;
7844 ++ mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE;
7845 ++ } else {
7846 ++ mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
7847 ++ mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
7848 ++ mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
7849 ++ mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
7850 ++ }
7851 +
7852 + mvm->sf_state = SF_UNINIT;
7853 + if (iwl_mvm_has_unified_ucode(mvm))
7854 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
7855 +index 5a0696c44f6df..ee3aff8bf7c25 100644
7856 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
7857 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
7858 +@@ -1648,7 +1648,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
7859 + struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
7860 + u32 n_aps_flag =
7861 + iwl_mvm_scan_ch_n_aps_flag(vif_type,
7862 +- cfg->v2.channel_num);
7863 ++ channels[i]->hw_value);
7864 +
7865 + cfg->flags = cpu_to_le32(flags | n_aps_flag);
7866 + cfg->v2.channel_num = channels[i]->hw_value;
7867 +@@ -2368,14 +2368,17 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
7868 + if (ret)
7869 + return ret;
7870 +
7871 +- iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
7872 +- &bitmap_ssid);
7873 + if (!params->scan_6ghz) {
7874 ++ iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
7875 ++ &bitmap_ssid);
7876 + iwl_mvm_scan_umac_fill_ch_p_v6(mvm, params, vif,
7877 +- &scan_p->channel_params, bitmap_ssid);
7878 ++ &scan_p->channel_params, bitmap_ssid);
7879 +
7880 + return 0;
7881 ++ } else {
7882 ++ pb->preq = params->preq;
7883 + }
7884 ++
7885 + cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
7886 + cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
7887 + cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
7888 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
7889 +index f618368eda832..c310c366c38e8 100644
7890 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
7891 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
7892 +@@ -316,8 +316,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
7893 + }
7894 +
7895 + static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
7896 +- int queue, u8 tid, u8 flags)
7897 ++ u16 *queueptr, u8 tid, u8 flags)
7898 + {
7899 ++ int queue = *queueptr;
7900 + struct iwl_scd_txq_cfg_cmd cmd = {
7901 + .scd_queue = queue,
7902 + .action = SCD_CFG_DISABLE_QUEUE,
7903 +@@ -326,6 +327,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
7904 +
7905 + if (iwl_mvm_has_new_tx_api(mvm)) {
7906 + iwl_trans_txq_free(mvm->trans, queue);
7907 ++ *queueptr = IWL_MVM_INVALID_QUEUE;
7908 +
7909 + return 0;
7910 + }
7911 +@@ -487,6 +489,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
7912 + u8 sta_id, tid;
7913 + unsigned long disable_agg_tids = 0;
7914 + bool same_sta;
7915 ++ u16 queue_tmp = queue;
7916 + int ret;
7917 +
7918 + lockdep_assert_held(&mvm->mutex);
7919 +@@ -509,7 +512,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
7920 + iwl_mvm_invalidate_sta_queue(mvm, queue,
7921 + disable_agg_tids, false);
7922 +
7923 +- ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
7924 ++ ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
7925 + if (ret) {
7926 + IWL_ERR(mvm,
7927 + "Failed to free inactive queue %d (ret=%d)\n",
7928 +@@ -1184,6 +1187,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
7929 + unsigned int wdg_timeout =
7930 + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
7931 + int queue = -1;
7932 ++ u16 queue_tmp;
7933 + unsigned long disable_agg_tids = 0;
7934 + enum iwl_mvm_agg_state queue_state;
7935 + bool shared_queue = false, inc_ssn;
7936 +@@ -1332,7 +1336,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
7937 + return 0;
7938 +
7939 + out_err:
7940 +- iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
7941 ++ queue_tmp = queue;
7942 ++ iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
7943 +
7944 + return ret;
7945 + }
7946 +@@ -1779,7 +1784,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
7947 + if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
7948 + continue;
7949 +
7950 +- iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
7951 ++ iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
7952 + 0);
7953 + mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
7954 + }
7955 +@@ -1987,7 +1992,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
7956 + ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
7957 + if (ret) {
7958 + if (!iwl_mvm_has_new_tx_api(mvm))
7959 +- iwl_mvm_disable_txq(mvm, NULL, *queue,
7960 ++ iwl_mvm_disable_txq(mvm, NULL, queue,
7961 + IWL_MAX_TID_COUNT, 0);
7962 + return ret;
7963 + }
7964 +@@ -2060,7 +2065,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
7965 + if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
7966 + return -EINVAL;
7967 +
7968 +- iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
7969 ++ iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
7970 + ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
7971 + if (ret)
7972 + IWL_WARN(mvm, "Failed sending remove station\n");
7973 +@@ -2077,7 +2082,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
7974 + if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
7975 + return -EINVAL;
7976 +
7977 +- iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
7978 ++ iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
7979 + ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
7980 + if (ret)
7981 + IWL_WARN(mvm, "Failed sending remove station\n");
7982 +@@ -2173,7 +2178,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
7983 + struct ieee80211_vif *vif)
7984 + {
7985 + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
7986 +- int queue;
7987 ++ u16 *queueptr, queue;
7988 +
7989 + lockdep_assert_held(&mvm->mutex);
7990 +
7991 +@@ -2182,10 +2187,10 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
7992 + switch (vif->type) {
7993 + case NL80211_IFTYPE_AP:
7994 + case NL80211_IFTYPE_ADHOC:
7995 +- queue = mvm->probe_queue;
7996 ++ queueptr = &mvm->probe_queue;
7997 + break;
7998 + case NL80211_IFTYPE_P2P_DEVICE:
7999 +- queue = mvm->p2p_dev_queue;
8000 ++ queueptr = &mvm->p2p_dev_queue;
8001 + break;
8002 + default:
8003 + WARN(1, "Can't free bcast queue on vif type %d\n",
8004 +@@ -2193,7 +2198,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
8005 + return;
8006 + }
8007 +
8008 +- iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
8009 ++ queue = *queueptr;
8010 ++ iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
8011 + if (iwl_mvm_has_new_tx_api(mvm))
8012 + return;
8013 +
8014 +@@ -2428,7 +2434,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
8015 +
8016 + iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
8017 +
8018 +- iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
8019 ++ iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);
8020 +
8021 + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
8022 + if (ret)
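
iwl_mvm_disable_txq() now takes u16 *queueptr so that, on the new-TX-API path where the hardware queue is freed, the callee can also overwrite the caller's copy with IWL_MVM_INVALID_QUEUE; no stale queue id survives to be matched by iwl_mvm_is_static_queue() later. The idiom, reduced to essentials (hypothetical names):

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_QUEUE 0xffffu

    /* Free the queue and invalidate the caller's handle in one step. */
    static void disable_queue(uint16_t *queueptr)
    {
        uint16_t queue = *queueptr;

        printf("freeing queue %u\n", queue);    /* stands in for hw free */
        *queueptr = INVALID_QUEUE;
    }

    int main(void)
    {
        uint16_t probe_queue = 9;

        disable_queue(&probe_queue);
        return probe_queue == INVALID_QUEUE ? 0 : 1;
    }
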
8023 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
8024 +index fb8491412be44..586c4104edf22 100644
8025 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
8026 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
8027 +@@ -487,6 +487,9 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
8028 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
8029 + int i;
8030 +
8031 ++ if (!trans_pcie->rx_pool)
8032 ++ return;
8033 ++
8034 + for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
8035 + if (!trans_pcie->rx_pool[i].page)
8036 + continue;
8037 +@@ -1093,7 +1096,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
8038 + INIT_LIST_HEAD(&rba->rbd_empty);
8039 + spin_unlock_bh(&rba->lock);
8040 +
8041 +- /* free all first - we might be reconfigured for a different size */
8042 ++ /* free all first - we overwrite everything here */
8043 + iwl_pcie_free_rbs_pool(trans);
8044 +
8045 + for (i = 0; i < RX_QUEUE_SIZE; i++)
8046 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
8047 +index 239bc177a3e5c..a7a495dbf64db 100644
8048 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
8049 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
8050 +@@ -1866,6 +1866,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
8051 + {
8052 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
8053 +
8054 ++ /* free all first - we might be reconfigured for a different size */
8055 ++ iwl_pcie_free_rbs_pool(trans);
8056 ++
8057 + trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
8058 + trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
8059 + trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
8060 +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
8061 +index 01735776345a9..7ddce3c3f0c48 100644
8062 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
8063 ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
8064 +@@ -1378,6 +1378,8 @@ struct rtl8xxxu_priv {
8065 + u8 no_pape:1;
8066 + u8 int_buf[USB_INTR_CONTENT_LENGTH];
8067 + u8 rssi_level;
8068 ++ DECLARE_BITMAP(tx_aggr_started, IEEE80211_NUM_TIDS);
8069 ++ DECLARE_BITMAP(tid_tx_operational, IEEE80211_NUM_TIDS);
8070 + /*
8071 + * Only one virtual interface permitted because only STA mode
8072 + * is supported and no iface_combinations are provided.
8073 +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
8074 +index 9ff09cf7eb622..ce8e2438f86b0 100644
8075 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
8076 ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
8077 +@@ -4805,6 +4805,8 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
8078 + struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
8079 + struct rtl8xxxu_priv *priv = hw->priv;
8080 + struct device *dev = &priv->udev->dev;
8081 ++ u8 *qc = ieee80211_get_qos_ctl(hdr);
8082 ++ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
8083 + u32 rate;
8084 + u16 rate_flags = tx_info->control.rates[0].flags;
8085 + u16 seq_number;
8086 +@@ -4828,7 +4830,7 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
8087 +
8088 + tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
8089 +
8090 +- if (ampdu_enable)
8091 ++ if (ampdu_enable && test_bit(tid, priv->tid_tx_operational))
8092 + tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
8093 + else
8094 + tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
8095 +@@ -4876,6 +4878,8 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
8096 + struct rtl8xxxu_priv *priv = hw->priv;
8097 + struct device *dev = &priv->udev->dev;
8098 + struct rtl8xxxu_txdesc40 *tx_desc40;
8099 ++ u8 *qc = ieee80211_get_qos_ctl(hdr);
8100 ++ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
8101 + u32 rate;
8102 + u16 rate_flags = tx_info->control.rates[0].flags;
8103 + u16 seq_number;
8104 +@@ -4902,7 +4906,7 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
8105 +
8106 + tx_desc40->txdw9 = cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
8107 +
8108 +- if (ampdu_enable)
8109 ++ if (ampdu_enable && test_bit(tid, priv->tid_tx_operational))
8110 + tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
8111 + else
8112 + tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
8113 +@@ -5015,12 +5019,19 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
8114 + if (ieee80211_is_data_qos(hdr->frame_control) && sta) {
8115 + if (sta->ht_cap.ht_supported) {
8116 + u32 ampdu, val32;
8117 ++ u8 *qc = ieee80211_get_qos_ctl(hdr);
8118 ++ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
8119 +
8120 + ampdu = (u32)sta->ht_cap.ampdu_density;
8121 + val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT;
8122 + tx_desc->txdw2 |= cpu_to_le32(val32);
8123 +
8124 + ampdu_enable = true;
8125 ++
8126 ++ if (!test_bit(tid, priv->tx_aggr_started) &&
8127 ++ !(skb->protocol == cpu_to_be16(ETH_P_PAE)))
8128 ++ if (!ieee80211_start_tx_ba_session(sta, tid, 0))
8129 ++ set_bit(tid, priv->tx_aggr_started);
8130 + }
8131 + }
8132 +
8133 +@@ -6089,6 +6100,7 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
8134 + struct device *dev = &priv->udev->dev;
8135 + u8 ampdu_factor, ampdu_density;
8136 + struct ieee80211_sta *sta = params->sta;
8137 ++ u16 tid = params->tid;
8138 + enum ieee80211_ampdu_mlme_action action = params->action;
8139 +
8140 + switch (action) {
8141 +@@ -6101,17 +6113,20 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
8142 + dev_dbg(dev,
8143 + "Changed HT: ampdu_factor %02x, ampdu_density %02x\n",
8144 + ampdu_factor, ampdu_density);
8145 +- break;
8146 ++ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
8147 ++ case IEEE80211_AMPDU_TX_STOP_CONT:
8148 + case IEEE80211_AMPDU_TX_STOP_FLUSH:
8149 +- dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
8150 +- rtl8xxxu_set_ampdu_factor(priv, 0);
8151 +- rtl8xxxu_set_ampdu_min_space(priv, 0);
8152 +- break;
8153 + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
8154 +- dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
8155 +- __func__);
8156 ++ dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP\n", __func__);
8157 + rtl8xxxu_set_ampdu_factor(priv, 0);
8158 + rtl8xxxu_set_ampdu_min_space(priv, 0);
8159 ++ clear_bit(tid, priv->tx_aggr_started);
8160 ++ clear_bit(tid, priv->tid_tx_operational);
8161 ++ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
8162 ++ break;
8163 ++ case IEEE80211_AMPDU_TX_OPERATIONAL:
8164 ++ dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_OPERATIONAL\n", __func__);
8165 ++ set_bit(tid, priv->tid_tx_operational);
8166 + break;
8167 + case IEEE80211_AMPDU_RX_START:
8168 + dev_dbg(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
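
The driver now gates the AGG_ENABLE descriptor bit on two per-TID bitmaps: tx_aggr_started records that a BA session was requested from the TX path, tid_tx_operational that mac80211 confirmed it, and frames aggregate only in the second state. A userspace sketch of the two-stage bitmap (helpers renamed so they are not mistaken for the kernel's test_bit/set_bit):

    #include <stdio.h>

    static unsigned long tx_aggr_started;    /* BA session requested */
    static unsigned long tid_tx_operational; /* BA session confirmed */

    static int tid_test(int tid, unsigned long map) { return (map >> tid) & 1; }
    static void tid_set(int tid, unsigned long *map) { *map |= 1ul << tid; }

    int main(void)
    {
        int tid = 5;

        tid_set(tid, &tx_aggr_started);      /* TX path starts a session */
        printf("aggregate? %d\n", tid_test(tid, tid_tx_operational)); /* 0 */

        tid_set(tid, &tid_tx_operational);   /* AMPDU_TX_OPERATIONAL */
        printf("aggregate? %d\n", tid_test(tid, tid_tx_operational)); /* 1 */
        return 0;
    }
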
8169 +diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
8170 +index c0e4b111c8b4e..73d6807a8cdfb 100644
8171 +--- a/drivers/net/wireless/realtek/rtw88/Makefile
8172 ++++ b/drivers/net/wireless/realtek/rtw88/Makefile
8173 +@@ -15,9 +15,9 @@ rtw88_core-y += main.o \
8174 + ps.o \
8175 + sec.o \
8176 + bf.o \
8177 +- wow.o \
8178 + regd.o
8179 +
8180 ++rtw88_core-$(CONFIG_PM) += wow.o
8181 +
8182 + obj-$(CONFIG_RTW88_8822B) += rtw88_8822b.o
8183 + rtw88_8822b-objs := rtw8822b.o rtw8822b_table.o
8184 +diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
8185 +index ea2cd4db1d3ce..ce57932e38a44 100644
8186 +--- a/drivers/net/wireless/realtek/rtw88/fw.c
8187 ++++ b/drivers/net/wireless/realtek/rtw88/fw.c
8188 +@@ -715,7 +715,7 @@ static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
8189 + continue;
8190 + if ((!ssid && !rsvd_pkt->ssid) ||
8191 + rtw_ssid_equal(rsvd_pkt->ssid, ssid))
8192 +- size = rsvd_pkt->skb->len;
8193 ++ size = rsvd_pkt->probe_req_size;
8194 + }
8195 +
8196 + return size;
8197 +@@ -943,6 +943,8 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
8198 + ssid->ssid_len, 0);
8199 + else
8200 + skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
8201 ++ if (skb_new)
8202 ++ rsvd_pkt->probe_req_size = (u16)skb_new->len;
8203 + break;
8204 + case RSVD_NLO_INFO:
8205 + skb_new = rtw_nlo_info_get(hw);
8206 +@@ -1539,6 +1541,7 @@ int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
8207 + static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
8208 + u8 location)
8209 + {
8210 ++ struct rtw_chip_info *chip = rtwdev->chip;
8211 + u8 h2c_pkt[H2C_PKT_SIZE] = {0};
8212 + u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;
8213 +
8214 +@@ -1549,6 +1552,7 @@ static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
8215 + UPDATE_PKT_SET_LOCATION(h2c_pkt, location);
8216 +
8217 + /* include txdesc size */
8218 ++ size += chip->tx_pkt_desc_sz;
8219 + UPDATE_PKT_SET_SIZE(h2c_pkt, size);
8220 +
8221 + rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
8222 +@@ -1558,7 +1562,7 @@ void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
8223 + struct cfg80211_ssid *ssid)
8224 + {
8225 + u8 loc;
8226 +- u32 size;
8227 ++ u16 size;
8228 +
8229 + loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
8230 + if (!loc) {
8231 +diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
8232 +index 7c5b1d75e26f1..35bc9e10dcbaa 100644
8233 +--- a/drivers/net/wireless/realtek/rtw88/fw.h
8234 ++++ b/drivers/net/wireless/realtek/rtw88/fw.h
8235 +@@ -126,6 +126,7 @@ struct rtw_rsvd_page {
8236 + u8 page;
8237 + bool add_txdesc;
8238 + struct cfg80211_ssid *ssid;
8239 ++ u16 probe_req_size;
8240 + };
8241 +
8242 + enum rtw_keep_alive_pkt_type {
8243 +diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
8244 +index fc9544f4e5e45..bdccfa70dddc7 100644
8245 +--- a/drivers/net/wireless/realtek/rtw88/wow.c
8246 ++++ b/drivers/net/wireless/realtek/rtw88/wow.c
8247 +@@ -283,15 +283,26 @@ static void rtw_wow_rx_dma_start(struct rtw_dev *rtwdev)
8248 +
8249 + static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
8250 + {
8251 +- /* wait 100ms for wow firmware to finish work */
8252 +- msleep(100);
8253 ++ int ret;
8254 ++ u8 check;
8255 ++ u32 check_dis;
8256 +
8257 + if (wow_enable) {
8258 +- if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
8259 ++ ret = read_poll_timeout(rtw_read8, check, !check, 1000,
8260 ++ 100000, true, rtwdev,
8261 ++ REG_WOWLAN_WAKE_REASON);
8262 ++ if (ret)
8263 + goto wow_fail;
8264 + } else {
8265 +- if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) ||
8266 +- rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE))
8267 ++ ret = read_poll_timeout(rtw_read32_mask, check_dis,
8268 ++ !check_dis, 1000, 100000, true, rtwdev,
8269 ++ REG_FE1IMR, BIT_FS_RXDONE);
8270 ++ if (ret)
8271 ++ goto wow_fail;
8272 ++ ret = read_poll_timeout(rtw_read32_mask, check_dis,
8273 ++ !check_dis, 1000, 100000, false, rtwdev,
8274 ++ REG_RXPKT_NUM, BIT_RW_RELEASE);
8275 ++ if (ret)
8276 + goto wow_fail;
8277 + }
8278 +
8279 +diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
8280 +index ed10a8b66068a..3c7c4f1d55cd9 100644
8281 +--- a/drivers/nvdimm/pmem.c
8282 ++++ b/drivers/nvdimm/pmem.c
8283 +@@ -449,11 +449,11 @@ static int pmem_attach_disk(struct device *dev,
8284 + pmem->pfn_flags |= PFN_MAP;
8285 + bb_range = pmem->pgmap.range;
8286 + } else {
8287 ++ addr = devm_memremap(dev, pmem->phys_addr,
8288 ++ pmem->size, ARCH_MEMREMAP_PMEM);
8289 + if (devm_add_action_or_reset(dev, pmem_release_queue,
8290 + &pmem->pgmap))
8291 + return -ENOMEM;
8292 +- addr = devm_memremap(dev, pmem->phys_addr,
8293 +- pmem->size, ARCH_MEMREMAP_PMEM);
8294 + bb_range.start = res->start;
8295 + bb_range.end = res->end;
8296 + }
8297 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
8298 +index 148e756857a89..a13eec2fca5aa 100644
8299 +--- a/drivers/nvme/host/core.c
8300 ++++ b/drivers/nvme/host/core.c
8301 +@@ -1009,7 +1009,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
8302 + return BLK_STS_IOERR;
8303 + }
8304 +
8305 +- cmd->common.command_id = req->tag;
8306 ++ nvme_req(req)->genctr++;
8307 ++ cmd->common.command_id = nvme_cid(req);
8308 + trace_nvme_setup_cmd(req, cmd);
8309 + return ret;
8310 + }
8311 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
8312 +index 0015860ec12bf..632076b9c1c9d 100644
8313 +--- a/drivers/nvme/host/nvme.h
8314 ++++ b/drivers/nvme/host/nvme.h
8315 +@@ -158,6 +158,7 @@ enum nvme_quirks {
8316 + struct nvme_request {
8317 + struct nvme_command *cmd;
8318 + union nvme_result result;
8319 ++ u8 genctr;
8320 + u8 retries;
8321 + u8 flags;
8322 + u16 status;
8323 +@@ -497,6 +498,49 @@ struct nvme_ctrl_ops {
8324 + int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
8325 + };
8326 +
8327 ++/*
8328 ++ * nvme command_id is constructed as such:
8329 ++ * | xxxx | xxxxxxxxxxxx |
8330 ++ * gen request tag
8331 ++ */
8332 ++#define nvme_genctr_mask(gen) (gen & 0xf)
8333 ++#define nvme_cid_install_genctr(gen) (nvme_genctr_mask(gen) << 12)
8334 ++#define nvme_genctr_from_cid(cid) ((cid & 0xf000) >> 12)
8335 ++#define nvme_tag_from_cid(cid) (cid & 0xfff)
8336 ++
8337 ++static inline u16 nvme_cid(struct request *rq)
8338 ++{
8339 ++ return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
8340 ++}
8341 ++
8342 ++static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
8343 ++ u16 command_id)
8344 ++{
8345 ++ u8 genctr = nvme_genctr_from_cid(command_id);
8346 ++ u16 tag = nvme_tag_from_cid(command_id);
8347 ++ struct request *rq;
8348 ++
8349 ++ rq = blk_mq_tag_to_rq(tags, tag);
8350 ++ if (unlikely(!rq)) {
8351 ++ pr_err("could not locate request for tag %#x\n",
8352 ++ tag);
8353 ++ return NULL;
8354 ++ }
8355 ++ if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
8356 ++ dev_err(nvme_req(rq)->ctrl->device,
8357 ++ "request %#x genctr mismatch (got %#x expected %#x)\n",
8358 ++ tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
8359 ++ return NULL;
8360 ++ }
8361 ++ return rq;
8362 ++}
8363 ++
8364 ++static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
8365 ++ u16 command_id)
8366 ++{
8367 ++ return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
8368 ++}
8369 ++
8370 + #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
8371 + void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
8372 + const char *dev_name);
8373 +@@ -594,7 +638,8 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
8374 +
8375 + static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
8376 + {
8377 +- return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
8378 ++ return !qid &&
8379 ++ nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
8380 + }
8381 +
8382 + void nvme_complete_rq(struct request *req);
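
With a 4-bit generation counter folded into the top of the 16-bit command id, a stale or corrupted completion still matches a tag but no longer matches the generation, so nvme_find_rq() rejects it instead of completing the wrong request. A standalone encode/decode sketch mirroring the macros above:

    #include <stdint.h>
    #include <stdio.h>

    #define GENCTR_MASK(g)   ((g) & 0xf)
    #define CID_INSTALL(g)   (GENCTR_MASK(g) << 12)
    #define CID_GEN(cid)     (((cid) & 0xf000) >> 12)
    #define CID_TAG(cid)     ((cid) & 0xfff)

    int main(void)
    {
        uint8_t genctr = 0x13;          /* wraps into 4 bits -> 0x3 */
        uint16_t tag = 0x2a;
        uint16_t cid = CID_INSTALL(genctr) | tag;

        printf("cid=%#x tag=%#x gen=%#x\n", cid, CID_TAG(cid), CID_GEN(cid));

        /* A completion left over from the tag's previous life fails
         * the generation check and is dropped. */
        uint16_t stale = CID_INSTALL(genctr - 1) | tag;
        printf("stale accepted? %d\n", CID_GEN(stale) == GENCTR_MASK(genctr));
        return 0;
    }
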
8383 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
8384 +index d963f25fc7aed..01feb1c2278dc 100644
8385 +--- a/drivers/nvme/host/pci.c
8386 ++++ b/drivers/nvme/host/pci.c
8387 +@@ -1017,7 +1017,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
8388 + return;
8389 + }
8390 +
8391 +- req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
8392 ++ req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
8393 + if (unlikely(!req)) {
8394 + dev_warn(nvmeq->dev->ctrl.device,
8395 + "invalid id %d completed on queue %d\n",
8396 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
8397 +index f80682f7df54d..b95945c58b3b4 100644
8398 +--- a/drivers/nvme/host/rdma.c
8399 ++++ b/drivers/nvme/host/rdma.c
8400 +@@ -1731,10 +1731,10 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
8401 + struct request *rq;
8402 + struct nvme_rdma_request *req;
8403 +
8404 +- rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
8405 ++ rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
8406 + if (!rq) {
8407 + dev_err(queue->ctrl->ctrl.device,
8408 +- "tag 0x%x on QP %#x not found\n",
8409 ++ "got bad command_id %#x on QP %#x\n",
8410 + cqe->command_id, queue->qp->qp_num);
8411 + nvme_rdma_error_recovery(queue->ctrl);
8412 + return;
8413 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
8414 +index ab1ea5b0888ea..258d71807367a 100644
8415 +--- a/drivers/nvme/host/tcp.c
8416 ++++ b/drivers/nvme/host/tcp.c
8417 +@@ -487,11 +487,11 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
8418 + {
8419 + struct request *rq;
8420 +
8421 +- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
8422 ++ rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
8423 + if (!rq) {
8424 + dev_err(queue->ctrl->ctrl.device,
8425 +- "queue %d tag 0x%x not found\n",
8426 +- nvme_tcp_queue_id(queue), cqe->command_id);
8427 ++ "got bad cqe.command_id %#x on queue %d\n",
8428 ++ cqe->command_id, nvme_tcp_queue_id(queue));
8429 + nvme_tcp_error_recovery(&queue->ctrl->ctrl);
8430 + return -EINVAL;
8431 + }
8432 +@@ -508,11 +508,11 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
8433 + {
8434 + struct request *rq;
8435 +
8436 +- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
8437 ++ rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
8438 + if (!rq) {
8439 + dev_err(queue->ctrl->ctrl.device,
8440 +- "queue %d tag %#x not found\n",
8441 +- nvme_tcp_queue_id(queue), pdu->command_id);
8442 ++ "got bad c2hdata.command_id %#x on queue %d\n",
8443 ++ pdu->command_id, nvme_tcp_queue_id(queue));
8444 + return -ENOENT;
8445 + }
8446 +
8447 +@@ -606,7 +606,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
8448 + data->hdr.plen =
8449 + cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
8450 + data->ttag = pdu->ttag;
8451 +- data->command_id = rq->tag;
8452 ++ data->command_id = nvme_cid(rq);
8453 + data->data_offset = cpu_to_le32(req->data_sent);
8454 + data->data_length = cpu_to_le32(req->pdu_len);
8455 + return 0;
8456 +@@ -619,11 +619,11 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
8457 + struct request *rq;
8458 + int ret;
8459 +
8460 +- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
8461 ++ rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
8462 + if (!rq) {
8463 + dev_err(queue->ctrl->ctrl.device,
8464 +- "queue %d tag %#x not found\n",
8465 +- nvme_tcp_queue_id(queue), pdu->command_id);
8466 ++ "got bad r2t.command_id %#x on queue %d\n",
8467 ++ pdu->command_id, nvme_tcp_queue_id(queue));
8468 + return -ENOENT;
8469 + }
8470 + req = blk_mq_rq_to_pdu(rq);
8471 +@@ -702,17 +702,9 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
8472 + unsigned int *offset, size_t *len)
8473 + {
8474 + struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
8475 +- struct nvme_tcp_request *req;
8476 +- struct request *rq;
8477 +-
8478 +- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
8479 +- if (!rq) {
8480 +- dev_err(queue->ctrl->ctrl.device,
8481 +- "queue %d tag %#x not found\n",
8482 +- nvme_tcp_queue_id(queue), pdu->command_id);
8483 +- return -ENOENT;
8484 +- }
8485 +- req = blk_mq_rq_to_pdu(rq);
8486 ++ struct request *rq =
8487 ++ nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
8488 ++ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
8489 +
8490 + while (true) {
8491 + int recv_len, ret;
8492 +@@ -804,8 +796,8 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
8493 + }
8494 +
8495 + if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
8496 +- struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
8497 +- pdu->command_id);
8498 ++ struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
8499 ++ pdu->command_id);
8500 +
8501 + nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
8502 + queue->nr_cqe++;
8503 +diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
8504 +index a5c4a18650263..f6ee47de3038a 100644
8505 +--- a/drivers/nvme/target/loop.c
8506 ++++ b/drivers/nvme/target/loop.c
8507 +@@ -107,10 +107,10 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
8508 + } else {
8509 + struct request *rq;
8510 +
8511 +- rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
8512 ++ rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
8513 + if (!rq) {
8514 + dev_err(queue->ctrl->ctrl.device,
8515 +- "tag 0x%x on queue %d not found\n",
8516 ++ "got bad command_id %#x on queue %d\n",
8517 + cqe->command_id, nvme_loop_queue_idx(queue));
8518 + return;
8519 + }
8520 +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
8521 +index f9c9c98599197..30691b50731f3 100644
8522 +--- a/drivers/nvmem/core.c
8523 ++++ b/drivers/nvmem/core.c
8524 +@@ -818,8 +818,11 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
8525 +
8526 + if (nvmem->nkeepout) {
8527 + rval = nvmem_validate_keepouts(nvmem);
8528 +- if (rval)
8529 +- goto err_put_device;
8530 ++ if (rval) {
8531 ++ ida_free(&nvmem_ida, nvmem->id);
8532 ++ kfree(nvmem);
8533 ++ return ERR_PTR(rval);
8534 ++ }
8535 + }
8536 +
8537 + dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
8538 +diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
8539 +index d6d3f24685a85..f372eda2b2551 100644
8540 +--- a/drivers/nvmem/qfprom.c
8541 ++++ b/drivers/nvmem/qfprom.c
8542 +@@ -138,6 +138,9 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
8543 + {
8544 + int ret;
8545 +
8546 ++ writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
8547 ++ writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET);
8548 ++
8549 + /*
8550 + * This may be a shared rail and may be able to run at a lower rate
8551 + * when we're not blowing fuses. At the moment, the regulator framework
8552 +@@ -158,9 +161,6 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
8553 + "Failed to set clock rate for disable (ignoring)\n");
8554 +
8555 + clk_disable_unprepare(priv->secclk);
8556 +-
8557 +- writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
8558 +- writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET);
8559 + }
8560 +
8561 + /**
8562 +diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
8563 +index a32e60b024b8d..6675b5e56960c 100644
8564 +--- a/drivers/of/kobj.c
8565 ++++ b/drivers/of/kobj.c
8566 +@@ -119,7 +119,7 @@ int __of_attach_node_sysfs(struct device_node *np)
8567 + struct property *pp;
8568 + int rc;
8569 +
8570 +- if (!of_kset)
8571 ++ if (!IS_ENABLED(CONFIG_SYSFS) || !of_kset)
8572 + return 0;
8573 +
8574 + np->kobj.kset = of_kset;
8575 +diff --git a/drivers/opp/of.c b/drivers/opp/of.c
8576 +index 01feeba78426c..de550ee48e77c 100644
8577 +--- a/drivers/opp/of.c
8578 ++++ b/drivers/opp/of.c
8579 +@@ -95,15 +95,7 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
8580 + static struct device_node *of_parse_required_opp(struct device_node *np,
8581 + int index)
8582 + {
8583 +- struct device_node *required_np;
8584 +-
8585 +- required_np = of_parse_phandle(np, "required-opps", index);
8586 +- if (unlikely(!required_np)) {
8587 +- pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n",
8588 +- __func__, np, index);
8589 +- }
8590 +-
8591 +- return required_np;
8592 ++ return of_parse_phandle(np, "required-opps", index);
8593 + }
8594 +
8595 + /* The caller must call dev_pm_opp_put_opp_table() after the table is used */
8596 +@@ -1349,7 +1341,7 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
8597 +
8598 + required_np = of_parse_required_opp(np, index);
8599 + if (!required_np)
8600 +- return -EINVAL;
8601 ++ return -ENODEV;
8602 +
8603 + opp_table = _find_table_of_opp_np(required_np);
8604 + if (IS_ERR(opp_table)) {
8605 +diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
8606 +index 2c11bd3fe1fd6..17061f1df0f44 100644
8607 +--- a/drivers/parport/ieee1284_ops.c
8608 ++++ b/drivers/parport/ieee1284_ops.c
8609 +@@ -518,7 +518,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
8610 + goto out;
8611 +
8612 + /* Yield the port for a while. */
8613 +- if (count && dev->port->irq != PARPORT_IRQ_NONE) {
8614 ++ if (dev->port->irq != PARPORT_IRQ_NONE) {
8615 + parport_release (dev);
8616 + schedule_timeout_interruptible(msecs_to_jiffies(40));
8617 + parport_claim_or_block (dev);
8618 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
8619 +index c95ebe808f92b..fdbf051586970 100644
8620 +--- a/drivers/pci/controller/pci-aardvark.c
8621 ++++ b/drivers/pci/controller/pci-aardvark.c
8622 +@@ -58,6 +58,7 @@
8623 + #define PIO_COMPLETION_STATUS_CRS 2
8624 + #define PIO_COMPLETION_STATUS_CA 4
8625 + #define PIO_NON_POSTED_REQ BIT(10)
8626 ++#define PIO_ERR_STATUS BIT(11)
8627 + #define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
8628 + #define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
8629 + #define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
8630 +@@ -118,6 +119,46 @@
8631 + #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
8632 + #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
8633 +
8634 ++/* PCIe window configuration */
8635 ++#define OB_WIN_BASE_ADDR 0x4c00
8636 ++#define OB_WIN_BLOCK_SIZE 0x20
8637 ++#define OB_WIN_COUNT 8
8638 ++#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
8639 ++ OB_WIN_BLOCK_SIZE * (win) + \
8640 ++ (offset))
8641 ++#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
8642 ++#define OB_WIN_ENABLE BIT(0)
8643 ++#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
8644 ++#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
8645 ++#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
8646 ++#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
8647 ++#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
8648 ++#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
8649 ++#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
8650 ++#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
8651 ++#define OB_WIN_FUNC_NUM_SHIFT 24
8652 ++#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
8653 ++#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
8654 ++#define OB_WIN_BUS_NUM_BITS_SHIFT 20
8655 ++#define OB_WIN_MSG_CODE_ENABLE BIT(22)
8656 ++#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
8657 ++#define OB_WIN_MSG_CODE_SHIFT 14
8658 ++#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
8659 ++#define OB_WIN_ATTR_ENABLE BIT(11)
8660 ++#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
8661 ++#define OB_WIN_ATTR_TC_SHIFT 8
8662 ++#define OB_WIN_ATTR_RELAXED BIT(7)
8663 ++#define OB_WIN_ATTR_NOSNOOP BIT(6)
8664 ++#define OB_WIN_ATTR_POISON BIT(5)
8665 ++#define OB_WIN_ATTR_IDO BIT(4)
8666 ++#define OB_WIN_TYPE_MASK GENMASK(3, 0)
8667 ++#define OB_WIN_TYPE_SHIFT 0
8668 ++#define OB_WIN_TYPE_MEM 0x0
8669 ++#define OB_WIN_TYPE_IO 0x4
8670 ++#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
8671 ++#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
8672 ++#define OB_WIN_TYPE_MSG 0xc
8673 ++
8674 + /* LMI registers base address and register offsets */
8675 + #define LMI_BASE_ADDR 0x6000
8676 + #define CFG_REG (LMI_BASE_ADDR + 0x0)
8677 +@@ -166,7 +207,7 @@
8678 + #define PCIE_CONFIG_WR_TYPE0 0xa
8679 + #define PCIE_CONFIG_WR_TYPE1 0xb
8680 +
8681 +-#define PIO_RETRY_CNT 500
8682 ++#define PIO_RETRY_CNT 750000 /* 1.5 s */
8683 + #define PIO_RETRY_DELAY 2 /* 2 us*/
8684 +
8685 + #define LINK_WAIT_MAX_RETRIES 10
8686 +@@ -180,8 +221,16 @@
8687 + struct advk_pcie {
8688 + struct platform_device *pdev;
8689 + void __iomem *base;
8690 ++ struct {
8691 ++ phys_addr_t match;
8692 ++ phys_addr_t remap;
8693 ++ phys_addr_t mask;
8694 ++ u32 actions;
8695 ++ } wins[OB_WIN_COUNT];
8696 ++ u8 wins_count;
8697 + struct irq_domain *irq_domain;
8698 + struct irq_chip irq_chip;
8699 ++ raw_spinlock_t irq_lock;
8700 + struct irq_domain *msi_domain;
8701 + struct irq_domain *msi_inner_domain;
8702 + struct irq_chip msi_bottom_irq_chip;
8703 +@@ -366,9 +415,39 @@ err:
8704 + dev_err(dev, "link never came up\n");
8705 + }
8706 +
8707 ++/*
8708 ++ * Set PCIe address window register which could be used for memory
8709 ++ * mapping.
8710 ++ */
8711 ++static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
8712 ++ phys_addr_t match, phys_addr_t remap,
8713 ++ phys_addr_t mask, u32 actions)
8714 ++{
8715 ++ advk_writel(pcie, OB_WIN_ENABLE |
8716 ++ lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
8717 ++ advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
8718 ++ advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
8719 ++ advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
8720 ++ advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
8721 ++ advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
8722 ++ advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
8723 ++}
8724 ++
8725 ++static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
8726 ++{
8727 ++ advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
8728 ++ advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
8729 ++ advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
8730 ++ advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
8731 ++ advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
8732 ++ advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
8733 ++ advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
8734 ++}
8735 ++
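
[ advk_pcie_set_ob_win() above programs each 64-bit match/remap/mask value as an LS/MS register pair; a small sketch of the 32-bit split it relies on. The helpers mirror the kernel's lower_32_bits()/upper_32_bits(); the sample address is illustrative. ]

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Mirror the kernel's 64-bit split helpers. */
static uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t match = 0x00000001f2000000ULL;	/* illustrative address */

	/* The driver writes these two halves to MATCH_LS/MATCH_MS. */
	printf("LS=0x%08" PRIx32 " MS=0x%08" PRIx32 "\n",
	       lo32(match), hi32(match));
	return 0;
}
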
8736 + static void advk_pcie_setup_hw(struct advk_pcie *pcie)
8737 + {
8738 + u32 reg;
8739 ++ int i;
8740 +
8741 + /* Enable TX */
8742 + reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
8743 +@@ -447,15 +526,51 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
8744 + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
8745 + advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
8746 +
8747 ++ /*
8748 ++ * Enable AXI address window location generation:
8749 ++ * When it is enabled, the default outbound window
8750 ++ * configuration (Default User Field: 0xD0074CFC) is
8751 ++ * used for transparent address translation of the
8752 ++ * outbound transactions. Thus, PCIe address windows
8753 ++ * are not required for transparent memory access when
8754 ++ * the default outbound window configuration is set
8755 ++ * for memory access.
8756 ++ */
8757 + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
8758 + reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
8759 + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
8760 +
8761 +- /* Bypass the address window mapping for PIO */
8762 ++ /*
8763 ++ * Set memory access in Default User Field so it
8764 ++ * is not required to configure PCIe address for
8765 ++ * transparent memory access.
8766 ++ */
8767 ++ advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
8768 ++
8769 ++ /*
8770 ++ * Bypass the address window mapping for PIO:
8771 ++ * Since PIO access already contains all required
8772 ++ * info over AXI interface by PIO registers, the
8773 ++ * address window is not required.
8774 ++ */
8775 + reg = advk_readl(pcie, PIO_CTRL);
8776 + reg |= PIO_CTRL_ADDR_WIN_DISABLE;
8777 + advk_writel(pcie, reg, PIO_CTRL);
8778 +
8779 ++ /*
8780 ++ * Configure PCIe address windows for non-memory or
8781 ++ * non-transparent access, since by default PCIe uses
8782 ++ * transparent memory access.
8783 ++ */
8784 ++ for (i = 0; i < pcie->wins_count; i++)
8785 ++ advk_pcie_set_ob_win(pcie, i,
8786 ++ pcie->wins[i].match, pcie->wins[i].remap,
8787 ++ pcie->wins[i].mask, pcie->wins[i].actions);
8788 ++
8789 ++ /* Disable remaining PCIe outbound windows */
8790 ++ for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
8791 ++ advk_pcie_disable_ob_win(pcie, i);
8792 ++
8793 + advk_pcie_train_link(pcie);
8794 +
8795 + /*
8796 +@@ -472,7 +587,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
8797 + advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
8798 + }
8799 +
8800 +-static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
8801 ++static int advk_pcie_check_pio_status(struct advk_pcie *pcie, u32 *val)
8802 + {
8803 + struct device *dev = &pcie->pdev->dev;
8804 + u32 reg;
8805 +@@ -483,14 +598,49 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
8806 + status = (reg & PIO_COMPLETION_STATUS_MASK) >>
8807 + PIO_COMPLETION_STATUS_SHIFT;
8808 +
8809 +- if (!status)
8810 +- return;
8811 +-
8812 ++ /*
8813 ++ * According to the HW spec, the PIO status check sequence is as follows:
8814 ++ * 1) even if COMPLETION_STATUS(bit9:7) indicates success, the
8815 ++ * Error Status(bit11) still needs to be checked; only when that
8816 ++ * bit indicates that no error happened is the operation successful.
8817 ++ * 2) the value Unsupported Request(1) of COMPLETION_STATUS(bit9:7)
8818 ++ * only means a PIO write error; a PIO read is successful with
8819 ++ * a read value of 0xFFFFFFFF.
8820 ++ * 3) the value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
8821 ++ * only means a PIO write error; a PIO read is successful
8822 ++ * with a read value of 0xFFFF0001.
8823 ++ * 4) the value Completer Abort(CA) of COMPLETION_STATUS(bit9:7) means
8824 ++ * an error for both PIO read and PIO write operations.
8825 ++ * 5) other errors are indicated as 'unknown'.
8826 ++ */
8827 + switch (status) {
8828 ++ case PIO_COMPLETION_STATUS_OK:
8829 ++ if (reg & PIO_ERR_STATUS) {
8830 ++ strcomp_status = "COMP_ERR";
8831 ++ break;
8832 ++ }
8833 ++ /* Get the read result */
8834 ++ if (val)
8835 ++ *val = advk_readl(pcie, PIO_RD_DATA);
8836 ++ /* No error */
8837 ++ strcomp_status = NULL;
8838 ++ break;
8839 + case PIO_COMPLETION_STATUS_UR:
8840 + strcomp_status = "UR";
8841 + break;
8842 + case PIO_COMPLETION_STATUS_CRS:
8843 ++ /* PCIe r4.0, sec 2.3.2, says:
8844 ++ * If CRS Software Visibility is not enabled, the Root Complex
8845 ++ * must re-issue the Configuration Request as a new Request.
8846 ++ * A Root Complex implementation may choose to limit the number
8847 ++ * of Configuration Request/CRS Completion Status loops before
8848 ++ * determining that something is wrong with the target of the
8849 ++ * Request and taking appropriate action, e.g., complete the
8850 ++ * Request to the host as a failed transaction.
8851 ++ *
8852 ++ * To simplify the implementation, do not re-issue the Configuration
8853 ++ * Request, and instead complete the Request as a failed transaction.
8854 ++ */
8855 + strcomp_status = "CRS";
8856 + break;
8857 + case PIO_COMPLETION_STATUS_CA:
8858 +@@ -501,6 +651,9 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
8859 + break;
8860 + }
8861 +
8862 ++ if (!strcomp_status)
8863 ++ return 0;
8864 ++
8865 + if (reg & PIO_NON_POSTED_REQ)
8866 + str_posted = "Non-posted";
8867 + else
8868 +@@ -508,6 +661,8 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
8869 +
8870 + dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
8871 + str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
8872 ++
8873 ++ return -EFAULT;
8874 + }
8875 +
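
[ A compact user-space model of the status decode implemented above. The CRS(2) and CA(4) values are taken from the defines earlier in the patch; OK as 0 and UR as 1 are assumed from context, and the rest is illustrative. ]

#include <stdint.h>
#include <stdio.h>

#define COMP_SHIFT	7
#define COMP_MASK	(0x7u << COMP_SHIFT)	/* COMPLETION_STATUS, bits 9:7 */
#define ERR_BIT		(1u << 11)		/* PIO_ERR_STATUS */

/* Returns 0 on success and -1 on error; *why names the failure. */
static int decode_pio_status(uint32_t reg, const char **why)
{
	switch ((reg & COMP_MASK) >> COMP_SHIFT) {
	case 0:	/* OK, but the error bit can still be set */
		*why = (reg & ERR_BIT) ? "COMP_ERR" : NULL;
		break;
	case 1:	*why = "UR"; break;
	case 2:	*why = "CRS"; break;
	case 4:	*why = "CA"; break;
	default: *why = "unknown"; break;
	}
	return *why ? -1 : 0;
}

int main(void)
{
	uint32_t samples[] = { 0x0, ERR_BIT, 2u << COMP_SHIFT };
	const char *why;
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("reg=0x%x -> %s\n", (unsigned)samples[i],
		       decode_pio_status(samples[i], &why) ? why : "OK");
	return 0;
}
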
8876 + static int advk_pcie_wait_pio(struct advk_pcie *pcie)
8877 +@@ -745,10 +900,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
8878 + return PCIBIOS_SET_FAILED;
8879 + }
8880 +
8881 +- advk_pcie_check_pio_status(pcie);
8882 ++ /* Check PIO status and get the read result */
8883 ++ ret = advk_pcie_check_pio_status(pcie, val);
8884 ++ if (ret < 0) {
8885 ++ *val = 0xffffffff;
8886 ++ return PCIBIOS_SET_FAILED;
8887 ++ }
8888 +
8889 +- /* Get the read result */
8890 +- *val = advk_readl(pcie, PIO_RD_DATA);
8891 + if (size == 1)
8892 + *val = (*val >> (8 * (where & 3))) & 0xff;
8893 + else if (size == 2)
8894 +@@ -812,7 +970,9 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
8895 + if (ret < 0)
8896 + return PCIBIOS_SET_FAILED;
8897 +
8898 +- advk_pcie_check_pio_status(pcie);
8899 ++ ret = advk_pcie_check_pio_status(pcie, NULL);
8900 ++ if (ret < 0)
8901 ++ return PCIBIOS_SET_FAILED;
8902 +
8903 + return PCIBIOS_SUCCESSFUL;
8904 + }
8905 +@@ -886,22 +1046,28 @@ static void advk_pcie_irq_mask(struct irq_data *d)
8906 + {
8907 + struct advk_pcie *pcie = d->domain->host_data;
8908 + irq_hw_number_t hwirq = irqd_to_hwirq(d);
8909 ++ unsigned long flags;
8910 + u32 mask;
8911 +
8912 ++ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
8913 + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
8914 + mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
8915 + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
8916 ++ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
8917 + }
8918 +
8919 + static void advk_pcie_irq_unmask(struct irq_data *d)
8920 + {
8921 + struct advk_pcie *pcie = d->domain->host_data;
8922 + irq_hw_number_t hwirq = irqd_to_hwirq(d);
8923 ++ unsigned long flags;
8924 + u32 mask;
8925 +
8926 ++ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
8927 + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
8928 + mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
8929 + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
8930 ++ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
8931 + }
8932 +
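
[ The irq_lock added above serializes the read-modify-write of the shared mask register between mask and unmask; a user-space analogue of the same pattern, with a pthread mutex standing in for the raw spinlock. All names are illustrative. ]

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t mask_reg;	/* stands in for PCIE_ISR1_MASK_REG */

/* Without the lock, two concurrent callers could both read the old
 * value and one update would be lost; with it, each RMW is atomic. */
static void mask_bit(unsigned int bit)
{
	pthread_mutex_lock(&lock);
	mask_reg |= 1u << bit;
	pthread_mutex_unlock(&lock);
}

static void unmask_bit(unsigned int bit)
{
	pthread_mutex_lock(&lock);
	mask_reg &= ~(1u << bit);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	mask_bit(3);
	mask_bit(5);
	unmask_bit(3);
	printf("mask=0x%x\n", (unsigned)mask_reg);	/* 0x20 */
	return 0;
}
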
8933 + static int advk_pcie_irq_map(struct irq_domain *h,
8934 +@@ -985,6 +1151,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
8935 + struct irq_chip *irq_chip;
8936 + int ret = 0;
8937 +
8938 ++ raw_spin_lock_init(&pcie->irq_lock);
8939 ++
8940 + pcie_intc_node = of_get_next_child(node, NULL);
8941 + if (!pcie_intc_node) {
8942 + dev_err(dev, "No PCIe Intc node found\n");
8943 +@@ -1162,6 +1330,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
8944 + struct device *dev = &pdev->dev;
8945 + struct advk_pcie *pcie;
8946 + struct pci_host_bridge *bridge;
8947 ++ struct resource_entry *entry;
8948 + int ret, irq;
8949 +
8950 + bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
8951 +@@ -1172,6 +1341,80 @@ static int advk_pcie_probe(struct platform_device *pdev)
8952 + pcie->pdev = pdev;
8953 + platform_set_drvdata(pdev, pcie);
8954 +
8955 ++ resource_list_for_each_entry(entry, &bridge->windows) {
8956 ++ resource_size_t start = entry->res->start;
8957 ++ resource_size_t size = resource_size(entry->res);
8958 ++ unsigned long type = resource_type(entry->res);
8959 ++ u64 win_size;
8960 ++
8961 ++ /*
8962 ++ * Aardvark hardware also allows configuring a PCIe window
8963 ++ * for config type 0 and type 1 mapping, but the driver uses
8964 ++ * only PIO for issuing configuration transfers, which does
8965 ++ * not use the PCIe window configuration.
8966 ++ */
8967 ++ if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
8968 ++ type != IORESOURCE_IO)
8969 ++ continue;
8970 ++
8971 ++ /*
8972 ++ * Skip transparent memory resources. The default outbound
8973 ++ * access configuration is set to transparent memory access,
8974 ++ * so it does not need a window configuration.
8975 ++ */
8976 ++ if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
8977 ++ entry->offset == 0)
8978 ++ continue;
8979 ++
8980 ++ /*
8981 ++ * The n-th PCIe window is configured by the tuple (match, remap,
8982 ++ * mask), and an access to address A uses this window if A equals
8983 ++ * match under the given mask.
8984 ++ * So every PCIe window size must be a power of two, and every start
8985 ++ * address must be aligned to the window size. The minimal size is
8986 ++ * 64 KiB because the lower 16 bits of the mask must be zero. The
8987 ++ * remapped address may have only bits from the mask set.
8988 ++ */
8989 ++ while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
8990 ++ /* Calculate the largest aligned window size */
8991 ++ win_size = (1ULL << (fls64(size)-1)) |
8992 ++ (start ? (1ULL << __ffs64(start)) : 0);
8993 ++ win_size = 1ULL << __ffs64(win_size);
8994 ++ if (win_size < 0x10000)
8995 ++ break;
8996 ++
8997 ++ dev_dbg(dev,
8998 ++ "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
8999 ++ pcie->wins_count, (unsigned long long)start,
9000 ++ (unsigned long long)start + win_size, type);
9001 ++
9002 ++ if (type == IORESOURCE_IO) {
9003 ++ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
9004 ++ pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
9005 ++ } else {
9006 ++ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
9007 ++ pcie->wins[pcie->wins_count].match = start;
9008 ++ }
9009 ++ pcie->wins[pcie->wins_count].remap = start - entry->offset;
9010 ++ pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
9011 ++
9012 ++ if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
9013 ++ break;
9014 ++
9015 ++ start += win_size;
9016 ++ size -= win_size;
9017 ++ pcie->wins_count++;
9018 ++ }
9019 ++
9020 ++ if (size > 0) {
9021 ++ dev_err(&pcie->pdev->dev,
9022 ++ "Invalid PCIe region [0x%llx-0x%llx]\n",
9023 ++ (unsigned long long)entry->res->start,
9024 ++ (unsigned long long)entry->res->end + 1);
9025 ++ return -EINVAL;
9026 ++ }
9027 ++ }
9028 ++
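
[ The probe loop above greedily carves each resource into naturally aligned power-of-two windows; a stand-alone sketch of the same arithmetic, with compiler builtins standing in for fls64()/__ffs64() and an illustrative sample region. ]

#include <stdint.h>
#include <stdio.h>

/* Largest power-of-two window that is aligned to 'start' and no
 * larger than 'size' -- the same computation as the driver's
 * fls64()/__ffs64() pair. Requires size > 0. */
static uint64_t largest_window(uint64_t start, uint64_t size)
{
	uint64_t win = 1ULL << (63 - __builtin_clzll(size));

	if (start)
		win |= 1ULL << __builtin_ctzll(start);
	return 1ULL << __builtin_ctzll(win);
}

int main(void)
{
	uint64_t start = 0xe8000000, size = 0x3000000;	/* illustrative */

	while (size > 0) {
		uint64_t win = largest_window(start, size);

		if (win < 0x10000)	/* 64 KiB minimum, as above */
			break;
		printf("[0x%llx-0x%llx] size 0x%llx\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + win),
		       (unsigned long long)win);
		start += win;
		size -= win;
	}
	return 0;
}
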
9029 + pcie->base = devm_platform_ioremap_resource(pdev, 0);
9030 + if (IS_ERR(pcie->base))
9031 + return PTR_ERR(pcie->base);
9032 +@@ -1252,6 +1495,7 @@ static int advk_pcie_remove(struct platform_device *pdev)
9033 + {
9034 + struct advk_pcie *pcie = platform_get_drvdata(pdev);
9035 + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
9036 ++ int i;
9037 +
9038 + pci_lock_rescan_remove();
9039 + pci_stop_root_bus(bridge->bus);
9040 +@@ -1261,6 +1505,10 @@ static int advk_pcie_remove(struct platform_device *pdev)
9041 + advk_pcie_remove_msi_irq_domain(pcie);
9042 + advk_pcie_remove_irq_domain(pcie);
9043 +
9044 ++ /* Disable outbound address window mapping */
9045 ++ for (i = 0; i < OB_WIN_COUNT; i++)
9046 ++ advk_pcie_disable_ob_win(pcie, i);
9047 ++
9048 + return 0;
9049 + }
9050 +
9051 +diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
9052 +index 8689311c5ef66..1c3d5b87ef20e 100644
9053 +--- a/drivers/pci/controller/pcie-xilinx-nwl.c
9054 ++++ b/drivers/pci/controller/pcie-xilinx-nwl.c
9055 +@@ -6,6 +6,7 @@
9056 + * (C) Copyright 2014 - 2015, Xilinx, Inc.
9057 + */
9058 +
9059 ++#include <linux/clk.h>
9060 + #include <linux/delay.h>
9061 + #include <linux/interrupt.h>
9062 + #include <linux/irq.h>
9063 +@@ -169,6 +170,7 @@ struct nwl_pcie {
9064 + u8 last_busno;
9065 + struct nwl_msi msi;
9066 + struct irq_domain *legacy_irq_domain;
9067 ++ struct clk *clk;
9068 + raw_spinlock_t leg_mask_lock;
9069 + };
9070 +
9071 +@@ -823,6 +825,16 @@ static int nwl_pcie_probe(struct platform_device *pdev)
9072 + return err;
9073 + }
9074 +
9075 ++ pcie->clk = devm_clk_get(dev, NULL);
9076 ++ if (IS_ERR(pcie->clk))
9077 ++ return PTR_ERR(pcie->clk);
9078 ++
9079 ++ err = clk_prepare_enable(pcie->clk);
9080 ++ if (err) {
9081 ++ dev_err(dev, "can't enable PCIe ref clock\n");
9082 ++ return err;
9083 ++ }
9084 ++
9085 + err = nwl_pcie_bridge_init(pcie);
9086 + if (err) {
9087 + dev_err(dev, "HW Initialization failed\n");
9088 +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
9089 +index 5516647e53a87..9f320fba2d9b0 100644
9090 +--- a/drivers/pci/msi.c
9091 ++++ b/drivers/pci/msi.c
9092 +@@ -776,6 +776,9 @@ static void msix_mask_all(void __iomem *base, int tsize)
9093 + u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
9094 + int i;
9095 +
9096 ++ if (pci_msi_ignore_mask)
9097 ++ return;
9098 ++
9099 + for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
9100 + writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
9101 + }
9102 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
9103 +index a9d0530b7846d..8b3cb62c63cca 100644
9104 +--- a/drivers/pci/pci.c
9105 ++++ b/drivers/pci/pci.c
9106 +@@ -1906,11 +1906,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
9107 + * so that things like MSI message writing will behave as expected
9108 + * (e.g. if the device really is in D0 at enable time).
9109 + */
9110 +- if (dev->pm_cap) {
9111 +- u16 pmcsr;
9112 +- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
9113 +- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
9114 +- }
9115 ++ pci_update_current_state(dev, dev->current_state);
9116 +
9117 + if (atomic_inc_return(&dev->enable_cnt) > 1)
9118 + return 0; /* already enabled */
9119 +diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
9120 +index e1fed6649c41f..3ee63968deaa5 100644
9121 +--- a/drivers/pci/pcie/portdrv_core.c
9122 ++++ b/drivers/pci/pcie/portdrv_core.c
9123 +@@ -257,8 +257,13 @@ static int get_port_device_capability(struct pci_dev *dev)
9124 + services |= PCIE_PORT_SERVICE_DPC;
9125 +
9126 + if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
9127 +- pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
9128 +- services |= PCIE_PORT_SERVICE_BWNOTIF;
9129 ++ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
9130 ++ u32 linkcap;
9131 ++
9132 ++ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
9133 ++ if (linkcap & PCI_EXP_LNKCAP_LBNC)
9134 ++ services |= PCIE_PORT_SERVICE_BWNOTIF;
9135 ++ }
9136 +
9137 + return services;
9138 + }
9139 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
9140 +index 7b1c81b899cdf..1905ee0297a4c 100644
9141 +--- a/drivers/pci/quirks.c
9142 ++++ b/drivers/pci/quirks.c
9143 +@@ -3241,6 +3241,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
9144 + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
9145 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
9146 + PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
9147 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256);
9148 +
9149 + /*
9150 + * Intel 5000 and 5100 Memory controllers have an erratum with read completion
9151 +diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
9152 +index 8b003c890b87b..c9f03418e71e0 100644
9153 +--- a/drivers/pci/syscall.c
9154 ++++ b/drivers/pci/syscall.c
9155 +@@ -22,8 +22,10 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
9156 + long err;
9157 + int cfg_ret;
9158 +
9159 ++ err = -EPERM;
9160 ++ dev = NULL;
9161 + if (!capable(CAP_SYS_ADMIN))
9162 +- return -EPERM;
9163 ++ goto error;
9164 +
9165 + err = -ENODEV;
9166 + dev = pci_get_domain_bus_and_slot(0, bus, dfn);
9167 +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
9168 +index 5a68e242f6b34..5cb018f988003 100644
9169 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
9170 ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
9171 +@@ -167,10 +167,14 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
9172 + PIN_GRP_GPIO("jtag", 20, 5, BIT(0), "jtag"),
9173 + PIN_GRP_GPIO("sdio0", 8, 3, BIT(1), "sdio"),
9174 + PIN_GRP_GPIO("emmc_nb", 27, 9, BIT(2), "emmc"),
9175 +- PIN_GRP_GPIO("pwm0", 11, 1, BIT(3), "pwm"),
9176 +- PIN_GRP_GPIO("pwm1", 12, 1, BIT(4), "pwm"),
9177 +- PIN_GRP_GPIO("pwm2", 13, 1, BIT(5), "pwm"),
9178 +- PIN_GRP_GPIO("pwm3", 14, 1, BIT(6), "pwm"),
9179 ++ PIN_GRP_GPIO_3("pwm0", 11, 1, BIT(3) | BIT(20), 0, BIT(20), BIT(3),
9180 ++ "pwm", "led"),
9181 ++ PIN_GRP_GPIO_3("pwm1", 12, 1, BIT(4) | BIT(21), 0, BIT(21), BIT(4),
9182 ++ "pwm", "led"),
9183 ++ PIN_GRP_GPIO_3("pwm2", 13, 1, BIT(5) | BIT(22), 0, BIT(22), BIT(5),
9184 ++ "pwm", "led"),
9185 ++ PIN_GRP_GPIO_3("pwm3", 14, 1, BIT(6) | BIT(23), 0, BIT(23), BIT(6),
9186 ++ "pwm", "led"),
9187 + PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
9188 + PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
9189 + PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
9190 +@@ -184,10 +188,6 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
9191 + PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
9192 + BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
9193 + 18, 2, "gpio", "uart"),
9194 +- PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
9195 +- PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
9196 +- PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
9197 +- PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
9198 + };
9199 +
9200 + static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
9201 +diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
9202 +index 983ba9865f772..263498be8e319 100644
9203 +--- a/drivers/pinctrl/pinctrl-ingenic.c
9204 ++++ b/drivers/pinctrl/pinctrl-ingenic.c
9205 +@@ -710,7 +710,7 @@ static const struct ingenic_chip_info jz4755_chip_info = {
9206 + };
9207 +
9208 + static const u32 jz4760_pull_ups[6] = {
9209 +- 0xffffffff, 0xfffcf3ff, 0xffffffff, 0xffffcfff, 0xfffffb7c, 0xfffff00f,
9210 ++ 0xffffffff, 0xfffcf3ff, 0xffffffff, 0xffffcfff, 0xfffffb7c, 0x0000000f,
9211 + };
9212 +
9213 + static const u32 jz4760_pull_downs[6] = {
9214 +@@ -936,11 +936,11 @@ static const struct ingenic_chip_info jz4760_chip_info = {
9215 + };
9216 +
9217 + static const u32 jz4770_pull_ups[6] = {
9218 +- 0x3fffffff, 0xfff0030c, 0xffffffff, 0xffff4fff, 0xfffffb7c, 0xffa7f00f,
9219 ++ 0x3fffffff, 0xfff0f3fc, 0xffffffff, 0xffff4fff, 0xfffffb7c, 0x0024f00f,
9220 + };
9221 +
9222 + static const u32 jz4770_pull_downs[6] = {
9223 +- 0x00000000, 0x000f0c03, 0x00000000, 0x0000b000, 0x00000483, 0x00580ff0,
9224 ++ 0x00000000, 0x000f0c03, 0x00000000, 0x0000b000, 0x00000483, 0x005b0ff0,
9225 + };
9226 +
9227 + static int jz4770_uart0_data_pins[] = { 0xa0, 0xa3, };
9228 +@@ -3441,17 +3441,17 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
9229 + {
9230 + if (jzpc->info->version >= ID_X2000) {
9231 + switch (bias) {
9232 +- case PIN_CONFIG_BIAS_PULL_UP:
9233 ++ case GPIO_PULL_UP:
9234 + ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false);
9235 + ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, true);
9236 + break;
9237 +
9238 +- case PIN_CONFIG_BIAS_PULL_DOWN:
9239 ++ case GPIO_PULL_DOWN:
9240 + ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, false);
9241 + ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, true);
9242 + break;
9243 +
9244 +- case PIN_CONFIG_BIAS_DISABLE:
9245 ++ case GPIO_PULL_DIS:
9246 + default:
9247 + ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, false);
9248 + ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false);
9249 +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
9250 +index 2c9c9835f375e..b1f6e4e8bcbb5 100644
9251 +--- a/drivers/pinctrl/pinctrl-single.c
9252 ++++ b/drivers/pinctrl/pinctrl-single.c
9253 +@@ -1221,6 +1221,7 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
9254 +
9255 + if (PCS_HAS_PINCONF) {
9256 + dev_err(pcs->dev, "pinconf not supported\n");
9257 ++ res = -ENOTSUPP;
9258 + goto free_pingroups;
9259 + }
9260 +
9261 +diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
9262 +index 008c83107a3ca..5fa2488fae87a 100644
9263 +--- a/drivers/pinctrl/pinctrl-stmfx.c
9264 ++++ b/drivers/pinctrl/pinctrl-stmfx.c
9265 +@@ -566,7 +566,7 @@ static irqreturn_t stmfx_pinctrl_irq_thread_fn(int irq, void *dev_id)
9266 + u8 pending[NR_GPIO_REGS];
9267 + u8 src[NR_GPIO_REGS] = {0, 0, 0};
9268 + unsigned long n, status;
9269 +- int ret;
9270 ++ int i, ret;
9271 +
9272 + ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_IRQ_GPI_PENDING,
9273 + &pending, NR_GPIO_REGS);
9274 +@@ -576,7 +576,9 @@ static irqreturn_t stmfx_pinctrl_irq_thread_fn(int irq, void *dev_id)
9275 + regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
9276 + src, NR_GPIO_REGS);
9277 +
9278 +- status = *(unsigned long *)pending;
9279 ++ BUILD_BUG_ON(NR_GPIO_REGS > sizeof(status));
9280 ++ for (i = 0, status = 0; i < NR_GPIO_REGS; i++)
9281 ++ status |= (unsigned long)pending[i] << (i * 8);
9282 + for_each_set_bit(n, &status, gc->ngpio) {
9283 + handle_nested_irq(irq_find_mapping(gc->irq.domain, n));
9284 + stmfx_pinctrl_irq_toggle_trigger(pctl, n);
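
[ The stmfx fix above replaces a cast that was both alignment- and endian-dependent with an explicit little-endian byte assembly; a stand-alone sketch of the portable form. NR_GPIO_REGS is taken as 3, matching the zero-initializer above; the sample bytes are illustrative. ]

#include <stdint.h>
#include <stdio.h>

#define NR_GPIO_REGS 3	/* per the u8 pending[NR_GPIO_REGS] above */

int main(void)
{
	uint8_t pending[NR_GPIO_REGS] = { 0x01, 0x80, 0x10 };
	unsigned long status = 0;
	int i;

	/* Portable: byte i contributes bits [8*i, 8*i+7], regardless
	 * of host endianness or the array's alignment. */
	for (i = 0; i < NR_GPIO_REGS; i++)
		status |= (unsigned long)pending[i] << (i * 8);

	printf("status=0x%lx\n", status);	/* 0x108001 */
	return 0;
}
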
9285 +diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
9286 +index 376876bd66058..2975b4369f32f 100644
9287 +--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
9288 ++++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
9289 +@@ -918,7 +918,7 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
9290 + pin_bank->grange.pin_base = drvdata->pin_base
9291 + + pin_bank->pin_base;
9292 + pin_bank->grange.base = pin_bank->grange.pin_base;
9293 +- pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
9294 ++ pin_bank->grange.npins = pin_bank->nr_pins;
9295 + pin_bank->grange.gc = &pin_bank->gpio_chip;
9296 + pinctrl_add_gpio_range(drvdata->pctl_dev, &pin_bank->grange);
9297 + }
9298 +diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
9299 +index aa7f7aa772971..a7404d69b2d32 100644
9300 +--- a/drivers/platform/chrome/cros_ec_proto.c
9301 ++++ b/drivers/platform/chrome/cros_ec_proto.c
9302 +@@ -279,6 +279,15 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
9303 + msg->insize = sizeof(struct ec_response_get_protocol_info);
9304 +
9305 + ret = send_command(ec_dev, msg);
9306 ++ /*
9307 ++ * Send the command once again when a timeout occurs. The
9308 ++ * Fingerprint MCU (FPMCU) is restarted during system boot, which
9309 ++ * introduces a small window in which the FPMCU won't respond to any
9310 ++ * messages sent by the kernel. There is no need to wait before the
9311 ++ * next attempt because we already waited at least EC_MSG_DEADLINE_MS.
9312 ++ */
9313 ++ if (ret == -ETIMEDOUT)
9314 ++ ret = send_command(ec_dev, msg);
9315 +
9316 + if (ret < 0) {
9317 + dev_dbg(ec_dev->dev,
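
[ The cros_ec change above is a single bounded retry keyed on -ETIMEDOUT, with no extra delay because the first call already blocked for its full deadline; a minimal sketch of that shape. The failing-once stub and the wrapper are illustrative. ]

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-in that times out once, then succeeds. */
static int send_command(int *attempts)
{
	return (*attempts)++ == 0 ? -ETIMEDOUT : 0;
}

/* Retry exactly once, and only on timeout; other errors propagate.
 * No sleep between attempts: the first call is assumed to have
 * already blocked for its full deadline, as in the EC case above. */
static int send_with_retry(int *attempts)
{
	int ret = send_command(attempts);

	if (ret == -ETIMEDOUT)
		ret = send_command(attempts);
	return ret;
}

int main(void)
{
	int attempts = 0;
	int ret = send_with_retry(&attempts);

	printf("ret=%d after %d attempt(s)\n", ret, attempts);
	return 0;
}
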
9318 +diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
9319 +index 33f8237727335..8e761991455af 100644
9320 +--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
9321 ++++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
9322 +@@ -69,6 +69,7 @@ static int run_smbios_call(struct wmi_device *wdev)
9323 + if (obj->type == ACPI_TYPE_INTEGER)
9324 + dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n",
9325 + obj->integer.value);
9326 ++ kfree(output.pointer);
9327 + return -EIO;
9328 + }
9329 + memcpy(&priv->buf->std, obj->buffer.pointer, obj->buffer.length);
9330 +diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
9331 +index 215e77d3b6d93..622bdae6182c0 100644
9332 +--- a/drivers/power/supply/max17042_battery.c
9333 ++++ b/drivers/power/supply/max17042_battery.c
9334 +@@ -869,8 +869,12 @@ static irqreturn_t max17042_thread_handler(int id, void *dev)
9335 + {
9336 + struct max17042_chip *chip = dev;
9337 + u32 val;
9338 ++ int ret;
9339 ++
9340 ++ ret = regmap_read(chip->regmap, MAX17042_STATUS, &val);
9341 ++ if (ret)
9342 ++ return IRQ_HANDLED;
9343 +
9344 +- regmap_read(chip->regmap, MAX17042_STATUS, &val);
9345 + if ((val & STATUS_INTR_SOCMIN_BIT) ||
9346 + (val & STATUS_INTR_SOCMAX_BIT)) {
9347 + dev_info(&chip->client->dev, "SOC threshold INTR\n");
9348 +diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
9349 +index bc89c62ccb9b5..75e4c2d777b9c 100644
9350 +--- a/drivers/rtc/rtc-tps65910.c
9351 ++++ b/drivers/rtc/rtc-tps65910.c
9352 +@@ -467,6 +467,6 @@ static struct platform_driver tps65910_rtc_driver = {
9353 + };
9354 +
9355 + module_platform_driver(tps65910_rtc_driver);
9356 +-MODULE_ALIAS("platform:rtc-tps65910");
9357 ++MODULE_ALIAS("platform:tps65910-rtc");
9358 + MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@××××××.com>");
9359 + MODULE_LICENSE("GPL");
9360 +diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
9361 +index 307ce7ff5ca44..2d672aa27a549 100644
9362 +--- a/drivers/s390/cio/qdio_main.c
9363 ++++ b/drivers/s390/cio/qdio_main.c
9364 +@@ -886,6 +886,33 @@ static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
9365 + }
9366 + }
9367 +
9368 ++static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
9369 ++{
9370 ++ struct ccw_device *cdev = irq->cdev;
9371 ++ int rc;
9372 ++
9373 ++ spin_lock_irq(get_ccwdev_lock(cdev));
9374 ++ qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
9375 ++ if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
9376 ++ rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
9377 ++ else
9378 ++ /* default behaviour is halt */
9379 ++ rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
9380 ++ spin_unlock_irq(get_ccwdev_lock(cdev));
9381 ++ if (rc) {
9382 ++ DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
9383 ++ DBF_ERROR("rc:%4d", rc);
9384 ++ return rc;
9385 ++ }
9386 ++
9387 ++ wait_event_interruptible_timeout(cdev->private->wait_q,
9388 ++ irq->state == QDIO_IRQ_STATE_INACTIVE ||
9389 ++ irq->state == QDIO_IRQ_STATE_ERR,
9390 ++ 10 * HZ);
9391 ++
9392 ++ return 0;
9393 ++}
9394 ++
9395 + /**
9396 + * qdio_shutdown - shut down a qdio subchannel
9397 + * @cdev: associated ccw device
9398 +@@ -923,27 +950,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
9399 + qdio_shutdown_queues(irq_ptr);
9400 + qdio_shutdown_debug_entries(irq_ptr);
9401 +
9402 +- /* cleanup subchannel */
9403 +- spin_lock_irq(get_ccwdev_lock(cdev));
9404 +- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
9405 +- if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
9406 +- rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
9407 +- else
9408 +- /* default behaviour is halt */
9409 +- rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
9410 +- spin_unlock_irq(get_ccwdev_lock(cdev));
9411 +- if (rc) {
9412 +- DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
9413 +- DBF_ERROR("rc:%4d", rc);
9414 +- goto no_cleanup;
9415 +- }
9416 +-
9417 +- wait_event_interruptible_timeout(cdev->private->wait_q,
9418 +- irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
9419 +- irq_ptr->state == QDIO_IRQ_STATE_ERR,
9420 +- 10 * HZ);
9421 +-
9422 +-no_cleanup:
9423 ++ rc = qdio_cancel_ccw(irq_ptr, how);
9424 + qdio_shutdown_thinint(irq_ptr);
9425 + qdio_shutdown_irq(irq_ptr);
9426 +
9427 +@@ -1079,6 +1086,7 @@ int qdio_establish(struct ccw_device *cdev,
9428 + {
9429 + struct qdio_irq *irq_ptr = cdev->private->qdio_data;
9430 + struct subchannel_id schid;
9431 ++ long timeout;
9432 + int rc;
9433 +
9434 + ccw_device_get_schid(cdev, &schid);
9435 +@@ -1107,11 +1115,8 @@ int qdio_establish(struct ccw_device *cdev,
9436 + qdio_setup_irq(irq_ptr, init_data);
9437 +
9438 + rc = qdio_establish_thinint(irq_ptr);
9439 +- if (rc) {
9440 +- qdio_shutdown_irq(irq_ptr);
9441 +- mutex_unlock(&irq_ptr->setup_mutex);
9442 +- return rc;
9443 +- }
9444 ++ if (rc)
9445 ++ goto err_thinint;
9446 +
9447 + /* establish q */
9448 + irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
9449 +@@ -1127,15 +1132,16 @@ int qdio_establish(struct ccw_device *cdev,
9450 + if (rc) {
9451 + DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
9452 + DBF_ERROR("rc:%4x", rc);
9453 +- qdio_shutdown_thinint(irq_ptr);
9454 +- qdio_shutdown_irq(irq_ptr);
9455 +- mutex_unlock(&irq_ptr->setup_mutex);
9456 +- return rc;
9457 ++ goto err_ccw_start;
9458 + }
9459 +
9460 +- wait_event_interruptible_timeout(cdev->private->wait_q,
9461 +- irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
9462 +- irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
9463 ++ timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
9464 ++ irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
9465 ++ irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
9466 ++ if (timeout <= 0) {
9467 ++ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
9468 ++ goto err_ccw_timeout;
9469 ++ }
9470 +
9471 + if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
9472 + mutex_unlock(&irq_ptr->setup_mutex);
9473 +@@ -1152,6 +1158,16 @@ int qdio_establish(struct ccw_device *cdev,
9474 + qdio_print_subchannel_info(irq_ptr);
9475 + qdio_setup_debug_entries(irq_ptr);
9476 + return 0;
9477 ++
9478 ++err_ccw_timeout:
9479 ++ qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
9480 ++err_ccw_start:
9481 ++ qdio_shutdown_thinint(irq_ptr);
9482 ++err_thinint:
9483 ++ qdio_shutdown_irq(irq_ptr);
9484 ++ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
9485 ++ mutex_unlock(&irq_ptr->setup_mutex);
9486 ++ return rc;
9487 + }
9488 + EXPORT_SYMBOL_GPL(qdio_establish);
9489 +
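
[ qdio_establish() above is reshaped into the usual goto-unwind error handling, where each label undoes exactly the steps that succeeded before the failure, in reverse order; a minimal sketch of the shape, with all setup steps illustrative. ]

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* illustrative failure */

static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

/* Each error label tears down only what was already set up, in
 * reverse order -- the same shape as err_ccw_timeout/err_ccw_start/
 * err_thinint above. */
static int establish(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto err_a;
	rc = step_b();
	if (rc)
		goto err_b;
	rc = step_c();
	if (rc)
		goto err_c;
	return 0;

err_c:
	undo_b();
err_b:
	undo_a();
err_a:
	return rc;
}

int main(void)
{
	printf("establish: %d\n", establish());
	return 0;
}
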
9490 +diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
9491 +index adddcd5899416..0df93d2cd3c36 100644
9492 +--- a/drivers/scsi/BusLogic.c
9493 ++++ b/drivers/scsi/BusLogic.c
9494 +@@ -1711,7 +1711,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
9495 + if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
9496 + blogic_info(" DMA Channel: None, ", adapter);
9497 + if (adapter->bios_addr > 0)
9498 +- blogic_info("BIOS Address: 0x%lX, ", adapter,
9499 ++ blogic_info("BIOS Address: 0x%X, ", adapter,
9500 + adapter->bios_addr);
9501 + else
9502 + blogic_info("BIOS Address: None, ", adapter);
9503 +@@ -3451,7 +3451,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
9504 + if (buf[0] != '\n' || len > 1)
9505 + printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf);
9506 + } else
9507 +- printk("%s", buf);
9508 ++ pr_cont("%s", buf);
9509 + } else {
9510 + if (begin) {
9511 + if (adapter != NULL && adapter->adapter_initd)
9512 +@@ -3459,7 +3459,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
9513 + else
9514 + printk("%s%s", blogic_msglevelmap[msglevel], buf);
9515 + } else
9516 +- printk("%s", buf);
9517 ++ pr_cont("%s", buf);
9518 + }
9519 + begin = (buf[len - 1] == '\n');
9520 + }
9521 +diff --git a/drivers/scsi/pcmcia/fdomain_cs.c b/drivers/scsi/pcmcia/fdomain_cs.c
9522 +index e42acf314d068..33df6a9ba9b5f 100644
9523 +--- a/drivers/scsi/pcmcia/fdomain_cs.c
9524 ++++ b/drivers/scsi/pcmcia/fdomain_cs.c
9525 +@@ -45,8 +45,10 @@ static int fdomain_probe(struct pcmcia_device *link)
9526 + goto fail_disable;
9527 +
9528 + if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE,
9529 +- "fdomain_cs"))
9530 ++ "fdomain_cs")) {
9531 ++ ret = -EBUSY;
9532 + goto fail_disable;
9533 ++ }
9534 +
9535 + sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev);
9536 + if (!sh) {
9537 +diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
9538 +index b92570a7c309d..98981a61b0122 100644
9539 +--- a/drivers/scsi/qedf/qedf_main.c
9540 ++++ b/drivers/scsi/qedf/qedf_main.c
9541 +@@ -3000,7 +3000,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
9542 + {
9543 + u32 *list;
9544 + int i;
9545 +- int status = 0, rc;
9546 ++ int status;
9547 + u32 *pbl;
9548 + dma_addr_t page;
9549 + int num_pages;
9550 +@@ -3012,7 +3012,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
9551 + */
9552 + if (!qedf->num_queues) {
9553 + QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
9554 +- return 1;
9555 ++ return -ENOMEM;
9556 + }
9557 +
9558 + /*
9559 +@@ -3020,7 +3020,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
9560 + * addresses of our queues
9561 + */
9562 + if (!qedf->p_cpuq) {
9563 +- status = 1;
9564 ++ status = -EINVAL;
9565 + QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
9566 + goto mem_alloc_failure;
9567 + }
9568 +@@ -3036,8 +3036,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
9569 + "qedf->global_queues=%p.\n", qedf->global_queues);
9570 +
9571 + /* Allocate DMA coherent buffers for BDQ */
9572 +- rc = qedf_alloc_bdq(qedf);
9573 +- if (rc) {
9574 ++ status = qedf_alloc_bdq(qedf);
9575 ++ if (status) {
9576 + QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
9577 + goto mem_alloc_failure;
9578 + }
9579 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
9580 +index edf9154327048..99e1a323807d1 100644
9581 +--- a/drivers/scsi/qedi/qedi_main.c
9582 ++++ b/drivers/scsi/qedi/qedi_main.c
9583 +@@ -1621,7 +1621,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
9584 + {
9585 + u32 *list;
9586 + int i;
9587 +- int status = 0, rc;
9588 ++ int status;
9589 + u32 *pbl;
9590 + dma_addr_t page;
9591 + int num_pages;
9592 +@@ -1632,14 +1632,14 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
9593 + */
9594 + if (!qedi->num_queues) {
9595 + QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
9596 +- return 1;
9597 ++ return -ENOMEM;
9598 + }
9599 +
9600 + /* Make sure we allocated the PBL that will contain the physical
9601 + * addresses of our queues
9602 + */
9603 + if (!qedi->p_cpuq) {
9604 +- status = 1;
9605 ++ status = -EINVAL;
9606 + goto mem_alloc_failure;
9607 + }
9608 +
9609 +@@ -1654,13 +1654,13 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
9610 + "qedi->global_queues=%p.\n", qedi->global_queues);
9611 +
9612 + /* Allocate DMA coherent buffers for BDQ */
9613 +- rc = qedi_alloc_bdq(qedi);
9614 +- if (rc)
9615 ++ status = qedi_alloc_bdq(qedi);
9616 ++ if (status)
9617 + goto mem_alloc_failure;
9618 +
9619 + /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
9620 +- rc = qedi_alloc_nvm_iscsi_cfg(qedi);
9621 +- if (rc)
9622 ++ status = qedi_alloc_nvm_iscsi_cfg(qedi);
9623 ++ if (status)
9624 + goto mem_alloc_failure;
9625 +
9626 + /* Allocate a CQ and an associated PBL for each MSI-X
9627 +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
9628 +index 0cacb667a88b9..adc9129972116 100644
9629 +--- a/drivers/scsi/qla2xxx/qla_nvme.c
9630 ++++ b/drivers/scsi/qla2xxx/qla_nvme.c
9631 +@@ -91,8 +91,9 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
9632 + struct qla_hw_data *ha;
9633 + struct qla_qpair *qpair;
9634 +
9635 +- if (!qidx)
9636 +- qidx++;
9637 ++ /* Map admin queue and 1st IO queue to index 0 */
9638 ++ if (qidx)
9639 ++ qidx--;
9640 +
9641 + vha = (struct scsi_qla_host *)lport->private;
9642 + ha = vha->hw;
9643 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
9644 +index 4eab564ea6a05..df4199fd44d6e 100644
9645 +--- a/drivers/scsi/qla2xxx/qla_os.c
9646 ++++ b/drivers/scsi/qla2xxx/qla_os.c
9647 +@@ -14,6 +14,7 @@
9648 + #include <linux/slab.h>
9649 + #include <linux/blk-mq-pci.h>
9650 + #include <linux/refcount.h>
9651 ++#include <linux/crash_dump.h>
9652 +
9653 + #include <scsi/scsi_tcq.h>
9654 + #include <scsi/scsicam.h>
9655 +@@ -2818,6 +2819,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
9656 + return ret;
9657 + }
9658 +
9659 ++ if (is_kdump_kernel()) {
9660 ++ ql2xmqsupport = 0;
9661 ++ ql2xallocfwdump = 0;
9662 ++ }
9663 ++
9664 + /* This may fail but that's ok */
9665 + pci_enable_pcie_error_reporting(pdev);
9666 +
9667 +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
9668 +index 5db16509b6e1c..f573517e8f6e4 100644
9669 +--- a/drivers/scsi/smartpqi/smartpqi_init.c
9670 ++++ b/drivers/scsi/smartpqi/smartpqi_init.c
9671 +@@ -1322,6 +1322,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
9672 + "requested %u bytes, received %u bytes\n",
9673 + raid_map_size,
9674 + get_unaligned_le32(&raid_map->structure_size));
9675 ++ rc = -EINVAL;
9676 + goto error;
9677 + }
9678 + }
9679 +diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
9680 +index 70647eacf1953..3e5690c45e63b 100644
9681 +--- a/drivers/scsi/ufs/ufs-exynos.c
9682 ++++ b/drivers/scsi/ufs/ufs-exynos.c
9683 +@@ -259,7 +259,7 @@ static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
9684 + struct ufs_hba *hba = ufs->hba;
9685 + struct list_head *head = &hba->clk_list_head;
9686 + struct ufs_clk_info *clki;
9687 +- u32 pclk_rate;
9688 ++ unsigned long pclk_rate;
9689 + u32 f_min, f_max;
9690 + u8 div = 0;
9691 + int ret = 0;
9692 +@@ -298,7 +298,7 @@ static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
9693 + }
9694 +
9695 + if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
9696 +- dev_err(hba->dev, "not available pclk range %d\n", pclk_rate);
9697 ++ dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
9698 + ret = -EINVAL;
9699 + goto out;
9700 + }
9701 +diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h
9702 +index 06ee565f7eb02..a5804e8eb3586 100644
9703 +--- a/drivers/scsi/ufs/ufs-exynos.h
9704 ++++ b/drivers/scsi/ufs/ufs-exynos.h
9705 +@@ -184,7 +184,7 @@ struct exynos_ufs {
9706 + u32 pclk_div;
9707 + u32 pclk_avail_min;
9708 + u32 pclk_avail_max;
9709 +- u32 mclk_rate;
9710 ++ unsigned long mclk_rate;
9711 + int avail_ln_rx;
9712 + int avail_ln_tx;
9713 + int rx_sel_idx;
9714 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
9715 +index 72fd41bfbd54b..90837e54c2fea 100644
9716 +--- a/drivers/scsi/ufs/ufshcd.c
9717 ++++ b/drivers/scsi/ufs/ufshcd.c
9718 +@@ -3326,9 +3326,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
9719 +
9720 + if (is_kmalloc) {
9721 + /* Make sure we don't copy more data than available */
9722 +- if (param_offset + param_size > buff_len)
9723 +- param_size = buff_len - param_offset;
9724 +- memcpy(param_read_buf, &desc_buf[param_offset], param_size);
9725 ++ if (param_offset >= buff_len)
9726 ++ ret = -EINVAL;
9727 ++ else
9728 ++ memcpy(param_read_buf, &desc_buf[param_offset],
9729 ++ min_t(u32, param_size, buff_len - param_offset));
9730 + }
9731 + out:
9732 + if (is_kmalloc)
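
[ The ufshcd fix above first rejects an offset at or past the end of the buffer and only then clamps the copy length to the bytes that remain; a stand-alone sketch of that bounds policy, with names and sizes illustrative. ]

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Copy at most 'want' bytes starting at 'off' from a 'len'-byte
 * descriptor: an offset at or past the end is an error, and a short
 * tail is clamped rather than read past the buffer. */
static int read_param(const unsigned char *buf, size_t len,
		      size_t off, size_t want, unsigned char *out)
{
	size_t n;

	if (off >= len)
		return -EINVAL;
	n = want < len - off ? want : len - off;
	memcpy(out, buf + off, n);
	return (int)n;
}

int main(void)
{
	unsigned char desc[8] = "ABCDEFG";	/* 7 bytes + NUL */
	unsigned char out[8];

	/* Ask for 4 bytes at offset 6: only 2 remain, so 2 are copied. */
	printf("copied %d bytes\n", read_param(desc, sizeof(desc), 6, 4, out));
	return 0;
}
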
9733 +diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
9734 +index c557ffd0992c7..55e46fa6cf424 100644
9735 +--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
9736 ++++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
9737 +@@ -51,7 +51,7 @@ static int aspeed_lpc_ctrl_mmap(struct file *file, struct vm_area_struct *vma)
9738 + unsigned long vsize = vma->vm_end - vma->vm_start;
9739 + pgprot_t prot = vma->vm_page_prot;
9740 +
9741 +- if (vma->vm_pgoff + vsize > lpc_ctrl->mem_base + lpc_ctrl->mem_size)
9742 ++ if (vma->vm_pgoff + vma_pages(vma) > lpc_ctrl->mem_size >> PAGE_SHIFT)
9743 + return -EINVAL;
9744 +
9745 + /* ast2400/2500 AHB accesses are not cache coherent */
9746 +diff --git a/drivers/soc/aspeed/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
9747 +index b60fbeaffcbd0..20b5fb2a207cc 100644
9748 +--- a/drivers/soc/aspeed/aspeed-p2a-ctrl.c
9749 ++++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
9750 +@@ -110,7 +110,7 @@ static int aspeed_p2a_mmap(struct file *file, struct vm_area_struct *vma)
9751 + vsize = vma->vm_end - vma->vm_start;
9752 + prot = vma->vm_page_prot;
9753 +
9754 +- if (vma->vm_pgoff + vsize > ctrl->mem_base + ctrl->mem_size)
9755 ++ if (vma->vm_pgoff + vma_pages(vma) > ctrl->mem_size >> PAGE_SHIFT)
9756 + return -EINVAL;
9757 +
9758 + /* ast2400/2500 AHB accesses are not cache coherent */
9759 +diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
9760 +index 5f3e2bf0c40bc..9e2b81bd38db1 100644
9761 +--- a/drivers/soc/mediatek/mtk-mmsys.h
9762 ++++ b/drivers/soc/mediatek/mtk-mmsys.h
9763 +@@ -262,6 +262,10 @@ static const struct mtk_mmsys_routes mmsys_default_routing_table[] = {
9764 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3,
9765 + DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_MASK,
9766 + DSI3_SEL_IN_RDMA2
9767 ++ }, {
9768 ++ DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
9769 ++ DISP_REG_CONFIG_DISP_UFOE_MOUT_EN, UFOE_MOUT_EN_DSI0,
9770 ++ UFOE_MOUT_EN_DSI0
9771 + }
9772 + };
9773 +
9774 +diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
9775 +index 934fcc4d2b057..7b6b94332510a 100644
9776 +--- a/drivers/soc/qcom/qcom_aoss.c
9777 ++++ b/drivers/soc/qcom/qcom_aoss.c
9778 +@@ -476,12 +476,12 @@ static int qmp_cooling_device_add(struct qmp *qmp,
9779 + static int qmp_cooling_devices_register(struct qmp *qmp)
9780 + {
9781 + struct device_node *np, *child;
9782 +- int count = QMP_NUM_COOLING_RESOURCES;
9783 ++ int count = 0;
9784 + int ret;
9785 +
9786 + np = qmp->dev->of_node;
9787 +
9788 +- qmp->cooling_devs = devm_kcalloc(qmp->dev, count,
9789 ++ qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES,
9790 + sizeof(*qmp->cooling_devs),
9791 + GFP_KERNEL);
9792 +
9793 +@@ -497,12 +497,16 @@ static int qmp_cooling_devices_register(struct qmp *qmp)
9794 + goto unroll;
9795 + }
9796 +
9797 ++ if (!count)
9798 ++ devm_kfree(qmp->dev, qmp->cooling_devs);
9799 ++
9800 + return 0;
9801 +
9802 + unroll:
9803 + while (--count >= 0)
9804 + thermal_cooling_device_unregister
9805 + (qmp->cooling_devs[count].cdev);
9806 ++ devm_kfree(qmp->dev, qmp->cooling_devs);
9807 +
9808 + return ret;
9809 + }
9810 +diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
9811 +index fd95f94630b1c..c03d51ad40bf5 100644
9812 +--- a/drivers/soundwire/intel.c
9813 ++++ b/drivers/soundwire/intel.c
9814 +@@ -537,12 +537,14 @@ static int intel_link_power_down(struct sdw_intel *sdw)
9815 +
9816 + mutex_lock(sdw->link_res->shim_lock);
9817 +
9818 +- intel_shim_master_ip_to_glue(sdw);
9819 +-
9820 + if (!(*shim_mask & BIT(link_id)))
9821 + dev_err(sdw->cdns.dev,
9822 + "%s: Unbalanced power-up/down calls\n", __func__);
9823 +
9824 ++ sdw->cdns.link_up = false;
9825 ++
9826 ++ intel_shim_master_ip_to_glue(sdw);
9827 ++
9828 + *shim_mask &= ~BIT(link_id);
9829 +
9830 + if (!*shim_mask) {
9831 +@@ -559,18 +561,19 @@ static int intel_link_power_down(struct sdw_intel *sdw)
9832 + link_control &= spa_mask;
9833 +
9834 + ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
9835 ++ if (ret < 0) {
9836 ++ dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
9837 ++
9838 ++ /*
9839 ++ * we leave the sdw->cdns.link_up flag as false since we've disabled
9840 ++ * the link at this point and cannot handle interrupts any longer.
9841 ++ */
9842 ++ }
9843 + }
9844 +
9845 + mutex_unlock(sdw->link_res->shim_lock);
9846 +
9847 +- if (ret < 0) {
9848 +- dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
9849 +-
9850 +- return ret;
9851 +- }
9852 +-
9853 +- sdw->cdns.link_up = false;
9854 +- return 0;
9855 ++ return ret;
9856 + }
9857 +
9858 + static void intel_shim_sync_arm(struct sdw_intel *sdw)
9859 +diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
9860 +index 87f8829c39952..829770b8ec74c 100644
9861 +--- a/drivers/spi/spi-fsi.c
9862 ++++ b/drivers/spi/spi-fsi.c
9863 +@@ -25,16 +25,11 @@
9864 +
9865 + #define SPI_FSI_BASE 0x70000
9866 + #define SPI_FSI_INIT_TIMEOUT_MS 1000
9867 +-#define SPI_FSI_MAX_XFR_SIZE 2048
9868 +-#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED 8
9869 ++#define SPI_FSI_MAX_RX_SIZE 8
9870 ++#define SPI_FSI_MAX_TX_SIZE 40
9871 +
9872 + #define SPI_FSI_ERROR 0x0
9873 + #define SPI_FSI_COUNTER_CFG 0x1
9874 +-#define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32)
9875 +-#define SPI_FSI_COUNTER_CFG_N2_RX BIT_ULL(8)
9876 +-#define SPI_FSI_COUNTER_CFG_N2_TX BIT_ULL(9)
9877 +-#define SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
9878 +-#define SPI_FSI_COUNTER_CFG_N2_RELOAD BIT_ULL(11)
9879 + #define SPI_FSI_CFG1 0x2
9880 + #define SPI_FSI_CLOCK_CFG 0x3
9881 + #define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
9882 +@@ -76,8 +71,6 @@ struct fsi_spi {
9883 + struct device *dev; /* SPI controller device */
9884 + struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
9885 + u32 base;
9886 +- size_t max_xfr_size;
9887 +- bool restricted;
9888 + };
9889 +
9890 + struct fsi_spi_sequence {
9891 +@@ -241,7 +234,7 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
9892 + return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
9893 + }
9894 +
9895 +-static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
9896 ++static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
9897 + {
9898 + /*
9899 + * Add the next byte of instruction to the 8-byte sequence register.
9900 +@@ -251,8 +244,6 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
9901 + */
9902 + seq->data |= (u64)val << seq->bit;
9903 + seq->bit -= 8;
9904 +-
9905 +- return ((64 - seq->bit) / 8) - 2;
9906 + }
9907 +
9908 + static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
9909 +@@ -261,71 +252,11 @@ static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
9910 + seq->data = 0ULL;
9911 + }
9912 +
9913 +-static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
9914 +- struct fsi_spi_sequence *seq,
9915 +- struct spi_transfer *transfer)
9916 +-{
9917 +- int loops;
9918 +- int idx;
9919 +- int rc;
9920 +- u8 val = 0;
9921 +- u8 len = min(transfer->len, 8U);
9922 +- u8 rem = transfer->len % len;
9923 +-
9924 +- loops = transfer->len / len;
9925 +-
9926 +- if (transfer->tx_buf) {
9927 +- val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
9928 +- idx = fsi_spi_sequence_add(seq, val);
9929 +-
9930 +- if (rem)
9931 +- rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
9932 +- } else if (transfer->rx_buf) {
9933 +- val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
9934 +- idx = fsi_spi_sequence_add(seq, val);
9935 +-
9936 +- if (rem)
9937 +- rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
9938 +- } else {
9939 +- return -EINVAL;
9940 +- }
9941 +-
9942 +- if (ctx->restricted && loops > 1) {
9943 +- dev_warn(ctx->dev,
9944 +- "Transfer too large; no branches permitted.\n");
9945 +- return -EINVAL;
9946 +- }
9947 +-
9948 +- if (loops > 1) {
9949 +- u64 cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
9950 +-
9951 +- fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
9952 +-
9953 +- if (transfer->rx_buf)
9954 +- cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
9955 +- SPI_FSI_COUNTER_CFG_N2_TX |
9956 +- SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
9957 +- SPI_FSI_COUNTER_CFG_N2_RELOAD;
9958 +-
9959 +- rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
9960 +- if (rc)
9961 +- return rc;
9962 +- } else {
9963 +- fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
9964 +- }
9965 +-
9966 +- if (rem)
9967 +- fsi_spi_sequence_add(seq, rem);
9968 +-
9969 +- return 0;
9970 +-}
9971 +-
9972 + static int fsi_spi_transfer_data(struct fsi_spi *ctx,
9973 + struct spi_transfer *transfer)
9974 + {
9975 + int rc = 0;
9976 + u64 status = 0ULL;
9977 +- u64 cfg = 0ULL;
9978 +
9979 + if (transfer->tx_buf) {
9980 + int nb;
9981 +@@ -363,16 +294,6 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
9982 + u64 in = 0ULL;
9983 + u8 *rx = transfer->rx_buf;
9984 +
9985 +- rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
9986 +- if (rc)
9987 +- return rc;
9988 +-
9989 +- if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
9990 +- rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
9991 +- if (rc)
9992 +- return rc;
9993 +- }
9994 +-
9995 + while (transfer->len > recv) {
9996 + do {
9997 + rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
9998 +@@ -439,6 +360,10 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
9999 + }
10000 + } while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
10001 +
10002 ++ rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
10003 ++ if (rc)
10004 ++ return rc;
10005 ++
10006 + rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
10007 + if (rc)
10008 + return rc;
10009 +@@ -459,6 +384,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
10010 + {
10011 + int rc;
10012 + u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
10013 ++ unsigned int len;
10014 + struct spi_transfer *transfer;
10015 + struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
10016 +
10017 +@@ -471,8 +397,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
10018 + struct spi_transfer *next = NULL;
10019 +
10020 + /* Sequencer must do shift out (tx) first. */
10021 +- if (!transfer->tx_buf ||
10022 +- transfer->len > (ctx->max_xfr_size + 8)) {
10023 ++ if (!transfer->tx_buf || transfer->len > SPI_FSI_MAX_TX_SIZE) {
10024 + rc = -EINVAL;
10025 + goto error;
10026 + }
10027 +@@ -486,9 +411,13 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
10028 + fsi_spi_sequence_init(&seq);
10029 + fsi_spi_sequence_add(&seq, seq_slave);
10030 +
10031 +- rc = fsi_spi_sequence_transfer(ctx, &seq, transfer);
10032 +- if (rc)
10033 +- goto error;
10034 ++ len = transfer->len;
10035 ++ while (len > 8) {
10036 ++ fsi_spi_sequence_add(&seq,
10037 ++ SPI_FSI_SEQUENCE_SHIFT_OUT(8));
10038 ++ len -= 8;
10039 ++ }
10040 ++ fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
10041 +
10042 + if (!list_is_last(&transfer->transfer_list,
10043 + &mesg->transfers)) {
10044 +@@ -496,7 +425,9 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
10045 +
10046 + /* Sequencer can only do shift in (rx) after tx. */
10047 + if (next->rx_buf) {
10048 +- if (next->len > ctx->max_xfr_size) {
10049 ++ u8 shift;
10050 ++
10051 ++ if (next->len > SPI_FSI_MAX_RX_SIZE) {
10052 + rc = -EINVAL;
10053 + goto error;
10054 + }
10055 +@@ -504,10 +435,8 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
10056 + dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
10057 + next->len);
10058 +
10059 +- rc = fsi_spi_sequence_transfer(ctx, &seq,
10060 +- next);
10061 +- if (rc)
10062 +- goto error;
10063 ++ shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
10064 ++ fsi_spi_sequence_add(&seq, shift);
10065 + } else {
10066 + next = NULL;
10067 + }
10068 +@@ -541,9 +470,7 @@ error:
10069 +
10070 + static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
10071 + {
10072 +- struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
10073 +-
10074 +- return ctx->max_xfr_size;
10075 ++ return SPI_FSI_MAX_RX_SIZE;
10076 + }
10077 +
10078 + static int fsi_spi_probe(struct device *dev)
10079 +@@ -582,14 +509,6 @@ static int fsi_spi_probe(struct device *dev)
10080 + ctx->fsi = fsi;
10081 + ctx->base = base + SPI_FSI_BASE;
10082 +
10083 +- if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
10084 +- ctx->restricted = true;
10085 +- ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
10086 +- } else {
10087 +- ctx->restricted = false;
10088 +- ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
10089 +- }
10090 +-
10091 + rc = devm_spi_register_controller(dev, ctlr);
10092 + if (rc)
10093 + spi_controller_put(ctlr);
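
[ The reworked spi-fsi transfer path above unrolls a tx of up to SPI_FSI_MAX_TX_SIZE bytes into full 8-byte shift-out ops plus one remainder op, replacing the removed loop/branch counter setup; a sketch of that unrolling. The op encoding here is purely illustrative, not the real SPI_FSI_SEQUENCE_SHIFT_OUT() value. ]

#include <stdio.h>

/* Illustrative op encoding; the real SPI_FSI_SEQUENCE_SHIFT_OUT()
 * macro is defined elsewhere in the driver. */
#define SHIFT_OUT(len) (0x30 | (len))

int main(void)
{
	unsigned int len = 21;	/* illustrative transfer length */

	/* Emit full 8-byte shifts, then one op for the remainder --
	 * the same shape as the while (len > 8) loop above. */
	while (len > 8) {
		printf("op 0x%02x (shift out 8)\n", SHIFT_OUT(8));
		len -= 8;
	}
	printf("op 0x%02x (shift out %u)\n", SHIFT_OUT(len), len);
	return 0;
}
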
10094 +diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
10095 +index cb6feb34dd401..f980af0373452 100644
10096 +--- a/drivers/staging/board/board.c
10097 ++++ b/drivers/staging/board/board.c
10098 +@@ -136,6 +136,7 @@ int __init board_staging_register_clock(const struct board_staging_clk *bsc)
10099 + static int board_staging_add_dev_domain(struct platform_device *pdev,
10100 + const char *domain)
10101 + {
10102 ++ struct device *dev = &pdev->dev;
10103 + struct of_phandle_args pd_args;
10104 + struct device_node *np;
10105 +
10106 +@@ -148,7 +149,11 @@ static int board_staging_add_dev_domain(struct platform_device *pdev,
10107 + pd_args.np = np;
10108 + pd_args.args_count = 0;
10109 +
10110 +- return of_genpd_add_device(&pd_args, &pdev->dev);
10111 ++ /* Initialization similar to device_pm_init_common() */
10112 ++ spin_lock_init(&dev->power.lock);
10113 ++ dev->power.early_init = true;
10114 ++
10115 ++ return of_genpd_add_device(&pd_args, dev);
10116 + }
10117 + #else
10118 + static inline int board_staging_add_dev_domain(struct platform_device *pdev,
10119 +diff --git a/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml b/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
10120 +index 3b23ad56b31ac..ef664b4458fb4 100644
10121 +--- a/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
10122 ++++ b/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
10123 +@@ -42,6 +42,8 @@ properties:
10124 + regulators:
10125 + type: object
10126 +
10127 ++ additionalProperties: false
10128 ++
10129 + properties:
10130 + '#address-cells':
10131 + const: 1
10132 +@@ -50,11 +52,13 @@ properties:
10133 + const: 0
10134 +
10135 + patternProperties:
10136 +- '^ldo[0-9]+@[0-9a-f]$':
10137 ++ '^(ldo|LDO)[0-9]+$':
10138 + type: object
10139 +
10140 + $ref: "/schemas/regulator/regulator.yaml#"
10141 +
10142 ++ unevaluatedProperties: false
10143 ++
10144 + required:
10145 + - compatible
10146 + - reg
10147 +diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
10148 +index cbc0032c16045..98d759e7cc957 100644
10149 +--- a/drivers/staging/ks7010/ks7010_sdio.c
10150 ++++ b/drivers/staging/ks7010/ks7010_sdio.c
10151 +@@ -939,9 +939,9 @@ static void ks7010_private_init(struct ks_wlan_private *priv,
10152 + memset(&priv->wstats, 0, sizeof(priv->wstats));
10153 +
10154 + /* sleep mode */
10155 ++ atomic_set(&priv->sleepstatus.status, 0);
10156 + atomic_set(&priv->sleepstatus.doze_request, 0);
10157 + atomic_set(&priv->sleepstatus.wakeup_request, 0);
10158 +- atomic_set(&priv->sleepstatus.wakeup_request, 0);
10159 +
10160 + trx_device_init(priv);
10161 + hostif_init(priv);
10162 +diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
10163 +index 0295e2e32d797..fa1bd99cd6f17 100644
10164 +--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
10165 ++++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
10166 +@@ -1763,7 +1763,8 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
10167 + if (err < 0)
10168 + goto register_entities_fail;
10169 + /* init atomisp wdts */
10170 +- if (init_atomisp_wdts(isp) != 0)
10171 ++ err = init_atomisp_wdts(isp);
10172 ++ if (err != 0)
10173 + goto wdt_work_queue_fail;
10174 +
10175 + /* save the iunit context only once after all the values are init'ed. */
10176 +@@ -1815,6 +1816,7 @@ request_irq_fail:
10177 + hmm_cleanup();
10178 + hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
10179 + hmm_pool_fail:
10180 ++ pm_runtime_get_noresume(&pdev->dev);
10181 + destroy_workqueue(isp->wdt_work_queue);
10182 + wdt_work_queue_fail:
10183 + atomisp_acc_cleanup(isp);
10184 +diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
10185 +index 57002ba701768..3cd90637ac63e 100644
10186 +--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
10187 ++++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
10188 +@@ -376,12 +376,17 @@ static void cfg_ref(struct hantro_ctx *ctx,
10189 + vb2_dst = hantro_get_dst_buf(ctx);
10190 +
10191 + ref = hantro_get_ref(ctx, hdr->last_frame_ts);
10192 +- if (!ref)
10193 ++ if (!ref) {
10194 ++ vpu_debug(0, "failed to find last frame ts=%llu\n",
10195 ++ hdr->last_frame_ts);
10196 + ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
10197 ++ }
10198 + vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(0));
10199 +
10200 + ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
10201 +- WARN_ON(!ref && hdr->golden_frame_ts);
10202 ++ if (!ref && hdr->golden_frame_ts)
10203 ++ vpu_debug(0, "failed to find golden frame ts=%llu\n",
10204 ++ hdr->golden_frame_ts);
10205 + if (!ref)
10206 + ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
10207 + if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN)
10208 +@@ -389,7 +394,9 @@ static void cfg_ref(struct hantro_ctx *ctx,
10209 + vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(4));
10210 +
10211 + ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
10212 +- WARN_ON(!ref && hdr->alt_frame_ts);
10213 ++ if (!ref && hdr->alt_frame_ts)
10214 ++ vpu_debug(0, "failed to find alt frame ts=%llu\n",
10215 ++ hdr->alt_frame_ts);
10216 + if (!ref)
10217 + ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
10218 + if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT)
10219 +diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
10220 +index 8661a3cc1e6b5..3616192016053 100644
10221 +--- a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
10222 ++++ b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
10223 +@@ -453,12 +453,17 @@ static void cfg_ref(struct hantro_ctx *ctx,
10224 + vb2_dst = hantro_get_dst_buf(ctx);
10225 +
10226 + ref = hantro_get_ref(ctx, hdr->last_frame_ts);
10227 +- if (!ref)
10228 ++ if (!ref) {
10229 ++ vpu_debug(0, "failed to find last frame ts=%llu\n",
10230 ++ hdr->last_frame_ts);
10231 + ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
10232 ++ }
10233 + vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF0);
10234 +
10235 + ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
10236 +- WARN_ON(!ref && hdr->golden_frame_ts);
10237 ++ if (!ref && hdr->golden_frame_ts)
10238 ++ vpu_debug(0, "failed to find golden frame ts=%llu\n",
10239 ++ hdr->golden_frame_ts);
10240 + if (!ref)
10241 + ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
10242 + if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN)
10243 +@@ -466,7 +471,9 @@ static void cfg_ref(struct hantro_ctx *ctx,
10244 + vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(2));
10245 +
10246 + ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
10247 +- WARN_ON(!ref && hdr->alt_frame_ts);
10248 ++ if (!ref && hdr->alt_frame_ts)
10249 ++ vpu_debug(0, "failed to find alt frame ts=%llu\n",
10250 ++ hdr->alt_frame_ts);
10251 + if (!ref)
10252 + ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
10253 + if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT)
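
Both Hantro hunks above (G1 and RK3399) demote WARN_ON() to a debug message: a missing VP8 reference frame is a malformed-stream condition, not a kernel bug, and the decoder already recovers by falling back to the destination buffer. A standalone sketch of the log-and-fall-back pattern, with hypothetical names:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical lookup: returns 0 when the reference is not found. */
    static uint64_t find_ref(uint64_t ts)
    {
            (void)ts;
            return 0;
    }

    static uint64_t ref_or_fallback(uint64_t ts, uint64_t fallback)
    {
            uint64_t ref = find_ref(ts);

            /* Log at debug level and recover; a missing reference frame
             * is a stream problem, not a kernel bug worth a WARN_ON(). */
            if (!ref && ts)
                    fprintf(stderr, "failed to find frame ts=%llu\n",
                            (unsigned long long)ts);

            return ref ? ref : fallback;
    }

    int main(void)
    {
            printf("using address 0x%llx\n",
                   (unsigned long long)ref_or_fallback(42, 0x1000));
            return 0;
    }
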
10254 +diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
10255 +index f85a2f5f1413b..ad1bca3fe0471 100644
10256 +--- a/drivers/staging/media/imx/imx7-media-csi.c
10257 ++++ b/drivers/staging/media/imx/imx7-media-csi.c
10258 +@@ -361,6 +361,7 @@ static void imx7_csi_dma_unsetup_vb2_buf(struct imx7_csi *csi,
10259 +
10260 + vb->timestamp = ktime_get_ns();
10261 + vb2_buffer_done(vb, return_status);
10262 ++ csi->active_vb2_buf[i] = NULL;
10263 + }
10264 + }
10265 + }
10266 +@@ -386,9 +387,10 @@ static int imx7_csi_dma_setup(struct imx7_csi *csi)
10267 + return 0;
10268 + }
10269 +
10270 +-static void imx7_csi_dma_cleanup(struct imx7_csi *csi)
10271 ++static void imx7_csi_dma_cleanup(struct imx7_csi *csi,
10272 ++ enum vb2_buffer_state return_status)
10273 + {
10274 +- imx7_csi_dma_unsetup_vb2_buf(csi, VB2_BUF_STATE_ERROR);
10275 ++ imx7_csi_dma_unsetup_vb2_buf(csi, return_status);
10276 + imx_media_free_dma_buf(csi->dev, &csi->underrun_buf);
10277 + }
10278 +
10279 +@@ -537,9 +539,10 @@ static int imx7_csi_init(struct imx7_csi *csi)
10280 + return 0;
10281 + }
10282 +
10283 +-static void imx7_csi_deinit(struct imx7_csi *csi)
10284 ++static void imx7_csi_deinit(struct imx7_csi *csi,
10285 ++ enum vb2_buffer_state return_status)
10286 + {
10287 +- imx7_csi_dma_cleanup(csi);
10288 ++ imx7_csi_dma_cleanup(csi, return_status);
10289 + imx7_csi_init_default(csi);
10290 + imx7_csi_dmareq_rff_disable(csi);
10291 + clk_disable_unprepare(csi->mclk);
10292 +@@ -702,7 +705,7 @@ static int imx7_csi_s_stream(struct v4l2_subdev *sd, int enable)
10293 +
10294 + ret = v4l2_subdev_call(csi->src_sd, video, s_stream, 1);
10295 + if (ret < 0) {
10296 +- imx7_csi_deinit(csi);
10297 ++ imx7_csi_deinit(csi, VB2_BUF_STATE_QUEUED);
10298 + goto out_unlock;
10299 + }
10300 +
10301 +@@ -712,7 +715,7 @@ static int imx7_csi_s_stream(struct v4l2_subdev *sd, int enable)
10302 +
10303 + v4l2_subdev_call(csi->src_sd, video, s_stream, 0);
10304 +
10305 +- imx7_csi_deinit(csi);
10306 ++ imx7_csi_deinit(csi, VB2_BUF_STATE_ERROR);
10307 + }
10308 +
10309 + csi->is_streaming = !!enable;
10310 +diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
10311 +index 94d11689b4ac6..33ff80da32775 100644
10312 +--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
10313 ++++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
10314 +@@ -707,7 +707,7 @@ static void PHY_StoreTxPowerByRateNew(
10315 + if (RfPath > ODM_RF_PATH_D)
10316 + return;
10317 +
10318 +- if (TxNum > ODM_RF_PATH_D)
10319 ++ if (TxNum > RF_MAX_TX_NUM)
10320 + return;
10321 +
10322 + for (i = 0; i < rateNum; ++i) {
10323 +diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
10324 +index 1deb74112ad43..11d9d9155eef2 100644
10325 +--- a/drivers/staging/rts5208/rtsx_scsi.c
10326 ++++ b/drivers/staging/rts5208/rtsx_scsi.c
10327 +@@ -2802,10 +2802,10 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
10328 + }
10329 +
10330 + if (dev_info_id == 0x15) {
10331 +- buf_len = 0x3A;
10332 ++ buf_len = 0x3C;
10333 + data_len = 0x3A;
10334 + } else {
10335 +- buf_len = 0x6A;
10336 ++ buf_len = 0x6C;
10337 + data_len = 0x6A;
10338 + }
10339 +
10340 +@@ -2855,11 +2855,7 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
10341 + }
10342 +
10343 + rtsx_stor_set_xfer_buf(buf, buf_len, srb);
10344 +-
10345 +- if (dev_info_id == 0x15)
10346 +- scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
10347 +- else
10348 +- scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);
10349 ++ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
10350 +
10351 + kfree(buf);
10352 + return STATUS_SUCCESS;
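
In the rts5208 hunk above, buf_len now holds the full transfer length (two bytes more than data_len), so the residual is computed once from buf_len instead of repeating the hard-coded 0x3C/0x6C branches. A standalone sketch of the arithmetic, with a hypothetical request size:

    #include <stdio.h>

    int main(void)
    {
            int dev_info_id = 0x15;

            /* As in the hunk: the transfer length (buf_len) is two
             * bytes larger than the payload length (data_len). */
            int buf_len  = (dev_info_id == 0x15) ? 0x3C : 0x6C;
            int data_len = (dev_info_id == 0x15) ? 0x3A : 0x6A;
            int scsi_bufflen = 0x100;   /* hypothetical request size */

            /* One residual computation from buf_len replaces the two
             * duplicated magic-number branches. */
            int resid = scsi_bufflen - buf_len;

            printf("buf=%#x data=%#x resid=%#x\n",
                   buf_len, data_len, resid);
            return 0;
    }
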
10353 +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
10354 +index a82032c081e83..03229350ea731 100644
10355 +--- a/drivers/thunderbolt/switch.c
10356 ++++ b/drivers/thunderbolt/switch.c
10357 +@@ -2308,7 +2308,7 @@ static void tb_switch_default_link_ports(struct tb_switch *sw)
10358 + {
10359 + int i;
10360 +
10361 +- for (i = 1; i <= sw->config.max_port_number; i += 2) {
10362 ++ for (i = 1; i <= sw->config.max_port_number; i++) {
10363 + struct tb_port *port = &sw->ports[i];
10364 + struct tb_port *subordinate;
10365 +
10366 +diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
10367 +index e8c58f9bd2632..d6afaae1729aa 100644
10368 +--- a/drivers/tty/hvc/hvsi.c
10369 ++++ b/drivers/tty/hvc/hvsi.c
10370 +@@ -1038,7 +1038,7 @@ static const struct tty_operations hvsi_ops = {
10371 +
10372 + static int __init hvsi_init(void)
10373 + {
10374 +- int i;
10375 ++ int i, ret;
10376 +
10377 + hvsi_driver = alloc_tty_driver(hvsi_count);
10378 + if (!hvsi_driver)
10379 +@@ -1069,12 +1069,25 @@ static int __init hvsi_init(void)
10380 + }
10381 + hvsi_wait = wait_for_state; /* irqs active now */
10382 +
10383 +- if (tty_register_driver(hvsi_driver))
10384 +- panic("Couldn't register hvsi console driver\n");
10385 ++ ret = tty_register_driver(hvsi_driver);
10386 ++ if (ret) {
10387 ++ pr_err("Couldn't register hvsi console driver\n");
10388 ++ goto err_free_irq;
10389 ++ }
10390 +
10391 + printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);
10392 +
10393 + return 0;
10394 ++err_free_irq:
10395 ++ hvsi_wait = poll_for_state;
10396 ++ for (i = 0; i < hvsi_count; i++) {
10397 ++ struct hvsi_struct *hp = &hvsi_ports[i];
10398 ++
10399 ++ free_irq(hp->virq, hp);
10400 ++ }
10401 ++ tty_driver_kref_put(hvsi_driver);
10402 ++
10403 ++ return ret;
10404 + }
10405 + device_initcall(hvsi_init);
10406 +
10407 +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
10408 +index 79418d4beb48f..b6c731a267d26 100644
10409 +--- a/drivers/tty/serial/8250/8250_omap.c
10410 ++++ b/drivers/tty/serial/8250/8250_omap.c
10411 +@@ -617,7 +617,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
10412 + struct uart_port *port = dev_id;
10413 + struct omap8250_priv *priv = port->private_data;
10414 + struct uart_8250_port *up = up_to_u8250p(port);
10415 +- unsigned int iir;
10416 ++ unsigned int iir, lsr;
10417 + int ret;
10418 +
10419 + #ifdef CONFIG_SERIAL_8250_DMA
10420 +@@ -628,6 +628,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
10421 + #endif
10422 +
10423 + serial8250_rpm_get(up);
10424 ++ lsr = serial_port_in(port, UART_LSR);
10425 + iir = serial_port_in(port, UART_IIR);
10426 + ret = serial8250_handle_irq(port, iir);
10427 +
10428 +@@ -642,6 +643,24 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
10429 + serial_port_in(port, UART_RX);
10430 + }
10431 +
10432 ++ /* Stop processing interrupts on input overrun */
10433 ++ if ((lsr & UART_LSR_OE) && up->overrun_backoff_time_ms > 0) {
10434 ++ unsigned long delay;
10435 ++
10436 ++ up->ier = port->serial_in(port, UART_IER);
10437 ++ if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
10438 ++ port->ops->stop_rx(port);
10439 ++ } else {
10440 ++ /* Keep restarting the timer until
10441 ++ * the input overrun subsides.
10442 ++ */
10443 ++ cancel_delayed_work(&up->overrun_backoff);
10444 ++ }
10445 ++
10446 ++ delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
10447 ++ schedule_delayed_work(&up->overrun_backoff, delay);
10448 ++ }
10449 ++
10450 + serial8250_rpm_put(up);
10451 +
10452 + return IRQ_RETVAL(ret);
10453 +@@ -1353,6 +1372,10 @@ static int omap8250_probe(struct platform_device *pdev)
10454 + }
10455 + }
10456 +
10457 ++ if (of_property_read_u32(np, "overrun-throttle-ms",
10458 ++ &up.overrun_backoff_time_ms) != 0)
10459 ++ up.overrun_backoff_time_ms = 0;
10460 ++
10461 + priv->wakeirq = irq_of_parse_and_map(np, 1);
10462 +
10463 + pdata = of_device_get_match_data(&pdev->dev);
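
The 8250_omap hunk above reads the LSR before the IIR and, on an RX overrun, either stops reception (when RX interrupts were enabled) or keeps pushing a backoff timer out, with the delay taken from the optional overrun-throttle-ms DT property. A standalone sketch of just the decision logic, with hypothetical names; the real handler also reads IER and schedules a delayed work item:

    #include <stdbool.h>
    #include <stdio.h>

    enum overrun_action { OA_NONE, OA_STOP_RX_AND_ARM, OA_RESTART_TIMER };

    static enum overrun_action on_irq(bool overrun, unsigned int backoff_ms,
                                      bool rx_irqs_enabled)
    {
            if (!overrun || backoff_ms == 0)
                    return OA_NONE;

            /* Stop reception while RX interrupts are live; otherwise
             * keep pushing the timer out until the overrun subsides. */
            return rx_irqs_enabled ? OA_STOP_RX_AND_ARM : OA_RESTART_TIMER;
    }

    int main(void)
    {
            printf("%d\n", on_irq(true, 50, true));   /* stop RX, arm timer */
            printf("%d\n", on_irq(true, 50, false));  /* restart timer */
            printf("%d\n", on_irq(false, 50, true));  /* nothing */
            return 0;
    }
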
10464 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
10465 +index 1934940b96170..2ad136dcfcc8f 100644
10466 +--- a/drivers/tty/serial/8250/8250_pci.c
10467 ++++ b/drivers/tty/serial/8250/8250_pci.c
10468 +@@ -87,7 +87,7 @@ static void moan_device(const char *str, struct pci_dev *dev)
10469 +
10470 + static int
10471 + setup_port(struct serial_private *priv, struct uart_8250_port *port,
10472 +- int bar, int offset, int regshift)
10473 ++ u8 bar, unsigned int offset, int regshift)
10474 + {
10475 + struct pci_dev *dev = priv->dev;
10476 +
10477 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
10478 +index 9422284bb3f33..81bafcf77bb2e 100644
10479 +--- a/drivers/tty/serial/8250/8250_port.c
10480 ++++ b/drivers/tty/serial/8250/8250_port.c
10481 +@@ -122,7 +122,8 @@ static const struct serial8250_config uart_config[] = {
10482 + .name = "16C950/954",
10483 + .fifo_size = 128,
10484 + .tx_loadsz = 128,
10485 +- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
10486 ++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01,
10487 ++ .rxtrig_bytes = {16, 32, 112, 120},
10488 + /* UART_CAP_EFR breaks billionon CF bluetooth card. */
10489 + .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
10490 + },
10491 +diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
10492 +index bf0e2a4cb0cef..c6f927a76c3be 100644
10493 +--- a/drivers/tty/serial/jsm/jsm_neo.c
10494 ++++ b/drivers/tty/serial/jsm/jsm_neo.c
10495 +@@ -815,7 +815,9 @@ static void neo_parse_isr(struct jsm_board *brd, u32 port)
10496 + /* Parse any modem signal changes */
10497 + jsm_dbg(INTR, &ch->ch_bd->pci_dev,
10498 + "MOD_STAT: sending to parse_modem_sigs\n");
10499 ++ spin_lock_irqsave(&ch->uart_port.lock, lock_flags);
10500 + neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
10501 ++ spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags);
10502 + }
10503 + }
10504 +
10505 +diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
10506 +index 8e42a7682c63d..d74cbbbf33c62 100644
10507 +--- a/drivers/tty/serial/jsm/jsm_tty.c
10508 ++++ b/drivers/tty/serial/jsm/jsm_tty.c
10509 +@@ -187,6 +187,7 @@ static void jsm_tty_break(struct uart_port *port, int break_state)
10510 +
10511 + static int jsm_tty_open(struct uart_port *port)
10512 + {
10513 ++ unsigned long lock_flags;
10514 + struct jsm_board *brd;
10515 + struct jsm_channel *channel =
10516 + container_of(port, struct jsm_channel, uart_port);
10517 +@@ -240,6 +241,7 @@ static int jsm_tty_open(struct uart_port *port)
10518 + channel->ch_cached_lsr = 0;
10519 + channel->ch_stops_sent = 0;
10520 +
10521 ++ spin_lock_irqsave(&port->lock, lock_flags);
10522 + termios = &port->state->port.tty->termios;
10523 + channel->ch_c_cflag = termios->c_cflag;
10524 + channel->ch_c_iflag = termios->c_iflag;
10525 +@@ -259,6 +261,7 @@ static int jsm_tty_open(struct uart_port *port)
10526 + jsm_carrier(channel);
10527 +
10528 + channel->ch_open_count++;
10529 ++ spin_unlock_irqrestore(&port->lock, lock_flags);
10530 +
10531 + jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
10532 + return 0;
10533 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
10534 +index 2d5487bf68559..a2e62f372e10e 100644
10535 +--- a/drivers/tty/serial/sh-sci.c
10536 ++++ b/drivers/tty/serial/sh-sci.c
10537 +@@ -1760,6 +1760,10 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
10538 +
10539 + /* Handle BREAKs */
10540 + sci_handle_breaks(port);
10541 ++
10542 ++ /* drop invalid character received before break was detected */
10543 ++ serial_port_in(port, SCxRDR);
10544 ++
10545 + sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));
10546 +
10547 + return IRQ_HANDLED;
10548 +@@ -1839,7 +1843,8 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
10549 + ret = sci_er_interrupt(irq, ptr);
10550 +
10551 + /* Break Interrupt */
10552 +- if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
10553 ++ if (s->irqs[SCIx_ERI_IRQ] != s->irqs[SCIx_BRI_IRQ] &&
10554 ++ (ssr_status & SCxSR_BRK(port)) && err_enabled)
10555 + ret = sci_br_interrupt(irq, ptr);
10556 +
10557 + /* Overrun Interrupt */
10558 +diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
10559 +index e86d13c04bdbe..bdc3885c0d493 100644
10560 +--- a/drivers/usb/chipidea/host.c
10561 ++++ b/drivers/usb/chipidea/host.c
10562 +@@ -240,15 +240,18 @@ static int ci_ehci_hub_control(
10563 + )
10564 + {
10565 + struct ehci_hcd *ehci = hcd_to_ehci(hcd);
10566 ++ unsigned int ports = HCS_N_PORTS(ehci->hcs_params);
10567 + u32 __iomem *status_reg;
10568 +- u32 temp;
10569 ++ u32 temp, port_index;
10570 + unsigned long flags;
10571 + int retval = 0;
10572 + bool done = false;
10573 + struct device *dev = hcd->self.controller;
10574 + struct ci_hdrc *ci = dev_get_drvdata(dev);
10575 +
10576 +- status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
10577 ++ port_index = wIndex & 0xff;
10578 ++ port_index -= (port_index > 0);
10579 ++ status_reg = &ehci->regs->port_status[port_index];
10580 +
10581 + spin_lock_irqsave(&ehci->lock, flags);
10582 +
10583 +@@ -260,6 +263,11 @@ static int ci_ehci_hub_control(
10584 + }
10585 +
10586 + if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
10587 ++ if (!wIndex || wIndex > ports) {
10588 ++ retval = -EPIPE;
10589 ++ goto done;
10590 ++ }
10591 ++
10592 + temp = ehci_readl(ehci, status_reg);
10593 + if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
10594 + retval = -EPIPE;
10595 +@@ -288,7 +296,7 @@ static int ci_ehci_hub_control(
10596 + ehci_writel(ehci, temp, status_reg);
10597 + }
10598 +
10599 +- set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
10600 ++ set_bit(port_index, &ehci->suspended_ports);
10601 + goto done;
10602 + }
10603 +
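
The chipidea hunk above validates the 1-based wIndex from the hub request before it is turned into a 0-based index into port_status[]; a zero or out-of-range value now returns -EPIPE instead of indexing outside the array. A standalone sketch of the bounds-check pattern, with a hypothetical port count:

    #include <stdio.h>

    #define NUM_PORTS 4   /* hypothetical HCS_N_PORTS value */

    static int read_port_status(unsigned int wIndex, unsigned int *out)
    {
            static const unsigned int port_status[NUM_PORTS] = { 0 };
            unsigned int idx = wIndex & 0xff;

            /* The wire value is 1-based; 0 and > NUM_PORTS are invalid
             * and must be rejected before indexing the array. */
            if (!idx || idx > NUM_PORTS)
                    return -1;      /* -EPIPE in the driver */

            *out = port_status[idx - 1];
            return 0;
    }

    int main(void)
    {
            unsigned int v;

            printf("%d\n", read_port_status(0, &v));  /* rejected */
            printf("%d\n", read_port_status(1, &v));  /* first port */
            printf("%d\n", read_port_status(9, &v));  /* rejected */
            return 0;
    }
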
10604 +diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
10605 +index 756faa46d33a7..d328d20abfbc4 100644
10606 +--- a/drivers/usb/dwc3/dwc3-imx8mp.c
10607 ++++ b/drivers/usb/dwc3/dwc3-imx8mp.c
10608 +@@ -152,13 +152,6 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
10609 + }
10610 + dwc3_imx->irq = irq;
10611 +
10612 +- err = devm_request_threaded_irq(dev, irq, NULL, dwc3_imx8mp_interrupt,
10613 +- IRQF_ONESHOT, dev_name(dev), dwc3_imx);
10614 +- if (err) {
10615 +- dev_err(dev, "failed to request IRQ #%d --> %d\n", irq, err);
10616 +- goto disable_clks;
10617 +- }
10618 +-
10619 + pm_runtime_set_active(dev);
10620 + pm_runtime_enable(dev);
10621 + err = pm_runtime_get_sync(dev);
10622 +@@ -186,6 +179,13 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
10623 + }
10624 + of_node_put(dwc3_np);
10625 +
10626 ++ err = devm_request_threaded_irq(dev, irq, NULL, dwc3_imx8mp_interrupt,
10627 ++ IRQF_ONESHOT, dev_name(dev), dwc3_imx);
10628 ++ if (err) {
10629 ++ dev_err(dev, "failed to request IRQ #%d --> %d\n", irq, err);
10630 ++ goto depopulate;
10631 ++ }
10632 ++
10633 + device_set_wakeup_capable(dev, true);
10634 + pm_runtime_put(dev);
10635 +
10636 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
10637 +index 72a9797dbbae0..504c1cbc255d1 100644
10638 +--- a/drivers/usb/gadget/composite.c
10639 ++++ b/drivers/usb/gadget/composite.c
10640 +@@ -482,7 +482,7 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
10641 + {
10642 + unsigned val;
10643 +
10644 +- if (c->MaxPower)
10645 ++ if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
10646 + val = c->MaxPower;
10647 + else
10648 + val = CONFIG_USB_GADGET_VBUS_DRAW;
10649 +@@ -936,7 +936,11 @@ static int set_config(struct usb_composite_dev *cdev,
10650 + }
10651 +
10652 + /* when we return, be sure our power usage is valid */
10653 +- power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
10654 ++ if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
10655 ++ power = c->MaxPower;
10656 ++ else
10657 ++ power = CONFIG_USB_GADGET_VBUS_DRAW;
10658 ++
10659 + if (gadget->speed < USB_SPEED_SUPER)
10660 + power = min(power, 500U);
10661 + else
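
The composite.c hunk above changes the bMaxPower rule in two places: a self-powered configuration may now report MaxPower as-is, even zero, and only bus-powered configurations fall back to CONFIG_USB_GADGET_VBUS_DRAW. A standalone sketch of the selection rule (the default value below is a hypothetical placeholder):

    #include <stdio.h>

    #define ATT_SELFPOWER     0x40  /* USB_CONFIG_ATT_SELFPOWER */
    #define DEFAULT_VBUS_DRAW 2     /* hypothetical build-time default */

    /* Self-powered configs may report MaxPower as-is, even zero; only
     * bus-powered configs fall back to the default draw. */
    static unsigned int effective_power(unsigned int max_power,
                                        unsigned int bm_attributes)
    {
            if (max_power || (bm_attributes & ATT_SELFPOWER))
                    return max_power;
            return DEFAULT_VBUS_DRAW;
    }

    int main(void)
    {
            printf("%u\n", effective_power(0, ATT_SELFPOWER)); /* 0 */
            printf("%u\n", effective_power(0, 0));             /* default */
            printf("%u\n", effective_power(100, 0));           /* 100 */
            return 0;
    }
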
10662 +diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
10663 +index d1d044d9f8594..85a3f6d4b5af3 100644
10664 +--- a/drivers/usb/gadget/function/u_ether.c
10665 ++++ b/drivers/usb/gadget/function/u_ether.c
10666 +@@ -492,8 +492,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
10667 + }
10668 + spin_unlock_irqrestore(&dev->lock, flags);
10669 +
10670 +- if (skb && !in) {
10671 +- dev_kfree_skb_any(skb);
10672 ++ if (!in) {
10673 ++ if (skb)
10674 ++ dev_kfree_skb_any(skb);
10675 + return NETDEV_TX_OK;
10676 + }
10677 +
10678 +diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
10679 +index cffdc8d01b2a8..8fd27249ad257 100644
10680 +--- a/drivers/usb/host/ehci-mv.c
10681 ++++ b/drivers/usb/host/ehci-mv.c
10682 +@@ -42,26 +42,25 @@ struct ehci_hcd_mv {
10683 + int (*set_vbus)(unsigned int vbus);
10684 + };
10685 +
10686 +-static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
10687 ++static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
10688 + {
10689 +- clk_prepare_enable(ehci_mv->clk);
10690 +-}
10691 ++ int retval;
10692 +
10693 +-static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
10694 +-{
10695 +- clk_disable_unprepare(ehci_mv->clk);
10696 +-}
10697 ++ retval = clk_prepare_enable(ehci_mv->clk);
10698 ++ if (retval)
10699 ++ return retval;
10700 +
10701 +-static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
10702 +-{
10703 +- ehci_clock_enable(ehci_mv);
10704 +- return phy_init(ehci_mv->phy);
10705 ++ retval = phy_init(ehci_mv->phy);
10706 ++ if (retval)
10707 ++ clk_disable_unprepare(ehci_mv->clk);
10708 ++
10709 ++ return retval;
10710 + }
10711 +
10712 + static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
10713 + {
10714 + phy_exit(ehci_mv->phy);
10715 +- ehci_clock_disable(ehci_mv);
10716 ++ clk_disable_unprepare(ehci_mv->clk);
10717 + }
10718 +
10719 + static int mv_ehci_reset(struct usb_hcd *hcd)
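
The ehci-mv hunk above folds the clock helpers into mv_ehci_enable() so that clk_prepare_enable()'s return value is finally checked, and the clock is rolled back if phy_init() fails afterwards. A standalone sketch of the enable-with-rollback pattern, with hypothetical stand-ins for the two steps:

    #include <stdio.h>

    /* Hypothetical stand-ins for clk_prepare_enable() and phy_init(). */
    static int clock_on(void)   { return 0; }
    static void clock_off(void) { puts("clock rolled back"); }
    static int phy_up(void)     { return -1; }   /* simulate failure */

    static int controller_enable(void)
    {
            int ret;

            ret = clock_on();
            if (ret)                /* the old helper ignored this result */
                    return ret;

            ret = phy_up();
            if (ret)
                    clock_off();    /* undo step 1 if step 2 fails */

            return ret;
    }

    int main(void)
    {
            return controller_enable() ? 1 : 0;
    }
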
10720 +diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
10721 +index 9c2eda0918e13..670a2fecc9c77 100644
10722 +--- a/drivers/usb/host/fotg210-hcd.c
10723 ++++ b/drivers/usb/host/fotg210-hcd.c
10724 +@@ -2509,11 +2509,6 @@ retry_xacterr:
10725 + return count;
10726 + }
10727 +
10728 +-/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
10729 +-#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
10730 +-/* ... and packet size, for any kind of endpoint descriptor */
10731 +-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
10732 +-
10733 + /* reverse of qh_urb_transaction: free a list of TDs.
10734 + * used for cleanup after errors, before HC sees an URB's TDs.
10735 + */
10736 +@@ -2599,7 +2594,7 @@ static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
10737 + token |= (1 /* "in" */ << 8);
10738 + /* else it's already initted to "out" pid (0 << 8) */
10739 +
10740 +- maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
10741 ++ maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
10742 +
10743 + /*
10744 + * buffer gets wrapped in one or more qtds;
10745 +@@ -2713,9 +2708,11 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
10746 + gfp_t flags)
10747 + {
10748 + struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
10749 ++ struct usb_host_endpoint *ep;
10750 + u32 info1 = 0, info2 = 0;
10751 + int is_input, type;
10752 + int maxp = 0;
10753 ++ int mult;
10754 + struct usb_tt *tt = urb->dev->tt;
10755 + struct fotg210_qh_hw *hw;
10756 +
10757 +@@ -2730,14 +2727,15 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
10758 +
10759 + is_input = usb_pipein(urb->pipe);
10760 + type = usb_pipetype(urb->pipe);
10761 +- maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
10762 ++ ep = usb_pipe_endpoint(urb->dev, urb->pipe);
10763 ++ maxp = usb_endpoint_maxp(&ep->desc);
10764 ++ mult = usb_endpoint_maxp_mult(&ep->desc);
10765 +
10766 + /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
10767 + * acts like up to 3KB, but is built from smaller packets.
10768 + */
10769 +- if (max_packet(maxp) > 1024) {
10770 +- fotg210_dbg(fotg210, "bogus qh maxpacket %d\n",
10771 +- max_packet(maxp));
10772 ++ if (maxp > 1024) {
10773 ++ fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", maxp);
10774 + goto done;
10775 + }
10776 +
10777 +@@ -2751,8 +2749,7 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
10778 + */
10779 + if (type == PIPE_INTERRUPT) {
10780 + qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
10781 +- is_input, 0,
10782 +- hb_mult(maxp) * max_packet(maxp)));
10783 ++ is_input, 0, mult * maxp));
10784 + qh->start = NO_FRAME;
10785 +
10786 + if (urb->dev->speed == USB_SPEED_HIGH) {
10787 +@@ -2789,7 +2786,7 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
10788 + think_time = tt ? tt->think_time : 0;
10789 + qh->tt_usecs = NS_TO_US(think_time +
10790 + usb_calc_bus_time(urb->dev->speed,
10791 +- is_input, 0, max_packet(maxp)));
10792 ++ is_input, 0, maxp));
10793 + qh->period = urb->interval;
10794 + if (qh->period > fotg210->periodic_size) {
10795 + qh->period = fotg210->periodic_size;
10796 +@@ -2852,11 +2849,11 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
10797 + * to help them do so. So now people expect to use
10798 + * such nonconformant devices with Linux too; sigh.
10799 + */
10800 +- info1 |= max_packet(maxp) << 16;
10801 ++ info1 |= maxp << 16;
10802 + info2 |= (FOTG210_TUNE_MULT_HS << 30);
10803 + } else { /* PIPE_INTERRUPT */
10804 +- info1 |= max_packet(maxp) << 16;
10805 +- info2 |= hb_mult(maxp) << 30;
10806 ++ info1 |= maxp << 16;
10807 ++ info2 |= mult << 30;
10808 + }
10809 + break;
10810 + default:
10811 +@@ -3926,6 +3923,7 @@ static void iso_stream_init(struct fotg210_hcd *fotg210,
10812 + int is_input;
10813 + long bandwidth;
10814 + unsigned multi;
10815 ++ struct usb_host_endpoint *ep;
10816 +
10817 + /*
10818 + * this might be a "high bandwidth" highspeed endpoint,
10819 +@@ -3933,14 +3931,14 @@ static void iso_stream_init(struct fotg210_hcd *fotg210,
10820 + */
10821 + epnum = usb_pipeendpoint(pipe);
10822 + is_input = usb_pipein(pipe) ? USB_DIR_IN : 0;
10823 +- maxp = usb_maxpacket(dev, pipe, !is_input);
10824 ++ ep = usb_pipe_endpoint(dev, pipe);
10825 ++ maxp = usb_endpoint_maxp(&ep->desc);
10826 + if (is_input)
10827 + buf1 = (1 << 11);
10828 + else
10829 + buf1 = 0;
10830 +
10831 +- maxp = max_packet(maxp);
10832 +- multi = hb_mult(maxp);
10833 ++ multi = usb_endpoint_maxp_mult(&ep->desc);
10834 + buf1 |= maxp;
10835 + maxp *= multi;
10836 +
10837 +@@ -4461,13 +4459,12 @@ static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
10838 +
10839 + /* HC need not update length with this error */
10840 + if (!(t & FOTG210_ISOC_BABBLE)) {
10841 +- desc->actual_length =
10842 +- fotg210_itdlen(urb, desc, t);
10843 ++ desc->actual_length = FOTG210_ITD_LENGTH(t);
10844 + urb->actual_length += desc->actual_length;
10845 + }
10846 + } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
10847 + desc->status = 0;
10848 +- desc->actual_length = fotg210_itdlen(urb, desc, t);
10849 ++ desc->actual_length = FOTG210_ITD_LENGTH(t);
10850 + urb->actual_length += desc->actual_length;
10851 + } else {
10852 + /* URB was too late */
10853 +diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
10854 +index 6cee40ec65b41..67f59517ebade 100644
10855 +--- a/drivers/usb/host/fotg210.h
10856 ++++ b/drivers/usb/host/fotg210.h
10857 +@@ -686,11 +686,6 @@ static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
10858 + return fotg210_readl(fotg210, &fotg210->regs->frame_index);
10859 + }
10860 +
10861 +-#define fotg210_itdlen(urb, desc, t) ({ \
10862 +- usb_pipein((urb)->pipe) ? \
10863 +- (desc)->length - FOTG210_ITD_LENGTH(t) : \
10864 +- FOTG210_ITD_LENGTH(t); \
10865 +-})
10866 + /*-------------------------------------------------------------------------*/
10867 +
10868 + #endif /* __LINUX_FOTG210_H */
10869 +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
10870 +index b2058b3bc834c..86e5710a5307d 100644
10871 +--- a/drivers/usb/host/xhci-mtk.c
10872 ++++ b/drivers/usb/host/xhci-mtk.c
10873 +@@ -571,7 +571,7 @@ disable_ldos:
10874 + xhci_mtk_ldos_disable(mtk);
10875 +
10876 + disable_pm:
10877 +- pm_runtime_put_sync_autosuspend(dev);
10878 ++ pm_runtime_put_noidle(dev);
10879 + pm_runtime_disable(dev);
10880 + return ret;
10881 + }
10882 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
10883 +index 9248ce8d09a4a..cb21add9d7bee 100644
10884 +--- a/drivers/usb/host/xhci.c
10885 ++++ b/drivers/usb/host/xhci.c
10886 +@@ -4705,19 +4705,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
10887 + {
10888 + unsigned long long timeout_ns;
10889 +
10890 +- if (xhci->quirks & XHCI_INTEL_HOST)
10891 +- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
10892 +- else
10893 +- timeout_ns = udev->u1_params.sel;
10894 +-
10895 + /* Prevent U1 if service interval is shorter than U1 exit latency */
10896 + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
10897 +- if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
10898 ++ if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
10899 + dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
10900 + return USB3_LPM_DISABLED;
10901 + }
10902 + }
10903 +
10904 ++ if (xhci->quirks & XHCI_INTEL_HOST)
10905 ++ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
10906 ++ else
10907 ++ timeout_ns = udev->u1_params.sel;
10908 ++
10909 + /* The U1 timeout is encoded in 1us intervals.
10910 + * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
10911 + */
10912 +@@ -4769,19 +4769,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
10913 + {
10914 + unsigned long long timeout_ns;
10915 +
10916 +- if (xhci->quirks & XHCI_INTEL_HOST)
10917 +- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
10918 +- else
10919 +- timeout_ns = udev->u2_params.sel;
10920 +-
10921 + /* Prevent U2 if service interval is shorter than U2 exit latency */
10922 + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
10923 +- if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
10924 ++ if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
10925 + dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
10926 + return USB3_LPM_DISABLED;
10927 + }
10928 + }
10929 +
10930 ++ if (xhci->quirks & XHCI_INTEL_HOST)
10931 ++ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
10932 ++ else
10933 ++ timeout_ns = udev->u2_params.sel;
10934 ++
10935 + /* The U2 timeout is encoded in 256us intervals */
10936 + timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
10937 + /* If the necessary timeout value is bigger than what we can set in the
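
The xhci hunks above reorder the U1/U2 checks: the service interval is now compared against the exit latency (u1/u2_params.mel) before any timeout is derived from sel, so the "ESIT shorter than exit latency" test no longer depends on the vendor-specific timeout value. A standalone sketch of the reordered logic, with hypothetical nanosecond figures:

    #include <stdio.h>

    #define LPM_DISABLED 0

    /* Hypothetical figures; in the driver these come from the endpoint
     * descriptor and the device's u1/u2 parameters. */
    static unsigned int lpm_timeout(unsigned long long service_interval_ns,
                                    unsigned long long exit_latency_ns,
                                    unsigned long long sel_ns)
    {
            /* Decide "can this state be entered at all" first, against
             * the exit latency (mel) ... */
            if (service_interval_ns <= exit_latency_ns)
                    return LPM_DISABLED;

            /* ... and only then derive the timeout field from sel. */
            unsigned long long t = (sel_ns + 999) / 1000;  /* 1 us units */
            return t ? (unsigned int)t : 1;
    }

    int main(void)
    {
            printf("%u\n", lpm_timeout(100, 200, 5000));  /* disabled */
            printf("%u\n", lpm_timeout(9000, 200, 5000)); /* 5 */
            return 0;
    }
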
10938 +diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
10939 +index 5892f3ce0cdc8..ce9fc46c92661 100644
10940 +--- a/drivers/usb/musb/musb_dsps.c
10941 ++++ b/drivers/usb/musb/musb_dsps.c
10942 +@@ -890,23 +890,22 @@ static int dsps_probe(struct platform_device *pdev)
10943 + if (!glue->usbss_base)
10944 + return -ENXIO;
10945 +
10946 +- if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
10947 +- ret = dsps_setup_optional_vbus_irq(pdev, glue);
10948 +- if (ret)
10949 +- goto err_iounmap;
10950 +- }
10951 +-
10952 + platform_set_drvdata(pdev, glue);
10953 + pm_runtime_enable(&pdev->dev);
10954 + ret = dsps_create_musb_pdev(glue, pdev);
10955 + if (ret)
10956 + goto err;
10957 +
10958 ++ if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
10959 ++ ret = dsps_setup_optional_vbus_irq(pdev, glue);
10960 ++ if (ret)
10961 ++ goto err;
10962 ++ }
10963 ++
10964 + return 0;
10965 +
10966 + err:
10967 + pm_runtime_disable(&pdev->dev);
10968 +-err_iounmap:
10969 + iounmap(glue->usbss_base);
10970 + return ret;
10971 + }
10972 +diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
10973 +index 4ba6bcdaa8e9d..b07b2925ff78b 100644
10974 +--- a/drivers/usb/usbip/vhci_hcd.c
10975 ++++ b/drivers/usb/usbip/vhci_hcd.c
10976 +@@ -455,8 +455,14 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
10977 + vhci_hcd->port_status[rhport] &= ~(1 << USB_PORT_FEAT_RESET);
10978 + vhci_hcd->re_timeout = 0;
10979 +
10980 ++ /*
10981 ++ * A few drivers do usb reset during probe when
10982 ++ * the device could be in VDEV_ST_USED state
10983 ++ */
10984 + if (vhci_hcd->vdev[rhport].ud.status ==
10985 +- VDEV_ST_NOTASSIGNED) {
10986 ++ VDEV_ST_NOTASSIGNED ||
10987 ++ vhci_hcd->vdev[rhport].ud.status ==
10988 ++ VDEV_ST_USED) {
10989 + usbip_dbg_vhci_rh(
10990 + " enable rhport %d (status %u)\n",
10991 + rhport,
10992 +@@ -957,8 +963,32 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
10993 + spin_lock(&vdev->priv_lock);
10994 +
10995 + list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
10996 ++ struct urb *urb;
10997 ++
10998 ++ /* give back urb of unsent unlink request */
10999 + pr_info("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
11000 ++
11001 ++ urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
11002 ++ if (!urb) {
11003 ++ list_del(&unlink->list);
11004 ++ kfree(unlink);
11005 ++ continue;
11006 ++ }
11007 ++
11008 ++ urb->status = -ENODEV;
11009 ++
11010 ++ usb_hcd_unlink_urb_from_ep(hcd, urb);
11011 ++
11012 + list_del(&unlink->list);
11013 ++
11014 ++ spin_unlock(&vdev->priv_lock);
11015 ++ spin_unlock_irqrestore(&vhci->lock, flags);
11016 ++
11017 ++ usb_hcd_giveback_urb(hcd, urb, urb->status);
11018 ++
11019 ++ spin_lock_irqsave(&vhci->lock, flags);
11020 ++ spin_lock(&vdev->priv_lock);
11021 ++
11022 + kfree(unlink);
11023 + }
11024 +
11025 +diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
11026 +index 67d0bf4efa160..e44bf736e2b22 100644
11027 +--- a/drivers/vfio/Kconfig
11028 ++++ b/drivers/vfio/Kconfig
11029 +@@ -29,7 +29,7 @@ menuconfig VFIO
11030 +
11031 + If you don't know what to do here, say N.
11032 +
11033 +-menuconfig VFIO_NOIOMMU
11034 ++config VFIO_NOIOMMU
11035 + bool "VFIO No-IOMMU support"
11036 + depends on VFIO
11037 + help
11038 +diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
11039 +index 3e006da477523..84c56f525889f 100644
11040 +--- a/drivers/video/fbdev/asiliantfb.c
11041 ++++ b/drivers/video/fbdev/asiliantfb.c
11042 +@@ -227,6 +227,9 @@ static int asiliantfb_check_var(struct fb_var_screeninfo *var,
11043 + {
11044 + unsigned long Ftarget, ratio, remainder;
11045 +
11046 ++ if (!var->pixclock)
11047 ++ return -EINVAL;
11048 ++
11049 + ratio = 1000000 / var->pixclock;
11050 + remainder = 1000000 % var->pixclock;
11051 + Ftarget = 1000000 * ratio + (1000000 * remainder) / var->pixclock;
11052 +diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
11053 +index 8fbde92ae8b9c..25801e8e3f74a 100644
11054 +--- a/drivers/video/fbdev/kyro/fbdev.c
11055 ++++ b/drivers/video/fbdev/kyro/fbdev.c
11056 +@@ -372,6 +372,11 @@ static int kyro_dev_overlay_viewport_set(u32 x, u32 y, u32 ulWidth, u32 ulHeight
11057 + /* probably haven't called CreateOverlay yet */
11058 + return -EINVAL;
11059 +
11060 ++ if (ulWidth == 0 || ulWidth == 0xffffffff ||
11061 ++ ulHeight == 0 || ulHeight == 0xffffffff ||
11062 ++ (x < 2 && ulWidth + 2 == 0))
11063 ++ return -EINVAL;
11064 ++
11065 + /* Stop Ramdac Output */
11066 + DisableRamdacOutput(deviceInfo.pSTGReg);
11067 +
11068 +@@ -394,6 +399,9 @@ static int kyrofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
11069 + {
11070 + struct kyrofb_info *par = info->par;
11071 +
11072 ++ if (!var->pixclock)
11073 ++ return -EINVAL;
11074 ++
11075 + if (var->bits_per_pixel != 16 && var->bits_per_pixel != 32) {
11076 + printk(KERN_WARNING "kyrofb: depth not supported: %u\n", var->bits_per_pixel);
11077 + return -EINVAL;
11078 +diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
11079 +index 55554b0433cb4..84d5e23ad7d38 100644
11080 +--- a/drivers/video/fbdev/riva/fbdev.c
11081 ++++ b/drivers/video/fbdev/riva/fbdev.c
11082 +@@ -1084,6 +1084,9 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
11083 + int mode_valid = 0;
11084 +
11085 + NVTRACE_ENTER();
11086 ++ if (!var->pixclock)
11087 ++ return -EINVAL;
11088 ++
11089 + switch (var->bits_per_pixel) {
11090 + case 1 ... 8:
11091 + var->red.offset = var->green.offset = var->blue.offset = 0;
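
The three fbdev hunks above (asiliantfb, kyro, riva) share one fix: var->pixclock is rejected when zero before being used as a divisor in check_var(). A minimal standalone sketch of the guard, reusing asiliantfb's Ftarget arithmetic:

    #include <stdio.h>

    static long pixclock_to_ftarget(unsigned long pixclock)
    {
            if (!pixclock)          /* zero would divide below: -EINVAL */
                    return -1;

            unsigned long ratio = 1000000 / pixclock;
            unsigned long remainder = 1000000 % pixclock;

            return (long)(1000000 * ratio +
                          (1000000 * remainder) / pixclock);
    }

    int main(void)
    {
            printf("%ld\n", pixclock_to_ftarget(0));      /* rejected */
            printf("%ld\n", pixclock_to_ftarget(39722));
            return 0;
    }
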
11092 +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
11093 +index cf53713f8aa01..a29b5ffca99b7 100644
11094 +--- a/fs/btrfs/block-group.c
11095 ++++ b/fs/btrfs/block-group.c
11096 +@@ -1550,7 +1550,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
11097 + bg->start, div64_u64(bg->used * 100, bg->length));
11098 + trace_btrfs_reclaim_block_group(bg);
11099 + ret = btrfs_relocate_chunk(fs_info, bg->start);
11100 +- if (ret)
11101 ++ if (ret && ret != -EAGAIN)
11102 + btrfs_err(fs_info, "error relocating chunk %llu",
11103 + bg->start);
11104 +
11105 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
11106 +index 8d386a5587ee9..51395cfb75742 100644
11107 +--- a/fs/btrfs/disk-io.c
11108 ++++ b/fs/btrfs/disk-io.c
11109 +@@ -3329,6 +3329,30 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
11110 + */
11111 + fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
11112 +
11113 ++ /*
11114 ++ * Flag our filesystem as having big metadata blocks if they are bigger
11115 ++ * than the page size.
11116 ++ */
11117 ++ if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
11118 ++ if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
11119 ++ btrfs_info(fs_info,
11120 ++ "flagging fs with big metadata feature");
11121 ++ features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
11122 ++ }
11123 ++
11124 ++ /* Set up fs_info before parsing mount options */
11125 ++ nodesize = btrfs_super_nodesize(disk_super);
11126 ++ sectorsize = btrfs_super_sectorsize(disk_super);
11127 ++ stripesize = sectorsize;
11128 ++ fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
11129 ++ fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
11130 ++
11131 ++ fs_info->nodesize = nodesize;
11132 ++ fs_info->sectorsize = sectorsize;
11133 ++ fs_info->sectorsize_bits = ilog2(sectorsize);
11134 ++ fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
11135 ++ fs_info->stripesize = stripesize;
11136 ++
11137 + ret = btrfs_parse_options(fs_info, options, sb->s_flags);
11138 + if (ret) {
11139 + err = ret;
11140 +@@ -3355,30 +3379,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
11141 + if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
11142 + btrfs_info(fs_info, "has skinny extents");
11143 +
11144 +- /*
11145 +- * flag our filesystem as having big metadata blocks if
11146 +- * they are bigger than the page size
11147 +- */
11148 +- if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
11149 +- if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
11150 +- btrfs_info(fs_info,
11151 +- "flagging fs with big metadata feature");
11152 +- features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
11153 +- }
11154 +-
11155 +- nodesize = btrfs_super_nodesize(disk_super);
11156 +- sectorsize = btrfs_super_sectorsize(disk_super);
11157 +- stripesize = sectorsize;
11158 +- fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
11159 +- fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
11160 +-
11161 +- /* Cache block sizes */
11162 +- fs_info->nodesize = nodesize;
11163 +- fs_info->sectorsize = sectorsize;
11164 +- fs_info->sectorsize_bits = ilog2(sectorsize);
11165 +- fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
11166 +- fs_info->stripesize = stripesize;
11167 +-
11168 + /*
11169 + * mixed block groups end up with duplicate but slightly offset
11170 + * extent buffers for the same range. It leads to corruptions
11171 +diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
11172 +index 4806295116d88..c3510c8fdaf8c 100644
11173 +--- a/fs/btrfs/free-space-cache.c
11174 ++++ b/fs/btrfs/free-space-cache.c
11175 +@@ -2652,8 +2652,11 @@ int btrfs_remove_free_space(struct btrfs_block_group *block_group,
11176 + * btrfs_pin_extent_for_log_replay() when replaying the log.
11177 + * Advance the pointer not to overwrite the tree-log nodes.
11178 + */
11179 +- if (block_group->alloc_offset < offset + bytes)
11180 +- block_group->alloc_offset = offset + bytes;
11181 ++ if (block_group->start + block_group->alloc_offset <
11182 ++ offset + bytes) {
11183 ++ block_group->alloc_offset =
11184 ++ offset + bytes - block_group->start;
11185 ++ }
11186 + return 0;
11187 + }
11188 +
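
The free-space-cache hunk above fixes a mixed-coordinate comparison: alloc_offset is relative to the block group's start while offset is an absolute address, so the group start must be added on one side of the comparison and subtracted when storing the result. A standalone sketch of the corrected arithmetic, with hypothetical values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical values: alloc_offset is relative to the
             * block group start, offset is an absolute disk address. */
            uint64_t group_start  = 1ULL << 30;
            uint64_t alloc_offset = 4096;
            uint64_t offset = group_start + 8192, bytes = 4096;

            /* Compare in one coordinate space, then convert back. */
            if (group_start + alloc_offset < offset + bytes)
                    alloc_offset = offset + bytes - group_start;

            printf("alloc_offset=%" PRIu64 "\n", alloc_offset); /* 12288 */
            return 0;
    }
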
11189 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
11190 +index 6328deb27d126..044300db5e228 100644
11191 +--- a/fs/btrfs/inode.c
11192 ++++ b/fs/btrfs/inode.c
11193 +@@ -1248,11 +1248,6 @@ static noinline void async_cow_submit(struct btrfs_work *work)
11194 + nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
11195 + PAGE_SHIFT;
11196 +
11197 +- /* atomic_sub_return implies a barrier */
11198 +- if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
11199 +- 5 * SZ_1M)
11200 +- cond_wake_up_nomb(&fs_info->async_submit_wait);
11201 +-
11202 + /*
11203 + * ->inode could be NULL if async_chunk_start has failed to compress,
11204 + * in which case we don't have anything to submit, yet we need to
11205 +@@ -1261,6 +1256,11 @@ static noinline void async_cow_submit(struct btrfs_work *work)
11206 + */
11207 + if (async_chunk->inode)
11208 + submit_compressed_extents(async_chunk);
11209 ++
11210 ++ /* atomic_sub_return implies a barrier */
11211 ++ if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
11212 ++ 5 * SZ_1M)
11213 ++ cond_wake_up_nomb(&fs_info->async_submit_wait);
11214 + }
11215 +
11216 + static noinline void async_cow_free(struct btrfs_work *work)
11217 +@@ -5064,15 +5064,13 @@ static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
11218 + int ret;
11219 +
11220 + /*
11221 +- * Still need to make sure the inode looks like it's been updated so
11222 +- * that any holes get logged if we fsync.
11223 ++ * If NO_HOLES is enabled, we don't need to do anything.
11224 ++ * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
11225 ++ * or btrfs_update_inode() will be called, which guarantee that the next
11226 ++ * fsync will know this inode was changed and needs to be logged.
11227 + */
11228 +- if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
11229 +- inode->last_trans = fs_info->generation;
11230 +- inode->last_sub_trans = root->log_transid;
11231 +- inode->last_log_commit = root->last_log_commit;
11232 ++ if (btrfs_fs_incompat(fs_info, NO_HOLES))
11233 + return 0;
11234 +- }
11235 +
11236 + /*
11237 + * 1 - for the one we're dropping
11238 +@@ -9774,10 +9772,6 @@ static int start_delalloc_inodes(struct btrfs_root *root,
11239 + &work->work);
11240 + } else {
11241 + ret = sync_inode(inode, wbc);
11242 +- if (!ret &&
11243 +- test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
11244 +- &BTRFS_I(inode)->runtime_flags))
11245 +- ret = sync_inode(inode, wbc);
11246 + btrfs_add_delayed_iput(inode);
11247 + if (ret || wbc->nr_to_write <= 0)
11248 + goto out;
11249 +diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
11250 +index 6c413bb451a3d..8326f4bee89ff 100644
11251 +--- a/fs/btrfs/ordered-data.c
11252 ++++ b/fs/btrfs/ordered-data.c
11253 +@@ -917,6 +917,7 @@ static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
11254 + u64 len)
11255 + {
11256 + struct inode *inode = ordered->inode;
11257 ++ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
11258 + u64 file_offset = ordered->file_offset + pos;
11259 + u64 disk_bytenr = ordered->disk_bytenr + pos;
11260 + u64 num_bytes = len;
11261 +@@ -934,6 +935,13 @@ static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
11262 + else
11263 + type = __ffs(flags_masked);
11264 +
11265 ++ /*
11266 ++ * The splitting extent is already counted and will be added again
11267 ++ * in btrfs_add_ordered_extent_*(). Subtract num_bytes to avoid
11268 ++ * double counting.
11269 ++ */
11270 ++ percpu_counter_add_batch(&fs_info->ordered_bytes, -num_bytes,
11271 ++ fs_info->delalloc_batch);
11272 + if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
11273 + WARN_ON_ONCE(1);
11274 + ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
11275 +diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
11276 +index 2dc674b7c3b14..088220539788c 100644
11277 +--- a/fs/btrfs/space-info.c
11278 ++++ b/fs/btrfs/space-info.c
11279 +@@ -539,9 +539,49 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
11280 + while ((delalloc_bytes || ordered_bytes) && loops < 3) {
11281 + u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
11282 + long nr_pages = min_t(u64, temp, LONG_MAX);
11283 ++ int async_pages;
11284 +
11285 + btrfs_start_delalloc_roots(fs_info, nr_pages, true);
11286 +
11287 ++ /*
11288 ++ * We need to make sure any outstanding async pages are now
11289 ++ * processed before we continue. This is because things like
11290 ++ * sync_inode() try to be smart and skip writing if the inode is
11291 ++ * marked clean. We don't use filemap_fwrite for flushing
11292 ++ * because we want to control how many pages we write out at a
11293 ++ * time, thus this is the only safe way to make sure we've
11294 ++ * waited for outstanding compressed workers to have started
11295 ++ * their jobs and thus have ordered extents set up properly.
11296 ++ *
11297 ++ * This exists because we do not want to wait for each
11298 ++ * individual inode to finish its async work, we simply want to
11299 ++ * start the IO on everybody, and then come back here and wait
11300 ++ * for all of the async work to catch up. Once we're done with
11301 ++ * that we know we'll have ordered extents for everything and we
11302 ++ * can decide if we wait for that or not.
11303 ++ *
11304 ++ * If we choose to replace this in the future, make absolutely
11305 ++ * sure that the proper waiting is being done in the async case,
11306 ++ * as there have been bugs in that area before.
11307 ++ */
11308 ++ async_pages = atomic_read(&fs_info->async_delalloc_pages);
11309 ++ if (!async_pages)
11310 ++ goto skip_async;
11311 ++
11312 ++ /*
11313 ++ * We don't want to wait forever, if we wrote less pages in this
11314 ++ * loop than we have outstanding, only wait for that number of
11315 ++ * pages, otherwise we can wait for all async pages to finish
11316 ++ * before continuing.
11317 ++ */
11318 ++ if (async_pages > nr_pages)
11319 ++ async_pages -= nr_pages;
11320 ++ else
11321 ++ async_pages = 0;
11322 ++ wait_event(fs_info->async_submit_wait,
11323 ++ atomic_read(&fs_info->async_delalloc_pages) <=
11324 ++ async_pages);
11325 ++skip_async:
11326 + loops++;
11327 + if (wait_ordered && !trans) {
11328 + btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
11329 +@@ -793,7 +833,7 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
11330 + struct btrfs_space_info *space_info)
11331 + {
11332 + u64 ordered, delalloc;
11333 +- u64 thresh = div_factor_fine(space_info->total_bytes, 98);
11334 ++ u64 thresh = div_factor_fine(space_info->total_bytes, 90);
11335 + u64 used;
11336 +
11337 + /* If we're just plain full then async reclaim just slows us down. */
11338 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
11339 +index 24555cc1f42d5..9e9ab41df7da6 100644
11340 +--- a/fs/btrfs/tree-log.c
11341 ++++ b/fs/btrfs/tree-log.c
11342 +@@ -753,7 +753,9 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
11343 + */
11344 + ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
11345 + ins.offset);
11346 +- if (ret == 0) {
11347 ++ if (ret < 0) {
11348 ++ goto out;
11349 ++ } else if (ret == 0) {
11350 + btrfs_init_generic_ref(&ref,
11351 + BTRFS_ADD_DELAYED_REF,
11352 + ins.objectid, ins.offset, 0);
11353 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
11354 +index 7d5875824be7a..dc1f31cf3d4a5 100644
11355 +--- a/fs/btrfs/volumes.c
11356 ++++ b/fs/btrfs/volumes.c
11357 +@@ -1130,6 +1130,9 @@ static void btrfs_close_one_device(struct btrfs_device *device)
11358 + fs_devices->rw_devices--;
11359 + }
11360 +
11361 ++ if (device->devid == BTRFS_DEV_REPLACE_DEVID)
11362 ++ clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
11363 ++
11364 + if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
11365 + fs_devices->missing_devices--;
11366 +
11367 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
11368 +index 805c656a2e72a..f602e51c00065 100644
11369 +--- a/fs/ceph/caps.c
11370 ++++ b/fs/ceph/caps.c
11371 +@@ -1756,6 +1756,9 @@ struct ceph_cap_flush *ceph_alloc_cap_flush(void)
11372 + struct ceph_cap_flush *cf;
11373 +
11374 + cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
11375 ++ if (!cf)
11376 ++ return NULL;
11377 ++
11378 + cf->is_capsnap = false;
11379 + return cf;
11380 + }
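
The ceph hunk above adds the missing allocation-failure check: kmem_cache_alloc() can return NULL, which was previously dereferenced unconditionally. A minimal standalone sketch of the pattern, with a hypothetical stand-in for the allocator:

    #include <stdbool.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the cap_flush object. */
    struct cap_flush {
            bool is_capsnap;
    };

    static struct cap_flush *alloc_cap_flush(void)
    {
            struct cap_flush *cf = malloc(sizeof(*cf));

            if (!cf)                /* previously dereferenced unchecked */
                    return NULL;

            cf->is_capsnap = false;
            return cf;
    }

    int main(void)
    {
            struct cap_flush *cf = alloc_cap_flush();

            free(cf);               /* free(NULL) is a no-op */
            return 0;
    }
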
11381 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
11382 +index a92a1fb7cb526..4c22f73b31232 100644
11383 +--- a/fs/cifs/sess.c
11384 ++++ b/fs/cifs/sess.c
11385 +@@ -889,7 +889,7 @@ sess_alloc_buffer(struct sess_data *sess_data, int wct)
11386 + return 0;
11387 +
11388 + out_free_smb_buf:
11389 +- kfree(smb_buf);
11390 ++ cifs_small_buf_release(smb_buf);
11391 + sess_data->iov[0].iov_base = NULL;
11392 + sess_data->iov[0].iov_len = 0;
11393 + sess_data->buf0_type = CIFS_NO_BUFFER;
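
The cifs hunk above fixes a mismatched free: the buffer came from the small-buffer pool, so it must be returned through cifs_small_buf_release() rather than kfree(). A standalone sketch of pairing an allocator with its matching release routine (the names below are hypothetical stand-ins):

    #include <stdlib.h>

    /* Hypothetical stand-ins for the small-buffer pool's get/release. */
    static void *small_buf_get(void)       { return malloc(64); }
    static void small_buf_release(void *p) { free(p); }

    int main(void)
    {
            void *buf = small_buf_get();

            /* Wrong:  free(buf)  -- bypasses the pool's bookkeeping.
             * Right:  hand the buffer back through the paired routine. */
            small_buf_release(buf);
            return 0;
    }
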
11394 +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
11395 +index f795049e63d55..6c208108d69c1 100644
11396 +--- a/fs/f2fs/checkpoint.c
11397 ++++ b/fs/f2fs/checkpoint.c
11398 +@@ -444,7 +444,7 @@ static int f2fs_set_meta_page_dirty(struct page *page)
11399 + if (!PageDirty(page)) {
11400 + __set_page_dirty_nobuffers(page);
11401 + inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
11402 +- f2fs_set_page_private(page, 0);
11403 ++ set_page_private_reference(page);
11404 + return 1;
11405 + }
11406 + return 0;
11407 +@@ -1018,7 +1018,7 @@ void f2fs_update_dirty_page(struct inode *inode, struct page *page)
11408 + inode_inc_dirty_pages(inode);
11409 + spin_unlock(&sbi->inode_lock[type]);
11410 +
11411 +- f2fs_set_page_private(page, 0);
11412 ++ set_page_private_reference(page);
11413 + }
11414 +
11415 + void f2fs_remove_dirty_inode(struct inode *inode)
11416 +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
11417 +index 925a5ca3744a9..14e6a78503f18 100644
11418 +--- a/fs/f2fs/compress.c
11419 ++++ b/fs/f2fs/compress.c
11420 +@@ -12,9 +12,11 @@
11421 + #include <linux/lzo.h>
11422 + #include <linux/lz4.h>
11423 + #include <linux/zstd.h>
11424 ++#include <linux/pagevec.h>
11425 +
11426 + #include "f2fs.h"
11427 + #include "node.h"
11428 ++#include "segment.h"
11429 + #include <trace/events/f2fs.h>
11430 +
11431 + static struct kmem_cache *cic_entry_slab;
11432 +@@ -74,7 +76,7 @@ bool f2fs_is_compressed_page(struct page *page)
11433 + return false;
11434 + if (!page_private(page))
11435 + return false;
11436 +- if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
11437 ++ if (page_private_nonpointer(page))
11438 + return false;
11439 +
11440 + f2fs_bug_on(F2FS_M_SB(page->mapping),
11441 +@@ -85,8 +87,7 @@ bool f2fs_is_compressed_page(struct page *page)
11442 + static void f2fs_set_compressed_page(struct page *page,
11443 + struct inode *inode, pgoff_t index, void *data)
11444 + {
11445 +- SetPagePrivate(page);
11446 +- set_page_private(page, (unsigned long)data);
11447 ++ attach_page_private(page, (void *)data);
11448 +
11449 + /* i_crypto_info and iv index */
11450 + page->index = index;
11451 +@@ -589,8 +590,7 @@ static void f2fs_compress_free_page(struct page *page)
11452 + {
11453 + if (!page)
11454 + return;
11455 +- set_page_private(page, (unsigned long)NULL);
11456 +- ClearPagePrivate(page);
11457 ++ detach_page_private(page);
11458 + page->mapping = NULL;
11459 + unlock_page(page);
11460 + mempool_free(page, compress_page_pool);
11461 +@@ -738,7 +738,7 @@ out:
11462 + return ret;
11463 + }
11464 +
11465 +-static void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
11466 ++void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
11467 + {
11468 + struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
11469 + struct f2fs_inode_info *fi = F2FS_I(dic->inode);
11470 +@@ -837,7 +837,8 @@ out_end_io:
11471 + * page being waited on in the cluster, and if so, it decompresses the cluster
11472 + * (or in the case of a failure, cleans up without actually decompressing).
11473 + */
11474 +-void f2fs_end_read_compressed_page(struct page *page, bool failed)
11475 ++void f2fs_end_read_compressed_page(struct page *page, bool failed,
11476 ++ block_t blkaddr)
11477 + {
11478 + struct decompress_io_ctx *dic =
11479 + (struct decompress_io_ctx *)page_private(page);
11480 +@@ -847,6 +848,9 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed)
11481 +
11482 + if (failed)
11483 + WRITE_ONCE(dic->failed, true);
11484 ++ else if (blkaddr)
11485 ++ f2fs_cache_compressed_page(sbi, page,
11486 ++ dic->inode->i_ino, blkaddr);
11487 +
11488 + if (atomic_dec_and_test(&dic->remaining_pages))
11489 + f2fs_decompress_cluster(dic);
11490 +@@ -1359,12 +1363,6 @@ out_destroy_crypt:
11491 +
11492 + for (--i; i >= 0; i--)
11493 + fscrypt_finalize_bounce_page(&cc->cpages[i]);
11494 +- for (i = 0; i < cc->nr_cpages; i++) {
11495 +- if (!cc->cpages[i])
11496 +- continue;
11497 +- f2fs_compress_free_page(cc->cpages[i]);
11498 +- cc->cpages[i] = NULL;
11499 +- }
11500 + out_put_cic:
11501 + kmem_cache_free(cic_entry_slab, cic);
11502 + out_put_dnode:
11503 +@@ -1375,6 +1373,12 @@ out_unlock_op:
11504 + else
11505 + f2fs_unlock_op(sbi);
11506 + out_free:
11507 ++ for (i = 0; i < cc->nr_cpages; i++) {
11508 ++ if (!cc->cpages[i])
11509 ++ continue;
11510 ++ f2fs_compress_free_page(cc->cpages[i]);
11511 ++ cc->cpages[i] = NULL;
11512 ++ }
11513 + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
11514 + cc->cpages = NULL;
11515 + return -EAGAIN;
11516 +@@ -1399,7 +1403,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
11517 +
11518 + for (i = 0; i < cic->nr_rpages; i++) {
11519 + WARN_ON(!cic->rpages[i]);
11520 +- clear_cold_data(cic->rpages[i]);
11521 ++ clear_page_private_gcing(cic->rpages[i]);
11522 + end_page_writeback(cic->rpages[i]);
11523 + }
11524 +
11525 +@@ -1685,6 +1689,164 @@ void f2fs_put_page_dic(struct page *page)
11526 + f2fs_put_dic(dic);
11527 + }
11528 +
11529 ++const struct address_space_operations f2fs_compress_aops = {
11530 ++ .releasepage = f2fs_release_page,
11531 ++ .invalidatepage = f2fs_invalidate_page,
11532 ++};
11533 ++
11534 ++struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
11535 ++{
11536 ++ return sbi->compress_inode->i_mapping;
11537 ++}
11538 ++
11539 ++void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
11540 ++{
11541 ++ if (!sbi->compress_inode)
11542 ++ return;
11543 ++ invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
11544 ++}
11545 ++
11546 ++void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
11547 ++ nid_t ino, block_t blkaddr)
11548 ++{
11549 ++ struct page *cpage;
11550 ++ int ret;
11551 ++
11552 ++ if (!test_opt(sbi, COMPRESS_CACHE))
11553 ++ return;
11554 ++
11555 ++ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
11556 ++ return;
11557 ++
11558 ++ if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
11559 ++ return;
11560 ++
11561 ++ cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
11562 ++ if (cpage) {
11563 ++ f2fs_put_page(cpage, 0);
11564 ++ return;
11565 ++ }
11566 ++
11567 ++ cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
11568 ++ if (!cpage)
11569 ++ return;
11570 ++
11571 ++ ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
11572 ++ blkaddr, GFP_NOFS);
11573 ++ if (ret) {
11574 ++ f2fs_put_page(cpage, 0);
11575 ++ return;
11576 ++ }
11577 ++
11578 ++ set_page_private_data(cpage, ino);
11579 ++
11580 ++ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
11581 ++ goto out;
11582 ++
11583 ++ memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
11584 ++ SetPageUptodate(cpage);
11585 ++out:
11586 ++ f2fs_put_page(cpage, 1);
11587 ++}
11588 ++
11589 ++bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
11590 ++ block_t blkaddr)
11591 ++{
11592 ++ struct page *cpage;
11593 ++ bool hitted = false;
11594 ++
11595 ++ if (!test_opt(sbi, COMPRESS_CACHE))
11596 ++ return false;
11597 ++
11598 ++ cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
11599 ++ blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
11600 ++ if (cpage) {
11601 ++ if (PageUptodate(cpage)) {
11602 ++ atomic_inc(&sbi->compress_page_hit);
11603 ++ memcpy(page_address(page),
11604 ++ page_address(cpage), PAGE_SIZE);
11605 ++ hitted = true;
11606 ++ }
11607 ++ f2fs_put_page(cpage, 1);
11608 ++ }
11609 ++
11610 ++ return hitted;
11611 ++}
11612 ++
11613 ++void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
11614 ++{
11615 ++ struct address_space *mapping = sbi->compress_inode->i_mapping;
11616 ++ struct pagevec pvec;
11617 ++ pgoff_t index = 0;
11618 ++ pgoff_t end = MAX_BLKADDR(sbi);
11619 ++
11620 ++ if (!mapping->nrpages)
11621 ++ return;
11622 ++
11623 ++ pagevec_init(&pvec);
11624 ++
11625 ++ do {
11626 ++ unsigned int nr_pages;
11627 ++ int i;
11628 ++
11629 ++ nr_pages = pagevec_lookup_range(&pvec, mapping,
11630 ++ &index, end - 1);
11631 ++ if (!nr_pages)
11632 ++ break;
11633 ++
11634 ++ for (i = 0; i < nr_pages; i++) {
11635 ++ struct page *page = pvec.pages[i];
11636 ++
11637 ++ if (page->index > end)
11638 ++ break;
11639 ++
11640 ++ lock_page(page);
11641 ++ if (page->mapping != mapping) {
11642 ++ unlock_page(page);
11643 ++ continue;
11644 ++ }
11645 ++
11646 ++ if (ino != get_page_private_data(page)) {
11647 ++ unlock_page(page);
11648 ++ continue;
11649 ++ }
11650 ++
11651 ++ generic_error_remove_page(mapping, page);
11652 ++ unlock_page(page);
11653 ++ }
11654 ++ pagevec_release(&pvec);
11655 ++ cond_resched();
11656 ++ } while (index < end);
11657 ++}
11658 ++
11659 ++int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
11660 ++{
11661 ++ struct inode *inode;
11662 ++
11663 ++ if (!test_opt(sbi, COMPRESS_CACHE))
11664 ++ return 0;
11665 ++
11666 ++ inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
11667 ++ if (IS_ERR(inode))
11668 ++ return PTR_ERR(inode);
11669 ++ sbi->compress_inode = inode;
11670 ++
11671 ++ sbi->compress_percent = COMPRESS_PERCENT;
11672 ++ sbi->compress_watermark = COMPRESS_WATERMARK;
11673 ++
11674 ++ atomic_set(&sbi->compress_page_hit, 0);
11675 ++
11676 ++ return 0;
11677 ++}
11678 ++
11679 ++void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
11680 ++{
11681 ++ if (!sbi->compress_inode)
11682 ++ return;
11683 ++ iput(sbi->compress_inode);
11684 ++ sbi->compress_inode = NULL;
11685 ++}
11686 ++
11687 + int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
11688 + {
11689 + dev_t dev = sbi->sb->s_bdev->bd_dev;
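The compress.c hunks above carry the whole contract of the new compressed-block cache: f2fs_cache_compressed_page() stores a copy of a compressed block in the page cache of a dedicated compress inode, keyed by block address and tagged with the owning inode number; f2fs_load_compressed_page() serves a later read from that copy; and the invalidate helpers drop entries as soon as a block is reused or an inode goes away. The following is a minimal userspace sketch of that contract only — a small direct-mapped table stands in for the compress inode's page cache, and every name in it is illustrative rather than kernel API:

    /* Sketch: cache-on-read / hit / invalidate, keyed by block address. */
    #include <stdbool.h>
    #include <string.h>

    #define BLKSZ 4096
    #define SLOTS 64

    struct slot {
        bool valid;
        unsigned long blkaddr;
        unsigned long ino;          /* owner, cf. set_page_private_data() */
        unsigned char data[BLKSZ];
    };
    static struct slot cache[SLOTS];

    static struct slot *slot_of(unsigned long blkaddr)
    {
        return &cache[blkaddr % SLOTS];
    }

    /* Store a copy after the block has been read from the device. */
    static void cache_store(unsigned long blkaddr, unsigned long ino,
                            const void *buf)
    {
        struct slot *s = slot_of(blkaddr);

        s->valid = true;
        s->blkaddr = blkaddr;
        s->ino = ino;
        memcpy(s->data, buf, BLKSZ);
    }

    /* Hit: copy out and skip the device read; miss: caller does real I/O. */
    static bool cache_load(unsigned long blkaddr, void *buf)
    {
        struct slot *s = slot_of(blkaddr);

        if (!s->valid || s->blkaddr != blkaddr)
            return false;
        memcpy(buf, s->data, BLKSZ);
        return true;
    }

    /* Drop one entry when its block is freed or reallocated. */
    static void cache_invalidate(unsigned long blkaddr)
    {
        struct slot *s = slot_of(blkaddr);

        if (s->valid && s->blkaddr == blkaddr)
            s->valid = false;
    }

    int main(void)
    {
        unsigned char blk[BLKSZ] = { 0x42 }, out[BLKSZ] = { 0 };

        cache_store(7, 100, blk);   /* after a real read of blkaddr 7 */
        cache_load(7, out);         /* a later read hits the cache */
        cache_invalidate(7);        /* blkaddr 7 got reallocated */
        return out[0] != 0x42;
    }

The per-entry ino mirrors what set_page_private_data() records in the real code; it is what lets f2fs_invalidate_compress_pages() walk the mapping and evict every entry that belongs to one inode when that inode is evicted or truncated.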
11690 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
11691 +index e2d0c7d9673e0..198e5ad7c98b5 100644
11692 +--- a/fs/f2fs/data.c
11693 ++++ b/fs/f2fs/data.c
11694 +@@ -58,18 +58,19 @@ static bool __is_cp_guaranteed(struct page *page)
11695 + if (!mapping)
11696 + return false;
11697 +
11698 +- if (f2fs_is_compressed_page(page))
11699 +- return false;
11700 +-
11701 + inode = mapping->host;
11702 + sbi = F2FS_I_SB(inode);
11703 +
11704 + if (inode->i_ino == F2FS_META_INO(sbi) ||
11705 + inode->i_ino == F2FS_NODE_INO(sbi) ||
11706 +- S_ISDIR(inode->i_mode) ||
11707 +- (S_ISREG(inode->i_mode) &&
11708 ++ S_ISDIR(inode->i_mode))
11709 ++ return true;
11710 ++
11711 ++ if (f2fs_is_compressed_page(page))
11712 ++ return false;
11713 ++ if ((S_ISREG(inode->i_mode) &&
11714 + (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
11715 +- is_cold_data(page))
11716 ++ page_private_gcing(page))
11717 + return true;
11718 + return false;
11719 + }
11720 +@@ -131,7 +132,7 @@ static void f2fs_finish_read_bio(struct bio *bio)
11721 +
11722 + if (f2fs_is_compressed_page(page)) {
11723 + if (bio->bi_status)
11724 +- f2fs_end_read_compressed_page(page, true);
11725 ++ f2fs_end_read_compressed_page(page, true, 0);
11726 + f2fs_put_page_dic(page);
11727 + continue;
11728 + }
11729 +@@ -227,15 +228,19 @@ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
11730 + struct bio_vec *bv;
11731 + struct bvec_iter_all iter_all;
11732 + bool all_compressed = true;
11733 ++ block_t blkaddr = SECTOR_TO_BLOCK(ctx->bio->bi_iter.bi_sector);
11734 +
11735 + bio_for_each_segment_all(bv, ctx->bio, iter_all) {
11736 + struct page *page = bv->bv_page;
11737 +
11738 + /* PG_error was set if decryption failed. */
11739 + if (f2fs_is_compressed_page(page))
11740 +- f2fs_end_read_compressed_page(page, PageError(page));
11741 ++ f2fs_end_read_compressed_page(page, PageError(page),
11742 ++ blkaddr);
11743 + else
11744 + all_compressed = false;
11745 ++
11746 ++ blkaddr++;
11747 + }
11748 +
11749 + /*
11750 +@@ -299,9 +304,8 @@ static void f2fs_write_end_io(struct bio *bio)
11751 + struct page *page = bvec->bv_page;
11752 + enum count_type type = WB_DATA_TYPE(page);
11753 +
11754 +- if (IS_DUMMY_WRITTEN_PAGE(page)) {
11755 +- set_page_private(page, (unsigned long)NULL);
11756 +- ClearPagePrivate(page);
11757 ++ if (page_private_dummy(page)) {
11758 ++ clear_page_private_dummy(page);
11759 + unlock_page(page);
11760 + mempool_free(page, sbi->write_io_dummy);
11761 +
11762 +@@ -331,7 +335,7 @@ static void f2fs_write_end_io(struct bio *bio)
11763 + dec_page_count(sbi, type);
11764 + if (f2fs_in_warm_node_list(sbi, page))
11765 + f2fs_del_fsync_node_entry(sbi, page);
11766 +- clear_cold_data(page);
11767 ++ clear_page_private_gcing(page);
11768 + end_page_writeback(page);
11769 + }
11770 + if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
11771 +@@ -455,10 +459,11 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
11772 + GFP_NOIO | __GFP_NOFAIL);
11773 + f2fs_bug_on(sbi, !page);
11774 +
11775 +- zero_user_segment(page, 0, PAGE_SIZE);
11776 +- SetPagePrivate(page);
11777 +- set_page_private(page, DUMMY_WRITTEN_PAGE);
11778 + lock_page(page);
11779 ++
11780 ++ zero_user_segment(page, 0, PAGE_SIZE);
11781 ++ set_page_private_dummy(page);
11782 ++
11783 + if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
11784 + f2fs_bug_on(sbi, 1);
11785 + }
11786 +@@ -1351,9 +1356,11 @@ alloc:
11787 + old_blkaddr = dn->data_blkaddr;
11788 + f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
11789 + &sum, seg_type, NULL);
11790 +- if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
11791 ++ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
11792 + invalidate_mapping_pages(META_MAPPING(sbi),
11793 + old_blkaddr, old_blkaddr);
11794 ++ f2fs_invalidate_compress_page(sbi, old_blkaddr);
11795 ++ }
11796 + f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
11797 +
11798 + /*
11799 +@@ -1483,7 +1490,21 @@ next_dnode:
11800 + if (err) {
11801 + if (flag == F2FS_GET_BLOCK_BMAP)
11802 + map->m_pblk = 0;
11803 ++
11804 + if (err == -ENOENT) {
11805 ++ /*
11806 ++ * There is one exceptional case where read_node_page()
11807 ++ * may return -ENOENT because the filesystem has been
11808 ++ * shut down or hit a checkpoint error, so force the
11809 ++ * error number to EIO in that case.
11810 ++ */
11811 ++ if (map->m_may_create &&
11812 ++ (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
11813 ++ f2fs_cp_error(sbi))) {
11814 ++ err = -EIO;
11815 ++ goto unlock_out;
11816 ++ }
11817 ++
11818 + err = 0;
11819 + if (map->m_next_pgofs)
11820 + *map->m_next_pgofs =
11821 +@@ -2130,6 +2151,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
11822 + continue;
11823 + }
11824 + unlock_page(page);
11825 ++ if (for_write)
11826 ++ put_page(page);
11827 + cc->rpages[i] = NULL;
11828 + cc->nr_rpages--;
11829 + }
11830 +@@ -2173,7 +2196,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
11831 + goto out_put_dnode;
11832 + }
11833 +
11834 +- for (i = 0; i < dic->nr_cpages; i++) {
11835 ++ for (i = 0; i < cc->nr_cpages; i++) {
11836 + struct page *page = dic->cpages[i];
11837 + block_t blkaddr;
11838 + struct bio_post_read_ctx *ctx;
11839 +@@ -2181,6 +2204,14 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
11840 + blkaddr = data_blkaddr(dn.inode, dn.node_page,
11841 + dn.ofs_in_node + i + 1);
11842 +
11843 ++ f2fs_wait_on_block_writeback(inode, blkaddr);
11844 ++
11845 ++ if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
11846 ++ if (atomic_dec_and_test(&dic->remaining_pages))
11847 ++ f2fs_decompress_cluster(dic);
11848 ++ continue;
11849 ++ }
11850 ++
11851 + if (bio && (!page_is_mergeable(sbi, bio,
11852 + *last_block_in_bio, blkaddr) ||
11853 + !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
11854 +@@ -2202,8 +2233,6 @@ submit_and_realloc:
11855 + }
11856 + }
11857 +
11858 +- f2fs_wait_on_block_writeback(inode, blkaddr);
11859 +-
11860 + if (bio_add_page(bio, page, blocksize, 0) < blocksize)
11861 + goto submit_and_realloc;
11862 +
11863 +@@ -2482,9 +2511,9 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
11864 + if (f2fs_is_atomic_file(inode))
11865 + return true;
11866 + if (fio) {
11867 +- if (is_cold_data(fio->page))
11868 ++ if (page_private_gcing(fio->page))
11869 + return true;
11870 +- if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
11871 ++ if (page_private_dummy(fio->page))
11872 + return true;
11873 + if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
11874 + f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
11875 +@@ -2540,7 +2569,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
11876 + /* This page is already truncated */
11877 + if (fio->old_blkaddr == NULL_ADDR) {
11878 + ClearPageUptodate(page);
11879 +- clear_cold_data(page);
11880 ++ clear_page_private_gcing(page);
11881 + goto out_writepage;
11882 + }
11883 + got_it:
11884 +@@ -2750,7 +2779,7 @@ out:
11885 + inode_dec_dirty_pages(inode);
11886 + if (err) {
11887 + ClearPageUptodate(page);
11888 +- clear_cold_data(page);
11889 ++ clear_page_private_gcing(page);
11890 + }
11891 +
11892 + if (wbc->for_reclaim) {
11893 +@@ -3224,7 +3253,7 @@ restart:
11894 + f2fs_do_read_inline_data(page, ipage);
11895 + set_inode_flag(inode, FI_DATA_EXIST);
11896 + if (inode->i_nlink)
11897 +- set_inline_node(ipage);
11898 ++ set_page_private_inline(ipage);
11899 + } else {
11900 + err = f2fs_convert_inline_page(&dn, page);
11901 + if (err)
11902 +@@ -3615,12 +3644,20 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
11903 + }
11904 + }
11905 +
11906 +- clear_cold_data(page);
11907 ++ clear_page_private_gcing(page);
11908 +
11909 +- if (IS_ATOMIC_WRITTEN_PAGE(page))
11910 ++ if (test_opt(sbi, COMPRESS_CACHE)) {
11911 ++ if (f2fs_compressed_file(inode))
11912 ++ f2fs_invalidate_compress_pages(sbi, inode->i_ino);
11913 ++ if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
11914 ++ clear_page_private_data(page);
11915 ++ }
11916 ++
11917 ++ if (page_private_atomic(page))
11918 + return f2fs_drop_inmem_page(inode, page);
11919 +
11920 +- f2fs_clear_page_private(page);
11921 ++ detach_page_private(page);
11922 ++ set_page_private(page, 0);
11923 + }
11924 +
11925 + int f2fs_release_page(struct page *page, gfp_t wait)
11926 +@@ -3630,11 +3667,23 @@ int f2fs_release_page(struct page *page, gfp_t wait)
11927 + return 0;
11928 +
11929 + /* This is atomic written page, keep Private */
11930 +- if (IS_ATOMIC_WRITTEN_PAGE(page))
11931 ++ if (page_private_atomic(page))
11932 + return 0;
11933 +
11934 +- clear_cold_data(page);
11935 +- f2fs_clear_page_private(page);
11936 ++ if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
11937 ++ struct f2fs_sb_info *sbi = F2FS_P_SB(page);
11938 ++ struct inode *inode = page->mapping->host;
11939 ++
11940 ++ if (f2fs_compressed_file(inode))
11941 ++ f2fs_invalidate_compress_pages(sbi, inode->i_ino);
11942 ++ if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
11943 ++ clear_page_private_data(page);
11944 ++ }
11945 ++
11946 ++ clear_page_private_gcing(page);
11947 ++
11948 ++ detach_page_private(page);
11949 ++ set_page_private(page, 0);
11950 + return 1;
11951 + }
11952 +
11953 +@@ -3650,7 +3699,7 @@ static int f2fs_set_data_page_dirty(struct page *page)
11954 + return __set_page_dirty_nobuffers(page);
11955 +
11956 + if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
11957 +- if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
11958 ++ if (!page_private_atomic(page)) {
11959 + f2fs_register_inmem_page(inode, page);
11960 + return 1;
11961 + }
11962 +@@ -3742,7 +3791,7 @@ int f2fs_migrate_page(struct address_space *mapping,
11963 + {
11964 + int rc, extra_count;
11965 + struct f2fs_inode_info *fi = F2FS_I(mapping->host);
11966 +- bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
11967 ++ bool atomic_written = page_private_atomic(page);
11968 +
11969 + BUG_ON(PageWriteback(page));
11970 +
11971 +@@ -3778,8 +3827,13 @@ int f2fs_migrate_page(struct address_space *mapping,
11972 + }
11973 +
11974 + if (PagePrivate(page)) {
11975 +- f2fs_set_page_private(newpage, page_private(page));
11976 +- f2fs_clear_page_private(page);
11977 ++ set_page_private(newpage, page_private(page));
11978 ++ SetPagePrivate(newpage);
11979 ++ get_page(newpage);
11980 ++
11981 ++ set_page_private(page, 0);
11982 ++ ClearPagePrivate(page);
11983 ++ put_page(page);
11984 + }
11985 +
11986 + if (mode != MIGRATE_SYNC_NO_COPY)
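One detail of the read-path integration above is easy to miss: when f2fs_read_multi_pages() finds a compressed page in the cache, it does not merely skip adding it to a bio — it also decrements dic->remaining_pages, and if that reaches zero the whole cluster is decompressed immediately, with no I/O completion to wait for. A standalone sketch of that countdown, assuming C11 atomics (struct and function names here are illustrative):

    /* Sketch: per-cluster countdown. Every compressed page satisfied from
     * the cache (or completed by a bio) decrements the counter; whoever
     * takes it to zero triggers decompression of the cluster. */
    #include <stdatomic.h>

    struct cluster {
        atomic_int remaining_pages;     /* compressed pages still pending */
    };

    static void decompress_cluster(struct cluster *c)
    {
        (void)c;        /* decode the cluster's pages; omitted */
    }

    static void page_ready(struct cluster *c)
    {
        /* atomic_dec_and_test() equivalent: true if we took it to zero */
        if (atomic_fetch_sub_explicit(&c->remaining_pages, 1,
                                      memory_order_acq_rel) == 1)
            decompress_cluster(c);
    }

Note also that the hunk moves f2fs_wait_on_block_writeback() ahead of the cache lookup, so a cached copy is never consumed while the block could still be under writeback.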
11987 +diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
11988 +index c03949a7ccff5..833325038ef31 100644
11989 +--- a/fs/f2fs/debug.c
11990 ++++ b/fs/f2fs/debug.c
11991 +@@ -152,6 +152,12 @@ static void update_general_status(struct f2fs_sb_info *sbi)
11992 + si->node_pages = NODE_MAPPING(sbi)->nrpages;
11993 + if (sbi->meta_inode)
11994 + si->meta_pages = META_MAPPING(sbi)->nrpages;
11995 ++#ifdef CONFIG_F2FS_FS_COMPRESSION
11996 ++ if (sbi->compress_inode) {
11997 ++ si->compress_pages = COMPRESS_MAPPING(sbi)->nrpages;
11998 ++ si->compress_page_hit = atomic_read(&sbi->compress_page_hit);
11999 ++ }
12000 ++#endif
12001 + si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
12002 + si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
12003 + si->sits = MAIN_SEGS(sbi);
12004 +@@ -309,6 +315,12 @@ get_cache:
12005 +
12006 + si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
12007 + }
12008 ++#ifdef CONFIG_F2FS_FS_COMPRESSION
12009 ++ if (sbi->compress_inode) {
12010 ++ unsigned npages = COMPRESS_MAPPING(sbi)->nrpages;
12011 ++ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
12012 ++ }
12013 ++#endif
12014 + }
12015 +
12016 + static int stat_show(struct seq_file *s, void *v)
12017 +@@ -476,6 +488,7 @@ static int stat_show(struct seq_file *s, void *v)
12018 + "volatile IO: %4d (Max. %4d)\n",
12019 + si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
12020 + si->vw_cnt, si->max_vw_cnt);
12021 ++ seq_printf(s, " - compress: %4d, hit:%8d\n", si->compress_pages, si->compress_page_hit);
12022 + seq_printf(s, " - nodes: %4d in %4d\n",
12023 + si->ndirty_node, si->node_pages);
12024 + seq_printf(s, " - dents: %4d in dirs:%4d (%4d)\n",
12025 +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
12026 +index dc7ce79672b8d..c821015c0a469 100644
12027 +--- a/fs/f2fs/dir.c
12028 ++++ b/fs/f2fs/dir.c
12029 +@@ -929,11 +929,15 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
12030 + !f2fs_truncate_hole(dir, page->index, page->index + 1)) {
12031 + f2fs_clear_page_cache_dirty_tag(page);
12032 + clear_page_dirty_for_io(page);
12033 +- f2fs_clear_page_private(page);
12034 + ClearPageUptodate(page);
12035 +- clear_cold_data(page);
12036 ++
12037 ++ clear_page_private_gcing(page);
12038 ++
12039 + inode_dec_dirty_pages(dir);
12040 + f2fs_remove_dirty_inode(dir);
12041 ++
12042 ++ detach_page_private(page);
12043 ++ set_page_private(page, 0);
12044 + }
12045 + f2fs_put_page(page, 1);
12046 +
12047 +@@ -991,6 +995,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
12048 + struct f2fs_sb_info *sbi = F2FS_I_SB(d->inode);
12049 + struct blk_plug plug;
12050 + bool readdir_ra = sbi->readdir_ra == 1;
12051 ++ bool found_valid_dirent = false;
12052 + int err = 0;
12053 +
12054 + bit_pos = ((unsigned long)ctx->pos % d->max);
12055 +@@ -1005,13 +1010,15 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
12056 +
12057 + de = &d->dentry[bit_pos];
12058 + if (de->name_len == 0) {
12059 ++ if (found_valid_dirent || !bit_pos) {
12060 ++ printk_ratelimited(
12061 ++ "%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
12062 ++ KERN_WARNING, sbi->sb->s_id,
12063 ++ le32_to_cpu(de->ino));
12064 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
12065 ++ }
12066 + bit_pos++;
12067 + ctx->pos = start_pos + bit_pos;
12068 +- printk_ratelimited(
12069 +- "%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
12070 +- KERN_WARNING, sbi->sb->s_id,
12071 +- le32_to_cpu(de->ino));
12072 +- set_sbi_flag(sbi, SBI_NEED_FSCK);
12073 + continue;
12074 + }
12075 +
12076 +@@ -1054,6 +1061,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
12077 + f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));
12078 +
12079 + ctx->pos = start_pos + bit_pos;
12080 ++ found_valid_dirent = true;
12081 + }
12082 + out:
12083 + if (readdir_ra)
12084 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
12085 +index a5de48e768d7b..395f18e90a8f6 100644
12086 +--- a/fs/f2fs/f2fs.h
12087 ++++ b/fs/f2fs/f2fs.h
12088 +@@ -43,6 +43,7 @@ enum {
12089 + FAULT_KVMALLOC,
12090 + FAULT_PAGE_ALLOC,
12091 + FAULT_PAGE_GET,
12092 ++ FAULT_ALLOC_BIO, /* obsolete: bio_alloc() never fails */
12093 + FAULT_ALLOC_NID,
12094 + FAULT_ORPHAN,
12095 + FAULT_BLOCK,
12096 +@@ -98,6 +99,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
12097 + #define F2FS_MOUNT_ATGC 0x08000000
12098 + #define F2FS_MOUNT_MERGE_CHECKPOINT 0x10000000
12099 + #define F2FS_MOUNT_GC_MERGE 0x20000000
12100 ++#define F2FS_MOUNT_COMPRESS_CACHE 0x40000000
12101 +
12102 + #define F2FS_OPTION(sbi) ((sbi)->mount_opt)
12103 + #define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
12104 +@@ -1291,17 +1293,116 @@ enum {
12105 + */
12106 + };
12107 +
12108 ++static inline int f2fs_test_bit(unsigned int nr, char *addr);
12109 ++static inline void f2fs_set_bit(unsigned int nr, char *addr);
12110 ++static inline void f2fs_clear_bit(unsigned int nr, char *addr);
12111 ++
12112 + /*
12113 +- * this value is set in page as a private data which indicate that
12114 +- * the page is atomically written, and it is in inmem_pages list.
12115 ++ * Layout of f2fs page.private:
12116 ++ *
12117 ++ * Layout A: lowest bit should be 1
12118 ++ * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
12119 ++ * bit 0 PAGE_PRIVATE_NOT_POINTER
12120 ++ * bit 1 PAGE_PRIVATE_ATOMIC_WRITE
12121 ++ * bit 2 PAGE_PRIVATE_DUMMY_WRITE
12122 ++ * bit 3 PAGE_PRIVATE_ONGOING_MIGRATION
12123 ++ * bit 4 PAGE_PRIVATE_INLINE_INODE
12124 ++ * bit 5 PAGE_PRIVATE_REF_RESOURCE
12125 ++ * bits 6+ f2fs private data
12126 ++ *
12127 ++ * Layout B: lowest bit should be 0
12128 ++ * page.private is a wrapped pointer.
12129 + */
12130 +-#define ATOMIC_WRITTEN_PAGE ((unsigned long)-1)
12131 +-#define DUMMY_WRITTEN_PAGE ((unsigned long)-2)
12132 ++enum {
12133 ++ PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */
12134 ++ PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */
12135 ++ PAGE_PRIVATE_DUMMY_WRITE, /* data page for padding aligned IO */
12136 ++ PAGE_PRIVATE_ONGOING_MIGRATION, /* data page undergoing migration */
12137 ++ PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */
12138 ++ PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */
12139 ++ PAGE_PRIVATE_MAX
12140 ++};
12141 ++
12142 ++#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
12143 ++static inline bool page_private_##name(struct page *page) \
12144 ++{ \
12145 ++ return test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
12146 ++ test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
12147 ++}
12148 ++
12149 ++#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
12150 ++static inline void set_page_private_##name(struct page *page) \
12151 ++{ \
12152 ++ if (!PagePrivate(page)) { \
12153 ++ get_page(page); \
12154 ++ SetPagePrivate(page); \
12155 ++ } \
12156 ++ set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
12157 ++ set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
12158 ++}
12159 ++
12160 ++#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
12161 ++static inline void clear_page_private_##name(struct page *page) \
12162 ++{ \
12163 ++ clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
12164 ++ if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
12165 ++ set_page_private(page, 0); \
12166 ++ if (PagePrivate(page)) { \
12167 ++ ClearPagePrivate(page); \
12168 ++ put_page(page); \
12169 ++ }\
12170 ++ } \
12171 ++}
12172 ++
12173 ++PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
12174 ++PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
12175 ++PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
12176 ++PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
12177 ++PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
12178 ++PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);
12179 +
12180 +-#define IS_ATOMIC_WRITTEN_PAGE(page) \
12181 +- (page_private(page) == ATOMIC_WRITTEN_PAGE)
12182 +-#define IS_DUMMY_WRITTEN_PAGE(page) \
12183 +- (page_private(page) == DUMMY_WRITTEN_PAGE)
12184 ++PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
12185 ++PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
12186 ++PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
12187 ++PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
12188 ++PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);
12189 ++
12190 ++PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
12191 ++PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
12192 ++PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
12193 ++PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
12194 ++PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);
12195 ++
12196 ++static inline unsigned long get_page_private_data(struct page *page)
12197 ++{
12198 ++ unsigned long data = page_private(page);
12199 ++
12200 ++ if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
12201 ++ return 0;
12202 ++ return data >> PAGE_PRIVATE_MAX;
12203 ++}
12204 ++
12205 ++static inline void set_page_private_data(struct page *page, unsigned long data)
12206 ++{
12207 ++ if (!PagePrivate(page)) {
12208 ++ get_page(page);
12209 ++ SetPagePrivate(page);
12210 ++ }
12211 ++ set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
12212 ++ page_private(page) |= data << PAGE_PRIVATE_MAX;
12213 ++}
12214 ++
12215 ++static inline void clear_page_private_data(struct page *page)
12216 ++{
12217 ++ page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
12218 ++ if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
12219 ++ set_page_private(page, 0);
12220 ++ if (PagePrivate(page)) {
12221 ++ ClearPagePrivate(page);
12222 ++ put_page(page);
12223 ++ }
12224 ++ }
12225 ++}
12226 +
12227 + /* For compression */
12228 + enum compress_algorithm_type {
12229 +@@ -1317,6 +1418,9 @@ enum compress_flag {
12230 + COMPRESS_MAX_FLAG,
12231 + };
12232 +
12233 ++#define COMPRESS_WATERMARK 20
12234 ++#define COMPRESS_PERCENT 20
12235 ++
12236 + #define COMPRESS_DATA_RESERVED_SIZE 4
12237 + struct compress_data {
12238 + __le32 clen; /* compressed data size */
12239 +@@ -1626,6 +1730,12 @@ struct f2fs_sb_info {
12240 + u64 compr_written_block;
12241 + u64 compr_saved_block;
12242 + u32 compr_new_inode;
12243 ++
12244 ++ /* For compressed block cache */
12245 ++ struct inode *compress_inode; /* cache compressed blocks */
12246 ++ unsigned int compress_percent; /* cache page percentage */
12247 ++ unsigned int compress_watermark; /* cache page watermark */
12248 ++ atomic_t compress_page_hit; /* cache hit count */
12249 + #endif
12250 + };
12251 +
12252 +@@ -3169,20 +3279,6 @@ static inline bool __is_valid_data_blkaddr(block_t blkaddr)
12253 + return true;
12254 + }
12255 +
12256 +-static inline void f2fs_set_page_private(struct page *page,
12257 +- unsigned long data)
12258 +-{
12259 +- if (PagePrivate(page))
12260 +- return;
12261 +-
12262 +- attach_page_private(page, (void *)data);
12263 +-}
12264 +-
12265 +-static inline void f2fs_clear_page_private(struct page *page)
12266 +-{
12267 +- detach_page_private(page);
12268 +-}
12269 +-
12270 + /*
12271 + * file.c
12272 + */
12273 +@@ -3606,7 +3702,8 @@ struct f2fs_stat_info {
12274 + unsigned int bimodal, avg_vblocks;
12275 + int util_free, util_valid, util_invalid;
12276 + int rsvd_segs, overp_segs;
12277 +- int dirty_count, node_pages, meta_pages;
12278 ++ int dirty_count, node_pages, meta_pages, compress_pages;
12279 ++ int compress_page_hit;
12280 + int prefree_count, call_count, cp_count, bg_cp_count;
12281 + int tot_segs, node_segs, data_segs, free_segs, free_secs;
12282 + int bg_node_segs, bg_data_segs;
12283 +@@ -3942,7 +4039,9 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
12284 + bool f2fs_is_compress_backend_ready(struct inode *inode);
12285 + int f2fs_init_compress_mempool(void);
12286 + void f2fs_destroy_compress_mempool(void);
12287 +-void f2fs_end_read_compressed_page(struct page *page, bool failed);
12288 ++void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
12289 ++void f2fs_end_read_compressed_page(struct page *page, bool failed,
12290 ++ block_t blkaddr);
12291 + bool f2fs_cluster_is_empty(struct compress_ctx *cc);
12292 + bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
12293 + void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
12294 +@@ -3960,10 +4059,19 @@ void f2fs_put_page_dic(struct page *page);
12295 + int f2fs_init_compress_ctx(struct compress_ctx *cc);
12296 + void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
12297 + void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
12298 ++int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
12299 ++void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
12300 + int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
12301 + void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
12302 + int __init f2fs_init_compress_cache(void);
12303 + void f2fs_destroy_compress_cache(void);
12304 ++struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
12305 ++void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
12306 ++void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
12307 ++ nid_t ino, block_t blkaddr);
12308 ++bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
12309 ++ block_t blkaddr);
12310 ++void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
12311 + #define inc_compr_inode_stat(inode) \
12312 + do { \
12313 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
12314 +@@ -3992,7 +4100,9 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
12315 + }
12316 + static inline int f2fs_init_compress_mempool(void) { return 0; }
12317 + static inline void f2fs_destroy_compress_mempool(void) { }
12318 +-static inline void f2fs_end_read_compressed_page(struct page *page, bool failed)
12319 ++static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
12320 ++static inline void f2fs_end_read_compressed_page(struct page *page,
12321 ++ bool failed, block_t blkaddr)
12322 + {
12323 + WARN_ON_ONCE(1);
12324 + }
12325 +@@ -4000,10 +4110,20 @@ static inline void f2fs_put_page_dic(struct page *page)
12326 + {
12327 + WARN_ON_ONCE(1);
12328 + }
12329 ++static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
12330 ++static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
12331 + static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
12332 + static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
12333 + static inline int __init f2fs_init_compress_cache(void) { return 0; }
12334 + static inline void f2fs_destroy_compress_cache(void) { }
12335 ++static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
12336 ++ block_t blkaddr) { }
12337 ++static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
12338 ++ struct page *page, nid_t ino, block_t blkaddr) { }
12339 ++static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
12340 ++ struct page *page, block_t blkaddr) { return false; }
12341 ++static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
12342 ++ nid_t ino) { }
12343 + #define inc_compr_inode_stat(inode) do { } while (0)
12344 + #endif
12345 +
12346 +@@ -4020,7 +4140,8 @@ static inline void set_compress_context(struct inode *inode)
12347 + 1 << COMPRESS_CHKSUM : 0;
12348 + F2FS_I(inode)->i_cluster_size =
12349 + 1 << F2FS_I(inode)->i_log_cluster_size;
12350 +- if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
12351 ++ if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
12352 ++ F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
12353 + F2FS_OPTION(sbi).compress_level)
12354 + F2FS_I(inode)->i_compress_flag |=
12355 + F2FS_OPTION(sbi).compress_level <<
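The page.private scheme introduced above packs a marker bit, five flag bits, and arbitrary numeric data into one word: bit 0 (PAGE_PRIVATE_NOT_POINTER) distinguishes this encoding from the old use of page.private as a wrapped pointer, and data is shifted up past PAGE_PRIVATE_MAX so it can never collide with the flags. A self-contained sketch of just the packing arithmetic, using a plain unsigned long in place of page.private and omitting the PagePrivate/refcount handling:

    #include <assert.h>
    #include <stdio.h>

    enum {
        PRIV_NOT_POINTER,       /* bit 0: flag word, not a pointer */
        PRIV_ATOMIC_WRITE,
        PRIV_DUMMY_WRITE,
        PRIV_ONGOING_MIGRATION,
        PRIV_INLINE_INODE,
        PRIV_REF_RESOURCE,
        PRIV_MAX                /* first data bit, cf. PAGE_PRIVATE_MAX */
    };

    static unsigned long set_data(unsigned long priv, unsigned long data)
    {
        priv |= 1UL << PRIV_NOT_POINTER;
        priv |= data << PRIV_MAX;
        return priv;
    }

    static unsigned long get_data(unsigned long priv)
    {
        if (!(priv & (1UL << PRIV_NOT_POINTER)))
            return 0;           /* a wrapped pointer, not flag data */
        return priv >> PRIV_MAX;
    }

    int main(void)
    {
        unsigned long priv = set_data(0, 1234);     /* e.g. an inode number */

        priv |= 1UL << PRIV_ONGOING_MIGRATION;      /* flags coexist with data */
        assert(get_data(priv) == 1234);
        printf("priv=%#lx data=%lu\n", priv, get_data(priv));
        return 0;
    }

This also explains the shape of the generated clear helpers: once a clear operation leaves nothing but the bit-0 marker, the word decays to 1 << PAGE_PRIVATE_NOT_POINTER, at which point the real code resets page.private entirely and drops the extra page reference.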
12356 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
12357 +index fb27d49e4da72..3a11e81fdf659 100644
12358 +--- a/fs/f2fs/file.c
12359 ++++ b/fs/f2fs/file.c
12360 +@@ -1086,7 +1086,6 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
12361 + }
12362 +
12363 + if (pg_start < pg_end) {
12364 +- struct address_space *mapping = inode->i_mapping;
12365 + loff_t blk_start, blk_end;
12366 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
12367 +
12368 +@@ -1098,8 +1097,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
12369 + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
12370 + down_write(&F2FS_I(inode)->i_mmap_sem);
12371 +
12372 +- truncate_inode_pages_range(mapping, blk_start,
12373 +- blk_end - 1);
12374 ++ truncate_pagecache_range(inode, blk_start, blk_end - 1);
12375 +
12376 + f2fs_lock_op(sbi);
12377 + ret = f2fs_truncate_hole(inode, pg_start, pg_end);
12378 +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
12379 +index ab63951c08cbc..1c05e156d9d50 100644
12380 +--- a/fs/f2fs/gc.c
12381 ++++ b/fs/f2fs/gc.c
12382 +@@ -1261,6 +1261,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
12383 + f2fs_put_page(mpage, 1);
12384 + invalidate_mapping_pages(META_MAPPING(fio.sbi),
12385 + fio.old_blkaddr, fio.old_blkaddr);
12386 ++ f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);
12387 +
12388 + set_page_dirty(fio.encrypted_page);
12389 + if (clear_page_dirty_for_io(fio.encrypted_page))
12390 +@@ -1336,7 +1337,7 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
12391 + goto out;
12392 + }
12393 + set_page_dirty(page);
12394 +- set_cold_data(page);
12395 ++ set_page_private_gcing(page);
12396 + } else {
12397 + struct f2fs_io_info fio = {
12398 + .sbi = F2FS_I_SB(inode),
12399 +@@ -1362,11 +1363,11 @@ retry:
12400 + f2fs_remove_dirty_inode(inode);
12401 + }
12402 +
12403 +- set_cold_data(page);
12404 ++ set_page_private_gcing(page);
12405 +
12406 + err = f2fs_do_write_data_page(&fio);
12407 + if (err) {
12408 +- clear_cold_data(page);
12409 ++ clear_page_private_gcing(page);
12410 + if (err == -ENOMEM) {
12411 + congestion_wait(BLK_RW_ASYNC,
12412 + DEFAULT_IO_TIMEOUT);
12413 +@@ -1496,8 +1497,10 @@ next_step:
12414 + int err;
12415 +
12416 + if (S_ISREG(inode->i_mode)) {
12417 +- if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
12418 ++ if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
12419 ++ sbi->skipped_gc_rwsem++;
12420 + continue;
12421 ++ }
12422 + if (!down_write_trylock(
12423 + &fi->i_gc_rwsem[WRITE])) {
12424 + sbi->skipped_gc_rwsem++;
12425 +diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
12426 +index 92652ca7a7c8b..56a20d5c15dad 100644
12427 +--- a/fs/f2fs/inline.c
12428 ++++ b/fs/f2fs/inline.c
12429 +@@ -173,7 +173,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
12430 +
12431 + /* clear inline data and flag after data writeback */
12432 + f2fs_truncate_inline_inode(dn->inode, dn->inode_page, 0);
12433 +- clear_inline_node(dn->inode_page);
12434 ++ clear_page_private_inline(dn->inode_page);
12435 + clear_out:
12436 + stat_dec_inline_inode(dn->inode);
12437 + clear_inode_flag(dn->inode, FI_INLINE_DATA);
12438 +@@ -255,7 +255,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
12439 + set_inode_flag(inode, FI_APPEND_WRITE);
12440 + set_inode_flag(inode, FI_DATA_EXIST);
12441 +
12442 +- clear_inline_node(dn.inode_page);
12443 ++ clear_page_private_inline(dn.inode_page);
12444 + f2fs_put_dnode(&dn);
12445 + return 0;
12446 + }
12447 +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
12448 +index b401f08569f70..9141147b5bb00 100644
12449 +--- a/fs/f2fs/inode.c
12450 ++++ b/fs/f2fs/inode.c
12451 +@@ -18,6 +18,10 @@
12452 +
12453 + #include <trace/events/f2fs.h>
12454 +
12455 ++#ifdef CONFIG_F2FS_FS_COMPRESSION
12456 ++extern const struct address_space_operations f2fs_compress_aops;
12457 ++#endif
12458 ++
12459 + void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
12460 + {
12461 + if (is_inode_flag_set(inode, FI_NEW_INODE))
12462 +@@ -494,6 +498,11 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
12463 + if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
12464 + goto make_now;
12465 +
12466 ++#ifdef CONFIG_F2FS_FS_COMPRESSION
12467 ++ if (ino == F2FS_COMPRESS_INO(sbi))
12468 ++ goto make_now;
12469 ++#endif
12470 ++
12471 + ret = do_read_inode(inode);
12472 + if (ret)
12473 + goto bad_inode;
12474 +@@ -504,6 +513,12 @@ make_now:
12475 + } else if (ino == F2FS_META_INO(sbi)) {
12476 + inode->i_mapping->a_ops = &f2fs_meta_aops;
12477 + mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
12478 ++ } else if (ino == F2FS_COMPRESS_INO(sbi)) {
12479 ++#ifdef CONFIG_F2FS_FS_COMPRESSION
12480 ++ inode->i_mapping->a_ops = &f2fs_compress_aops;
12481 ++#endif
12482 ++ mapping_set_gfp_mask(inode->i_mapping,
12483 ++ GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
12484 + } else if (S_ISREG(inode->i_mode)) {
12485 + inode->i_op = &f2fs_file_inode_operations;
12486 + inode->i_fop = &f2fs_file_operations;
12487 +@@ -646,7 +661,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
12488 +
12489 + /* deleted inode */
12490 + if (inode->i_nlink == 0)
12491 +- clear_inline_node(node_page);
12492 ++ clear_page_private_inline(node_page);
12493 +
12494 + F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
12495 + F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
12496 +@@ -723,8 +738,12 @@ void f2fs_evict_inode(struct inode *inode)
12497 + trace_f2fs_evict_inode(inode);
12498 + truncate_inode_pages_final(&inode->i_data);
12499 +
12500 ++ if (test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
12501 ++ f2fs_invalidate_compress_pages(sbi, inode->i_ino);
12502 ++
12503 + if (inode->i_ino == F2FS_NODE_INO(sbi) ||
12504 +- inode->i_ino == F2FS_META_INO(sbi))
12505 ++ inode->i_ino == F2FS_META_INO(sbi) ||
12506 ++ inode->i_ino == F2FS_COMPRESS_INO(sbi))
12507 + goto out_clear;
12508 +
12509 + f2fs_bug_on(sbi, get_dirty_pages(inode));
12510 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
12511 +index e67ce5f13b98e..dd611efa8aa4a 100644
12512 +--- a/fs/f2fs/node.c
12513 ++++ b/fs/f2fs/node.c
12514 +@@ -97,6 +97,20 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
12515 + mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
12516 + sizeof(struct discard_cmd)) >> PAGE_SHIFT;
12517 + res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
12518 ++ } else if (type == COMPRESS_PAGE) {
12519 ++#ifdef CONFIG_F2FS_FS_COMPRESSION
12520 ++ unsigned long free_ram = val.freeram;
12521 ++
12522 ++ /*
12523 ++ * If free memory is lower than the watermark, or the cached page
12524 ++ * count exceeds the threshold, deny caching the compressed page.
12525 ++ */
12526 ++ res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
12527 ++ (COMPRESS_MAPPING(sbi)->nrpages <
12528 ++ free_ram * sbi->compress_percent / 100);
12529 ++#else
12530 ++ res = false;
12531 ++#endif
12532 + } else {
12533 + if (!sbi->sb->s_bdi->wb.dirty_exceeded)
12534 + return true;
12535 +@@ -1860,8 +1874,8 @@ continue_unlock:
12536 + }
12537 +
12538 + /* flush inline_data, if it's async context. */
12539 +- if (is_inline_node(page)) {
12540 +- clear_inline_node(page);
12541 ++ if (page_private_inline(page)) {
12542 ++ clear_page_private_inline(page);
12543 + unlock_page(page);
12544 + flush_inline_data(sbi, ino_of_node(page));
12545 + continue;
12546 +@@ -1941,8 +1955,8 @@ continue_unlock:
12547 + goto write_node;
12548 +
12549 + /* flush inline_data */
12550 +- if (is_inline_node(page)) {
12551 +- clear_inline_node(page);
12552 ++ if (page_private_inline(page)) {
12553 ++ clear_page_private_inline(page);
12554 + unlock_page(page);
12555 + flush_inline_data(sbi, ino_of_node(page));
12556 + goto lock_node;
12557 +@@ -2096,7 +2110,7 @@ static int f2fs_set_node_page_dirty(struct page *page)
12558 + if (!PageDirty(page)) {
12559 + __set_page_dirty_nobuffers(page);
12560 + inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
12561 +- f2fs_set_page_private(page, 0);
12562 ++ set_page_private_reference(page);
12563 + return 1;
12564 + }
12565 + return 0;
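The COMPRESS_PAGE branch added to f2fs_available_free_memory() above is a pure admission test: a compressed page is cached only while free memory stays above a watermark and the cache itself stays below a percentage of free memory (both default to 20, per the COMPRESS_WATERMARK and COMPRESS_PERCENT constants in the f2fs.h hunk earlier). It reduces to two comparisons; a sketch with illustrative parameter names, units in pages:

    #include <stdbool.h>

    static bool may_cache_compressed_page(unsigned long free_ram,
                                          unsigned long avail_ram,
                                          unsigned long cached_pages,
                                          unsigned int watermark_pct, /* 20 */
                                          unsigned int cache_pct)     /* 20 */
    {
        return free_ram > avail_ram * watermark_pct / 100 &&
               cached_pages < free_ram * cache_pct / 100;
    }

The second bound scales with the current free memory, so the cache's admission budget automatically shrinks as memory pressure grows.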
12566 +diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
12567 +index 7a45c0f106295..84d45385d1f20 100644
12568 +--- a/fs/f2fs/node.h
12569 ++++ b/fs/f2fs/node.h
12570 +@@ -148,6 +148,7 @@ enum mem_type {
12571 + EXTENT_CACHE, /* indicates extent cache */
12572 + INMEM_PAGES, /* indicates inmemory pages */
12573 + DISCARD_CACHE, /* indicates memory of cached discard cmds */
12574 ++ COMPRESS_PAGE, /* indicates memory of cached compressed pages */
12575 + BASE_CHECK, /* check kernel status */
12576 + };
12577 +
12578 +@@ -389,20 +390,6 @@ static inline nid_t get_nid(struct page *p, int off, bool i)
12579 + * - Mark cold node blocks in their node footer
12580 + * - Mark cold data pages in page cache
12581 + */
12582 +-static inline int is_cold_data(struct page *page)
12583 +-{
12584 +- return PageChecked(page);
12585 +-}
12586 +-
12587 +-static inline void set_cold_data(struct page *page)
12588 +-{
12589 +- SetPageChecked(page);
12590 +-}
12591 +-
12592 +-static inline void clear_cold_data(struct page *page)
12593 +-{
12594 +- ClearPageChecked(page);
12595 +-}
12596 +
12597 + static inline int is_node(struct page *page, int type)
12598 + {
12599 +@@ -414,21 +401,6 @@ static inline int is_node(struct page *page, int type)
12600 + #define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT)
12601 + #define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT)
12602 +
12603 +-static inline int is_inline_node(struct page *page)
12604 +-{
12605 +- return PageChecked(page);
12606 +-}
12607 +-
12608 +-static inline void set_inline_node(struct page *page)
12609 +-{
12610 +- SetPageChecked(page);
12611 +-}
12612 +-
12613 +-static inline void clear_inline_node(struct page *page)
12614 +-{
12615 +- ClearPageChecked(page);
12616 +-}
12617 +-
12618 + static inline void set_cold_node(struct page *page, bool is_dir)
12619 + {
12620 + struct f2fs_node *rn = F2FS_NODE(page);
12621 +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
12622 +index 51dc79fad4fe2..406a6b2447822 100644
12623 +--- a/fs/f2fs/segment.c
12624 ++++ b/fs/f2fs/segment.c
12625 +@@ -186,10 +186,7 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
12626 + {
12627 + struct inmem_pages *new;
12628 +
12629 +- if (PagePrivate(page))
12630 +- set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
12631 +- else
12632 +- f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
12633 ++ set_page_private_atomic(page);
12634 +
12635 + new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
12636 +
12637 +@@ -272,9 +269,10 @@ next:
12638 + /* we don't need to invalidate this on success */
12639 + if (drop || recover) {
12640 + ClearPageUptodate(page);
12641 +- clear_cold_data(page);
12642 ++ clear_page_private_gcing(page);
12643 + }
12644 +- f2fs_clear_page_private(page);
12645 ++ detach_page_private(page);
12646 ++ set_page_private(page, 0);
12647 + f2fs_put_page(page, 1);
12648 +
12649 + list_del(&cur->list);
12650 +@@ -357,7 +355,7 @@ void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
12651 + struct list_head *head = &fi->inmem_pages;
12652 + struct inmem_pages *cur = NULL;
12653 +
12654 +- f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
12655 ++ f2fs_bug_on(sbi, !page_private_atomic(page));
12656 +
12657 + mutex_lock(&fi->inmem_lock);
12658 + list_for_each_entry(cur, head, list) {
12659 +@@ -373,9 +371,12 @@ void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
12660 + kmem_cache_free(inmem_entry_slab, cur);
12661 +
12662 + ClearPageUptodate(page);
12663 +- f2fs_clear_page_private(page);
12664 ++ clear_page_private_atomic(page);
12665 + f2fs_put_page(page, 0);
12666 +
12667 ++ detach_page_private(page);
12668 ++ set_page_private(page, 0);
12669 ++
12670 + trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
12671 + }
12672 +
12673 +@@ -2321,6 +2322,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
12674 + return;
12675 +
12676 + invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
12677 ++ f2fs_invalidate_compress_page(sbi, addr);
12678 +
12679 + /* add it into sit main buffer */
12680 + down_write(&sit_i->sentry_lock);
12681 +@@ -3289,7 +3291,7 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
12682 + if (fio->type == DATA) {
12683 + struct inode *inode = fio->page->mapping->host;
12684 +
12685 +- if (is_cold_data(fio->page)) {
12686 ++ if (page_private_gcing(fio->page)) {
12687 + if (fio->sbi->am.atgc_enabled &&
12688 + (fio->io_type == FS_DATA_IO) &&
12689 + (fio->sbi->gc_mode != GC_URGENT_HIGH))
12690 +@@ -3468,9 +3470,11 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
12691 + reallocate:
12692 + f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
12693 + &fio->new_blkaddr, sum, type, fio);
12694 +- if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
12695 ++ if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
12696 + invalidate_mapping_pages(META_MAPPING(fio->sbi),
12697 + fio->old_blkaddr, fio->old_blkaddr);
12698 ++ f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
12699 ++ }
12700 +
12701 + /* writeout dirty page into bdev */
12702 + f2fs_submit_page_write(fio);
12703 +@@ -3660,6 +3664,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
12704 + if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
12705 + invalidate_mapping_pages(META_MAPPING(sbi),
12706 + old_blkaddr, old_blkaddr);
12707 ++ f2fs_invalidate_compress_page(sbi, old_blkaddr);
12708 + if (!from_gc)
12709 + update_segment_mtime(sbi, old_blkaddr, 0);
12710 + update_sit_entry(sbi, old_blkaddr, -1);
12711 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
12712 +index 8553e8e5de0da..d61f7fcdc66b3 100644
12713 +--- a/fs/f2fs/super.c
12714 ++++ b/fs/f2fs/super.c
12715 +@@ -150,6 +150,7 @@ enum {
12716 + Opt_compress_extension,
12717 + Opt_compress_chksum,
12718 + Opt_compress_mode,
12719 ++ Opt_compress_cache,
12720 + Opt_atgc,
12721 + Opt_gc_merge,
12722 + Opt_nogc_merge,
12723 +@@ -224,6 +225,7 @@ static match_table_t f2fs_tokens = {
12724 + {Opt_compress_extension, "compress_extension=%s"},
12725 + {Opt_compress_chksum, "compress_chksum"},
12726 + {Opt_compress_mode, "compress_mode=%s"},
12727 ++ {Opt_compress_cache, "compress_cache"},
12728 + {Opt_atgc, "atgc"},
12729 + {Opt_gc_merge, "gc_merge"},
12730 + {Opt_nogc_merge, "nogc_merge"},
12731 +@@ -1066,12 +1068,16 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
12732 + }
12733 + kfree(name);
12734 + break;
12735 ++ case Opt_compress_cache:
12736 ++ set_opt(sbi, COMPRESS_CACHE);
12737 ++ break;
12738 + #else
12739 + case Opt_compress_algorithm:
12740 + case Opt_compress_log_size:
12741 + case Opt_compress_extension:
12742 + case Opt_compress_chksum:
12743 + case Opt_compress_mode:
12744 ++ case Opt_compress_cache:
12745 + f2fs_info(sbi, "compression options not supported");
12746 + break;
12747 + #endif
12748 +@@ -1403,6 +1409,8 @@ static void f2fs_put_super(struct super_block *sb)
12749 +
12750 + f2fs_bug_on(sbi, sbi->fsync_node_num);
12751 +
12752 ++ f2fs_destroy_compress_inode(sbi);
12753 ++
12754 + iput(sbi->node_inode);
12755 + sbi->node_inode = NULL;
12756 +
12757 +@@ -1672,6 +1680,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
12758 + seq_printf(seq, ",compress_mode=%s", "fs");
12759 + else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
12760 + seq_printf(seq, ",compress_mode=%s", "user");
12761 ++
12762 ++ if (test_opt(sbi, COMPRESS_CACHE))
12763 ++ seq_puts(seq, ",compress_cache");
12764 + }
12765 + #endif
12766 +
12767 +@@ -1955,10 +1966,10 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
12768 + bool need_restart_ckpt = false, need_stop_ckpt = false;
12769 + bool need_restart_flush = false, need_stop_flush = false;
12770 + bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
12771 +- bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
12772 ++ bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
12773 + bool no_io_align = !F2FS_IO_ALIGNED(sbi);
12774 + bool no_atgc = !test_opt(sbi, ATGC);
12775 +- bool checkpoint_changed;
12776 ++ bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
12777 + #ifdef CONFIG_QUOTA
12778 + int i, j;
12779 + #endif
12780 +@@ -2003,8 +2014,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
12781 + err = parse_options(sb, data, true);
12782 + if (err)
12783 + goto restore_opts;
12784 +- checkpoint_changed =
12785 +- disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);
12786 +
12787 + /*
12788 + * Previous and new state of filesystem is RO,
12789 +@@ -2050,6 +2059,12 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
12790 + goto restore_opts;
12791 + }
12792 +
12793 ++ if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
12794 ++ err = -EINVAL;
12795 ++ f2fs_warn(sbi, "switching compress_cache option is not allowed");
12796 ++ goto restore_opts;
12797 ++ }
12798 ++
12799 + if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
12800 + err = -EINVAL;
12801 + f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
12802 +@@ -2115,7 +2130,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
12803 + need_stop_flush = true;
12804 + }
12805 +
12806 +- if (checkpoint_changed) {
12807 ++ if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
12808 + if (test_opt(sbi, DISABLE_CHECKPOINT)) {
12809 + err = f2fs_disable_checkpoint(sbi);
12810 + if (err)
12811 +@@ -2399,6 +2414,33 @@ static int f2fs_enable_quotas(struct super_block *sb)
12812 + return 0;
12813 + }
12814 +
12815 ++static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
12816 ++{
12817 ++ struct quota_info *dqopt = sb_dqopt(sbi->sb);
12818 ++ struct address_space *mapping = dqopt->files[type]->i_mapping;
12819 ++ int ret = 0;
12820 ++
12821 ++ ret = dquot_writeback_dquots(sbi->sb, type);
12822 ++ if (ret)
12823 ++ goto out;
12824 ++
12825 ++ ret = filemap_fdatawrite(mapping);
12826 ++ if (ret)
12827 ++ goto out;
12828 ++
12829 ++ /* if we are using journalled quota */
12830 ++ if (is_journalled_quota(sbi))
12831 ++ goto out;
12832 ++
12833 ++ ret = filemap_fdatawait(mapping);
12834 ++
12835 ++ truncate_inode_pages(&dqopt->files[type]->i_data, 0);
12836 ++out:
12837 ++ if (ret)
12838 ++ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
12839 ++ return ret;
12840 ++}
12841 ++
12842 + int f2fs_quota_sync(struct super_block *sb, int type)
12843 + {
12844 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
12845 +@@ -2406,57 +2448,42 @@ int f2fs_quota_sync(struct super_block *sb, int type)
12846 + int cnt;
12847 + int ret;
12848 +
12849 +- /*
12850 +- * do_quotactl
12851 +- * f2fs_quota_sync
12852 +- * down_read(quota_sem)
12853 +- * dquot_writeback_dquots()
12854 +- * f2fs_dquot_commit
12855 +- * block_operation
12856 +- * down_read(quota_sem)
12857 +- */
12858 +- f2fs_lock_op(sbi);
12859 +-
12860 +- down_read(&sbi->quota_sem);
12861 +- ret = dquot_writeback_dquots(sb, type);
12862 +- if (ret)
12863 +- goto out;
12864 +-
12865 + /*
12866 + * Now when everything is written we can discard the pagecache so
12867 + * that userspace sees the changes.
12868 + */
12869 + for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
12870 +- struct address_space *mapping;
12871 +
12872 + if (type != -1 && cnt != type)
12873 + continue;
12874 +- if (!sb_has_quota_active(sb, cnt))
12875 +- continue;
12876 +
12877 +- mapping = dqopt->files[cnt]->i_mapping;
12878 ++ if (!sb_has_quota_active(sb, type))
12879 ++ return 0;
12880 +
12881 +- ret = filemap_fdatawrite(mapping);
12882 +- if (ret)
12883 +- goto out;
12884 ++ inode_lock(dqopt->files[cnt]);
12885 +
12886 +- /* if we are using journalled quota */
12887 +- if (is_journalled_quota(sbi))
12888 +- continue;
12889 ++ /*
12890 ++ * do_quotactl
12891 ++ * f2fs_quota_sync
12892 ++ * down_read(quota_sem)
12893 ++ * dquot_writeback_dquots()
12894 ++ * f2fs_dquot_commit
12895 ++ * block_operation
12896 ++ * down_read(quota_sem)
12897 ++ */
12898 ++ f2fs_lock_op(sbi);
12899 ++ down_read(&sbi->quota_sem);
12900 +
12901 +- ret = filemap_fdatawait(mapping);
12902 +- if (ret)
12903 +- set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
12904 ++ ret = f2fs_quota_sync_file(sbi, cnt);
12905 ++
12906 ++ up_read(&sbi->quota_sem);
12907 ++ f2fs_unlock_op(sbi);
12908 +
12909 +- inode_lock(dqopt->files[cnt]);
12910 +- truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
12911 + inode_unlock(dqopt->files[cnt]);
12912 ++
12913 ++ if (ret)
12914 ++ break;
12915 + }
12916 +-out:
12917 +- if (ret)
12918 +- set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
12919 +- up_read(&sbi->quota_sem);
12920 +- f2fs_unlock_op(sbi);
12921 + return ret;
12922 + }
12923 +
12924 +@@ -3089,11 +3116,13 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
12925 + return -EFSCORRUPTED;
12926 + }
12927 +
12928 +- if (le32_to_cpu(raw_super->cp_payload) >
12929 +- (blocks_per_seg - F2FS_CP_PACKS)) {
12930 +- f2fs_info(sbi, "Insane cp_payload (%u > %u)",
12931 ++ if (le32_to_cpu(raw_super->cp_payload) >=
12932 ++ (blocks_per_seg - F2FS_CP_PACKS -
12933 ++ NR_CURSEG_PERSIST_TYPE)) {
12934 ++ f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
12935 + le32_to_cpu(raw_super->cp_payload),
12936 +- blocks_per_seg - F2FS_CP_PACKS);
12937 ++ blocks_per_seg - F2FS_CP_PACKS -
12938 ++ NR_CURSEG_PERSIST_TYPE);
12939 + return -EFSCORRUPTED;
12940 + }
12941 +
12942 +@@ -3129,6 +3158,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
12943 + unsigned int cp_pack_start_sum, cp_payload;
12944 + block_t user_block_count, valid_user_blocks;
12945 + block_t avail_node_count, valid_node_count;
12946 ++ unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
12947 + int i, j;
12948 +
12949 + total = le32_to_cpu(raw_super->segment_count);
12950 +@@ -3249,6 +3279,17 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
12951 + return 1;
12952 + }
12953 +
12954 ++ nat_blocks = nat_segs << log_blocks_per_seg;
12955 ++ nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
12956 ++ nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
12957 ++ if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
12958 ++ (cp_payload + F2FS_CP_PACKS +
12959 ++ NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
12960 ++ f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u",
12961 ++ cp_payload, nat_bits_blocks);
12962 ++ return -EFSCORRUPTED;
12963 ++ }
12964 ++
12965 + if (unlikely(f2fs_cp_error(sbi))) {
12966 + f2fs_err(sbi, "A bug case: need to run fsck");
12967 + return 1;
12968 +@@ -3949,10 +3990,14 @@ try_onemore:
12969 + goto free_node_inode;
12970 + }
12971 +
12972 +- err = f2fs_register_sysfs(sbi);
12973 ++ err = f2fs_init_compress_inode(sbi);
12974 + if (err)
12975 + goto free_root_inode;
12976 +
12977 ++ err = f2fs_register_sysfs(sbi);
12978 ++ if (err)
12979 ++ goto free_compress_inode;
12980 ++
12981 + #ifdef CONFIG_QUOTA
12982 + /* Enable quota usage during mount */
12983 + if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
12984 +@@ -4093,6 +4138,8 @@ free_meta:
12985 + /* evict some inodes being cached by GC */
12986 + evict_inodes(sb);
12987 + f2fs_unregister_sysfs(sbi);
12988 ++free_compress_inode:
12989 ++ f2fs_destroy_compress_inode(sbi);
12990 + free_root_inode:
12991 + dput(sb->s_root);
12992 + sb->s_root = NULL;
12993 +@@ -4171,6 +4218,15 @@ static void kill_f2fs_super(struct super_block *sb)
12994 + f2fs_stop_gc_thread(sbi);
12995 + f2fs_stop_discard_thread(sbi);
12996 +
12997 ++#ifdef CONFIG_F2FS_FS_COMPRESSION
12998 ++ /*
12999 ++ * truncate now so that a later evict_inode() can bypass
13000 ++ * checking and invalidating the compress inode cache.
13001 ++ */
13002 ++ if (test_opt(sbi, COMPRESS_CACHE))
13003 ++ truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
13004 ++#endif
13005 ++
13006 + if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
13007 + !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
13008 + struct cp_control cpc = {
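The f2fs_quota_sync() rewrite above is mostly about lock nesting. Previously f2fs_lock_op() and quota_sem were taken once around the writeback of all quota types, with the quota file's inode lock acquired last just for the truncate; now the work is done per type in f2fs_quota_sync_file(), with the inode lock outermost and the op lock and semaphore nested inside it. A compact pthread-based sketch of the new nesting — the names are illustrative, not the kernel's:

    #include <pthread.h>

    #define MAXQUOTAS 3

    static pthread_mutex_t quota_inode_lock[MAXQUOTAS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER
    };
    static pthread_mutex_t op_lock = PTHREAD_MUTEX_INITIALIZER;     /* f2fs_lock_op() */
    static pthread_rwlock_t quota_sem = PTHREAD_RWLOCK_INITIALIZER; /* sbi->quota_sem */

    static int sync_one_type(int type, int (*sync_file)(int))
    {
        int ret;

        pthread_mutex_lock(&quota_inode_lock[type]);    /* outermost */
        pthread_mutex_lock(&op_lock);
        pthread_rwlock_rdlock(&quota_sem);

        ret = sync_file(type);  /* write back dquots, flush the quota file */

        pthread_rwlock_unlock(&quota_sem);
        pthread_mutex_unlock(&op_lock);
        pthread_mutex_unlock(&quota_inode_lock[type]);
        return ret;
    }

The moved call-chain comment documents the ordering constraint between f2fs_lock_op() and quota_sem; the restructuring keeps that order per quota type and adds the quota inode lock outside both.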
13009 +diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
13010 +index 751bc5b1cddf9..6104f627cc712 100644
13011 +--- a/fs/fscache/cookie.c
13012 ++++ b/fs/fscache/cookie.c
13013 +@@ -74,10 +74,8 @@ void fscache_free_cookie(struct fscache_cookie *cookie)
13014 + static int fscache_set_key(struct fscache_cookie *cookie,
13015 + const void *index_key, size_t index_key_len)
13016 + {
13017 +- unsigned long long h;
13018 + u32 *buf;
13019 + int bufs;
13020 +- int i;
13021 +
13022 + bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));
13023 +
13024 +@@ -91,17 +89,7 @@ static int fscache_set_key(struct fscache_cookie *cookie,
13025 + }
13026 +
13027 + memcpy(buf, index_key, index_key_len);
13028 +-
13029 +- /* Calculate a hash and combine this with the length in the first word
13030 +- * or first half word
13031 +- */
13032 +- h = (unsigned long)cookie->parent;
13033 +- h += index_key_len + cookie->type;
13034 +-
13035 +- for (i = 0; i < bufs; i++)
13036 +- h += buf[i];
13037 +-
13038 +- cookie->key_hash = h ^ (h >> 32);
13039 ++ cookie->key_hash = fscache_hash(0, buf, bufs);
13040 + return 0;
13041 + }
13042 +
13043 +diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
13044 +index c483863b740ad..aee639d980bad 100644
13045 +--- a/fs/fscache/internal.h
13046 ++++ b/fs/fscache/internal.h
13047 +@@ -97,6 +97,8 @@ extern struct workqueue_struct *fscache_object_wq;
13048 + extern struct workqueue_struct *fscache_op_wq;
13049 + DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
13050 +
13051 ++extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n);
13052 ++
13053 + static inline bool fscache_object_congested(void)
13054 + {
13055 + return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
13056 +diff --git a/fs/fscache/main.c b/fs/fscache/main.c
13057 +index c1e6cc9091aac..4207f98e405fd 100644
13058 +--- a/fs/fscache/main.c
13059 ++++ b/fs/fscache/main.c
13060 +@@ -93,6 +93,45 @@ static struct ctl_table fscache_sysctls_root[] = {
13061 + };
13062 + #endif
13063 +
13064 ++/*
13065 ++ * Mixing scores (in bits) for (7,20):
13066 ++ * Input delta: 1-bit 2-bit
13067 ++ * 1 round: 330.3 9201.6
13068 ++ * 2 rounds: 1246.4 25475.4
13069 ++ * 3 rounds: 1907.1 31295.1
13070 ++ * 4 rounds: 2042.3 31718.6
13071 ++ * Perfect: 2048 31744
13072 ++ * (32*64) (32*31/2 * 64)
13073 ++ */
13074 ++#define HASH_MIX(x, y, a) \
13075 ++ ( x ^= (a), \
13076 ++ y ^= x, x = rol32(x, 7),\
13077 ++ x += y, y = rol32(y,20),\
13078 ++ y *= 9 )
13079 ++
13080 ++static inline unsigned int fold_hash(unsigned long x, unsigned long y)
13081 ++{
13082 ++ /* Use arch-optimized multiply if one exists */
13083 ++ return __hash_32(y ^ __hash_32(x));
13084 ++}
13085 ++
13086 ++/*
13087 ++ * Generate a hash. This is derived from full_name_hash(), but we want to be
13088 ++ * sure it is arch independent and that it doesn't change, since bits of the
13089 ++ * computed hash value might appear on disk. The caller also guarantees that
13090 ++ * the hashed data will be a series of aligned 32-bit words.
13091 ++ */
13092 ++unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n)
13093 ++{
13094 ++ unsigned int a, x = 0, y = salt;
13095 ++
13096 ++ for (; n; n--) {
13097 ++ a = *data++;
13098 ++ HASH_MIX(x, y, a);
13099 ++ }
13100 ++ return fold_hash(x, y);
13101 ++}
13102 ++
13103 + /*
13104 + * initialise the fs caching module
13105 + */
13106 +diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
13107 +index 54d3fbeb3002f..384565d63eea8 100644
13108 +--- a/fs/gfs2/glops.c
13109 ++++ b/fs/gfs2/glops.c
13110 +@@ -610,16 +610,13 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl)
13111 + j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
13112 +
13113 + error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
13114 +- if (error)
13115 +- gfs2_consist(sdp);
13116 +- if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
13117 +- gfs2_consist(sdp);
13118 +-
13119 +- /* Initialize some head of the log stuff */
13120 +- if (!gfs2_withdrawn(sdp)) {
13121 +- sdp->sd_log_sequence = head.lh_sequence + 1;
13122 +- gfs2_log_pointers_init(sdp, head.lh_blkno);
13123 +- }
13124 ++ if (gfs2_assert_withdraw_delayed(sdp, !error))
13125 ++ return error;
13126 ++ if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
13127 ++ GFS2_LOG_HEAD_UNMOUNT))
13128 ++ return -EIO;
13129 ++ sdp->sd_log_sequence = head.lh_sequence + 1;
13130 ++ gfs2_log_pointers_init(sdp, head.lh_blkno);
13131 + }
13132 + return 0;
13133 + }
13134 +diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
13135 +index dac040162ecc1..50578f881e6de 100644
13136 +--- a/fs/gfs2/lock_dlm.c
13137 ++++ b/fs/gfs2/lock_dlm.c
13138 +@@ -299,6 +299,11 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
13139 + gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
13140 + gfs2_update_request_times(gl);
13141 +
13142 ++ /* don't want to call dlm if we've unmounted the lock protocol */
13143 ++ if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
13144 ++ gfs2_glock_free(gl);
13145 ++ return;
13146 ++ }
13147 + /* don't want to skip dlm_unlock writing the lvb when lock has one */
13148 +
13149 + if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
13150 +diff --git a/fs/io-wq.c b/fs/io-wq.c
13151 +index c7171d9758968..6612d0aa497ef 100644
13152 +--- a/fs/io-wq.c
13153 ++++ b/fs/io-wq.c
13154 +@@ -237,9 +237,9 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
13155 + * We need a worker. If we find a free one, we're good. If not, and we're
13156 + * below the max number of workers, create one.
13157 + */
13158 +-static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
13159 ++static void io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
13160 + {
13161 +- bool ret;
13162 ++ bool do_create = false, first = false;
13163 +
13164 + /*
13165 + * Most likely an attempt to queue unbounded work on an io_wq that
13166 +@@ -248,25 +248,18 @@ static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
13167 + if (unlikely(!acct->max_workers))
13168 + pr_warn_once("io-wq is not configured for unbound workers");
13169 +
13170 +- rcu_read_lock();
13171 +- ret = io_wqe_activate_free_worker(wqe);
13172 +- rcu_read_unlock();
13173 +-
13174 +- if (!ret) {
13175 +- bool do_create = false, first = false;
13176 +-
13177 +- raw_spin_lock_irq(&wqe->lock);
13178 +- if (acct->nr_workers < acct->max_workers) {
13179 +- atomic_inc(&acct->nr_running);
13180 +- atomic_inc(&wqe->wq->worker_refs);
13181 +- if (!acct->nr_workers)
13182 +- first = true;
13183 +- acct->nr_workers++;
13184 +- do_create = true;
13185 +- }
13186 +- raw_spin_unlock_irq(&wqe->lock);
13187 +- if (do_create)
13188 +- create_io_worker(wqe->wq, wqe, acct->index, first);
13189 ++ raw_spin_lock_irq(&wqe->lock);
13190 ++ if (acct->nr_workers < acct->max_workers) {
13191 ++ if (!acct->nr_workers)
13192 ++ first = true;
13193 ++ acct->nr_workers++;
13194 ++ do_create = true;
13195 ++ }
13196 ++ raw_spin_unlock_irq(&wqe->lock);
13197 ++ if (do_create) {
13198 ++ atomic_inc(&acct->nr_running);
13199 ++ atomic_inc(&wqe->wq->worker_refs);
13200 ++ create_io_worker(wqe->wq, wqe, acct->index, first);
13201 + }
13202 + }
13203 +
13204 +@@ -798,7 +791,8 @@ append:
13205 + static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
13206 + {
13207 + struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
13208 +- int work_flags;
13209 ++ unsigned work_flags = work->flags;
13210 ++ bool do_create;
13211 + unsigned long flags;
13212 +
13213 + /*
13214 +@@ -811,15 +805,19 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
13215 + return;
13216 + }
13217 +
13218 +- work_flags = work->flags;
13219 + raw_spin_lock_irqsave(&wqe->lock, flags);
13220 + io_wqe_insert_work(wqe, work);
13221 + wqe->flags &= ~IO_WQE_FLAG_STALLED;
13222 ++
13223 ++ rcu_read_lock();
13224 ++ do_create = !io_wqe_activate_free_worker(wqe);
13225 ++ rcu_read_unlock();
13226 ++
13227 + raw_spin_unlock_irqrestore(&wqe->lock, flags);
13228 +
13229 +- if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
13230 +- !atomic_read(&acct->nr_running))
13231 +- io_wqe_wake_worker(wqe, acct);
13232 ++ if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
13233 ++ !atomic_read(&acct->nr_running)))
13234 ++ io_wqe_create_worker(wqe, acct);
13235 + }
13236 +
13237 + void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
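The io-wq rework above splits the decision from the work: the slot in acct->nr_workers is reserved while wqe->lock is held, and the expensive create_io_worker() call runs after the lock drops. A hedged pthread sketch of that shape (pool, spawn_worker and friends are illustrative names, not the kernel's):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pool {
	pthread_mutex_t lock;
	unsigned int nr_workers;
	unsigned int max_workers;
};

static void spawn_worker(struct pool *p, bool first)
{
	/* stand-in for create_io_worker(): start a thread, etc. */
	printf("spawning %s worker\n", first ? "first" : "extra");
}

static void pool_create_worker(struct pool *p)
{
	bool do_create = false, first = false;

	pthread_mutex_lock(&p->lock);
	if (p->nr_workers < p->max_workers) {
		if (!p->nr_workers)
			first = true;
		p->nr_workers++;	/* reserve the slot while locked */
		do_create = true;
	}
	pthread_mutex_unlock(&p->lock);

	if (do_create)
		spawn_worker(p, first);	/* slow path, lock already dropped */
}

int main(void)
{
	struct pool p = { PTHREAD_MUTEX_INITIALIZER, 0, 2 };

	pool_create_worker(&p);
	pool_create_worker(&p);
	pool_create_worker(&p);	/* third call: at max, no-op */
	return 0;
}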
13238 +diff --git a/fs/io_uring.c b/fs/io_uring.c
13239 +index 58ae2eab99efa..925f7f27af1ae 100644
13240 +--- a/fs/io_uring.c
13241 ++++ b/fs/io_uring.c
13242 +@@ -1307,6 +1307,8 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
13243 + struct io_timeout_data *io = req->async_data;
13244 +
13245 + if (hrtimer_try_to_cancel(&io->timer) != -1) {
13246 ++ if (status)
13247 ++ req_set_fail_links(req);
13248 + atomic_set(&req->ctx->cq_timeouts,
13249 + atomic_read(&req->ctx->cq_timeouts) + 1);
13250 + list_del_init(&req->timeout.list);
13251 +@@ -3474,7 +3476,7 @@ static int io_renameat_prep(struct io_kiocb *req,
13252 +
13253 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13254 + return -EINVAL;
13255 +- if (sqe->ioprio || sqe->buf_index)
13256 ++ if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
13257 + return -EINVAL;
13258 + if (unlikely(req->flags & REQ_F_FIXED_FILE))
13259 + return -EBADF;
13260 +@@ -3525,7 +3527,8 @@ static int io_unlinkat_prep(struct io_kiocb *req,
13261 +
13262 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13263 + return -EINVAL;
13264 +- if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
13265 ++ if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
13266 ++ sqe->splice_fd_in)
13267 + return -EINVAL;
13268 + if (unlikely(req->flags & REQ_F_FIXED_FILE))
13269 + return -EBADF;
13270 +@@ -3571,8 +3574,8 @@ static int io_shutdown_prep(struct io_kiocb *req,
13271 + #if defined(CONFIG_NET)
13272 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13273 + return -EINVAL;
13274 +- if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
13275 +- sqe->buf_index)
13276 ++ if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
13277 ++ sqe->buf_index || sqe->splice_fd_in))
13278 + return -EINVAL;
13279 +
13280 + req->shutdown.how = READ_ONCE(sqe->len);
13281 +@@ -3720,7 +3723,8 @@ static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
13282 +
13283 + if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
13284 + return -EINVAL;
13285 +- if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
13286 ++ if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
13287 ++ sqe->splice_fd_in))
13288 + return -EINVAL;
13289 +
13290 + req->sync.flags = READ_ONCE(sqe->fsync_flags);
13291 +@@ -3753,7 +3757,8 @@ static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
13292 + static int io_fallocate_prep(struct io_kiocb *req,
13293 + const struct io_uring_sqe *sqe)
13294 + {
13295 +- if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
13296 ++ if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
13297 ++ sqe->splice_fd_in)
13298 + return -EINVAL;
13299 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13300 + return -EINVAL;
13301 +@@ -3784,7 +3789,7 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
13302 + const char __user *fname;
13303 + int ret;
13304 +
13305 +- if (unlikely(sqe->ioprio || sqe->buf_index))
13306 ++ if (unlikely(sqe->ioprio || sqe->buf_index || sqe->splice_fd_in))
13307 + return -EINVAL;
13308 + if (unlikely(req->flags & REQ_F_FIXED_FILE))
13309 + return -EBADF;
13310 +@@ -3909,7 +3914,8 @@ static int io_remove_buffers_prep(struct io_kiocb *req,
13311 + struct io_provide_buf *p = &req->pbuf;
13312 + u64 tmp;
13313 +
13314 +- if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
13315 ++ if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
13316 ++ sqe->splice_fd_in)
13317 + return -EINVAL;
13318 +
13319 + tmp = READ_ONCE(sqe->fd);
13320 +@@ -3980,7 +3986,7 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
13321 + struct io_provide_buf *p = &req->pbuf;
13322 + u64 tmp;
13323 +
13324 +- if (sqe->ioprio || sqe->rw_flags)
13325 ++ if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
13326 + return -EINVAL;
13327 +
13328 + tmp = READ_ONCE(sqe->fd);
13329 +@@ -4067,7 +4073,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
13330 + const struct io_uring_sqe *sqe)
13331 + {
13332 + #if defined(CONFIG_EPOLL)
13333 +- if (sqe->ioprio || sqe->buf_index)
13334 ++ if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
13335 + return -EINVAL;
13336 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13337 + return -EINVAL;
13338 +@@ -4113,7 +4119,7 @@ static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
13339 + static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
13340 + {
13341 + #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
13342 +- if (sqe->ioprio || sqe->buf_index || sqe->off)
13343 ++ if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
13344 + return -EINVAL;
13345 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13346 + return -EINVAL;
13347 +@@ -4148,7 +4154,7 @@ static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
13348 +
13349 + static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
13350 + {
13351 +- if (sqe->ioprio || sqe->buf_index || sqe->addr)
13352 ++ if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
13353 + return -EINVAL;
13354 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13355 + return -EINVAL;
13356 +@@ -4186,7 +4192,7 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
13357 + {
13358 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13359 + return -EINVAL;
13360 +- if (sqe->ioprio || sqe->buf_index)
13361 ++ if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
13362 + return -EINVAL;
13363 + if (req->flags & REQ_F_FIXED_FILE)
13364 + return -EBADF;
13365 +@@ -4222,7 +4228,7 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
13366 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13367 + return -EINVAL;
13368 + if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
13369 +- sqe->rw_flags || sqe->buf_index)
13370 ++ sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
13371 + return -EINVAL;
13372 + if (req->flags & REQ_F_FIXED_FILE)
13373 + return -EBADF;
13374 +@@ -4283,7 +4289,8 @@ static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
13375 +
13376 + if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
13377 + return -EINVAL;
13378 +- if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
13379 ++ if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
13380 ++ sqe->splice_fd_in))
13381 + return -EINVAL;
13382 +
13383 + req->sync.off = READ_ONCE(sqe->off);
13384 +@@ -4710,7 +4717,7 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
13385 +
13386 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13387 + return -EINVAL;
13388 +- if (sqe->ioprio || sqe->len || sqe->buf_index)
13389 ++ if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->splice_fd_in)
13390 + return -EINVAL;
13391 +
13392 + accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
13393 +@@ -4758,7 +4765,8 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
13394 +
13395 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13396 + return -EINVAL;
13397 +- if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
13398 ++ if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
13399 ++ sqe->splice_fd_in)
13400 + return -EINVAL;
13401 +
13402 + conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
13403 +@@ -5368,7 +5376,7 @@ static int io_poll_update_prep(struct io_kiocb *req,
13404 +
13405 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13406 + return -EINVAL;
13407 +- if (sqe->ioprio || sqe->buf_index)
13408 ++ if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
13409 + return -EINVAL;
13410 + flags = READ_ONCE(sqe->len);
13411 + if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
13412 +@@ -5603,7 +5611,7 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
13413 + return -EINVAL;
13414 + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
13415 + return -EINVAL;
13416 +- if (sqe->ioprio || sqe->buf_index || sqe->len)
13417 ++ if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
13418 + return -EINVAL;
13419 +
13420 + tr->addr = READ_ONCE(sqe->addr);
13421 +@@ -5662,7 +5670,8 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
13422 +
13423 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
13424 + return -EINVAL;
13425 +- if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
13426 ++ if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
13427 ++ sqe->splice_fd_in)
13428 + return -EINVAL;
13429 + if (off && is_timeout_link)
13430 + return -EINVAL;
13431 +@@ -5811,7 +5820,8 @@ static int io_async_cancel_prep(struct io_kiocb *req,
13432 + return -EINVAL;
13433 + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
13434 + return -EINVAL;
13435 +- if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
13436 ++ if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
13437 ++ sqe->splice_fd_in)
13438 + return -EINVAL;
13439 +
13440 + req->cancel.addr = READ_ONCE(sqe->addr);
13441 +@@ -5868,7 +5878,7 @@ static int io_rsrc_update_prep(struct io_kiocb *req,
13442 + {
13443 + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
13444 + return -EINVAL;
13445 +- if (sqe->ioprio || sqe->rw_flags)
13446 ++ if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
13447 + return -EINVAL;
13448 +
13449 + req->rsrc_update.offset = READ_ONCE(sqe->off);
13450 +@@ -6281,6 +6291,7 @@ static void io_wq_submit_work(struct io_wq_work *work)
13451 + if (timeout)
13452 + io_queue_linked_timeout(timeout);
13453 +
13454 ++ /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
13455 + if (work->flags & IO_WQ_WORK_CANCEL)
13456 + ret = -ECANCELED;
13457 +
13458 +@@ -7195,11 +7206,11 @@ static struct io_rsrc_data *io_rsrc_data_alloc(struct io_ring_ctx *ctx,
13459 + {
13460 + struct io_rsrc_data *data;
13461 +
13462 +- data = kzalloc(sizeof(*data), GFP_KERNEL);
13463 ++ data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
13464 + if (!data)
13465 + return NULL;
13466 +
13467 +- data->tags = kvcalloc(nr, sizeof(*data->tags), GFP_KERNEL);
13468 ++ data->tags = kvcalloc(nr, sizeof(*data->tags), GFP_KERNEL_ACCOUNT);
13469 + if (!data->tags) {
13470 + kfree(data);
13471 + return NULL;
13472 +@@ -7477,7 +7488,7 @@ static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
13473 + {
13474 + unsigned i, nr_tables = DIV_ROUND_UP(nr_files, IORING_MAX_FILES_TABLE);
13475 +
13476 +- table->files = kcalloc(nr_tables, sizeof(*table->files), GFP_KERNEL);
13477 ++ table->files = kcalloc(nr_tables, sizeof(*table->files), GFP_KERNEL_ACCOUNT);
13478 + if (!table->files)
13479 + return false;
13480 +
13481 +@@ -7485,7 +7496,7 @@ static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
13482 + unsigned int this_files = min(nr_files, IORING_MAX_FILES_TABLE);
13483 +
13484 + table->files[i] = kcalloc(this_files, sizeof(*table->files[i]),
13485 +- GFP_KERNEL);
13486 ++ GFP_KERNEL_ACCOUNT);
13487 + if (!table->files[i])
13488 + break;
13489 + nr_files -= this_files;
13490 +@@ -9090,8 +9101,8 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
13491 + * Must be after io_uring_del_task_file() (removes nodes under
13492 + * uring_lock) to avoid race with io_uring_try_cancel_iowq().
13493 + */
13494 +- tctx->io_wq = NULL;
13495 + io_wq_put_and_exit(wq);
13496 ++ tctx->io_wq = NULL;
13497 + }
13498 + }
13499 +
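Every prep hunk in this file enforces the same rule: an opcode must return -EINVAL for SQE fields it does not consume, so that a field such as splice_fd_in can later be given a meaning without breaking binaries that already submit it zeroed. A minimal sketch of the pattern, using an illustrative struct rather than the real io_uring_sqe layout:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct sqe_view {			/* illustrative, not the uapi layout */
	uint16_t ioprio;
	uint16_t buf_index;
	int32_t  splice_fd_in;		/* reserved for this opcode */
};

static int prep_renameat(const struct sqe_view *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;		/* reserved fields must be zero */
	return 0;
}

int main(void)
{
	struct sqe_view ok = { 0 }, bad = { .splice_fd_in = 5 };

	printf("%d %d\n", prep_renameat(&ok), prep_renameat(&bad));
	return 0;
}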
13500 +diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
13501 +index 9023717c5188b..35839acd0004a 100644
13502 +--- a/fs/iomap/buffered-io.c
13503 ++++ b/fs/iomap/buffered-io.c
13504 +@@ -1045,7 +1045,7 @@ iomap_finish_page_writeback(struct inode *inode, struct page *page,
13505 +
13506 + if (error) {
13507 + SetPageError(page);
13508 +- mapping_set_error(inode->i_mapping, -EIO);
13509 ++ mapping_set_error(inode->i_mapping, error);
13510 + }
13511 +
13512 + WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
13513 +diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
13514 +index 498cb70c2c0d0..273a81971ed57 100644
13515 +--- a/fs/lockd/svclock.c
13516 ++++ b/fs/lockd/svclock.c
13517 +@@ -395,28 +395,10 @@ nlmsvc_release_lockowner(struct nlm_lock *lock)
13518 + nlmsvc_put_lockowner(lock->fl.fl_owner);
13519 + }
13520 +
13521 +-static void nlmsvc_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
13522 +-{
13523 +- struct nlm_lockowner *nlm_lo = (struct nlm_lockowner *)fl->fl_owner;
13524 +- new->fl_owner = nlmsvc_get_lockowner(nlm_lo);
13525 +-}
13526 +-
13527 +-static void nlmsvc_locks_release_private(struct file_lock *fl)
13528 +-{
13529 +- nlmsvc_put_lockowner((struct nlm_lockowner *)fl->fl_owner);
13530 +-}
13531 +-
13532 +-static const struct file_lock_operations nlmsvc_lock_ops = {
13533 +- .fl_copy_lock = nlmsvc_locks_copy_lock,
13534 +- .fl_release_private = nlmsvc_locks_release_private,
13535 +-};
13536 +-
13537 + void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
13538 + pid_t pid)
13539 + {
13540 + fl->fl_owner = nlmsvc_find_lockowner(host, pid);
13541 +- if (fl->fl_owner != NULL)
13542 +- fl->fl_ops = &nlmsvc_lock_ops;
13543 + }
13544 +
13545 + /*
13546 +@@ -788,9 +770,21 @@ nlmsvc_notify_blocked(struct file_lock *fl)
13547 + printk(KERN_WARNING "lockd: notification for unknown block!\n");
13548 + }
13549 +
13550 ++static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
13551 ++{
13552 ++ return nlmsvc_get_lockowner(owner);
13553 ++}
13554 ++
13555 ++static void nlmsvc_put_owner(fl_owner_t owner)
13556 ++{
13557 ++ nlmsvc_put_lockowner(owner);
13558 ++}
13559 ++
13560 + const struct lock_manager_operations nlmsvc_lock_operations = {
13561 + .lm_notify = nlmsvc_notify_blocked,
13562 + .lm_grant = nlmsvc_grant_deferred,
13563 ++ .lm_get_owner = nlmsvc_get_owner,
13564 ++ .lm_put_owner = nlmsvc_put_owner,
13565 + };
13566 +
13567 + /*
13568 +diff --git a/fs/nfs/export.c b/fs/nfs/export.c
13569 +index 37a1a88df7717..d772c20bbfd15 100644
13570 +--- a/fs/nfs/export.c
13571 ++++ b/fs/nfs/export.c
13572 +@@ -180,5 +180,5 @@ const struct export_operations nfs_export_ops = {
13573 + .fetch_iversion = nfs_fetch_iversion,
13574 + .flags = EXPORT_OP_NOWCC|EXPORT_OP_NOSUBTREECHK|
13575 + EXPORT_OP_CLOSE_BEFORE_UNLINK|EXPORT_OP_REMOTE_FS|
13576 +- EXPORT_OP_NOATOMIC_ATTR,
13577 ++ EXPORT_OP_NOATOMIC_ATTR|EXPORT_OP_SYNC_LOCKS,
13578 + };
13579 +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
13580 +index be960e47d7f61..28350d62b9bd1 100644
13581 +--- a/fs/nfs/pnfs.c
13582 ++++ b/fs/nfs/pnfs.c
13583 +@@ -335,7 +335,7 @@ static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
13584 +
13585 + static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
13586 + {
13587 +- if (pnfs_seqid_is_newer(newseq, lo->plh_barrier))
13588 ++ if (pnfs_seqid_is_newer(newseq, lo->plh_barrier) || !lo->plh_barrier)
13589 + lo->plh_barrier = newseq;
13590 + }
13591 +
13592 +@@ -347,11 +347,15 @@ pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
13593 + iomode = IOMODE_ANY;
13594 + lo->plh_return_iomode = iomode;
13595 + set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
13596 +- if (seq != 0) {
13597 +- WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
13598 ++ /*
13599 ++ * We must set lo->plh_return_seq to avoid livelocks with
13600 ++ * pnfs_layout_need_return()
13601 ++ */
13602 ++ if (seq == 0)
13603 ++ seq = be32_to_cpu(lo->plh_stateid.seqid);
13604 ++ if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
13605 + lo->plh_return_seq = seq;
13606 +- pnfs_barrier_update(lo, seq);
13607 +- }
13608 ++ pnfs_barrier_update(lo, seq);
13609 + }
13610 +
13611 + static void
13612 +@@ -1000,7 +1004,7 @@ pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
13613 + {
13614 + u32 seqid = be32_to_cpu(stateid->seqid);
13615 +
13616 +- return !pnfs_seqid_is_newer(seqid, lo->plh_barrier) && lo->plh_barrier;
13617 ++ return lo->plh_barrier && pnfs_seqid_is_newer(lo->plh_barrier, seqid);
13618 + }
13619 +
13620 + /* lget is set to 1 if called from inside send_layoutget call chain */
13621 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
13622 +index ab81e8ae32659..42c42ee3f00a2 100644
13623 +--- a/fs/nfsd/nfs4state.c
13624 ++++ b/fs/nfsd/nfs4state.c
13625 +@@ -6727,6 +6727,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
13626 + struct nfsd4_blocked_lock *nbl = NULL;
13627 + struct file_lock *file_lock = NULL;
13628 + struct file_lock *conflock = NULL;
13629 ++ struct super_block *sb;
13630 + __be32 status = 0;
13631 + int lkflg;
13632 + int err;
13633 +@@ -6748,6 +6749,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
13634 + dprintk("NFSD: nfsd4_lock: permission denied!\n");
13635 + return status;
13636 + }
13637 ++ sb = cstate->current_fh.fh_dentry->d_sb;
13638 +
13639 + if (lock->lk_is_new) {
13640 + if (nfsd4_has_session(cstate))
13641 +@@ -6796,7 +6798,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
13642 + fp = lock_stp->st_stid.sc_file;
13643 + switch (lock->lk_type) {
13644 + case NFS4_READW_LT:
13645 +- if (nfsd4_has_session(cstate))
13646 ++ if (nfsd4_has_session(cstate) &&
13647 ++ !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
13648 + fl_flags |= FL_SLEEP;
13649 + fallthrough;
13650 + case NFS4_READ_LT:
13651 +@@ -6808,7 +6811,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
13652 + fl_type = F_RDLCK;
13653 + break;
13654 + case NFS4_WRITEW_LT:
13655 +- if (nfsd4_has_session(cstate))
13656 ++ if (nfsd4_has_session(cstate) &&
13657 ++ !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
13658 + fl_flags |= FL_SLEEP;
13659 + fallthrough;
13660 + case NFS4_WRITE_LT:
13661 +@@ -6928,8 +6932,7 @@ out:
13662 + /*
13663 + * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
13664 + * so we do a temporary open here just to get an open file to pass to
13665 +- * vfs_test_lock. (Arguably perhaps test_lock should be done with an
13666 +- * inode operation.)
13667 ++ * vfs_test_lock.
13668 + */
13669 + static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
13670 + {
13671 +@@ -6944,7 +6947,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct
13672 + NFSD_MAY_READ));
13673 + if (err)
13674 + goto out;
13675 ++ lock->fl_file = nf->nf_file;
13676 + err = nfserrno(vfs_test_lock(nf->nf_file, lock));
13677 ++ lock->fl_file = NULL;
13678 + out:
13679 + fh_unlock(fhp);
13680 + nfsd_file_put(nf);
13681 +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
13682 +index 93efe7048a771..7c1850adec288 100644
13683 +--- a/fs/overlayfs/dir.c
13684 ++++ b/fs/overlayfs/dir.c
13685 +@@ -542,8 +542,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
13686 + goto out_cleanup;
13687 + }
13688 + err = ovl_instantiate(dentry, inode, newdentry, hardlink);
13689 +- if (err)
13690 +- goto out_cleanup;
13691 ++ if (err) {
13692 ++ ovl_cleanup(udir, newdentry);
13693 ++ dput(newdentry);
13694 ++ }
13695 + out_dput:
13696 + dput(upper);
13697 + out_unlock:
13698 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
13699 +index 8fc8bbf9635b6..e55f861d23fda 100644
13700 +--- a/fs/userfaultfd.c
13701 ++++ b/fs/userfaultfd.c
13702 +@@ -33,11 +33,6 @@ int sysctl_unprivileged_userfaultfd __read_mostly;
13703 +
13704 + static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
13705 +
13706 +-enum userfaultfd_state {
13707 +- UFFD_STATE_WAIT_API,
13708 +- UFFD_STATE_RUNNING,
13709 +-};
13710 +-
13711 + /*
13712 + * Start with fault_pending_wqh and fault_wqh so they're more likely
13713 + * to be in the same cacheline.
13714 +@@ -69,8 +64,6 @@ struct userfaultfd_ctx {
13715 + unsigned int flags;
13716 + /* features requested from the userspace */
13717 + unsigned int features;
13718 +- /* state machine */
13719 +- enum userfaultfd_state state;
13720 + /* released */
13721 + bool released;
13722 + /* memory mappings are changing because of non-cooperative event */
13723 +@@ -104,6 +97,14 @@ struct userfaultfd_wake_range {
13724 + unsigned long len;
13725 + };
13726 +
13727 ++/* internal indication that UFFD_API ioctl was successfully executed */
13728 ++#define UFFD_FEATURE_INITIALIZED (1u << 31)
13729 ++
13730 ++static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
13731 ++{
13732 ++ return ctx->features & UFFD_FEATURE_INITIALIZED;
13733 ++}
13734 ++
13735 + static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
13736 + int wake_flags, void *key)
13737 + {
13738 +@@ -666,7 +667,6 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
13739 +
13740 + refcount_set(&ctx->refcount, 1);
13741 + ctx->flags = octx->flags;
13742 +- ctx->state = UFFD_STATE_RUNNING;
13743 + ctx->features = octx->features;
13744 + ctx->released = false;
13745 + ctx->mmap_changing = false;
13746 +@@ -943,38 +943,33 @@ static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
13747 +
13748 + poll_wait(file, &ctx->fd_wqh, wait);
13749 +
13750 +- switch (ctx->state) {
13751 +- case UFFD_STATE_WAIT_API:
13752 ++ if (!userfaultfd_is_initialized(ctx))
13753 + return EPOLLERR;
13754 +- case UFFD_STATE_RUNNING:
13755 +- /*
13756 +- * poll() never guarantees that read won't block.
13757 +- * userfaults can be waken before they're read().
13758 +- */
13759 +- if (unlikely(!(file->f_flags & O_NONBLOCK)))
13760 +- return EPOLLERR;
13761 +- /*
13762 +- * lockless access to see if there are pending faults
13763 +- * __pollwait last action is the add_wait_queue but
13764 +- * the spin_unlock would allow the waitqueue_active to
13765 +- * pass above the actual list_add inside
13766 +- * add_wait_queue critical section. So use a full
13767 +- * memory barrier to serialize the list_add write of
13768 +- * add_wait_queue() with the waitqueue_active read
13769 +- * below.
13770 +- */
13771 +- ret = 0;
13772 +- smp_mb();
13773 +- if (waitqueue_active(&ctx->fault_pending_wqh))
13774 +- ret = EPOLLIN;
13775 +- else if (waitqueue_active(&ctx->event_wqh))
13776 +- ret = EPOLLIN;
13777 +
13778 +- return ret;
13779 +- default:
13780 +- WARN_ON_ONCE(1);
13781 ++ /*
13782 ++ * poll() never guarantees that read won't block.
13783 ++ * userfaults can be woken before they're read().
13784 ++ */
13785 ++ if (unlikely(!(file->f_flags & O_NONBLOCK)))
13786 + return EPOLLERR;
13787 +- }
13788 ++ /*
13789 ++ * lockless access to see if there are pending faults
13790 ++ * __pollwait last action is the add_wait_queue but
13791 ++ * the spin_unlock would allow the waitqueue_active to
13792 ++ * pass above the actual list_add inside
13793 ++ * add_wait_queue critical section. So use a full
13794 ++ * memory barrier to serialize the list_add write of
13795 ++ * add_wait_queue() with the waitqueue_active read
13796 ++ * below.
13797 ++ */
13798 ++ ret = 0;
13799 ++ smp_mb();
13800 ++ if (waitqueue_active(&ctx->fault_pending_wqh))
13801 ++ ret = EPOLLIN;
13802 ++ else if (waitqueue_active(&ctx->event_wqh))
13803 ++ ret = EPOLLIN;
13804 ++
13805 ++ return ret;
13806 + }
13807 +
13808 + static const struct file_operations userfaultfd_fops;
13809 +@@ -1169,7 +1164,7 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf,
13810 + int no_wait = file->f_flags & O_NONBLOCK;
13811 + struct inode *inode = file_inode(file);
13812 +
13813 +- if (ctx->state == UFFD_STATE_WAIT_API)
13814 ++ if (!userfaultfd_is_initialized(ctx))
13815 + return -EINVAL;
13816 +
13817 + for (;;) {
13818 +@@ -1905,9 +1900,10 @@ out:
13819 + static inline unsigned int uffd_ctx_features(__u64 user_features)
13820 + {
13821 + /*
13822 +- * For the current set of features the bits just coincide
13823 ++ * For the current set of features the bits just coincide. Set
13824 ++ * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
13825 + */
13826 +- return (unsigned int)user_features;
13827 ++ return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
13828 + }
13829 +
13830 + /*
13831 +@@ -1920,12 +1916,10 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
13832 + {
13833 + struct uffdio_api uffdio_api;
13834 + void __user *buf = (void __user *)arg;
13835 ++ unsigned int ctx_features;
13836 + int ret;
13837 + __u64 features;
13838 +
13839 +- ret = -EINVAL;
13840 +- if (ctx->state != UFFD_STATE_WAIT_API)
13841 +- goto out;
13842 + ret = -EFAULT;
13843 + if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
13844 + goto out;
13845 +@@ -1945,9 +1939,13 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
13846 + ret = -EFAULT;
13847 + if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
13848 + goto out;
13849 +- ctx->state = UFFD_STATE_RUNNING;
13850 ++
13851 + /* only enable the requested features for this uffd context */
13852 +- ctx->features = uffd_ctx_features(features);
13853 ++ ctx_features = uffd_ctx_features(features);
13854 ++ ret = -EINVAL;
13855 ++ if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
13856 ++ goto err_out;
13857 ++
13858 + ret = 0;
13859 + out:
13860 + return ret;
13861 +@@ -1964,7 +1962,7 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
13862 + int ret = -EINVAL;
13863 + struct userfaultfd_ctx *ctx = file->private_data;
13864 +
13865 +- if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
13866 ++ if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
13867 + return -EINVAL;
13868 +
13869 + switch(cmd) {
13870 +@@ -2078,7 +2076,6 @@ SYSCALL_DEFINE1(userfaultfd, int, flags)
13871 + refcount_set(&ctx->refcount, 1);
13872 + ctx->flags = flags;
13873 + ctx->features = 0;
13874 +- ctx->state = UFFD_STATE_WAIT_API;
13875 + ctx->released = false;
13876 + ctx->mmap_changing = false;
13877 + ctx->mm = current->mm;
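With the state machine removed, a reserved feature bit doubles as the initialized flag, and the cmpxchg() guarantees that exactly one concurrent UFFDIO_API caller publishes the features. A user-space sketch of that once-only pattern, with C11 atomics standing in for the kernel's cmpxchg():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FEATURE_INITIALIZED (1u << 31)

static _Atomic unsigned int ctx_features;

static bool uffd_api_once(unsigned int requested)
{
	unsigned int expected = 0;
	unsigned int val = requested | FEATURE_INITIALIZED;

	/* succeeds for exactly one caller; later calls see non-zero */
	return atomic_compare_exchange_strong(&ctx_features, &expected, val);
}

int main(void)
{
	printf("first:  %d\n", uffd_api_once(0x3));	/* 1 */
	printf("second: %d\n", uffd_api_once(0x3));	/* 0: already set */
	return 0;
}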
13878 +diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
13879 +index 47accec68cb0f..f603325c0c30d 100644
13880 +--- a/include/crypto/public_key.h
13881 ++++ b/include/crypto/public_key.h
13882 +@@ -38,9 +38,9 @@ extern void public_key_free(struct public_key *key);
13883 + struct public_key_signature {
13884 + struct asymmetric_key_id *auth_ids[2];
13885 + u8 *s; /* Signature */
13886 +- u32 s_size; /* Number of bytes in signature */
13887 + u8 *digest;
13888 +- u8 digest_size; /* Number of bytes in digest */
13889 ++ u32 s_size; /* Number of bytes in signature */
13890 ++ u32 digest_size; /* Number of bytes in digest */
13891 + const char *pkey_algo;
13892 + const char *hash_algo;
13893 + const char *encoding;
13894 +diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
13895 +index 6bf8b2b789919..f99d3417f3042 100644
13896 +--- a/include/drm/drm_auth.h
13897 ++++ b/include/drm/drm_auth.h
13898 +@@ -107,6 +107,7 @@ struct drm_master {
13899 + };
13900 +
13901 + struct drm_master *drm_master_get(struct drm_master *master);
13902 ++struct drm_master *drm_file_get_master(struct drm_file *file_priv);
13903 + void drm_master_put(struct drm_master **master);
13904 + bool drm_is_current_master(struct drm_file *fpriv);
13905 +
13906 +diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
13907 +index b81b3bfb08c8d..726cfe0ff5f5c 100644
13908 +--- a/include/drm/drm_file.h
13909 ++++ b/include/drm/drm_file.h
13910 +@@ -226,15 +226,27 @@ struct drm_file {
13911 + /**
13912 + * @master:
13913 + *
13914 +- * Master this node is currently associated with. Only relevant if
13915 +- * drm_is_primary_client() returns true. Note that this only
13916 +- * matches &drm_device.master if the master is the currently active one.
13917 ++ * Master this node is currently associated with. Protected by struct
13918 ++ * &drm_device.master_mutex, and serialized by @master_lookup_lock.
13919 ++ *
13920 ++ * Only relevant if drm_is_primary_client() returns true. Note that
13921 ++ * this only matches &drm_device.master if the master is the currently
13922 ++ * active one.
13923 ++ *
13924 ++ * When dereferencing this pointer, either hold struct
13925 ++ * &drm_device.master_mutex for the duration of the pointer's use, or
13926 ++ * use drm_file_get_master() if struct &drm_device.master_mutex is not
13927 ++ * currently held and there is no other need to hold it. This prevents
13928 ++ * @master from being freed during use.
13929 + *
13930 + * See also @authentication and @is_master and the :ref:`section on
13931 + * primary nodes and authentication <drm_primary_node>`.
13932 + */
13933 + struct drm_master *master;
13934 +
13935 ++ /** @master_lookup_lock: Serializes @master. */
13936 ++ spinlock_t master_lookup_lock;
13937 ++
13938 + /** @pid: Process that opened this file. */
13939 + struct pid *pid;
13940 +
13941 +diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
13942 +index e030f7510cd3a..b21a553e2e062 100644
13943 +--- a/include/linux/ethtool.h
13944 ++++ b/include/linux/ethtool.h
13945 +@@ -17,8 +17,6 @@
13946 + #include <linux/compat.h>
13947 + #include <uapi/linux/ethtool.h>
13948 +
13949 +-#ifdef CONFIG_COMPAT
13950 +-
13951 + struct compat_ethtool_rx_flow_spec {
13952 + u32 flow_type;
13953 + union ethtool_flow_union h_u;
13954 +@@ -38,8 +36,6 @@ struct compat_ethtool_rxnfc {
13955 + u32 rule_locs[];
13956 + };
13957 +
13958 +-#endif /* CONFIG_COMPAT */
13959 +-
13960 + #include <linux/rculist.h>
13961 +
13962 + /**
13963 +diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
13964 +index fe848901fcc3a..3260fe7148462 100644
13965 +--- a/include/linux/exportfs.h
13966 ++++ b/include/linux/exportfs.h
13967 +@@ -221,6 +221,8 @@ struct export_operations {
13968 + #define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply
13969 + atomic attribute updates
13970 + */
13971 ++#define EXPORT_OP_SYNC_LOCKS (0x20) /* Filesystem can't do
13972 ++ asychronous blocking locks */
13973 + unsigned long flags;
13974 + };
13975 +
13976 +diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
13977 +index 5487a80617a30..0021ea8f7c3bd 100644
13978 +--- a/include/linux/f2fs_fs.h
13979 ++++ b/include/linux/f2fs_fs.h
13980 +@@ -34,6 +34,7 @@
13981 + #define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num)
13982 + #define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num)
13983 + #define F2FS_META_INO(sbi) ((sbi)->meta_ino_num)
13984 ++#define F2FS_COMPRESS_INO(sbi) (NM_I(sbi)->max_nid)
13985 +
13986 + #define F2FS_MAX_QUOTAS 3
13987 +
13988 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
13989 +index 28a110ec2a0d5..0b9a894c20c85 100644
13990 +--- a/include/linux/hugetlb.h
13991 ++++ b/include/linux/hugetlb.h
13992 +@@ -835,6 +835,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
13993 +
13994 + void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
13995 +
13996 ++static inline void hugetlb_count_init(struct mm_struct *mm)
13997 ++{
13998 ++ atomic_long_set(&mm->hugetlb_usage, 0);
13999 ++}
14000 ++
14001 + static inline void hugetlb_count_add(long l, struct mm_struct *mm)
14002 + {
14003 + atomic_long_add(l, &mm->hugetlb_usage);
14004 +@@ -1019,6 +1024,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
14005 + return &mm->page_table_lock;
14006 + }
14007 +
14008 ++static inline void hugetlb_count_init(struct mm_struct *mm)
14009 ++{
14010 ++}
14011 ++
14012 + static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
14013 + {
14014 + }
14015 +diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
14016 +index 0bff345c4bc68..171bf1be40115 100644
14017 +--- a/include/linux/hugetlb_cgroup.h
14018 ++++ b/include/linux/hugetlb_cgroup.h
14019 +@@ -118,6 +118,13 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
14020 + css_put(&h_cg->css);
14021 + }
14022 +
14023 ++static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
14024 ++ struct resv_map *resv_map)
14025 ++{
14026 ++ if (resv_map->css)
14027 ++ css_get(resv_map->css);
14028 ++}
14029 ++
14030 + extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
14031 + struct hugetlb_cgroup **ptr);
14032 + extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
14033 +@@ -196,6 +203,11 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
14034 + {
14035 + }
14036 +
14037 ++static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
14038 ++ struct resv_map *resv_map)
14039 ++{
14040 ++}
14041 ++
14042 + static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
14043 + struct hugetlb_cgroup **ptr)
14044 + {
14045 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
14046 +index 03faf20a6817e..cf2dafe3ce608 100644
14047 +--- a/include/linux/intel-iommu.h
14048 ++++ b/include/linux/intel-iommu.h
14049 +@@ -124,9 +124,9 @@
14050 + #define DMAR_MTRR_PHYSMASK8_REG 0x208
14051 + #define DMAR_MTRR_PHYSBASE9_REG 0x210
14052 + #define DMAR_MTRR_PHYSMASK9_REG 0x218
14053 +-#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */
14054 +-#define DMAR_VCMD_REG 0xe10 /* Virtual command register */
14055 +-#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */
14056 ++#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */
14057 ++#define DMAR_VCMD_REG 0xe00 /* Virtual command register */
14058 ++#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */
14059 +
14060 + #define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg)
14061 + #define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg)
14062 +diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
14063 +index 28f32fd00fe9f..57a8aa463ccb8 100644
14064 +--- a/include/linux/memory_hotplug.h
14065 ++++ b/include/linux/memory_hotplug.h
14066 +@@ -366,8 +366,8 @@ extern void sparse_remove_section(struct mem_section *ms,
14067 + unsigned long map_offset, struct vmem_altmap *altmap);
14068 + extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
14069 + unsigned long pnum);
14070 +-extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
14071 +- unsigned long nr_pages);
14072 ++extern struct zone *zone_for_pfn_range(int online_type, int nid,
14073 ++ unsigned long start_pfn, unsigned long nr_pages);
14074 + extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
14075 + struct mhp_params *params);
14076 + void arch_remove_linear_mapping(u64 start, u64 size);
14077 +diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
14078 +index 1199ffd305d1a..fcd8ec0b7408e 100644
14079 +--- a/include/linux/rcupdate.h
14080 ++++ b/include/linux/rcupdate.h
14081 +@@ -167,7 +167,7 @@ void synchronize_rcu_tasks(void);
14082 + # define synchronize_rcu_tasks synchronize_rcu
14083 + # endif
14084 +
14085 +-# ifdef CONFIG_TASKS_RCU_TRACE
14086 ++# ifdef CONFIG_TASKS_TRACE_RCU
14087 + # define rcu_tasks_trace_qs(t) \
14088 + do { \
14089 + if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
14090 +diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
14091 +index d1672de9ca89e..87b325aec5085 100644
14092 +--- a/include/linux/rtmutex.h
14093 ++++ b/include/linux/rtmutex.h
14094 +@@ -52,17 +52,22 @@ do { \
14095 + } while (0)
14096 +
14097 + #ifdef CONFIG_DEBUG_LOCK_ALLOC
14098 +-#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
14099 +- , .dep_map = { .name = #mutexname }
14100 ++#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
14101 ++ .dep_map = { \
14102 ++ .name = #mutexname, \
14103 ++ .wait_type_inner = LD_WAIT_SLEEP, \
14104 ++ }
14105 + #else
14106 + #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
14107 + #endif
14108 +
14109 +-#define __RT_MUTEX_INITIALIZER(mutexname) \
14110 +- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
14111 +- , .waiters = RB_ROOT_CACHED \
14112 +- , .owner = NULL \
14113 +- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
14114 ++#define __RT_MUTEX_INITIALIZER(mutexname) \
14115 ++{ \
14116 ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock), \
14117 ++ .waiters = RB_ROOT_CACHED, \
14118 ++ .owner = NULL, \
14119 ++ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
14120 ++}
14121 +
14122 + #define DEFINE_RT_MUTEX(mutexname) \
14123 + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
14124 +diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
14125 +index 61b622e334ee5..52e5b11e4b6a9 100644
14126 +--- a/include/linux/sunrpc/xprt.h
14127 ++++ b/include/linux/sunrpc/xprt.h
14128 +@@ -422,6 +422,7 @@ void xprt_unlock_connect(struct rpc_xprt *, void *);
14129 + #define XPRT_CONGESTED (9)
14130 + #define XPRT_CWND_WAIT (10)
14131 + #define XPRT_WRITE_SPACE (11)
14132 ++#define XPRT_SND_IS_COOKIE (12)
14133 +
14134 + static inline void xprt_set_connected(struct rpc_xprt *xprt)
14135 + {
14136 +diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
14137 +index 3c1423ee74b4e..8c2a712cb2420 100644
14138 +--- a/include/linux/sunrpc/xprtsock.h
14139 ++++ b/include/linux/sunrpc/xprtsock.h
14140 +@@ -10,6 +10,7 @@
14141 +
14142 + int init_socket_xprt(void);
14143 + void cleanup_socket_xprt(void);
14144 ++unsigned short get_srcport(struct rpc_xprt *);
14145 +
14146 + #define RPC_MIN_RESVPORT (1U)
14147 + #define RPC_MAX_RESVPORT (65535U)
14148 +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
14149 +index 34a92d5ed12b5..22a60291f2037 100644
14150 +--- a/include/net/bluetooth/hci_core.h
14151 ++++ b/include/net/bluetooth/hci_core.h
14152 +@@ -1411,6 +1411,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
14153 + !hci_dev_test_flag(dev, HCI_AUTO_OFF))
14154 + #define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
14155 + hci_dev_test_flag(dev, HCI_SC_ENABLED))
14156 ++#define rpa_valid(dev) (bacmp(&dev->rpa, BDADDR_ANY) && \
14157 ++ !hci_dev_test_flag(dev, HCI_RPA_EXPIRED))
14158 ++#define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \
14159 ++ !adv->rpa_expired)
14160 +
14161 + #define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
14162 + ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
14163 +diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
14164 +index dc5c1e69cd9f2..26788244d75bb 100644
14165 +--- a/include/net/flow_offload.h
14166 ++++ b/include/net/flow_offload.h
14167 +@@ -451,6 +451,7 @@ struct flow_block_offload {
14168 + struct list_head *driver_block_list;
14169 + struct netlink_ext_ack *extack;
14170 + struct Qdisc *sch;
14171 ++ struct list_head *cb_list_head;
14172 + };
14173 +
14174 + enum tc_setup_type;
14175 +diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
14176 +index be07b5470f4bb..f51bc8f368134 100644
14177 +--- a/include/uapi/linux/serial_reg.h
14178 ++++ b/include/uapi/linux/serial_reg.h
14179 +@@ -62,6 +62,7 @@
14180 + * ST16C654: 8 16 56 60 8 16 32 56 PORT_16654
14181 + * TI16C750: 1 16 32 56 xx xx xx xx PORT_16750
14182 + * TI16C752: 8 16 56 60 8 16 32 56
14183 ++ * OX16C950: 16 32 112 120 16 32 64 112 PORT_16C950
14184 + * Tegra: 1 4 8 14 16 8 4 1 PORT_TEGRA
14185 + */
14186 + #define UART_FCR_R_TRIG_00 0x00
14187 +diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
14188 +index 14de1271463fd..4457545299177 100644
14189 +--- a/kernel/dma/debug.c
14190 ++++ b/kernel/dma/debug.c
14191 +@@ -794,7 +794,7 @@ static int dump_show(struct seq_file *seq, void *v)
14192 + }
14193 + DEFINE_SHOW_ATTRIBUTE(dump);
14194 +
14195 +-static void dma_debug_fs_init(void)
14196 ++static int __init dma_debug_fs_init(void)
14197 + {
14198 + struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
14199 +
14200 +@@ -807,7 +807,10 @@ static void dma_debug_fs_init(void)
14201 + debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
14202 + debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
14203 + debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
14204 ++
14205 ++ return 0;
14206 + }
14207 ++core_initcall_sync(dma_debug_fs_init);
14208 +
14209 + static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
14210 + {
14211 +@@ -892,8 +895,6 @@ static int dma_debug_init(void)
14212 + spin_lock_init(&dma_entry_hash[i].lock);
14213 + }
14214 +
14215 +- dma_debug_fs_init();
14216 +-
14217 + nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
14218 + for (i = 0; i < nr_pages; ++i)
14219 + dma_debug_create_entries(GFP_KERNEL);
14220 +diff --git a/kernel/fork.c b/kernel/fork.c
14221 +index 567fee3405003..268f1e7321cb3 100644
14222 +--- a/kernel/fork.c
14223 ++++ b/kernel/fork.c
14224 +@@ -1045,6 +1045,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
14225 + mm->pmd_huge_pte = NULL;
14226 + #endif
14227 + mm_init_uprobes_state(mm);
14228 ++ hugetlb_count_init(mm);
14229 +
14230 + if (current->mm) {
14231 + mm->flags = current->mm->flags & MMF_INIT_MASK;
14232 +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
14233 +index 3c20afbc19e13..ae5afba2162b7 100644
14234 +--- a/kernel/locking/rtmutex.c
14235 ++++ b/kernel/locking/rtmutex.c
14236 +@@ -1556,7 +1556,7 @@ void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
14237 + struct lock_class_key *key)
14238 + {
14239 + debug_check_no_locks_freed((void *)lock, sizeof(*lock));
14240 +- lockdep_init_map(&lock->dep_map, name, key, 0);
14241 ++ lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
14242 +
14243 + __rt_mutex_basic_init(lock);
14244 + }
14245 +diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
14246 +index ca43239a255ad..cb5a25a8a0cc7 100644
14247 +--- a/kernel/pid_namespace.c
14248 ++++ b/kernel/pid_namespace.c
14249 +@@ -51,7 +51,8 @@ static struct kmem_cache *create_pid_cachep(unsigned int level)
14250 + mutex_lock(&pid_caches_mutex);
14251 + /* Name collision forces to do allocation under mutex. */
14252 + if (!*pkc)
14253 +- *pkc = kmem_cache_create(name, len, 0, SLAB_HWCACHE_ALIGN, 0);
14254 ++ *pkc = kmem_cache_create(name, len, 0,
14255 ++ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, 0);
14256 + mutex_unlock(&pid_caches_mutex);
14257 + /* current can fail, but someone else can succeed. */
14258 + return READ_ONCE(*pkc);
14259 +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
14260 +index 421c35571797e..d6731384dd47b 100644
14261 +--- a/kernel/printk/printk.c
14262 ++++ b/kernel/printk/printk.c
14263 +@@ -2545,6 +2545,7 @@ void console_unlock(void)
14264 + bool do_cond_resched, retry;
14265 + struct printk_info info;
14266 + struct printk_record r;
14267 ++ u64 __maybe_unused next_seq;
14268 +
14269 + if (console_suspended) {
14270 + up_console_sem();
14271 +@@ -2654,8 +2655,10 @@ skip:
14272 + cond_resched();
14273 + }
14274 +
14275 +- console_locked = 0;
14276 ++ /* Get consistent value of the next-to-be-used sequence number. */
14277 ++ next_seq = console_seq;
14278 +
14279 ++ console_locked = 0;
14280 + up_console_sem();
14281 +
14282 + /*
14283 +@@ -2664,7 +2667,7 @@ skip:
14284 + * there's a new owner and the console_unlock() from them will do the
14285 + * flush, no worries.
14286 + */
14287 +- retry = prb_read_valid(prb, console_seq, NULL);
14288 ++ retry = prb_read_valid(prb, next_seq, NULL);
14289 + printk_safe_exit_irqrestore(flags);
14290 +
14291 + if (retry && console_trylock())
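The point of the console_unlock() hunk is ordering: console_seq is snapshotted into next_seq while the console semaphore is still held, so the post-release check for newly arrived records uses a value that another console owner cannot have advanced underneath it. A hedged sketch of the shape, with record_available() standing in for prb_read_valid():

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t console_seq;		/* protected by console_lock */
static uint64_t ringbuf_head;		/* advanced by producers */

static bool record_available(uint64_t seq)	/* like prb_read_valid() */
{
	return seq < ringbuf_head;
}

static bool console_unlock_sketch(void)
{
	uint64_t next_seq;

	pthread_mutex_lock(&console_lock);
	/* ... records up to console_seq printed here ... */
	next_seq = console_seq;	/* consistent snapshot, lock held */
	pthread_mutex_unlock(&console_lock);

	/* retry against the snapshot, not a racy re-read */
	return record_available(next_seq);
}

int main(void)
{
	ringbuf_head = 1;
	return console_unlock_sketch() ? 0 : 1;
}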
14292 +diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
14293 +index ad0156b869371..d149050515355 100644
14294 +--- a/kernel/rcu/tree_plugin.h
14295 ++++ b/kernel/rcu/tree_plugin.h
14296 +@@ -2995,17 +2995,17 @@ static void noinstr rcu_dynticks_task_exit(void)
14297 + /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
14298 + static void rcu_dynticks_task_trace_enter(void)
14299 + {
14300 +-#ifdef CONFIG_TASKS_RCU_TRACE
14301 ++#ifdef CONFIG_TASKS_TRACE_RCU
14302 + if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
14303 + current->trc_reader_special.b.need_mb = true;
14304 +-#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
14305 ++#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
14306 + }
14307 +
14308 + /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
14309 + static void rcu_dynticks_task_trace_exit(void)
14310 + {
14311 +-#ifdef CONFIG_TASKS_RCU_TRACE
14312 ++#ifdef CONFIG_TASKS_TRACE_RCU
14313 + if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
14314 + current->trc_reader_special.b.need_mb = false;
14315 +-#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
14316 ++#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
14317 + }
14318 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
14319 +index f148eacda55a9..542c2d03dab65 100644
14320 +--- a/kernel/workqueue.c
14321 ++++ b/kernel/workqueue.c
14322 +@@ -5902,6 +5902,13 @@ static void __init wq_numa_init(void)
14323 + return;
14324 + }
14325 +
14326 ++ for_each_possible_cpu(cpu) {
14327 ++ if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
14328 ++ pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
14329 ++ return;
14330 ++ }
14331 ++ }
14332 ++
14333 + wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
14334 + BUG_ON(!wq_update_unbound_numa_attrs_buf);
14335 +
14336 +@@ -5919,11 +5926,6 @@ static void __init wq_numa_init(void)
14337 +
14338 + for_each_possible_cpu(cpu) {
14339 + node = cpu_to_node(cpu);
14340 +- if (WARN_ON(node == NUMA_NO_NODE)) {
14341 +- pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
14342 +- /* happens iff arch is bonkers, let's just proceed */
14343 +- return;
14344 +- }
14345 + cpumask_set_cpu(cpu, tbl[node]);
14346 + }
14347 +
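Hoisting the NUMA_NO_NODE check ahead of the allocations means a broken CPU-to-node mapping now bails out before wq_update_unbound_numa_attrs_buf and tbl are set up, rather than after. The validate-first shape, sketched with hypothetical helpers:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4
#define NO_NODE (-1)

static int cpu_to_node_tbl[NCPUS] = { 0, 0, 1, 1 };	/* hypothetical map */

static bool numa_init_sketch(void)
{
	int cpu;
	int *tbl;

	/* validate everything first: nothing to unwind on failure */
	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (cpu_to_node_tbl[cpu] == NO_NODE) {
			fprintf(stderr, "cpu%d unmapped, disabling NUMA\n", cpu);
			return false;
		}
	}

	tbl = calloc(NCPUS, sizeof(*tbl));	/* allocate only after checks */
	if (!tbl)
		return false;
	for (cpu = 0; cpu < NCPUS; cpu++)
		tbl[cpu] = cpu_to_node_tbl[cpu];
	free(tbl);
	return true;
}

int main(void)
{
	return numa_init_sketch() ? 0 : 1;
}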
14348 +diff --git a/lib/test_bpf.c b/lib/test_bpf.c
14349 +index 4dc4dcbecd129..acf825d816714 100644
14350 +--- a/lib/test_bpf.c
14351 ++++ b/lib/test_bpf.c
14352 +@@ -4286,8 +4286,8 @@ static struct bpf_test tests[] = {
14353 + .u.insns_int = {
14354 + BPF_LD_IMM64(R0, 0),
14355 + BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
14356 +- BPF_STX_MEM(BPF_W, R10, R1, -40),
14357 +- BPF_LDX_MEM(BPF_W, R0, R10, -40),
14358 ++ BPF_STX_MEM(BPF_DW, R10, R1, -40),
14359 ++ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
14360 + BPF_EXIT_INSN(),
14361 + },
14362 + INTERNAL,
14363 +@@ -6659,7 +6659,14 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
14364 + u64 duration;
14365 + u32 ret;
14366 +
14367 +- if (test->test[i].data_size == 0 &&
14368 ++ /*
14369 ++ * NOTE: Several sub-tests may be present, in which case
14370 ++ * a zero {data_size, result} tuple indicates the end of
14371 ++ * the sub-test array. The first test is always run,
14372 ++ * even if both data_size and result happen to be zero.
14373 ++ */
14374 ++ if (i > 0 &&
14375 ++ test->test[i].data_size == 0 &&
14376 + test->test[i].result == 0)
14377 + break;
14378 +
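The added i > 0 guard encodes the comment: an all-zero {data_size, result} pair is the sentinel that ends the sub-test array, but an all-zero first entry is a legitimate test and must still run. In isolation:

#include <stdio.h>

struct subtest {
	unsigned int data_size;
	int result;
};

int main(void)
{
	/* first entry is all zeroes on purpose: it must still run */
	struct subtest tests[] = { {0, 0}, {4, 1}, {0, 0}, {8, 2} };
	unsigned int i, n = sizeof(tests) / sizeof(tests[0]);

	for (i = 0; i < n; i++) {
		if (i > 0 && tests[i].data_size == 0 && tests[i].result == 0)
			break;		/* sentinel: end of sub-tests */
		printf("running sub-test %u\n", i);
	}
	return 0;
}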
14379 +diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
14380 +index f93b1e145ada7..16b1d3a3a4975 100644
14381 +--- a/lib/test_stackinit.c
14382 ++++ b/lib/test_stackinit.c
14383 +@@ -67,10 +67,10 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
14384 + #define INIT_STRUCT_none /**/
14385 + #define INIT_STRUCT_zero = { }
14386 + #define INIT_STRUCT_static_partial = { .two = 0, }
14387 +-#define INIT_STRUCT_static_all = { .one = arg->one, \
14388 +- .two = arg->two, \
14389 +- .three = arg->three, \
14390 +- .four = arg->four, \
14391 ++#define INIT_STRUCT_static_all = { .one = 0, \
14392 ++ .two = 0, \
14393 ++ .three = 0, \
14394 ++ .four = 0, \
14395 + }
14396 + #define INIT_STRUCT_dynamic_partial = { .two = arg->two, }
14397 + #define INIT_STRUCT_dynamic_all = { .one = arg->one, \
14398 +@@ -84,8 +84,7 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
14399 + var.one = 0; \
14400 + var.two = 0; \
14401 + var.three = 0; \
14402 +- memset(&var.four, 0, \
14403 +- sizeof(var.four))
14404 ++ var.four = 0
14405 +
14406 + /*
14407 + * @name: unique string name for the test
14408 +@@ -210,18 +209,13 @@ struct test_small_hole {
14409 + unsigned long four;
14410 + };
14411 +
14412 +-/* Try to trigger unhandled padding in a structure. */
14413 +-struct test_aligned {
14414 +- u32 internal1;
14415 +- u64 internal2;
14416 +-} __aligned(64);
14417 +-
14418 ++/* Trigger unhandled padding in a structure. */
14419 + struct test_big_hole {
14420 + u8 one;
14421 + u8 two;
14422 + u8 three;
14423 + /* 61 byte padding hole here. */
14424 +- struct test_aligned four;
14425 ++ u8 four __aligned(64);
14426 + } __aligned(64);
14427 +
14428 + struct test_trailing_hole {
14429 +diff --git a/mm/hmm.c b/mm/hmm.c
14430 +index 943cb2ba44423..fb617054f9631 100644
14431 +--- a/mm/hmm.c
14432 ++++ b/mm/hmm.c
14433 +@@ -291,10 +291,13 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
14434 + goto fault;
14435 +
14436 + /*
13437 ++ * Bypass devmap ptes such as DAX pages when all requested pfn
13438 ++ * flags (pfn_req_flags) are fulfilled.
14439 + * Since each architecture defines a struct page for the zero page, just
14440 + * fall through and treat it like a normal page.
14441 + */
14442 +- if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
14443 ++ if (pte_special(pte) && !pte_devmap(pte) &&
14444 ++ !is_zero_pfn(pte_pfn(pte))) {
14445 + if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
14446 + pte_unmap(ptep);
14447 + return -EFAULT;
14448 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
14449 +index 6ad419e7e0a4c..dcc6ded8ff221 100644
14450 +--- a/mm/hugetlb.c
14451 ++++ b/mm/hugetlb.c
14452 +@@ -3840,8 +3840,10 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
14453 + * after this open call completes. It is therefore safe to take a
14454 + * new reference here without additional locking.
14455 + */
14456 +- if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
14457 ++ if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
14458 ++ resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
14459 + kref_get(&resv->refs);
14460 ++ }
14461 + }
14462 +
14463 + static void hugetlb_vm_op_close(struct vm_area_struct *vma)
14464 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
14465 +index 1bf3f86812509..8b2bec1963b48 100644
14466 +--- a/mm/memory_hotplug.c
14467 ++++ b/mm/memory_hotplug.c
14468 +@@ -834,8 +834,8 @@ static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn
14469 + return movable_node_enabled ? movable_zone : kernel_zone;
14470 + }
14471 +
14472 +-struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
14473 +- unsigned long nr_pages)
14474 ++struct zone *zone_for_pfn_range(int online_type, int nid,
14475 ++ unsigned long start_pfn, unsigned long nr_pages)
14476 + {
14477 + if (online_type == MMOP_ONLINE_KERNEL)
14478 + return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
14479 +diff --git a/mm/vmscan.c b/mm/vmscan.c
14480 +index f62d81f61b56b..6b06e472a07d8 100644
14481 +--- a/mm/vmscan.c
14482 ++++ b/mm/vmscan.c
14483 +@@ -2576,7 +2576,7 @@ out:
14484 + cgroup_size = max(cgroup_size, protection);
14485 +
14486 + scan = lruvec_size - lruvec_size * protection /
14487 +- cgroup_size;
14488 ++ (cgroup_size + 1);
14489 +
14490 + /*
14491 + * Minimally target SWAP_CLUSTER_MAX pages to keep
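The +1 closes a divide-by-zero: when a protected cgroup's usage and effective protection are both zero, cgroup_size stays zero after the max() and the old expression evaluated 0/0. A quick numeric sketch:

#include <stdio.h>

static unsigned long scan_target(unsigned long lruvec_size,
				 unsigned long protection,
				 unsigned long cgroup_size)
{
	cgroup_size = cgroup_size > protection ? cgroup_size : protection;

	/* the old form trapped when protection == cgroup_size == 0 */
	return lruvec_size - lruvec_size * protection / (cgroup_size + 1);
}

int main(void)
{
	/* usage and protection both zero: well defined with the +1 */
	printf("%lu\n", scan_target(128, 0, 0));	/* 128 */
	/* half-protected cgroup scans roughly half as much */
	printf("%lu\n", scan_target(128, 512, 1024));	/* ~64 */
	return 0;
}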
14492 +diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
14493 +index f4fea28e05da6..3ec1a51a6944e 100644
14494 +--- a/net/9p/trans_xen.c
14495 ++++ b/net/9p/trans_xen.c
14496 +@@ -138,7 +138,7 @@ static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
14497 +
14498 + static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
14499 + {
14500 +- struct xen_9pfs_front_priv *priv = NULL;
14501 ++ struct xen_9pfs_front_priv *priv;
14502 + RING_IDX cons, prod, masked_cons, masked_prod;
14503 + unsigned long flags;
14504 + u32 size = p9_req->tc.size;
14505 +@@ -151,7 +151,7 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
14506 + break;
14507 + }
14508 + read_unlock(&xen_9pfs_lock);
14509 +- if (!priv || priv->client != client)
14510 ++ if (list_entry_is_head(priv, &xen_9pfs_devs, list))
14511 + return -EINVAL;
14512 +
14513 + num = p9_req->tc.tag % priv->num_rings;
14514 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
14515 +index 62c99e015609d..89f37f26f2535 100644
14516 +--- a/net/bluetooth/hci_event.c
14517 ++++ b/net/bluetooth/hci_event.c
14518 +@@ -40,6 +40,8 @@
14519 + #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
14520 + "\x00\x00\x00\x00\x00\x00\x00\x00"
14521 +
14522 ++#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
14523 ++
14524 + /* Handle HCI Event packets */
14525 +
14526 + static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
14527 +@@ -1171,6 +1173,12 @@ static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
14528 +
14529 + bacpy(&hdev->random_addr, sent);
14530 +
14531 ++ if (!bacmp(&hdev->rpa, sent)) {
14532 ++ hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
14533 ++ queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
14534 ++ secs_to_jiffies(hdev->rpa_timeout));
14535 ++ }
14536 ++
14537 + hci_dev_unlock(hdev);
14538 + }
14539 +
14540 +@@ -1201,24 +1209,30 @@ static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
14541 + {
14542 + __u8 status = *((__u8 *) skb->data);
14543 + struct hci_cp_le_set_adv_set_rand_addr *cp;
14544 +- struct adv_info *adv_instance;
14545 ++ struct adv_info *adv;
14546 +
14547 + if (status)
14548 + return;
14549 +
14550 + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
14551 +- if (!cp)
13552 ++ /* Update only in case of an adv instance, since handle 0x00 shall be
13553 ++ * using HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended and
13554 ++ * non-extended advertising.
14555 ++ */
14556 ++ if (!cp || !cp->handle)
14557 + return;
14558 +
14559 + hci_dev_lock(hdev);
14560 +
14561 +- if (!cp->handle) {
14562 +- /* Store in hdev for instance 0 (Set adv and Directed advs) */
14563 +- bacpy(&hdev->random_addr, &cp->bdaddr);
14564 +- } else {
14565 +- adv_instance = hci_find_adv_instance(hdev, cp->handle);
14566 +- if (adv_instance)
14567 +- bacpy(&adv_instance->random_addr, &cp->bdaddr);
14568 ++ adv = hci_find_adv_instance(hdev, cp->handle);
14569 ++ if (adv) {
14570 ++ bacpy(&adv->random_addr, &cp->bdaddr);
14571 ++ if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
14572 ++ adv->rpa_expired = false;
14573 ++ queue_delayed_work(hdev->workqueue,
14574 ++ &adv->rpa_expired_cb,
14575 ++ secs_to_jiffies(hdev->rpa_timeout));
14576 ++ }
14577 + }
14578 +
14579 + hci_dev_unlock(hdev);
14580 +@@ -4373,6 +4387,21 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
14581 +
14582 + switch (ev->status) {
14583 + case 0x00:
14584 ++ /* The synchronous connection complete event should only be
14585 ++ * sent once per new connection. Receiving a successful
14586 ++ * complete event when the connection status is already
14587 ++ * BT_CONNECTED means that the device is misbehaving and sent
14588 ++ * multiple complete event packets for the same new connection.
14589 ++ *
14590 ++ * Registering the device more than once can corrupt kernel
14591 ++ * memory, hence upon detecting this invalid event, we report
14592 ++ * an error and ignore the packet.
14593 ++ */
14594 ++ if (conn->state == BT_CONNECTED) {
14595 ++ bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
14596 ++ goto unlock;
14597 ++ }
14598 ++
14599 + conn->handle = __le16_to_cpu(ev->handle);
14600 + conn->state = BT_CONNECTED;
14601 + conn->type = ev->link_type;
14602 +@@ -5095,9 +5124,64 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
14603 + }
14604 + #endif
14605 +
14606 ++static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
14607 ++ u8 bdaddr_type, bdaddr_t *local_rpa)
14608 ++{
14609 ++ if (conn->out) {
14610 ++ conn->dst_type = bdaddr_type;
14611 ++ conn->resp_addr_type = bdaddr_type;
14612 ++ bacpy(&conn->resp_addr, bdaddr);
14613 ++
14614 ++		/* If the controller has set a Local RPA then it must be
14615 ++		 * used instead of hdev->rpa.
14616 ++		 */
14617 ++ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
14618 ++ conn->init_addr_type = ADDR_LE_DEV_RANDOM;
14619 ++ bacpy(&conn->init_addr, local_rpa);
14620 ++ } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
14621 ++ conn->init_addr_type = ADDR_LE_DEV_RANDOM;
14622 ++ bacpy(&conn->init_addr, &conn->hdev->rpa);
14623 ++ } else {
14624 ++ hci_copy_identity_address(conn->hdev, &conn->init_addr,
14625 ++ &conn->init_addr_type);
14626 ++ }
14627 ++ } else {
14628 ++ conn->resp_addr_type = conn->hdev->adv_addr_type;
14629 ++		/* If the controller has set a Local RPA then it must be
14630 ++		 * used instead of hdev->rpa.
14631 ++		 */
14632 ++ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
14633 ++ conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
14634 ++ bacpy(&conn->resp_addr, local_rpa);
14635 ++ } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
14636 ++ /* In case of ext adv, resp_addr will be updated in
14637 ++ * Adv Terminated event.
14638 ++ */
14639 ++ if (!ext_adv_capable(conn->hdev))
14640 ++ bacpy(&conn->resp_addr,
14641 ++ &conn->hdev->random_addr);
14642 ++ } else {
14643 ++ bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
14644 ++ }
14645 ++
14646 ++ conn->init_addr_type = bdaddr_type;
14647 ++ bacpy(&conn->init_addr, bdaddr);
14648 ++
14649 ++ /* For incoming connections, set the default minimum
14650 ++ * and maximum connection interval. They will be used
14651 ++ * to check if the parameters are in range and if not
14652 ++ * trigger the connection update procedure.
14653 ++ */
14654 ++ conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
14655 ++ conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
14656 ++ }
14657 ++}
14658 ++
14659 + static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
14660 +- bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
14661 +- u16 interval, u16 latency, u16 supervision_timeout)
14662 ++ bdaddr_t *bdaddr, u8 bdaddr_type,
14663 ++ bdaddr_t *local_rpa, u8 role, u16 handle,
14664 ++ u16 interval, u16 latency,
14665 ++ u16 supervision_timeout)
14666 + {
14667 + struct hci_conn_params *params;
14668 + struct hci_conn *conn;
14669 +@@ -5145,32 +5229,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
14670 + cancel_delayed_work(&conn->le_conn_timeout);
14671 + }
14672 +
14673 +- if (!conn->out) {
14674 +- /* Set the responder (our side) address type based on
14675 +- * the advertising address type.
14676 +- */
14677 +- conn->resp_addr_type = hdev->adv_addr_type;
14678 +- if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
14679 +- /* In case of ext adv, resp_addr will be updated in
14680 +- * Adv Terminated event.
14681 +- */
14682 +- if (!ext_adv_capable(hdev))
14683 +- bacpy(&conn->resp_addr, &hdev->random_addr);
14684 +- } else {
14685 +- bacpy(&conn->resp_addr, &hdev->bdaddr);
14686 +- }
14687 +-
14688 +- conn->init_addr_type = bdaddr_type;
14689 +- bacpy(&conn->init_addr, bdaddr);
14690 +-
14691 +- /* For incoming connections, set the default minimum
14692 +- * and maximum connection interval. They will be used
14693 +- * to check if the parameters are in range and if not
14694 +- * trigger the connection update procedure.
14695 +- */
14696 +- conn->le_conn_min_interval = hdev->le_conn_min_interval;
14697 +- conn->le_conn_max_interval = hdev->le_conn_max_interval;
14698 +- }
14699 ++ le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
14700 +
14701 + /* Lookup the identity address from the stored connection
14702 + * address and address type.
14703 +@@ -5264,7 +5323,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
14704 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
14705 +
14706 + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
14707 +- ev->role, le16_to_cpu(ev->handle),
14708 ++ NULL, ev->role, le16_to_cpu(ev->handle),
14709 + le16_to_cpu(ev->interval),
14710 + le16_to_cpu(ev->latency),
14711 + le16_to_cpu(ev->supervision_timeout));
14712 +@@ -5278,7 +5337,7 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
14713 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
14714 +
14715 + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
14716 +- ev->role, le16_to_cpu(ev->handle),
14717 ++ &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
14718 + le16_to_cpu(ev->interval),
14719 + le16_to_cpu(ev->latency),
14720 + le16_to_cpu(ev->supervision_timeout));
14721 +@@ -5314,7 +5373,8 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
14722 + if (conn) {
14723 + struct adv_info *adv_instance;
14724 +
14725 +- if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
14726 ++ if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
14727 ++ bacmp(&conn->resp_addr, BDADDR_ANY))
14728 + return;
14729 +
14730 + if (!ev->handle) {
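A theme across the hci_event.c hunks: when the controller reports a non-zero Local RPA in the (enhanced) connection complete event, that is the address actually used on air, so it takes precedence over the locally cached hdev->rpa. Sketched in userspace with fake addresses and a bacmp() stand-in:

    #include <stdio.h>
    #include <string.h>

    typedef struct { unsigned char b[6]; } bdaddr_t;
    static const bdaddr_t BDADDR_ANY; /* all zeroes */

    static int bacmp(const bdaddr_t *a, const bdaddr_t *b)
    {
            return memcmp(a, b, sizeof(bdaddr_t));
    }

    int main(void)
    {
            bdaddr_t local_rpa  = { { 0x55, 0x44, 0x33, 0x22, 0x11, 0xc0 } };
            bdaddr_t cached_rpa = { { 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0xc0 } };
            const bdaddr_t *use;

            /* Prefer the controller-reported RPA when it is non-zero. */
            if (bacmp(&local_rpa, &BDADDR_ANY))
                    use = &local_rpa;
            else
                    use = &cached_rpa;

            printf("init_addr starts with %02x\n", use->b[0]);
            return 0;
    }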
14731 +diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
14732 +index b069f640394d0..477519ab63b83 100644
14733 +--- a/net/bluetooth/hci_request.c
14734 ++++ b/net/bluetooth/hci_request.c
14735 +@@ -2053,8 +2053,6 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
14736 + * current RPA has expired then generate a new one.
14737 + */
14738 + if (use_rpa) {
14739 +- int to;
14740 +-
14741 + 		/* If the controller supports LL Privacy, use own address type
14742 + 		 * 0x03
14743 + 		 */
14744 +@@ -2065,14 +2063,10 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
14745 + *own_addr_type = ADDR_LE_DEV_RANDOM;
14746 +
14747 + if (adv_instance) {
14748 +- if (!adv_instance->rpa_expired &&
14749 +- !bacmp(&adv_instance->random_addr, &hdev->rpa))
14750 ++ if (adv_rpa_valid(adv_instance))
14751 + return 0;
14752 +-
14753 +- adv_instance->rpa_expired = false;
14754 + } else {
14755 +- if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
14756 +- !bacmp(&hdev->random_addr, &hdev->rpa))
14757 ++ if (rpa_valid(hdev))
14758 + return 0;
14759 + }
14760 +
14761 +@@ -2084,14 +2078,6 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
14762 +
14763 + bacpy(rand_addr, &hdev->rpa);
14764 +
14765 +- to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
14766 +- if (adv_instance)
14767 +- queue_delayed_work(hdev->workqueue,
14768 +- &adv_instance->rpa_expired_cb, to);
14769 +- else
14770 +- queue_delayed_work(hdev->workqueue,
14771 +- &hdev->rpa_expired, to);
14772 +-
14773 + return 0;
14774 + }
14775 +
14776 +@@ -2134,6 +2120,30 @@ void __hci_req_clear_ext_adv_sets(struct hci_request *req)
14777 + hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
14778 + }
14779 +
14780 ++static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
14781 ++{
14782 ++ struct hci_dev *hdev = req->hdev;
14783 ++
14784 ++ /* If we're advertising or initiating an LE connection we can't
14785 ++ * go ahead and change the random address at this time. This is
14786 ++ * because the eventual initiator address used for the
14787 ++ * subsequently created connection will be undefined (some
14788 ++ * controllers use the new address and others the one we had
14789 ++ * when the operation started).
14790 ++ *
14791 ++ * In this kind of scenario skip the update and let the random
14792 ++ * address be updated at the next cycle.
14793 ++ */
14794 ++ if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
14795 ++ hci_lookup_le_connect(hdev)) {
14796 ++ bt_dev_dbg(hdev, "Deferring random address update");
14797 ++ hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
14798 ++ return;
14799 ++ }
14800 ++
14801 ++ hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
14802 ++}
14803 ++
14804 + int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
14805 + {
14806 + struct hci_cp_le_set_ext_adv_params cp;
14807 +@@ -2236,6 +2246,13 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
14808 + } else {
14809 + if (!bacmp(&random_addr, &hdev->random_addr))
14810 + return 0;
14811 ++		/* Instance 0x00 doesn't have an adv_info; instead it
14812 ++		 * uses hdev->random_addr to track its address, so
14813 ++		 * whenever that needs to be updated this also sets
14814 ++		 * the random address, since hdev->random_addr is
14815 ++		 * shared with the scan state machine.
14816 ++		 */
14817 ++ set_random_addr(req, &random_addr);
14818 + }
14819 +
14820 + memset(&cp, 0, sizeof(cp));
14821 +@@ -2493,30 +2510,6 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
14822 + false);
14823 + }
14824 +
14825 +-static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
14826 +-{
14827 +- struct hci_dev *hdev = req->hdev;
14828 +-
14829 +- /* If we're advertising or initiating an LE connection we can't
14830 +- * go ahead and change the random address at this time. This is
14831 +- * because the eventual initiator address used for the
14832 +- * subsequently created connection will be undefined (some
14833 +- * controllers use the new address and others the one we had
14834 +- * when the operation started).
14835 +- *
14836 +- * In this kind of scenario skip the update and let the random
14837 +- * address be updated at the next cycle.
14838 +- */
14839 +- if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
14840 +- hci_lookup_le_connect(hdev)) {
14841 +- bt_dev_dbg(hdev, "Deferring random address update");
14842 +- hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
14843 +- return;
14844 +- }
14845 +-
14846 +- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
14847 +-}
14848 +-
14849 + int hci_update_random_address(struct hci_request *req, bool require_privacy,
14850 + bool use_rpa, u8 *own_addr_type)
14851 + {
14852 +@@ -2528,8 +2521,6 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
14853 + * the current RPA in use, then generate a new one.
14854 + */
14855 + if (use_rpa) {
14856 +- int to;
14857 +-
14858 + 		/* If the controller supports LL Privacy, use own address type
14859 + 		 * 0x03
14860 + 		 */
14861 +@@ -2539,8 +2530,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
14862 + else
14863 + *own_addr_type = ADDR_LE_DEV_RANDOM;
14864 +
14865 +- if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
14866 +- !bacmp(&hdev->random_addr, &hdev->rpa))
14867 ++ if (rpa_valid(hdev))
14868 + return 0;
14869 +
14870 + err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
14871 +@@ -2551,9 +2541,6 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
14872 +
14873 + set_random_addr(req, &hdev->rpa);
14874 +
14875 +- to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
14876 +- queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
14877 +-
14878 + return 0;
14879 + }
14880 +
14881 +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
14882 +index 9769a7ceb6898..bd0d616dbc372 100644
14883 +--- a/net/bluetooth/sco.c
14884 ++++ b/net/bluetooth/sco.c
14885 +@@ -48,6 +48,8 @@ struct sco_conn {
14886 + spinlock_t lock;
14887 + struct sock *sk;
14888 +
14889 ++ struct delayed_work timeout_work;
14890 ++
14891 + unsigned int mtu;
14892 + };
14893 +
14894 +@@ -74,9 +76,20 @@ struct sco_pinfo {
14895 + #define SCO_CONN_TIMEOUT (HZ * 40)
14896 + #define SCO_DISCONN_TIMEOUT (HZ * 2)
14897 +
14898 +-static void sco_sock_timeout(struct timer_list *t)
14899 ++static void sco_sock_timeout(struct work_struct *work)
14900 + {
14901 +- struct sock *sk = from_timer(sk, t, sk_timer);
14902 ++ struct sco_conn *conn = container_of(work, struct sco_conn,
14903 ++ timeout_work.work);
14904 ++ struct sock *sk;
14905 ++
14906 ++ sco_conn_lock(conn);
14907 ++ sk = conn->sk;
14908 ++ if (sk)
14909 ++ sock_hold(sk);
14910 ++ sco_conn_unlock(conn);
14911 ++
14912 ++ if (!sk)
14913 ++ return;
14914 +
14915 + BT_DBG("sock %p state %d", sk, sk->sk_state);
14916 +
14917 +@@ -90,14 +103,21 @@ static void sco_sock_timeout(struct timer_list *t)
14918 +
14919 + static void sco_sock_set_timer(struct sock *sk, long timeout)
14920 + {
14921 ++ if (!sco_pi(sk)->conn)
14922 ++ return;
14923 ++
14924 + BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
14925 +- sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
14926 ++ cancel_delayed_work(&sco_pi(sk)->conn->timeout_work);
14927 ++ schedule_delayed_work(&sco_pi(sk)->conn->timeout_work, timeout);
14928 + }
14929 +
14930 + static void sco_sock_clear_timer(struct sock *sk)
14931 + {
14932 ++ if (!sco_pi(sk)->conn)
14933 ++ return;
14934 ++
14935 + BT_DBG("sock %p state %d", sk, sk->sk_state);
14936 +- sk_stop_timer(sk, &sk->sk_timer);
14937 ++ cancel_delayed_work(&sco_pi(sk)->conn->timeout_work);
14938 + }
14939 +
14940 + /* ---- SCO connections ---- */
14941 +@@ -177,6 +197,9 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
14942 + sco_chan_del(sk, err);
14943 + bh_unlock_sock(sk);
14944 + sock_put(sk);
14945 ++
14946 ++ /* Ensure no more work items will run before freeing conn. */
14947 ++ cancel_delayed_work_sync(&conn->timeout_work);
14948 + }
14949 +
14950 + hcon->sco_data = NULL;
14951 +@@ -191,6 +214,8 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
14952 + sco_pi(sk)->conn = conn;
14953 + conn->sk = sk;
14954 +
14955 ++ INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout);
14956 ++
14957 + if (parent)
14958 + bt_accept_enqueue(parent, sk, true);
14959 + }
14960 +@@ -210,44 +235,32 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
14961 + return err;
14962 + }
14963 +
14964 +-static int sco_connect(struct sock *sk)
14965 ++static int sco_connect(struct hci_dev *hdev, struct sock *sk)
14966 + {
14967 + struct sco_conn *conn;
14968 + struct hci_conn *hcon;
14969 +- struct hci_dev *hdev;
14970 + int err, type;
14971 +
14972 + BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
14973 +
14974 +- hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR);
14975 +- if (!hdev)
14976 +- return -EHOSTUNREACH;
14977 +-
14978 +- hci_dev_lock(hdev);
14979 +-
14980 + if (lmp_esco_capable(hdev) && !disable_esco)
14981 + type = ESCO_LINK;
14982 + else
14983 + type = SCO_LINK;
14984 +
14985 + if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
14986 +- (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
14987 +- err = -EOPNOTSUPP;
14988 +- goto done;
14989 +- }
14990 ++ (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev)))
14991 ++ return -EOPNOTSUPP;
14992 +
14993 + hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
14994 + sco_pi(sk)->setting);
14995 +- if (IS_ERR(hcon)) {
14996 +- err = PTR_ERR(hcon);
14997 +- goto done;
14998 +- }
14999 ++ if (IS_ERR(hcon))
15000 ++ return PTR_ERR(hcon);
15001 +
15002 + conn = sco_conn_add(hcon);
15003 + if (!conn) {
15004 + hci_conn_drop(hcon);
15005 +- err = -ENOMEM;
15006 +- goto done;
15007 ++ return -ENOMEM;
15008 + }
15009 +
15010 + /* Update source addr of the socket */
15011 +@@ -255,7 +268,7 @@ static int sco_connect(struct sock *sk)
15012 +
15013 + err = sco_chan_add(conn, sk, NULL);
15014 + if (err)
15015 +- goto done;
15016 ++ return err;
15017 +
15018 + if (hcon->state == BT_CONNECTED) {
15019 + sco_sock_clear_timer(sk);
15020 +@@ -265,9 +278,6 @@ static int sco_connect(struct sock *sk)
15021 + sco_sock_set_timer(sk, sk->sk_sndtimeo);
15022 + }
15023 +
15024 +-done:
15025 +- hci_dev_unlock(hdev);
15026 +- hci_dev_put(hdev);
15027 + return err;
15028 + }
15029 +
15030 +@@ -496,8 +506,6 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
15031 +
15032 + sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
15033 +
15034 +- timer_setup(&sk->sk_timer, sco_sock_timeout, 0);
15035 +-
15036 + bt_sock_link(&sco_sk_list, sk);
15037 + return sk;
15038 + }
15039 +@@ -562,6 +570,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
15040 + {
15041 + struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
15042 + struct sock *sk = sock->sk;
15043 ++ struct hci_dev *hdev;
15044 + int err;
15045 +
15046 + BT_DBG("sk %p", sk);
15047 +@@ -576,12 +585,19 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
15048 + if (sk->sk_type != SOCK_SEQPACKET)
15049 + return -EINVAL;
15050 +
15051 ++ hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR);
15052 ++ if (!hdev)
15053 ++ return -EHOSTUNREACH;
15054 ++ hci_dev_lock(hdev);
15055 ++
15056 + lock_sock(sk);
15057 +
15058 + /* Set destination address and psm */
15059 + bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
15060 +
15061 +- err = sco_connect(sk);
15062 ++ err = sco_connect(hdev, sk);
15063 ++ hci_dev_unlock(hdev);
15064 ++ hci_dev_put(hdev);
15065 + if (err)
15066 + goto done;
15067 +
15068 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
15069 +index 3ed7c98a98e1d..6076c75706d00 100644
15070 +--- a/net/core/flow_dissector.c
15071 ++++ b/net/core/flow_dissector.c
15072 +@@ -1056,8 +1056,10 @@ proto_again:
15073 + FLOW_DISSECTOR_KEY_IPV4_ADDRS,
15074 + target_container);
15075 +
15076 +- memcpy(&key_addrs->v4addrs, &iph->saddr,
15077 +- sizeof(key_addrs->v4addrs));
15078 ++ memcpy(&key_addrs->v4addrs.src, &iph->saddr,
15079 ++ sizeof(key_addrs->v4addrs.src));
15080 ++ memcpy(&key_addrs->v4addrs.dst, &iph->daddr,
15081 ++ sizeof(key_addrs->v4addrs.dst));
15082 + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
15083 + }
15084 +
15085 +@@ -1101,8 +1103,10 @@ proto_again:
15086 + FLOW_DISSECTOR_KEY_IPV6_ADDRS,
15087 + target_container);
15088 +
15089 +- memcpy(&key_addrs->v6addrs, &iph->saddr,
15090 +- sizeof(key_addrs->v6addrs));
15091 ++ memcpy(&key_addrs->v6addrs.src, &iph->saddr,
15092 ++ sizeof(key_addrs->v6addrs.src));
15093 ++ memcpy(&key_addrs->v6addrs.dst, &iph->daddr,
15094 ++ sizeof(key_addrs->v6addrs.dst));
15095 + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
15096 + }
15097 +
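Both flow_dissector hunks split one memcpy that spanned two struct members into one copy per member: reading sizeof(v4addrs) bytes starting at &iph->saddr walks past the end of the saddr field, which bounds-checked memcpy implementations flag even though the adjacent layout makes it behave. A userspace sketch with simplified stand-in structs:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct hdr  { uint32_t saddr; uint32_t daddr; };  /* like iphdr's tail */
    struct keys { uint32_t src;   uint32_t dst;   };  /* like v4addrs */

    int main(void)
    {
            struct hdr h = { 0x01020304, 0x05060708 };
            struct keys k;

            /* In-bounds regardless of padding: one copy per field. */
            memcpy(&k.src, &h.saddr, sizeof(k.src));
            memcpy(&k.dst, &h.daddr, sizeof(k.dst));

            printf("%08x %08x\n", k.src, k.dst);
            return 0;
    }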
15098 +diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
15099 +index 715b67f6c62f3..e3f0d59068117 100644
15100 +--- a/net/core/flow_offload.c
15101 ++++ b/net/core/flow_offload.c
15102 +@@ -321,6 +321,7 @@ EXPORT_SYMBOL(flow_block_cb_setup_simple);
15103 + static DEFINE_MUTEX(flow_indr_block_lock);
15104 + static LIST_HEAD(flow_block_indr_list);
15105 + static LIST_HEAD(flow_block_indr_dev_list);
15106 ++static LIST_HEAD(flow_indir_dev_list);
15107 +
15108 + struct flow_indr_dev {
15109 + struct list_head list;
15110 +@@ -346,6 +347,33 @@ static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
15111 + return indr_dev;
15112 + }
15113 +
15114 ++struct flow_indir_dev_info {
15115 ++ void *data;
15116 ++ struct net_device *dev;
15117 ++ struct Qdisc *sch;
15118 ++ enum tc_setup_type type;
15119 ++ void (*cleanup)(struct flow_block_cb *block_cb);
15120 ++ struct list_head list;
15121 ++ enum flow_block_command command;
15122 ++ enum flow_block_binder_type binder_type;
15123 ++ struct list_head *cb_list;
15124 ++};
15125 ++
15126 ++static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
15127 ++{
15128 ++ struct flow_block_offload bo;
15129 ++ struct flow_indir_dev_info *cur;
15130 ++
15131 ++ list_for_each_entry(cur, &flow_indir_dev_list, list) {
15132 ++ memset(&bo, 0, sizeof(bo));
15133 ++ bo.command = cur->command;
15134 ++ bo.binder_type = cur->binder_type;
15135 ++ INIT_LIST_HEAD(&bo.cb_list);
15136 ++ cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
15137 ++ list_splice(&bo.cb_list, cur->cb_list);
15138 ++ }
15139 ++}
15140 ++
15141 + int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
15142 + {
15143 + struct flow_indr_dev *indr_dev;
15144 +@@ -367,6 +395,7 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
15145 + }
15146 +
15147 + list_add(&indr_dev->list, &flow_block_indr_dev_list);
15148 ++ existing_qdiscs_register(cb, cb_priv);
15149 + mutex_unlock(&flow_indr_block_lock);
15150 +
15151 + return 0;
15152 +@@ -463,7 +492,59 @@ out:
15153 + }
15154 + EXPORT_SYMBOL(flow_indr_block_cb_alloc);
15155 +
15156 +-int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
15157 ++static struct flow_indir_dev_info *find_indir_dev(void *data)
15158 ++{
15159 ++ struct flow_indir_dev_info *cur;
15160 ++
15161 ++ list_for_each_entry(cur, &flow_indir_dev_list, list) {
15162 ++ if (cur->data == data)
15163 ++ return cur;
15164 ++ }
15165 ++ return NULL;
15166 ++}
15167 ++
15168 ++static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
15169 ++ enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
15170 ++ struct flow_block_offload *bo)
15171 ++{
15172 ++ struct flow_indir_dev_info *info;
15173 ++
15174 ++ info = find_indir_dev(data);
15175 ++ if (info)
15176 ++ return -EEXIST;
15177 ++
15178 ++ info = kzalloc(sizeof(*info), GFP_KERNEL);
15179 ++ if (!info)
15180 ++ return -ENOMEM;
15181 ++
15182 ++ info->data = data;
15183 ++ info->dev = dev;
15184 ++ info->sch = sch;
15185 ++ info->type = type;
15186 ++ info->cleanup = cleanup;
15187 ++ info->command = bo->command;
15188 ++ info->binder_type = bo->binder_type;
15189 ++ info->cb_list = bo->cb_list_head;
15190 ++
15191 ++ list_add(&info->list, &flow_indir_dev_list);
15192 ++ return 0;
15193 ++}
15194 ++
15195 ++static int indir_dev_remove(void *data)
15196 ++{
15197 ++ struct flow_indir_dev_info *info;
15198 ++
15199 ++ info = find_indir_dev(data);
15200 ++ if (!info)
15201 ++ return -ENOENT;
15202 ++
15203 ++ list_del(&info->list);
15204 ++
15205 ++ kfree(info);
15206 ++ return 0;
15207 ++}
15208 ++
15209 ++int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
15210 + enum tc_setup_type type, void *data,
15211 + struct flow_block_offload *bo,
15212 + void (*cleanup)(struct flow_block_cb *block_cb))
15213 +@@ -471,6 +552,12 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
15214 + struct flow_indr_dev *this;
15215 +
15216 + mutex_lock(&flow_indr_block_lock);
15217 ++
15218 ++ if (bo->command == FLOW_BLOCK_BIND)
15219 ++ indir_dev_add(data, dev, sch, type, cleanup, bo);
15220 ++ else if (bo->command == FLOW_BLOCK_UNBIND)
15221 ++ indir_dev_remove(data);
15222 ++
15223 + list_for_each_entry(this, &flow_block_indr_dev_list, list)
15224 + this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
15225 +
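The flow_offload hunks make indirect block BINDs durable: each one is recorded in flow_indir_dev_list, so a driver whose callback registers after the qdisc or flowtable was configured gets the missed setups replayed through existing_qdiscs_register(). A toy model of that record-and-replay pattern (invented types, not the kernel API):

    #include <stdio.h>

    struct binding { int id; };     /* stands in for flow_indir_dev_info */

    static struct binding bindings[8];
    static int nbindings;

    typedef void (*register_cb)(const struct binding *);

    /* A late-loading driver still learns about earlier BIND commands. */
    static void driver_register(register_cb cb)
    {
            for (int i = 0; i < nbindings; i++)
                    cb(&bindings[i]);   /* like existing_qdiscs_register() */
    }

    static void print_binding(const struct binding *b)
    {
            printf("replayed binding %d\n", b->id);
    }

    int main(void)
    {
            bindings[nbindings++] = (struct binding){ 1 };  /* earlier BINDs */
            bindings[nbindings++] = (struct binding){ 2 };
            driver_register(print_binding);
            return 0;
    }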
15226 +diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
15227 +index baa5d10043cb0..6134b180f59f8 100644
15228 +--- a/net/ethtool/ioctl.c
15229 ++++ b/net/ethtool/ioctl.c
15230 +@@ -7,6 +7,7 @@
15231 + * the information ethtool needs.
15232 + */
15233 +
15234 ++#include <linux/compat.h>
15235 + #include <linux/module.h>
15236 + #include <linux/types.h>
15237 + #include <linux/capability.h>
15238 +@@ -807,6 +808,120 @@ out:
15239 + return ret;
15240 + }
15241 +
15242 ++static noinline_for_stack int
15243 ++ethtool_rxnfc_copy_from_compat(struct ethtool_rxnfc *rxnfc,
15244 ++ const struct compat_ethtool_rxnfc __user *useraddr,
15245 ++ size_t size)
15246 ++{
15247 ++ struct compat_ethtool_rxnfc crxnfc = {};
15248 ++
15249 ++ /* We expect there to be holes between fs.m_ext and
15250 ++ * fs.ring_cookie and at the end of fs, but nowhere else.
15251 ++ * On non-x86, no conversion should be needed.
15252 ++ */
15253 ++ BUILD_BUG_ON(!IS_ENABLED(CONFIG_X86_64) &&
15254 ++ sizeof(struct compat_ethtool_rxnfc) !=
15255 ++ sizeof(struct ethtool_rxnfc));
15256 ++ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) +
15257 ++ sizeof(useraddr->fs.m_ext) !=
15258 ++ offsetof(struct ethtool_rxnfc, fs.m_ext) +
15259 ++ sizeof(rxnfc->fs.m_ext));
15260 ++ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.location) -
15261 ++ offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
15262 ++ offsetof(struct ethtool_rxnfc, fs.location) -
15263 ++ offsetof(struct ethtool_rxnfc, fs.ring_cookie));
15264 ++
15265 ++ if (copy_from_user(&crxnfc, useraddr, min(size, sizeof(crxnfc))))
15266 ++ return -EFAULT;
15267 ++
15268 ++ *rxnfc = (struct ethtool_rxnfc) {
15269 ++ .cmd = crxnfc.cmd,
15270 ++ .flow_type = crxnfc.flow_type,
15271 ++ .data = crxnfc.data,
15272 ++ .fs = {
15273 ++ .flow_type = crxnfc.fs.flow_type,
15274 ++ .h_u = crxnfc.fs.h_u,
15275 ++ .h_ext = crxnfc.fs.h_ext,
15276 ++ .m_u = crxnfc.fs.m_u,
15277 ++ .m_ext = crxnfc.fs.m_ext,
15278 ++ .ring_cookie = crxnfc.fs.ring_cookie,
15279 ++ .location = crxnfc.fs.location,
15280 ++ },
15281 ++ .rule_cnt = crxnfc.rule_cnt,
15282 ++ };
15283 ++
15284 ++ return 0;
15285 ++}
15286 ++
15287 ++static int ethtool_rxnfc_copy_from_user(struct ethtool_rxnfc *rxnfc,
15288 ++ const void __user *useraddr,
15289 ++ size_t size)
15290 ++{
15291 ++ if (compat_need_64bit_alignment_fixup())
15292 ++ return ethtool_rxnfc_copy_from_compat(rxnfc, useraddr, size);
15293 ++
15294 ++ if (copy_from_user(rxnfc, useraddr, size))
15295 ++ return -EFAULT;
15296 ++
15297 ++ return 0;
15298 ++}
15299 ++
15300 ++static int ethtool_rxnfc_copy_to_compat(void __user *useraddr,
15301 ++ const struct ethtool_rxnfc *rxnfc,
15302 ++ size_t size, const u32 *rule_buf)
15303 ++{
15304 ++ struct compat_ethtool_rxnfc crxnfc;
15305 ++
15306 ++ memset(&crxnfc, 0, sizeof(crxnfc));
15307 ++ crxnfc = (struct compat_ethtool_rxnfc) {
15308 ++ .cmd = rxnfc->cmd,
15309 ++ .flow_type = rxnfc->flow_type,
15310 ++ .data = rxnfc->data,
15311 ++ .fs = {
15312 ++ .flow_type = rxnfc->fs.flow_type,
15313 ++ .h_u = rxnfc->fs.h_u,
15314 ++ .h_ext = rxnfc->fs.h_ext,
15315 ++ .m_u = rxnfc->fs.m_u,
15316 ++ .m_ext = rxnfc->fs.m_ext,
15317 ++ .ring_cookie = rxnfc->fs.ring_cookie,
15318 ++ .location = rxnfc->fs.location,
15319 ++ },
15320 ++ .rule_cnt = rxnfc->rule_cnt,
15321 ++ };
15322 ++
15323 ++ if (copy_to_user(useraddr, &crxnfc, min(size, sizeof(crxnfc))))
15324 ++ return -EFAULT;
15325 ++
15326 ++ return 0;
15327 ++}
15328 ++
15329 ++static int ethtool_rxnfc_copy_to_user(void __user *useraddr,
15330 ++ const struct ethtool_rxnfc *rxnfc,
15331 ++ size_t size, const u32 *rule_buf)
15332 ++{
15333 ++ int ret;
15334 ++
15335 ++ if (compat_need_64bit_alignment_fixup()) {
15336 ++ ret = ethtool_rxnfc_copy_to_compat(useraddr, rxnfc, size,
15337 ++ rule_buf);
15338 ++ useraddr += offsetof(struct compat_ethtool_rxnfc, rule_locs);
15339 ++ } else {
15340 ++ ret = copy_to_user(useraddr, &rxnfc, size);
15341 ++ useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
15342 ++ }
15343 ++
15344 ++ if (ret)
15345 ++ return -EFAULT;
15346 ++
15347 ++ if (rule_buf) {
15348 ++ if (copy_to_user(useraddr, rule_buf,
15349 ++ rxnfc->rule_cnt * sizeof(u32)))
15350 ++ return -EFAULT;
15351 ++ }
15352 ++
15353 ++ return 0;
15354 ++}
15355 ++
15356 + static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
15357 + u32 cmd, void __user *useraddr)
15358 + {
15359 +@@ -825,7 +940,7 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
15360 + info_size = (offsetof(struct ethtool_rxnfc, data) +
15361 + sizeof(info.data));
15362 +
15363 +- if (copy_from_user(&info, useraddr, info_size))
15364 ++ if (ethtool_rxnfc_copy_from_user(&info, useraddr, info_size))
15365 + return -EFAULT;
15366 +
15367 + rc = dev->ethtool_ops->set_rxnfc(dev, &info);
15368 +@@ -833,7 +948,7 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
15369 + return rc;
15370 +
15371 + if (cmd == ETHTOOL_SRXCLSRLINS &&
15372 +- copy_to_user(useraddr, &info, info_size))
15373 ++ ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL))
15374 + return -EFAULT;
15375 +
15376 + return 0;
15377 +@@ -859,7 +974,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
15378 + info_size = (offsetof(struct ethtool_rxnfc, data) +
15379 + sizeof(info.data));
15380 +
15381 +- if (copy_from_user(&info, useraddr, info_size))
15382 ++ if (ethtool_rxnfc_copy_from_user(&info, useraddr, info_size))
15383 + return -EFAULT;
15384 +
15385 + /* If FLOW_RSS was requested then user-space must be using the
15386 +@@ -867,7 +982,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
15387 + */
15388 + if (cmd == ETHTOOL_GRXFH && info.flow_type & FLOW_RSS) {
15389 + info_size = sizeof(info);
15390 +- if (copy_from_user(&info, useraddr, info_size))
15391 ++ if (ethtool_rxnfc_copy_from_user(&info, useraddr, info_size))
15392 + return -EFAULT;
15393 + /* Since malicious users may modify the original data,
15394 + * we need to check whether FLOW_RSS is still requested.
15395 +@@ -893,18 +1008,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
15396 + if (ret < 0)
15397 + goto err_out;
15398 +
15399 +- ret = -EFAULT;
15400 +- if (copy_to_user(useraddr, &info, info_size))
15401 +- goto err_out;
15402 +-
15403 +- if (rule_buf) {
15404 +- useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
15405 +- if (copy_to_user(useraddr, rule_buf,
15406 +- info.rule_cnt * sizeof(u32)))
15407 +- goto err_out;
15408 +- }
15409 +- ret = 0;
15410 +-
15411 ++ ret = ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, rule_buf);
15412 + err_out:
15413 + kfree(rule_buf);
15414 +
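The new in-kernel compat helpers exist because 32-bit x86 aligns u64 to 4 bytes while x86-64 aligns it to 8, so a struct ethtool_rxnfc built by a 32-bit process places fs.ring_cookie (and everything after it) at different offsets than the 64-bit kernel's view; compat_need_64bit_alignment_fixup() gates the translation. A userspace illustration with simplified stand-in structs, where only the alignment trick is the point:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t __attribute__((aligned(4))) compat_u64;

    struct fs_native { uint32_t m_ext; uint64_t   ring_cookie; uint32_t location; };
    struct fs_compat { uint32_t m_ext; compat_u64 ring_cookie; uint32_t location; };

    int main(void)
    {
            printf("native ring_cookie at %zu, compat at %zu\n",
                   offsetof(struct fs_native, ring_cookie),
                   offsetof(struct fs_compat, ring_cookie));
            /* On x86-64: 8 vs 4 -- the hole the copy helpers bridge. */
            return 0;
    }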
15415 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
15416 +index 8d8a8da3ae7e0..a202dcec0dc27 100644
15417 +--- a/net/ipv4/ip_output.c
15418 ++++ b/net/ipv4/ip_output.c
15419 +@@ -446,8 +446,9 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
15420 + {
15421 + BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
15422 + offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
15423 +- memcpy(&iph->saddr, &fl4->saddr,
15424 +- sizeof(fl4->saddr) + sizeof(fl4->daddr));
15425 ++
15426 ++ iph->saddr = fl4->saddr;
15427 ++ iph->daddr = fl4->daddr;
15428 + }
15429 +
15430 + /* Note: skb->sk can be different from sk, in case of tunnels */
15431 +diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
15432 +index d49709ba8e165..1071119843843 100644
15433 +--- a/net/ipv4/tcp_fastopen.c
15434 ++++ b/net/ipv4/tcp_fastopen.c
15435 +@@ -379,8 +379,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
15436 + return NULL;
15437 + }
15438 +
15439 +- if (syn_data &&
15440 +- tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
15441 ++ if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
15442 + goto fastopen;
15443 +
15444 + if (foc->len == 0) {
15445 +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
15446 +index 137fa4c50e07a..8df7ab34911c7 100644
15447 +--- a/net/mac80211/iface.c
15448 ++++ b/net/mac80211/iface.c
15449 +@@ -1985,9 +1985,16 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
15450 +
15451 + netdev_set_default_ethtool_ops(ndev, &ieee80211_ethtool_ops);
15452 +
15453 +- /* MTU range: 256 - 2304 */
15454 ++ /* MTU range is normally 256 - 2304, where the upper limit is
15455 ++ * the maximum MSDU size. Monitor interfaces send and receive
15456 ++	 * MPDU and A-MSDU frames, which may be much larger, so we
15457 ++	 * do not impose an upper limit in that case.
15458 ++ */
15459 + ndev->min_mtu = 256;
15460 +- ndev->max_mtu = local->hw.max_mtu;
15461 ++ if (type == NL80211_IFTYPE_MONITOR)
15462 ++ ndev->max_mtu = 0;
15463 ++ else
15464 ++ ndev->max_mtu = local->hw.max_mtu;
15465 +
15466 + ret = cfg80211_register_netdevice(ndev);
15467 + if (ret) {
15468 +diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
15469 +index 528b2f1726844..0587f071e5040 100644
15470 +--- a/net/netfilter/nf_flow_table_offload.c
15471 ++++ b/net/netfilter/nf_flow_table_offload.c
15472 +@@ -1097,6 +1097,7 @@ static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
15473 + bo->command = cmd;
15474 + bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
15475 + bo->extack = extack;
15476 ++ bo->cb_list_head = &flowtable->flow_block.cb_list;
15477 + INIT_LIST_HEAD(&bo->cb_list);
15478 + }
15479 +
15480 +diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
15481 +index b58d73a965232..9656c16462222 100644
15482 +--- a/net/netfilter/nf_tables_offload.c
15483 ++++ b/net/netfilter/nf_tables_offload.c
15484 +@@ -353,6 +353,7 @@ static void nft_flow_block_offload_init(struct flow_block_offload *bo,
15485 + bo->command = cmd;
15486 + bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
15487 + bo->extack = extack;
15488 ++ bo->cb_list_head = &basechain->flow_block.cb_list;
15489 + INIT_LIST_HEAD(&bo->cb_list);
15490 + }
15491 +
15492 +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
15493 +index 5415ab14400d7..31e6da30da5f0 100644
15494 +--- a/net/netfilter/nft_compat.c
15495 ++++ b/net/netfilter/nft_compat.c
15496 +@@ -680,14 +680,12 @@ static int nfnl_compat_get_rcu(struct sk_buff *skb,
15497 + goto out_put;
15498 + }
15499 +
15500 +- ret = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
15501 +- MSG_DONTWAIT);
15502 +- if (ret > 0)
15503 +- ret = 0;
15504 ++ ret = nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
15505 + out_put:
15506 + rcu_read_lock();
15507 + module_put(THIS_MODULE);
15508 +- return ret == -EAGAIN ? -ENOBUFS : ret;
15509 ++
15510 ++ return ret;
15511 + }
15512 +
15513 + static const struct nla_policy nfnl_compat_policy_get[NFTA_COMPAT_MAX+1] = {
15514 +diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
15515 +index 50f40943c8153..f3f1df1b0f8e2 100644
15516 +--- a/net/netlabel/netlabel_cipso_v4.c
15517 ++++ b/net/netlabel/netlabel_cipso_v4.c
15518 +@@ -144,8 +144,8 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
15519 + return -ENOMEM;
15520 + doi_def->map.std = kzalloc(sizeof(*doi_def->map.std), GFP_KERNEL);
15521 + if (doi_def->map.std == NULL) {
15522 +- ret_val = -ENOMEM;
15523 +- goto add_std_failure;
15524 ++ kfree(doi_def);
15525 ++ return -ENOMEM;
15526 + }
15527 + doi_def->type = CIPSO_V4_MAP_TRANS;
15528 +
15529 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
15530 +index 6133e412b948c..b9ed16ff09c12 100644
15531 +--- a/net/netlink/af_netlink.c
15532 ++++ b/net/netlink/af_netlink.c
15533 +@@ -2545,13 +2545,15 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
15534 + /* errors reported via destination sk->sk_err, but propagate
15535 + * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
15536 + err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
15537 ++ if (err == -ESRCH)
15538 ++ err = 0;
15539 + }
15540 +
15541 + if (report) {
15542 + int err2;
15543 +
15544 + err2 = nlmsg_unicast(sk, skb, portid);
15545 +- if (!err || err == -ESRCH)
15546 ++ if (!err)
15547 + err = err2;
15548 + }
15549 +
15550 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
15551 +index e3e79e9bd7067..9b276d14be4c4 100644
15552 +--- a/net/sched/cls_api.c
15553 ++++ b/net/sched/cls_api.c
15554 +@@ -634,6 +634,7 @@ static void tcf_block_offload_init(struct flow_block_offload *bo,
15555 + bo->block_shared = shared;
15556 + bo->extack = extack;
15557 + bo->sch = sch;
15558 ++ bo->cb_list_head = &flow_block->cb_list;
15559 + INIT_LIST_HEAD(&bo->cb_list);
15560 + }
15561 +
15562 +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
15563 +index 5c91df52b8c2c..b0d6385fff9e3 100644
15564 +--- a/net/sched/sch_taprio.c
15565 ++++ b/net/sched/sch_taprio.c
15566 +@@ -1547,7 +1547,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
15567 + taprio_set_picos_per_byte(dev, q);
15568 +
15569 + if (mqprio) {
15570 +- netdev_set_num_tc(dev, mqprio->num_tc);
15571 ++ err = netdev_set_num_tc(dev, mqprio->num_tc);
15572 ++ if (err)
15573 ++ goto free_sched;
15574 + for (i = 0; i < mqprio->num_tc; i++)
15575 + netdev_set_tc_queue(dev, i,
15576 + mqprio->count[i],
15577 +diff --git a/net/socket.c b/net/socket.c
15578 +index 877f1fb61719a..caac290ba7ec1 100644
15579 +--- a/net/socket.c
15580 ++++ b/net/socket.c
15581 +@@ -3099,128 +3099,6 @@ static int compat_dev_ifconf(struct net *net, struct compat_ifconf __user *uifc3
15582 + return 0;
15583 + }
15584 +
15585 +-static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
15586 +-{
15587 +- struct compat_ethtool_rxnfc __user *compat_rxnfc;
15588 +- bool convert_in = false, convert_out = false;
15589 +- size_t buf_size = 0;
15590 +- struct ethtool_rxnfc __user *rxnfc = NULL;
15591 +- struct ifreq ifr;
15592 +- u32 rule_cnt = 0, actual_rule_cnt;
15593 +- u32 ethcmd;
15594 +- u32 data;
15595 +- int ret;
15596 +-
15597 +- if (get_user(data, &ifr32->ifr_ifru.ifru_data))
15598 +- return -EFAULT;
15599 +-
15600 +- compat_rxnfc = compat_ptr(data);
15601 +-
15602 +- if (get_user(ethcmd, &compat_rxnfc->cmd))
15603 +- return -EFAULT;
15604 +-
15605 +- /* Most ethtool structures are defined without padding.
15606 +- * Unfortunately struct ethtool_rxnfc is an exception.
15607 +- */
15608 +- switch (ethcmd) {
15609 +- default:
15610 +- break;
15611 +- case ETHTOOL_GRXCLSRLALL:
15612 +- /* Buffer size is variable */
15613 +- if (get_user(rule_cnt, &compat_rxnfc->rule_cnt))
15614 +- return -EFAULT;
15615 +- if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32))
15616 +- return -ENOMEM;
15617 +- buf_size += rule_cnt * sizeof(u32);
15618 +- fallthrough;
15619 +- case ETHTOOL_GRXRINGS:
15620 +- case ETHTOOL_GRXCLSRLCNT:
15621 +- case ETHTOOL_GRXCLSRULE:
15622 +- case ETHTOOL_SRXCLSRLINS:
15623 +- convert_out = true;
15624 +- fallthrough;
15625 +- case ETHTOOL_SRXCLSRLDEL:
15626 +- buf_size += sizeof(struct ethtool_rxnfc);
15627 +- convert_in = true;
15628 +- rxnfc = compat_alloc_user_space(buf_size);
15629 +- break;
15630 +- }
15631 +-
15632 +- if (copy_from_user(&ifr.ifr_name, &ifr32->ifr_name, IFNAMSIZ))
15633 +- return -EFAULT;
15634 +-
15635 +- ifr.ifr_data = convert_in ? rxnfc : (void __user *)compat_rxnfc;
15636 +-
15637 +- if (convert_in) {
15638 +- /* We expect there to be holes between fs.m_ext and
15639 +- * fs.ring_cookie and at the end of fs, but nowhere else.
15640 +- */
15641 +- BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) +
15642 +- sizeof(compat_rxnfc->fs.m_ext) !=
15643 +- offsetof(struct ethtool_rxnfc, fs.m_ext) +
15644 +- sizeof(rxnfc->fs.m_ext));
15645 +- BUILD_BUG_ON(
15646 +- offsetof(struct compat_ethtool_rxnfc, fs.location) -
15647 +- offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
15648 +- offsetof(struct ethtool_rxnfc, fs.location) -
15649 +- offsetof(struct ethtool_rxnfc, fs.ring_cookie));
15650 +-
15651 +- if (copy_in_user(rxnfc, compat_rxnfc,
15652 +- (void __user *)(&rxnfc->fs.m_ext + 1) -
15653 +- (void __user *)rxnfc) ||
15654 +- copy_in_user(&rxnfc->fs.ring_cookie,
15655 +- &compat_rxnfc->fs.ring_cookie,
15656 +- (void __user *)(&rxnfc->fs.location + 1) -
15657 +- (void __user *)&rxnfc->fs.ring_cookie))
15658 +- return -EFAULT;
15659 +- if (ethcmd == ETHTOOL_GRXCLSRLALL) {
15660 +- if (put_user(rule_cnt, &rxnfc->rule_cnt))
15661 +- return -EFAULT;
15662 +- } else if (copy_in_user(&rxnfc->rule_cnt,
15663 +- &compat_rxnfc->rule_cnt,
15664 +- sizeof(rxnfc->rule_cnt)))
15665 +- return -EFAULT;
15666 +- }
15667 +-
15668 +- ret = dev_ioctl(net, SIOCETHTOOL, &ifr, NULL);
15669 +- if (ret)
15670 +- return ret;
15671 +-
15672 +- if (convert_out) {
15673 +- if (copy_in_user(compat_rxnfc, rxnfc,
15674 +- (const void __user *)(&rxnfc->fs.m_ext + 1) -
15675 +- (const void __user *)rxnfc) ||
15676 +- copy_in_user(&compat_rxnfc->fs.ring_cookie,
15677 +- &rxnfc->fs.ring_cookie,
15678 +- (const void __user *)(&rxnfc->fs.location + 1) -
15679 +- (const void __user *)&rxnfc->fs.ring_cookie) ||
15680 +- copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
15681 +- sizeof(rxnfc->rule_cnt)))
15682 +- return -EFAULT;
15683 +-
15684 +- if (ethcmd == ETHTOOL_GRXCLSRLALL) {
15685 +- /* As an optimisation, we only copy the actual
15686 +- * number of rules that the underlying
15687 +- * function returned. Since Mallory might
15688 +- * change the rule count in user memory, we
15689 +- * check that it is less than the rule count
15690 +- * originally given (as the user buffer size),
15691 +- * which has been range-checked.
15692 +- */
15693 +- if (get_user(actual_rule_cnt, &rxnfc->rule_cnt))
15694 +- return -EFAULT;
15695 +- if (actual_rule_cnt < rule_cnt)
15696 +- rule_cnt = actual_rule_cnt;
15697 +- if (copy_in_user(&compat_rxnfc->rule_locs[0],
15698 +- &rxnfc->rule_locs[0],
15699 +- rule_cnt * sizeof(u32)))
15700 +- return -EFAULT;
15701 +- }
15702 +- }
15703 +-
15704 +- return 0;
15705 +-}
15706 +-
15707 + static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32)
15708 + {
15709 + compat_uptr_t uptr32;
15710 +@@ -3377,8 +3255,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
15711 + return old_bridge_ioctl(argp);
15712 + case SIOCGIFCONF:
15713 + return compat_dev_ifconf(net, argp);
15714 +- case SIOCETHTOOL:
15715 +- return ethtool_ioctl(net, argp);
15716 + case SIOCWANDEV:
15717 + return compat_siocwandev(net, argp);
15718 + case SIOCGIFMAP:
15719 +@@ -3391,6 +3267,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
15720 + return sock->ops->gettstamp(sock, argp, cmd == SIOCGSTAMP_OLD,
15721 + !COMPAT_USE_64BIT_TIME);
15722 +
15723 ++ case SIOCETHTOOL:
15724 + case SIOCBONDSLAVEINFOQUERY:
15725 + case SIOCBONDINFOQUERY:
15726 + case SIOCSHWTSTAMP:
15727 +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
15728 +index 6dff64374bfe1..e22f2d65457da 100644
15729 +--- a/net/sunrpc/auth_gss/svcauth_gss.c
15730 ++++ b/net/sunrpc/auth_gss/svcauth_gss.c
15731 +@@ -1980,7 +1980,7 @@ gss_svc_init_net(struct net *net)
15732 + goto out2;
15733 + return 0;
15734 + out2:
15735 +- destroy_use_gss_proxy_proc_entry(net);
15736 ++ rsi_cache_destroy_net(net);
15737 + out1:
15738 + rsc_cache_destroy_net(net);
15739 + return rv;
15740 +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
15741 +index 3509a7f139b98..8d3983c8b4d60 100644
15742 +--- a/net/sunrpc/xprt.c
15743 ++++ b/net/sunrpc/xprt.c
15744 +@@ -774,9 +774,9 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
15745 + /* Try to schedule an autoclose RPC call */
15746 + if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
15747 + queue_work(xprtiod_workqueue, &xprt->task_cleanup);
15748 +- else if (xprt->snd_task)
15749 ++ else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
15750 + rpc_wake_up_queued_task_set_status(&xprt->pending,
15751 +- xprt->snd_task, -ENOTCONN);
15752 ++ xprt->snd_task, -ENOTCONN);
15753 + spin_unlock(&xprt->transport_lock);
15754 + }
15755 + EXPORT_SYMBOL_GPL(xprt_force_disconnect);
15756 +@@ -865,12 +865,14 @@ bool xprt_lock_connect(struct rpc_xprt *xprt,
15757 + goto out;
15758 + if (xprt->snd_task != task)
15759 + goto out;
15760 ++ set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
15761 + xprt->snd_task = cookie;
15762 + ret = true;
15763 + out:
15764 + spin_unlock(&xprt->transport_lock);
15765 + return ret;
15766 + }
15767 ++EXPORT_SYMBOL_GPL(xprt_lock_connect);
15768 +
15769 + void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
15770 + {
15771 +@@ -880,12 +882,14 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
15772 + if (!test_bit(XPRT_LOCKED, &xprt->state))
15773 + goto out;
15774 + 	xprt->snd_task = NULL;
15775 ++ clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
15776 + xprt->ops->release_xprt(xprt, NULL);
15777 + xprt_schedule_autodisconnect(xprt);
15778 + out:
15779 + spin_unlock(&xprt->transport_lock);
15780 + wake_up_bit(&xprt->state, XPRT_LOCKED);
15781 + }
15782 ++EXPORT_SYMBOL_GPL(xprt_unlock_connect);
15783 +
15784 + /**
15785 + * xprt_connect - schedule a transport connect operation
15786 +diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
15787 +index 19a49d26b1e41..d2052f06acfae 100644
15788 +--- a/net/sunrpc/xprtrdma/transport.c
15789 ++++ b/net/sunrpc/xprtrdma/transport.c
15790 +@@ -249,12 +249,9 @@ xprt_rdma_connect_worker(struct work_struct *work)
15791 + xprt->stat.connect_start;
15792 + xprt_set_connected(xprt);
15793 + rc = -EAGAIN;
15794 +- } else {
15795 +- /* Force a call to xprt_rdma_close to clean up */
15796 +- spin_lock(&xprt->transport_lock);
15797 +- set_bit(XPRT_CLOSE_WAIT, &xprt->state);
15798 +- spin_unlock(&xprt->transport_lock);
15799 +- }
15800 ++ } else
15801 ++ rpcrdma_xprt_disconnect(r_xprt);
15802 ++ xprt_unlock_connect(xprt, r_xprt);
15803 + xprt_wake_pending_tasks(xprt, rc);
15804 + }
15805 +
15806 +@@ -487,6 +484,8 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
15807 + struct rpcrdma_ep *ep = r_xprt->rx_ep;
15808 + unsigned long delay;
15809 +
15810 ++ WARN_ON_ONCE(!xprt_lock_connect(xprt, task, r_xprt));
15811 ++
15812 + delay = 0;
15813 + if (ep && ep->re_connect_status != 0) {
15814 + delay = xprt_reconnect_delay(xprt);
15815 +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
15816 +index 649c23518ec04..5a11e318a0d99 100644
15817 +--- a/net/sunrpc/xprtrdma/verbs.c
15818 ++++ b/net/sunrpc/xprtrdma/verbs.c
15819 +@@ -1416,11 +1416,6 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
15820 +
15821 + rc = ib_post_recv(ep->re_id->qp, wr,
15822 + (const struct ib_recv_wr **)&bad_wr);
15823 +- if (atomic_dec_return(&ep->re_receiving) > 0)
15824 +- complete(&ep->re_done);
15825 +-
15826 +-out:
15827 +- trace_xprtrdma_post_recvs(r_xprt, count, rc);
15828 + if (rc) {
15829 + for (wr = bad_wr; wr;) {
15830 + struct rpcrdma_rep *rep;
15831 +@@ -1431,6 +1426,11 @@ out:
15832 + --count;
15833 + }
15834 + }
15835 ++ if (atomic_dec_return(&ep->re_receiving) > 0)
15836 ++ complete(&ep->re_done);
15837 ++
15838 ++out:
15839 ++ trace_xprtrdma_post_recvs(r_xprt, count, rc);
15840 + ep->re_receive_count += count;
15841 + return;
15842 + }
15843 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
15844 +index 3228b7a1836aa..b836b4c322fc8 100644
15845 +--- a/net/sunrpc/xprtsock.c
15846 ++++ b/net/sunrpc/xprtsock.c
15847 +@@ -1648,6 +1648,13 @@ static int xs_get_srcport(struct sock_xprt *transport)
15848 + return port;
15849 + }
15850 +
15851 ++unsigned short get_srcport(struct rpc_xprt *xprt)
15852 ++{
15853 ++ struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
15854 ++ return xs_sock_getport(sock->sock);
15855 ++}
15856 ++EXPORT_SYMBOL(get_srcport);
15857 ++
15858 + static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
15859 + {
15860 + if (transport->srcport != 0)
15861 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
15862 +index a0dce194a404a..5d036b9c15d27 100644
15863 +--- a/net/tipc/socket.c
15864 ++++ b/net/tipc/socket.c
15865 +@@ -1905,6 +1905,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
15866 + bool connected = !tipc_sk_type_connectionless(sk);
15867 + struct tipc_sock *tsk = tipc_sk(sk);
15868 + int rc, err, hlen, dlen, copy;
15869 ++ struct tipc_skb_cb *skb_cb;
15870 + struct sk_buff_head xmitq;
15871 + struct tipc_msg *hdr;
15872 + struct sk_buff *skb;
15873 +@@ -1928,6 +1929,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
15874 + if (unlikely(rc))
15875 + goto exit;
15876 + skb = skb_peek(&sk->sk_receive_queue);
15877 ++ skb_cb = TIPC_SKB_CB(skb);
15878 + hdr = buf_msg(skb);
15879 + dlen = msg_data_sz(hdr);
15880 + hlen = msg_hdr_sz(hdr);
15881 +@@ -1947,18 +1949,33 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
15882 +
15883 + /* Capture data if non-error msg, otherwise just set return value */
15884 + if (likely(!err)) {
15885 +- copy = min_t(int, dlen, buflen);
15886 +- if (unlikely(copy != dlen))
15887 +- m->msg_flags |= MSG_TRUNC;
15888 +- rc = skb_copy_datagram_msg(skb, hlen, m, copy);
15889 ++ int offset = skb_cb->bytes_read;
15890 ++
15891 ++ copy = min_t(int, dlen - offset, buflen);
15892 ++ rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
15893 ++ if (unlikely(rc))
15894 ++ goto exit;
15895 ++ if (unlikely(offset + copy < dlen)) {
15896 ++ if (flags & MSG_EOR) {
15897 ++ if (!(flags & MSG_PEEK))
15898 ++ skb_cb->bytes_read = offset + copy;
15899 ++ } else {
15900 ++ m->msg_flags |= MSG_TRUNC;
15901 ++ skb_cb->bytes_read = 0;
15902 ++ }
15903 ++ } else {
15904 ++ if (flags & MSG_EOR)
15905 ++ m->msg_flags |= MSG_EOR;
15906 ++ skb_cb->bytes_read = 0;
15907 ++ }
15908 + } else {
15909 + copy = 0;
15910 + rc = 0;
15911 +- if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
15912 ++ if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
15913 + rc = -ECONNRESET;
15914 ++ goto exit;
15915 ++ }
15916 + }
15917 +- if (unlikely(rc))
15918 +- goto exit;
15919 +
15920 + /* Mark message as group event if applicable */
15921 + if (unlikely(grp_evt)) {
15922 +@@ -1981,9 +1998,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
15923 + tipc_node_distr_xmit(sock_net(sk), &xmitq);
15924 + }
15925 +
15926 +- tsk_advance_rx_queue(sk);
15927 ++ if (!skb_cb->bytes_read)
15928 ++ tsk_advance_rx_queue(sk);
15929 +
15930 +- if (likely(!connected))
15931 ++ if (likely(!connected) || skb_cb->bytes_read)
15932 + goto exit;
15933 +
15934 + /* Send connection flow control advertisement when applicable */
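The tipc hunk lets a message survive a short read: with MSG_EOR set, skb_cb->bytes_read records how far the payload was consumed and the skb stays queued, so the next recvmsg() resumes at the saved offset; without MSG_EOR the old truncate-and-drop behaviour is kept. The bookkeeping, modelled in userspace with hypothetical sizes where each loop iteration stands for one recvmsg(..., MSG_EOR) call:

    #include <stdio.h>

    int main(void)
    {
            int dlen = 1000, buflen = 400, bytes_read = 0;

            while (bytes_read < dlen) {
                    int left = dlen - bytes_read;
                    int copy = left < buflen ? left : buflen;

                    bytes_read += copy;
                    printf("copied %d, total %d%s\n", copy, bytes_read,
                           bytes_read < dlen ? "" : " (advance rx queue)");
            }
            return 0;
    }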
15935 +diff --git a/samples/bpf/test_override_return.sh b/samples/bpf/test_override_return.sh
15936 +index e68b9ee6814b8..35db26f736b9d 100755
15937 +--- a/samples/bpf/test_override_return.sh
15938 ++++ b/samples/bpf/test_override_return.sh
15939 +@@ -1,5 +1,6 @@
15940 + #!/bin/bash
15941 +
15942 ++rm -r tmpmnt
15943 + rm -f testfile.img
15944 + dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1
15945 + DEVICE=$(losetup --show -f testfile.img)
15946 +diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c
15947 +index fdcd6580dd736..8be7ce18d3ba0 100644
15948 +--- a/samples/bpf/tracex7_user.c
15949 ++++ b/samples/bpf/tracex7_user.c
15950 +@@ -14,6 +14,11 @@ int main(int argc, char **argv)
15951 + int ret = 0;
15952 + FILE *f;
15953 +
15954 ++ if (!argv[1]) {
15955 ++ fprintf(stderr, "ERROR: Run with the btrfs device argument!\n");
15956 ++ return 0;
15957 ++ }
15958 ++
15959 + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
15960 + obj = bpf_object__open_file(filename, NULL);
15961 + if (libbpf_get_error(obj)) {
15962 +diff --git a/samples/pktgen/pktgen_sample03_burst_single_flow.sh b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
15963 +index 5adcf954de731..c12198d5bbe57 100755
15964 +--- a/samples/pktgen/pktgen_sample03_burst_single_flow.sh
15965 ++++ b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
15966 +@@ -83,7 +83,7 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
15967 + done
15968 +
15969 + # Run if user hits control-c
15970 +-function control_c() {
15971 ++function print_result() {
15972 + # Print results
15973 + for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
15974 + dev=${DEV}@${thread}
15975 +@@ -92,11 +92,13 @@ function control_c() {
15976 + done
15977 + }
15978 + # trap keyboard interrupt (Ctrl-C)
15979 +-trap control_c SIGINT
15980 ++trap true SIGINT
15981 +
15982 + if [ -z "$APPEND" ]; then
15983 + echo "Running... ctrl^C to stop" >&2
15984 + pg_ctrl "start"
15985 ++
15986 ++ print_result
15987 + else
15988 + echo "Append mode: config done. Do more or use 'pg_ctrl start' to run"
15989 + fi
15990 +diff --git a/scripts/gen_ksymdeps.sh b/scripts/gen_ksymdeps.sh
15991 +index 1324986e1362c..725e8c9c1b53f 100755
15992 +--- a/scripts/gen_ksymdeps.sh
15993 ++++ b/scripts/gen_ksymdeps.sh
15994 +@@ -4,7 +4,13 @@
15995 + set -e
15996 +
15997 + # List of exported symbols
15998 +-ksyms=$($NM $1 | sed -n 's/.*__ksym_marker_\(.*\)/\1/p' | tr A-Z a-z)
15999 ++#
16000 ++# If the object has no symbols, $NM warns 'no symbols'.
16001 ++# Suppress the stderr.
16002 ++# TODO:
16003 ++# Use -q instead of 2>/dev/null when we upgrade the minimum version of
16004 ++# binutils to 2.37, llvm to 13.0.0.
16005 ++ksyms=$($NM $1 2>/dev/null | sed -n 's/.*__ksym_marker_\(.*\)/\1/p' | tr A-Z a-z)
16006 +
16007 + if [ -z "$ksyms" ]; then
16008 + exit 0
16009 +diff --git a/scripts/subarch.include b/scripts/subarch.include
16010 +index 650682821126c..776849a3c500f 100644
16011 +--- a/scripts/subarch.include
16012 ++++ b/scripts/subarch.include
16013 +@@ -7,7 +7,7 @@
16014 + SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
16015 + -e s/sun4u/sparc64/ \
16016 + -e s/arm.*/arm/ -e s/sa110/arm/ \
16017 +- -e s/s390x/s390/ -e s/parisc64/parisc/ \
16018 ++ -e s/s390x/s390/ \
16019 + -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
16020 + -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
16021 + -e s/riscv.*/riscv/)
16022 +diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
16023 +index 7eabb448acab4..169929c6c4eb3 100644
16024 +--- a/security/smack/smack_access.c
16025 ++++ b/security/smack/smack_access.c
16026 +@@ -81,23 +81,22 @@ int log_policy = SMACK_AUDIT_DENIED;
16027 + int smk_access_entry(char *subject_label, char *object_label,
16028 + struct list_head *rule_list)
16029 + {
16030 +- int may = -ENOENT;
16031 + struct smack_rule *srp;
16032 +
16033 + list_for_each_entry_rcu(srp, rule_list, list) {
16034 + if (srp->smk_object->smk_known == object_label &&
16035 + srp->smk_subject->smk_known == subject_label) {
16036 +- may = srp->smk_access;
16037 +- break;
16038 ++ int may = srp->smk_access;
16039 ++ /*
16040 ++ * MAY_WRITE implies MAY_LOCK.
16041 ++ */
16042 ++ if ((may & MAY_WRITE) == MAY_WRITE)
16043 ++ may |= MAY_LOCK;
16044 ++ return may;
16045 + }
16046 + }
16047 +
16048 +- /*
16049 +- * MAY_WRITE implies MAY_LOCK.
16050 +- */
16051 +- if ((may & MAY_WRITE) == MAY_WRITE)
16052 +- may |= MAY_LOCK;
16053 +- return may;
16054 ++ return -ENOENT;
16055 + }
16056 +
16057 + /**
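The smack_access.c rework is subtler than a style change: the old code fell out of the loop with may still holding -ENOENT and then ran the MAY_WRITE bit test on that error code. In two's complement, -ENOENT happens to have the MAY_WRITE bit set, so the test fired on the "no rule" result; the OR was a no-op by luck, but the function was reasoning about an error value as if it were an access mask. The rewrite keeps mask logic strictly inside the matched-rule branch. A tiny demonstration (MAY_* values are illustrative, not Smack's actual ones):

    #include <stdio.h>

    #define MAY_WRITE 0x2
    #define MAY_LOCK  0x2000        /* illustrative value */

    int main(void)
    {
            int may = -2;           /* -ENOENT */

            /* The old fallthrough: a bit test on an error code passes. */
            if ((may & MAY_WRITE) == MAY_WRITE)
                    may |= MAY_LOCK;

            printf("may = %d\n", may);      /* still -2, but the test fired */
            return 0;
    }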
16058 +diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
16059 +index ec04e3386bc0e..8617793ed9557 100644
16060 +--- a/sound/soc/atmel/Kconfig
16061 ++++ b/sound/soc/atmel/Kconfig
16062 +@@ -11,7 +11,6 @@ if SND_ATMEL_SOC
16063 +
16064 + config SND_ATMEL_SOC_PDC
16065 + bool
16066 +- depends on HAS_DMA
16067 +
16068 + config SND_ATMEL_SOC_DMA
16069 + bool
16070 +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
16071 +index 22dbd9d93c1ef..4bddb969176fe 100644
16072 +--- a/sound/soc/intel/boards/bytcr_rt5640.c
16073 ++++ b/sound/soc/intel/boards/bytcr_rt5640.c
16074 +@@ -290,9 +290,6 @@ static const struct snd_soc_dapm_widget byt_rt5640_widgets[] = {
16075 + static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
16076 + {"Headphone", NULL, "Platform Clock"},
16077 + {"Headset Mic", NULL, "Platform Clock"},
16078 +- {"Internal Mic", NULL, "Platform Clock"},
16079 +- {"Speaker", NULL, "Platform Clock"},
16080 +-
16081 + {"Headset Mic", NULL, "MICBIAS1"},
16082 + {"IN2P", NULL, "Headset Mic"},
16083 + {"Headphone", NULL, "HPOL"},
16084 +@@ -300,19 +297,23 @@ static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
16085 + };
16086 +
16087 + static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic1_map[] = {
16088 ++ {"Internal Mic", NULL, "Platform Clock"},
16089 + {"DMIC1", NULL, "Internal Mic"},
16090 + };
16091 +
16092 + static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic2_map[] = {
16093 ++ {"Internal Mic", NULL, "Platform Clock"},
16094 + {"DMIC2", NULL, "Internal Mic"},
16095 + };
16096 +
16097 + static const struct snd_soc_dapm_route byt_rt5640_intmic_in1_map[] = {
16098 ++ {"Internal Mic", NULL, "Platform Clock"},
16099 + {"Internal Mic", NULL, "MICBIAS1"},
16100 + {"IN1P", NULL, "Internal Mic"},
16101 + };
16102 +
16103 + static const struct snd_soc_dapm_route byt_rt5640_intmic_in3_map[] = {
16104 ++ {"Internal Mic", NULL, "Platform Clock"},
16105 + {"Internal Mic", NULL, "MICBIAS1"},
16106 + {"IN3P", NULL, "Internal Mic"},
16107 + };
16108 +@@ -354,6 +355,7 @@ static const struct snd_soc_dapm_route byt_rt5640_ssp0_aif2_map[] = {
16109 + };
16110 +
16111 + static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = {
16112 ++ {"Speaker", NULL, "Platform Clock"},
16113 + {"Speaker", NULL, "SPOLP"},
16114 + {"Speaker", NULL, "SPOLN"},
16115 + {"Speaker", NULL, "SPORP"},
16116 +@@ -361,6 +363,7 @@ static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = {
16117 + };
16118 +
16119 + static const struct snd_soc_dapm_route byt_rt5640_mono_spk_map[] = {
16120 ++ {"Speaker", NULL, "Platform Clock"},
16121 + {"Speaker", NULL, "SPOLP"},
16122 + {"Speaker", NULL, "SPOLN"},
16123 + };
16124 +diff --git a/sound/soc/intel/boards/sof_pcm512x.c b/sound/soc/intel/boards/sof_pcm512x.c
16125 +index 8620d4f38493a..335c212c1961a 100644
16126 +--- a/sound/soc/intel/boards/sof_pcm512x.c
16127 ++++ b/sound/soc/intel/boards/sof_pcm512x.c
16128 +@@ -26,11 +26,16 @@
16129 +
16130 + #define SOF_PCM512X_SSP_CODEC(quirk) ((quirk) & GENMASK(3, 0))
16131 + #define SOF_PCM512X_SSP_CODEC_MASK (GENMASK(3, 0))
16132 ++#define SOF_PCM512X_ENABLE_SSP_CAPTURE BIT(4)
16133 ++#define SOF_PCM512X_ENABLE_DMIC BIT(5)
16134 +
16135 + #define IDISP_CODEC_MASK 0x4
16136 +
16137 + /* Default: SSP5 */
16138 +-static unsigned long sof_pcm512x_quirk = SOF_PCM512X_SSP_CODEC(5);
16139 ++static unsigned long sof_pcm512x_quirk =
16140 ++ SOF_PCM512X_SSP_CODEC(5) |
16141 ++ SOF_PCM512X_ENABLE_SSP_CAPTURE |
16142 ++ SOF_PCM512X_ENABLE_DMIC;
16143 +
16144 + static bool is_legacy_cpu;
16145 +
16146 +@@ -245,8 +250,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
16147 + links[id].dpcm_playback = 1;
16148 + /*
16149 + * capture only supported with specific versions of the Hifiberry DAC+
16150 +- * links[id].dpcm_capture = 1;
16151 + */
16152 ++ if (sof_pcm512x_quirk & SOF_PCM512X_ENABLE_SSP_CAPTURE)
16153 ++ links[id].dpcm_capture = 1;
16154 + links[id].no_pcm = 1;
16155 + links[id].cpus = &cpus[id];
16156 + links[id].num_cpus = 1;
16157 +@@ -381,6 +387,9 @@ static int sof_audio_probe(struct platform_device *pdev)
16158 +
16159 + ssp_codec = sof_pcm512x_quirk & SOF_PCM512X_SSP_CODEC_MASK;
16160 +
16161 ++ if (!(sof_pcm512x_quirk & SOF_PCM512X_ENABLE_DMIC))
16162 ++ dmic_be_num = 0;
16163 ++
16164 + /* compute number of dai links */
16165 + sof_audio_card_pcm512x.num_links = 1 + dmic_be_num + hdmi_num;
16166 +
16167 +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
16168 +index 476ef1897961d..79c6cf2c14bfb 100644
16169 +--- a/sound/soc/intel/skylake/skl-messages.c
16170 ++++ b/sound/soc/intel/skylake/skl-messages.c
16171 +@@ -802,9 +802,12 @@ static u16 skl_get_module_param_size(struct skl_dev *skl,
16172 +
16173 + case SKL_MODULE_TYPE_BASE_OUTFMT:
16174 + case SKL_MODULE_TYPE_MIC_SELECT:
16175 +- case SKL_MODULE_TYPE_KPB:
16176 + return sizeof(struct skl_base_outfmt_cfg);
16177 +
16178 ++ case SKL_MODULE_TYPE_MIXER:
16179 ++ case SKL_MODULE_TYPE_KPB:
16180 ++ return sizeof(struct skl_base_cfg);
16181 ++
16182 + default:
16183 + /*
16184 + * return only base cfg when no specific module type is
16185 +@@ -857,10 +860,14 @@ static int skl_set_module_format(struct skl_dev *skl,
16186 +
16187 + case SKL_MODULE_TYPE_BASE_OUTFMT:
16188 + case SKL_MODULE_TYPE_MIC_SELECT:
16189 +- case SKL_MODULE_TYPE_KPB:
16190 + skl_set_base_outfmt_format(skl, module_config, *param_data);
16191 + break;
16192 +
16193 ++ case SKL_MODULE_TYPE_MIXER:
16194 ++ case SKL_MODULE_TYPE_KPB:
16195 ++ skl_set_base_module_format(skl, module_config, *param_data);
16196 ++ break;
16197 ++
16198 + default:
16199 + skl_set_base_module_format(skl, module_config, *param_data);
16200 + break;
16201 +diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
16202 +index b1ca64d2f7ea6..031d5dc7e6601 100644
16203 +--- a/sound/soc/intel/skylake/skl-pcm.c
16204 ++++ b/sound/soc/intel/skylake/skl-pcm.c
16205 +@@ -1317,21 +1317,6 @@ static int skl_get_module_info(struct skl_dev *skl,
16206 + return -EIO;
16207 + }
16208 +
16209 +- list_for_each_entry(module, &skl->uuid_list, list) {
16210 +- if (guid_equal(uuid_mod, &module->uuid)) {
16211 +- mconfig->id.module_id = module->id;
16212 +- if (mconfig->module)
16213 +- mconfig->module->loadable = module->is_loadable;
16214 +- ret = 0;
16215 +- break;
16216 +- }
16217 +- }
16218 +-
16219 +- if (ret)
16220 +- return ret;
16221 +-
16222 +- uuid_mod = &module->uuid;
16223 +- ret = -EIO;
16224 + for (i = 0; i < skl->nr_modules; i++) {
16225 + skl_module = skl->modules[i];
16226 + uuid_tplg = &skl_module->uuid;
16227 +@@ -1341,10 +1326,18 @@ static int skl_get_module_info(struct skl_dev *skl,
16228 + break;
16229 + }
16230 + }
16231 ++
16232 + if (skl->nr_modules && ret)
16233 + return ret;
16234 +
16235 ++ ret = -EIO;
16236 + list_for_each_entry(module, &skl->uuid_list, list) {
16237 ++ if (guid_equal(uuid_mod, &module->uuid)) {
16238 ++ mconfig->id.module_id = module->id;
16239 ++ mconfig->module->loadable = module->is_loadable;
16240 ++ ret = 0;
16241 ++ }
16242 ++
16243 + for (i = 0; i < MAX_IN_QUEUE; i++) {
16244 + pin_id = &mconfig->m_in_pin[i].id;
16245 + if (guid_equal(&pin_id->mod_uuid, &module->uuid))
16246 +@@ -1358,7 +1351,7 @@ static int skl_get_module_info(struct skl_dev *skl,
16247 + }
16248 + }
16249 +
16250 +- return 0;
16251 ++ return ret;
16252 + }
16253 +
16254 + static int skl_populate_modules(struct skl_dev *skl)
16255 +diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
16256 +index 0740764e7f71f..ac9980ed266e2 100644
16257 +--- a/sound/soc/rockchip/rockchip_i2s.c
16258 ++++ b/sound/soc/rockchip/rockchip_i2s.c
16259 +@@ -186,7 +186,9 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
16260 + {
16261 + struct rk_i2s_dev *i2s = to_info(cpu_dai);
16262 + unsigned int mask = 0, val = 0;
16263 ++ int ret = 0;
16264 +
16265 ++ pm_runtime_get_sync(cpu_dai->dev);
16266 + mask = I2S_CKR_MSS_MASK;
16267 + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
16268 + case SND_SOC_DAIFMT_CBS_CFS:
16269 +@@ -199,7 +201,8 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
16270 + i2s->is_master_mode = false;
16271 + break;
16272 + default:
16273 +- return -EINVAL;
16274 ++ ret = -EINVAL;
16275 ++ goto err_pm_put;
16276 + }
16277 +
16278 + regmap_update_bits(i2s->regmap, I2S_CKR, mask, val);
16279 +@@ -213,7 +216,8 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
16280 + val = I2S_CKR_CKP_POS;
16281 + break;
16282 + default:
16283 +- return -EINVAL;
16284 ++ ret = -EINVAL;
16285 ++ goto err_pm_put;
16286 + }
16287 +
16288 + regmap_update_bits(i2s->regmap, I2S_CKR, mask, val);
16289 +@@ -229,14 +233,15 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
16290 + case SND_SOC_DAIFMT_I2S:
16291 + val = I2S_TXCR_IBM_NORMAL;
16292 + break;
16293 +- case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
16294 +- val = I2S_TXCR_TFS_PCM;
16295 +- break;
16296 +- case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
16297 ++ case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 bit mode */
16298 + val = I2S_TXCR_TFS_PCM | I2S_TXCR_PBM_MODE(1);
16299 + break;
16300 ++ case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
16301 ++ val = I2S_TXCR_TFS_PCM;
16302 ++ break;
16303 + default:
16304 +- return -EINVAL;
16305 ++ ret = -EINVAL;
16306 ++ goto err_pm_put;
16307 + }
16308 +
16309 + regmap_update_bits(i2s->regmap, I2S_TXCR, mask, val);
16310 +@@ -252,19 +257,23 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
16311 + case SND_SOC_DAIFMT_I2S:
16312 + val = I2S_RXCR_IBM_NORMAL;
16313 + break;
16314 +- case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
16315 +- val = I2S_RXCR_TFS_PCM;
16316 +- break;
16317 +- case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
16318 ++ case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 bit mode */
16319 + val = I2S_RXCR_TFS_PCM | I2S_RXCR_PBM_MODE(1);
16320 + break;
16321 ++ case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
16322 ++ val = I2S_RXCR_TFS_PCM;
16323 ++ break;
16324 + default:
16325 +- return -EINVAL;
16326 ++ ret = -EINVAL;
16327 ++ goto err_pm_put;
16328 + }
16329 +
16330 + regmap_update_bits(i2s->regmap, I2S_RXCR, mask, val);
16331 +
16332 +- return 0;
16333 ++err_pm_put:
16334 ++ pm_runtime_put(cpu_dai->dev);
16335 ++
16336 ++ return ret;
16337 + }
16338 +
16339 + static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
16340 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
16341 +index d1c570ca21ea7..b944f56a469a6 100644
16342 +--- a/sound/soc/soc-pcm.c
16343 ++++ b/sound/soc/soc-pcm.c
16344 +@@ -2001,6 +2001,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
16345 + struct snd_soc_pcm_runtime *be;
16346 + struct snd_soc_dpcm *dpcm;
16347 + int ret = 0;
16348 ++ unsigned long flags;
16349 ++ enum snd_soc_dpcm_state state;
16350 +
16351 + for_each_dpcm_be(fe, stream, dpcm) {
16352 + struct snd_pcm_substream *be_substream;
16353 +@@ -2017,76 +2019,141 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
16354 +
16355 + switch (cmd) {
16356 + case SNDRV_PCM_TRIGGER_START:
16357 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16358 + if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
16359 + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
16360 +- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
16361 ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) {
16362 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16363 + continue;
16364 ++ }
16365 ++ state = be->dpcm[stream].state;
16366 ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
16367 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16368 +
16369 + ret = soc_pcm_trigger(be_substream, cmd);
16370 +- if (ret)
16371 ++ if (ret) {
16372 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16373 ++ be->dpcm[stream].state = state;
16374 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16375 + goto end;
16376 ++ }
16377 +
16378 +- be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
16379 + break;
16380 + case SNDRV_PCM_TRIGGER_RESUME:
16381 +- if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
16382 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16383 ++ if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) {
16384 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16385 + continue;
16386 ++ }
16387 ++
16388 ++ state = be->dpcm[stream].state;
16389 ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
16390 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16391 +
16392 + ret = soc_pcm_trigger(be_substream, cmd);
16393 +- if (ret)
16394 ++ if (ret) {
16395 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16396 ++ be->dpcm[stream].state = state;
16397 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16398 + goto end;
16399 ++ }
16400 +
16401 +- be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
16402 + break;
16403 + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
16404 +- if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
16405 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16406 ++ if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) {
16407 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16408 + continue;
16409 ++ }
16410 ++
16411 ++ state = be->dpcm[stream].state;
16412 ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
16413 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16414 +
16415 + ret = soc_pcm_trigger(be_substream, cmd);
16416 +- if (ret)
16417 ++ if (ret) {
16418 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16419 ++ be->dpcm[stream].state = state;
16420 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16421 + goto end;
16422 ++ }
16423 +
16424 +- be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
16425 + break;
16426 + case SNDRV_PCM_TRIGGER_STOP:
16427 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16428 + if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) &&
16429 +- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
16430 ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) {
16431 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16432 + continue;
16433 ++ }
16434 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16435 +
16436 + if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
16437 + continue;
16438 +
16439 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16440 ++ state = be->dpcm[stream].state;
16441 ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
16442 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16443 ++
16444 + ret = soc_pcm_trigger(be_substream, cmd);
16445 +- if (ret)
16446 ++ if (ret) {
16447 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16448 ++ be->dpcm[stream].state = state;
16449 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16450 + goto end;
16451 ++ }
16452 +
16453 +- be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
16454 + break;
16455 + case SNDRV_PCM_TRIGGER_SUSPEND:
16456 +- if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
16457 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16458 ++ if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) {
16459 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16460 + continue;
16461 ++ }
16462 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16463 +
16464 + if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
16465 + continue;
16466 +
16467 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16468 ++ state = be->dpcm[stream].state;
16469 ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
16470 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16471 ++
16472 + ret = soc_pcm_trigger(be_substream, cmd);
16473 +- if (ret)
16474 ++ if (ret) {
16475 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16476 ++ be->dpcm[stream].state = state;
16477 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16478 + goto end;
16479 ++ }
16480 +
16481 +- be->dpcm[stream].state = SND_SOC_DPCM_STATE_SUSPEND;
16482 + break;
16483 + case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
16484 +- if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
16485 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16486 ++ if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) {
16487 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16488 + continue;
16489 ++ }
16490 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16491 +
16492 + if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
16493 + continue;
16494 +
16495 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16496 ++ state = be->dpcm[stream].state;
16497 ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
16498 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16499 ++
16500 + ret = soc_pcm_trigger(be_substream, cmd);
16501 +- if (ret)
16502 ++ if (ret) {
16503 ++ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
16504 ++ be->dpcm[stream].state = state;
16505 ++ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
16506 + goto end;
16507 ++ }
16508 +
16509 +- be->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
16510 + break;
16511 + }
16512 + }
16513 +diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
16514 +index b94220306d1a8..41d7cb1321981 100644
16515 +--- a/sound/soc/ti/davinci-mcasp.c
16516 ++++ b/sound/soc/ti/davinci-mcasp.c
16517 +@@ -83,6 +83,8 @@ struct davinci_mcasp {
16518 + struct snd_pcm_substream *substreams[2];
16519 + unsigned int dai_fmt;
16520 +
16521 ++ u32 iec958_status;
16522 ++
16523 + /* Audio can not be enabled due to missing parameter(s) */
16524 + bool missing_audio_param;
16525 +
16526 +@@ -757,6 +759,9 @@ static int davinci_mcasp_set_tdm_slot(struct snd_soc_dai *dai,
16527 + {
16528 + struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
16529 +
16530 ++ if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
16531 ++ return 0;
16532 ++
16533 + dev_dbg(mcasp->dev,
16534 + "%s() tx_mask 0x%08x rx_mask 0x%08x slots %d width %d\n",
16535 + __func__, tx_mask, rx_mask, slots, slot_width);
16536 +@@ -827,6 +832,20 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp,
16537 + mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, RXROT(rx_rotate),
16538 + RXROT(7));
16539 + mcasp_set_reg(mcasp, DAVINCI_MCASP_RXMASK_REG, mask);
16540 ++ } else {
16541 ++ /*
16542 ++ * According to the TRM it should be TXROT=0; this one works:
16543 ++ * 16 bit to 23-8 (TXROT=6, rotate 24 bits)
16544 ++ * 24 bit to 23-0 (TXROT=0, rotate 0 bits)
16545 ++ *
16546 ++ * TXROT = 0 only works with 24bit samples
16547 ++ */
16548 ++ tx_rotate = (sample_width / 4 + 2) & 0x7;
16549 ++
16550 ++ mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXROT(tx_rotate),
16551 ++ TXROT(7));
16552 ++ mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSSZ(15),
16553 ++ TXSSZ(0x0F));
16554 + }
16555 +
16556 + mcasp_set_reg(mcasp, DAVINCI_MCASP_TXMASK_REG, mask);
16557 +@@ -842,10 +861,16 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
16558 + u8 tx_ser = 0;
16559 + u8 rx_ser = 0;
16560 + u8 slots = mcasp->tdm_slots;
16561 +- u8 max_active_serializers = (channels + slots - 1) / slots;
16562 +- u8 max_rx_serializers, max_tx_serializers;
16563 ++ u8 max_active_serializers, max_rx_serializers, max_tx_serializers;
16564 + int active_serializers, numevt;
16565 + u32 reg;
16566 ++
16567 ++ /* In DIT mode we only allow a maximum of one serializer for now */
16568 ++ if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
16569 ++ max_active_serializers = 1;
16570 ++ else
16571 ++ max_active_serializers = (channels + slots - 1) / slots;
16572 ++
16573 + /* Default configuration */
16574 + if (mcasp->version < MCASP_VERSION_3)
16575 + mcasp_set_bits(mcasp, DAVINCI_MCASP_PWREMUMGT_REG, MCASP_SOFT);
16576 +@@ -1031,16 +1056,18 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream,
16577 + static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp,
16578 + unsigned int rate)
16579 + {
16580 +- u32 cs_value = 0;
16581 +- u8 *cs_bytes = (u8*) &cs_value;
16582 ++ u8 *cs_bytes = (u8 *)&mcasp->iec958_status;
16583 +
16584 +- /* Set the TX format : 24 bit right rotation, 32 bit slot, Pad 0
16585 +- and LSB first */
16586 +- mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXROT(6) | TXSSZ(15));
16587 ++ if (!mcasp->dat_port)
16588 ++ mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSEL);
16589 ++ else
16590 ++ mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSEL);
16591 +
16592 + /* Set TX frame synch : DIT Mode, 1 bit width, internal, rising edge */
16593 + mcasp_set_reg(mcasp, DAVINCI_MCASP_TXFMCTL_REG, AFSXE | FSXMOD(0x180));
16594 +
16595 ++ mcasp_set_reg(mcasp, DAVINCI_MCASP_TXMASK_REG, 0xFFFF);
16596 ++
16597 + /* Set the TX tdm : for all the slots */
16598 + mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, 0xFFFFFFFF);
16599 +
16600 +@@ -1049,16 +1076,8 @@ static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp,
16601 +
16602 + mcasp_clr_bits(mcasp, DAVINCI_MCASP_XEVTCTL_REG, TXDATADMADIS);
16603 +
16604 +- /* Only 44100 and 48000 are valid, both have the same setting */
16605 +- mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXDIV(3));
16606 +-
16607 +- /* Enable the DIT */
16608 +- mcasp_set_bits(mcasp, DAVINCI_MCASP_TXDITCTL_REG, DITEN);
16609 +-
16610 + /* Set S/PDIF channel status bits */
16611 +- cs_bytes[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
16612 +- cs_bytes[1] = IEC958_AES1_CON_PCM_CODER;
16613 +-
16614 ++ cs_bytes[3] &= ~IEC958_AES3_CON_FS;
16615 + switch (rate) {
16616 + case 22050:
16617 + cs_bytes[3] |= IEC958_AES3_CON_FS_22050;
16618 +@@ -1088,12 +1107,15 @@ static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp,
16619 + cs_bytes[3] |= IEC958_AES3_CON_FS_192000;
16620 + break;
16621 + default:
16622 +- printk(KERN_WARNING "unsupported sampling rate: %d\n", rate);
16623 ++ dev_err(mcasp->dev, "unsupported sampling rate: %d\n", rate);
16624 + return -EINVAL;
16625 + }
16626 +
16627 +- mcasp_set_reg(mcasp, DAVINCI_MCASP_DITCSRA_REG, cs_value);
16628 +- mcasp_set_reg(mcasp, DAVINCI_MCASP_DITCSRB_REG, cs_value);
16629 ++ mcasp_set_reg(mcasp, DAVINCI_MCASP_DITCSRA_REG, mcasp->iec958_status);
16630 ++ mcasp_set_reg(mcasp, DAVINCI_MCASP_DITCSRB_REG, mcasp->iec958_status);
16631 ++
16632 ++ /* Enable the DIT */
16633 ++ mcasp_set_bits(mcasp, DAVINCI_MCASP_TXDITCTL_REG, DITEN);
16634 +
16635 + return 0;
16636 + }
16637 +@@ -1237,12 +1259,18 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
16638 + int slots = mcasp->tdm_slots;
16639 + int rate = params_rate(params);
16640 + int sbits = params_width(params);
16641 ++ unsigned int bclk_target;
16642 +
16643 + if (mcasp->slot_width)
16644 + sbits = mcasp->slot_width;
16645 +
16646 ++ if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE)
16647 ++ bclk_target = rate * sbits * slots;
16648 ++ else
16649 ++ bclk_target = rate * 128;
16650 ++
16651 + davinci_mcasp_calc_clk_div(mcasp, mcasp->sysclk_freq,
16652 +- rate * sbits * slots, true);
16653 ++ bclk_target, true);
16654 + }
16655 +
16656 + ret = mcasp_common_hw_param(mcasp, substream->stream,
16657 +@@ -1598,6 +1626,77 @@ static const struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
16658 + .set_tdm_slot = davinci_mcasp_set_tdm_slot,
16659 + };
16660 +
16661 ++static int davinci_mcasp_iec958_info(struct snd_kcontrol *kcontrol,
16662 ++ struct snd_ctl_elem_info *uinfo)
16663 ++{
16664 ++ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
16665 ++ uinfo->count = 1;
16666 ++
16667 ++ return 0;
16668 ++}
16669 ++
16670 ++static int davinci_mcasp_iec958_get(struct snd_kcontrol *kcontrol,
16671 ++ struct snd_ctl_elem_value *uctl)
16672 ++{
16673 ++ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
16674 ++ struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
16675 ++
16676 ++ memcpy(uctl->value.iec958.status, &mcasp->iec958_status,
16677 ++ sizeof(mcasp->iec958_status));
16678 ++
16679 ++ return 0;
16680 ++}
16681 ++
16682 ++static int davinci_mcasp_iec958_put(struct snd_kcontrol *kcontrol,
16683 ++ struct snd_ctl_elem_value *uctl)
16684 ++{
16685 ++ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
16686 ++ struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
16687 ++
16688 ++ memcpy(&mcasp->iec958_status, uctl->value.iec958.status,
16689 ++ sizeof(mcasp->iec958_status));
16690 ++
16691 ++ return 0;
16692 ++}
16693 ++
16694 ++static int davinci_mcasp_iec958_con_mask_get(struct snd_kcontrol *kcontrol,
16695 ++ struct snd_ctl_elem_value *ucontrol)
16696 ++{
16697 ++ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
16698 ++ struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
16699 ++
16700 ++ memset(ucontrol->value.iec958.status, 0xff, sizeof(mcasp->iec958_status));
16701 ++ return 0;
16702 ++}
16703 ++
16704 ++static const struct snd_kcontrol_new davinci_mcasp_iec958_ctls[] = {
16705 ++ {
16706 ++ .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
16707 ++ SNDRV_CTL_ELEM_ACCESS_VOLATILE),
16708 ++ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
16709 ++ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
16710 ++ .info = davinci_mcasp_iec958_info,
16711 ++ .get = davinci_mcasp_iec958_get,
16712 ++ .put = davinci_mcasp_iec958_put,
16713 ++ }, {
16714 ++ .access = SNDRV_CTL_ELEM_ACCESS_READ,
16715 ++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
16716 ++ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK),
16717 ++ .info = davinci_mcasp_iec958_info,
16718 ++ .get = davinci_mcasp_iec958_con_mask_get,
16719 ++ },
16720 ++};
16721 ++
16722 ++static void davinci_mcasp_init_iec958_status(struct davinci_mcasp *mcasp)
16723 ++{
16724 ++ unsigned char *cs = (u8 *)&mcasp->iec958_status;
16725 ++
16726 ++ cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE;
16727 ++ cs[1] = IEC958_AES1_CON_PCM_CODER;
16728 ++ cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC | IEC958_AES2_CON_CHANNEL_UNSPEC;
16729 ++ cs[3] = IEC958_AES3_CON_CLOCK_1000PPM;
16730 ++}
16731 ++
16732 + static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai)
16733 + {
16734 + struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
16735 +@@ -1605,6 +1704,12 @@ static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai)
16736 + dai->playback_dma_data = &mcasp->dma_data[SNDRV_PCM_STREAM_PLAYBACK];
16737 + dai->capture_dma_data = &mcasp->dma_data[SNDRV_PCM_STREAM_CAPTURE];
16738 +
16739 ++ if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE) {
16740 ++ davinci_mcasp_init_iec958_status(mcasp);
16741 ++ snd_soc_add_dai_controls(dai, davinci_mcasp_iec958_ctls,
16742 ++ ARRAY_SIZE(davinci_mcasp_iec958_ctls));
16743 ++ }
16744 ++
16745 + return 0;
16746 + }
16747 +
16748 +@@ -1651,7 +1756,8 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
16749 + .channels_min = 1,
16750 + .channels_max = 384,
16751 + .rates = DAVINCI_MCASP_RATES,
16752 +- .formats = DAVINCI_MCASP_PCM_FMTS,
16753 ++ .formats = SNDRV_PCM_FMTBIT_S16_LE |
16754 ++ SNDRV_PCM_FMTBIT_S24_LE,
16755 + },
16756 + .ops = &davinci_mcasp_dai_ops,
16757 + },
16758 +@@ -1871,6 +1977,8 @@ out:
16759 + } else {
16760 + mcasp->tdm_slots = pdata->tdm_slots;
16761 + }
16762 ++ } else {
16763 ++ mcasp->tdm_slots = 32;
16764 + }
16765 +
16766 + mcasp->num_serializer = pdata->num_serializer;
16767 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
16768 +index f6ebda75b0306..533512d933c6c 100644
16769 +--- a/tools/lib/bpf/libbpf.c
16770 ++++ b/tools/lib/bpf/libbpf.c
16771 +@@ -3844,6 +3844,42 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
16772 + return 0;
16773 + }
16774 +
16775 ++static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
16776 ++{
16777 ++ char file[PATH_MAX], buff[4096];
16778 ++ FILE *fp;
16779 ++ __u32 val;
16780 ++ int err;
16781 ++
16782 ++ snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
16783 ++ memset(info, 0, sizeof(*info));
16784 ++
16785 ++ fp = fopen(file, "r");
16786 ++ if (!fp) {
16787 ++ err = -errno;
16788 ++ pr_warn("failed to open %s: %d. No procfs support?\n", file,
16789 ++ err);
16790 ++ return err;
16791 ++ }
16792 ++
16793 ++ while (fgets(buff, sizeof(buff), fp)) {
16794 ++ if (sscanf(buff, "map_type:\t%u", &val) == 1)
16795 ++ info->type = val;
16796 ++ else if (sscanf(buff, "key_size:\t%u", &val) == 1)
16797 ++ info->key_size = val;
16798 ++ else if (sscanf(buff, "value_size:\t%u", &val) == 1)
16799 ++ info->value_size = val;
16800 ++ else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
16801 ++ info->max_entries = val;
16802 ++ else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
16803 ++ info->map_flags = val;
16804 ++ }
16805 ++
16806 ++ fclose(fp);
16807 ++
16808 ++ return 0;
16809 ++}
16810 ++
16811 + int bpf_map__reuse_fd(struct bpf_map *map, int fd)
16812 + {
16813 + struct bpf_map_info info = {};
16814 +@@ -3852,6 +3888,8 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
16815 + char *new_name;
16816 +
16817 + err = bpf_obj_get_info_by_fd(fd, &info, &len);
16818 ++ if (err && errno == EINVAL)
16819 ++ err = bpf_get_map_info_from_fdinfo(fd, &info);
16820 + if (err)
16821 + return err;
16822 +
16823 +@@ -4318,12 +4356,16 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
16824 + struct bpf_map_info map_info = {};
16825 + char msg[STRERR_BUFSIZE];
16826 + __u32 map_info_len;
16827 ++ int err;
16828 +
16829 + map_info_len = sizeof(map_info);
16830 +
16831 +- if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
16832 +- pr_warn("failed to get map info for map FD %d: %s\n",
16833 +- map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
16834 ++ err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
16835 ++ if (err && errno == EINVAL)
16836 ++ err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
16837 ++ if (err) {
16838 ++ pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
16839 ++ libbpf_strerror_r(errno, msg, sizeof(msg)));
16840 + return false;
16841 + }
16842 +
16843 +@@ -4528,10 +4570,13 @@ bpf_object__create_maps(struct bpf_object *obj)
16844 + char *cp, errmsg[STRERR_BUFSIZE];
16845 + unsigned int i, j;
16846 + int err;
16847 ++ bool retried;
16848 +
16849 + for (i = 0; i < obj->nr_maps; i++) {
16850 + map = &obj->maps[i];
16851 +
16852 ++ retried = false;
16853 ++retry:
16854 + if (map->pin_path) {
16855 + err = bpf_object__reuse_map(map);
16856 + if (err) {
16857 +@@ -4539,6 +4584,12 @@ bpf_object__create_maps(struct bpf_object *obj)
16858 + map->name);
16859 + goto err_out;
16860 + }
16861 ++ if (retried && map->fd < 0) {
16862 ++ pr_warn("map '%s': cannot find pinned map\n",
16863 ++ map->name);
16864 ++ err = -ENOENT;
16865 ++ goto err_out;
16866 ++ }
16867 + }
16868 +
16869 + if (map->fd >= 0) {
16870 +@@ -4572,9 +4623,13 @@ bpf_object__create_maps(struct bpf_object *obj)
16871 + if (map->pin_path && !map->pinned) {
16872 + err = bpf_map__pin(map, NULL);
16873 + if (err) {
16874 ++ zclose(map->fd);
16875 ++ if (!retried && err == -EEXIST) {
16876 ++ retried = true;
16877 ++ goto retry;
16878 ++ }
16879 + pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
16880 + map->name, map->pin_path, err);
16881 +- zclose(map->fd);
16882 + goto err_out;
16883 + }
16884 + }
16885 +diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
16886 +index f50ac31920d13..0328a1e08f659 100644
16887 +--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
16888 ++++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
16889 +@@ -298,7 +298,7 @@ int mte_default_setup(void)
16890 + int ret;
16891 +
16892 + if (!(hwcaps2 & HWCAP2_MTE)) {
16893 +- ksft_print_msg("FAIL: MTE features unavailable\n");
16894 ++ ksft_print_msg("SKIP: MTE features unavailable\n");
16895 + return KSFT_SKIP;
16896 + }
16897 + /* Get current mte mode */
16898 +diff --git a/tools/testing/selftests/arm64/pauth/pac.c b/tools/testing/selftests/arm64/pauth/pac.c
16899 +index 592fe538506e3..b743daa772f55 100644
16900 +--- a/tools/testing/selftests/arm64/pauth/pac.c
16901 ++++ b/tools/testing/selftests/arm64/pauth/pac.c
16902 +@@ -25,13 +25,15 @@
16903 + do { \
16904 + unsigned long hwcaps = getauxval(AT_HWCAP); \
16905 + /* data key instructions are not in NOP space. This prevents a SIGILL */ \
16906 +- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled"); \
16907 ++ if (!(hwcaps & HWCAP_PACA)) \
16908 ++ SKIP(return, "PAUTH not enabled"); \
16909 + } while (0)
16910 + #define ASSERT_GENERIC_PAUTH_ENABLED() \
16911 + do { \
16912 + unsigned long hwcaps = getauxval(AT_HWCAP); \
16913 + /* generic key instructions are not in NOP space. This prevents a SIGILL */ \
16914 +- ASSERT_NE(0, hwcaps & HWCAP_PACG) TH_LOG("Generic PAUTH not enabled"); \
16915 ++ if (!(hwcaps & HWCAP_PACG)) \
16916 ++ SKIP(return, "Generic PAUTH not enabled"); \
16917 + } while (0)
16918 +
16919 + void sign_specific(struct signatures *sign, size_t val)
16920 +@@ -256,7 +258,7 @@ TEST(single_thread_different_keys)
16921 + unsigned long hwcaps = getauxval(AT_HWCAP);
16922 +
16923 + /* generic and data key instructions are not in NOP space. This prevents a SIGILL */
16924 +- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
16925 ++ ASSERT_PAUTH_ENABLED();
16926 + if (!(hwcaps & HWCAP_PACG)) {
16927 + TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
16928 + nkeys = NKEYS - 1;
16929 +@@ -299,7 +301,7 @@ TEST(exec_changed_keys)
16930 + unsigned long hwcaps = getauxval(AT_HWCAP);
16931 +
16932 + /* generic and data key instructions are not in NOP space. This prevents a SIGILL */
16933 +- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
16934 ++ ASSERT_PAUTH_ENABLED();
16935 + if (!(hwcaps & HWCAP_PACG)) {
16936 + TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
16937 + nkeys = NKEYS - 1;
16938 +diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
16939 +index 7043e6ded0e60..75b72c751772b 100644
16940 +--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
16941 ++++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
16942 +@@ -1,5 +1,7 @@
16943 + // SPDX-License-Identifier: GPL-2.0
16944 + #include <test_progs.h>
16945 ++#include <sys/time.h>
16946 ++#include <sys/resource.h>
16947 + #include "test_send_signal_kern.skel.h"
16948 +
16949 + static volatile int sigusr1_received = 0;
16950 +@@ -41,12 +43,23 @@ static void test_send_signal_common(struct perf_event_attr *attr,
16951 + }
16952 +
16953 + if (pid == 0) {
16954 ++ int old_prio;
16955 ++
16956 + /* install signal handler and notify parent */
16957 + signal(SIGUSR1, sigusr1_handler);
16958 +
16959 + close(pipe_c2p[0]); /* close read */
16960 + close(pipe_p2c[1]); /* close write */
16961 +
16962 ++ /* boost with a high priority so we get a higher chance
16963 ++ * that if an interrupt happens, the underlying task
16964 ++ * is this process.
16965 ++ */
16966 ++ errno = 0;
16967 ++ old_prio = getpriority(PRIO_PROCESS, 0);
16968 ++ ASSERT_OK(errno, "getpriority");
16969 ++ ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
16970 ++
16971 + /* notify parent signal handler is installed */
16972 + CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
16973 +
16974 +@@ -62,6 +75,9 @@ static void test_send_signal_common(struct perf_event_attr *attr,
16975 + /* wait for parent notification and exit */
16976 + CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
16977 +
16978 ++ /* restore the old priority */
16979 ++ ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
16980 ++
16981 + close(pipe_c2p[1]);
16982 + close(pipe_p2c[0]);
16983 + exit(0);
16984 +diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
16985 +index ec281b0363b82..86f97681ad898 100644
16986 +--- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
16987 ++++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
16988 +@@ -195,8 +195,10 @@ static void run_test(int cgroup_fd)
16989 +
16990 + pthread_mutex_lock(&server_started_mtx);
16991 + if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
16992 +- (void *)&server_fd)))
16993 ++ (void *)&server_fd))) {
16994 ++ pthread_mutex_unlock(&server_started_mtx);
16995 + goto close_server_fd;
16996 ++ }
16997 + pthread_cond_wait(&server_started, &server_started_mtx);
16998 + pthread_mutex_unlock(&server_started_mtx);
16999 +
17000 +diff --git a/tools/testing/selftests/bpf/progs/xdp_tx.c b/tools/testing/selftests/bpf/progs/xdp_tx.c
17001 +index 94e6c2b281cb6..5f725c720e008 100644
17002 +--- a/tools/testing/selftests/bpf/progs/xdp_tx.c
17003 ++++ b/tools/testing/selftests/bpf/progs/xdp_tx.c
17004 +@@ -3,7 +3,7 @@
17005 + #include <linux/bpf.h>
17006 + #include <bpf/bpf_helpers.h>
17007 +
17008 +-SEC("tx")
17009 ++SEC("xdp")
17010 + int xdp_tx(struct xdp_md *xdp)
17011 + {
17012 + return XDP_TX;
17013 +diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
17014 +index aa38dc4a5e85f..90f38c6528a1a 100644
17015 +--- a/tools/testing/selftests/bpf/test_maps.c
17016 ++++ b/tools/testing/selftests/bpf/test_maps.c
17017 +@@ -968,7 +968,7 @@ static void test_sockmap(unsigned int tasks, void *data)
17018 +
17019 + FD_ZERO(&w);
17020 + FD_SET(sfd[3], &w);
17021 +- to.tv_sec = 1;
17022 ++ to.tv_sec = 30;
17023 + to.tv_usec = 0;
17024 + s = select(sfd[3] + 1, &w, NULL, NULL, &to);
17025 + if (s == -1) {
17026 +diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
17027 +index 6396932b97e29..9ed13187136c1 100644
17028 +--- a/tools/testing/selftests/bpf/test_progs.c
17029 ++++ b/tools/testing/selftests/bpf/test_progs.c
17030 +@@ -148,18 +148,18 @@ void test__end_subtest()
17031 + struct prog_test_def *test = env.test;
17032 + int sub_error_cnt = test->error_cnt - test->old_error_cnt;
17033 +
17034 +- if (sub_error_cnt)
17035 +- env.fail_cnt++;
17036 +- else if (test->skip_cnt == 0)
17037 +- env.sub_succ_cnt++;
17038 +- skip_account();
17039 +-
17040 + dump_test_log(test, sub_error_cnt);
17041 +
17042 + fprintf(env.stdout, "#%d/%d %s:%s\n",
17043 + test->test_num, test->subtest_num, test->subtest_name,
17044 + sub_error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
17045 +
17046 ++ if (sub_error_cnt)
17047 ++ env.fail_cnt++;
17048 ++ else if (test->skip_cnt == 0)
17049 ++ env.sub_succ_cnt++;
17050 ++ skip_account();
17051 ++
17052 + free(test->subtest_name);
17053 + test->subtest_name = NULL;
17054 + }
17055 +@@ -783,17 +783,18 @@ int main(int argc, char **argv)
17056 + test__end_subtest();
17057 +
17058 + test->tested = true;
17059 +- if (test->error_cnt)
17060 +- env.fail_cnt++;
17061 +- else
17062 +- env.succ_cnt++;
17063 +- skip_account();
17064 +
17065 + dump_test_log(test, test->error_cnt);
17066 +
17067 + fprintf(env.stdout, "#%d %s:%s\n",
17068 + test->test_num, test->test_name,
17069 +- test->error_cnt ? "FAIL" : "OK");
17070 ++ test->error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
17071 ++
17072 ++ if (test->error_cnt)
17073 ++ env.fail_cnt++;
17074 ++ else
17075 ++ env.succ_cnt++;
17076 ++ skip_account();
17077 +
17078 + reset_affinity();
17079 + restore_netns();
17080 +diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh
17081 +index ba8ffcdaac302..995278e684b6e 100755
17082 +--- a/tools/testing/selftests/bpf/test_xdp_veth.sh
17083 ++++ b/tools/testing/selftests/bpf/test_xdp_veth.sh
17084 +@@ -108,7 +108,7 @@ ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
17085 + ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
17086 +
17087 + ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy
17088 +-ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec tx
17089 ++ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp
17090 + ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy
17091 +
17092 + trap cleanup EXIT
17093 +diff --git a/tools/testing/selftests/firmware/fw_namespace.c b/tools/testing/selftests/firmware/fw_namespace.c
17094 +index 0e393cb5f42de..4c6f0cd83c5b0 100644
17095 +--- a/tools/testing/selftests/firmware/fw_namespace.c
17096 ++++ b/tools/testing/selftests/firmware/fw_namespace.c
17097 +@@ -129,7 +129,8 @@ int main(int argc, char **argv)
17098 + die("mounting tmpfs to /lib/firmware failed\n");
17099 +
17100 + sys_path = argv[1];
17101 +- asprintf(&fw_path, "/lib/firmware/%s", fw_name);
17102 ++ if (asprintf(&fw_path, "/lib/firmware/%s", fw_name) < 0)
17103 ++ die("error: failed to build full fw_path\n");
17104 +
17105 + setup_fw(fw_path);
17106 +
17107 +diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
17108 +index a6fac927ee82f..0cee6b067a374 100644
17109 +--- a/tools/testing/selftests/ftrace/test.d/functions
17110 ++++ b/tools/testing/selftests/ftrace/test.d/functions
17111 +@@ -115,7 +115,7 @@ check_requires() { # Check required files and tracers
17112 + echo "Required tracer $t is not configured."
17113 + exit_unsupported
17114 + fi
17115 +- elif [ $r != $i ]; then
17116 ++ elif [ "$r" != "$i" ]; then
17117 + if ! grep -Fq "$r" README ; then
17118 + echo "Required feature pattern \"$r\" is not in README."
17119 + exit_unsupported
17120 +diff --git a/tools/testing/selftests/nci/nci_dev.c b/tools/testing/selftests/nci/nci_dev.c
17121 +index 57b505cb15618..acd4125ff39fe 100644
17122 +--- a/tools/testing/selftests/nci/nci_dev.c
17123 ++++ b/tools/testing/selftests/nci/nci_dev.c
17124 +@@ -110,11 +110,11 @@ static int send_cmd_mt_nla(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
17125 + na->nla_type = nla_type[cnt];
17126 + na->nla_len = nla_len[cnt] + NLA_HDRLEN;
17127 +
17128 +- if (nla_len > 0)
17129 ++ if (nla_len[cnt] > 0)
17130 + memcpy(NLA_DATA(na), nla_data[cnt], nla_len[cnt]);
17131 +
17132 +- msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
17133 +- prv_len = na->nla_len;
17134 ++ prv_len = NLA_ALIGN(nla_len[cnt]) + NLA_HDRLEN;
17135 ++ msg.n.nlmsg_len += prv_len;
17136 + }
17137 +
17138 + buf = (char *)&msg;
17139 +diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
17140 +index 9db867df76794..610334f86f631 100644
17141 +--- a/tools/thermal/tmon/Makefile
17142 ++++ b/tools/thermal/tmon/Makefile
17143 +@@ -10,7 +10,7 @@ override CFLAGS+= $(call cc-option,-O3,-O1) ${WARNFLAGS}
17144 + # Add "-fstack-protector" only if toolchain supports it.
17145 + override CFLAGS+= $(call cc-option,-fstack-protector-strong)
17146 + CC?= $(CROSS_COMPILE)gcc
17147 +-PKG_CONFIG?= pkg-config
17148 ++PKG_CONFIG?= $(CROSS_COMPILE)pkg-config
17149 +
17150 + override CFLAGS+=-D VERSION=\"$(VERSION)\"
17151 + LDFLAGS+=